From 09c82c9c12cbc66ce57360b1cb37ac7dae1d23da Mon Sep 17 00:00:00 2001 From: Rusty Lynch Date: Thu, 5 Jul 2012 16:06:44 -0700 Subject: [PATCH] Updating to tarball snapshot of version 2.37.92 --- COPYING | 20 +- HACKING | 66 - Makefile.am | 5 +- Makefile.glib | 302 + Makefile.in | 868 + NEWS | 240 + aclocal.m4 | 1408 ++ autogen.sh | 22 - config.guess | 1522 ++ config.h.in | 91 + config.sub | 1766 +++ configure | 15651 +++++++++++++++++++ configure.ac | 93 +- debian/changelog | 483 - debian/compat | 1 - debian/control | 26 - debian/copyright | 0 debian/docs | 2 - debian/libsoup2.4-dev.install.in | 4 - debian/libsoup2.4.install.in | 1 - debian/rules | 120 - depcomp | 688 + docs/Makefile.in | 616 + docs/reference/Makefile.am | 14 +- docs/reference/Makefile.in | 760 + docs/reference/client-howto.xml | 32 +- docs/reference/html/SoupAddress.html | 800 + docs/reference/html/SoupAuth.html | 624 + docs/reference/html/SoupAuthDomain.html | 725 + docs/reference/html/SoupAuthDomainBasic.html | 268 + docs/reference/html/SoupAuthDomainDigest.html | 301 + docs/reference/html/SoupCache.html | 240 + docs/reference/html/SoupContentDecoder.html | 106 + docs/reference/html/SoupContentSniffer.html | 152 + docs/reference/html/SoupCookie.html | 1046 ++ docs/reference/html/SoupCookieJar.html | 529 + docs/reference/html/SoupCookieJarSqlite.html | 154 + docs/reference/html/SoupCookieJarText.html | 153 + docs/reference/html/SoupLogger.html | 527 + docs/reference/html/SoupMessage.html | 1832 +++ docs/reference/html/SoupMessageBody.html | 912 ++ docs/reference/html/SoupMessageHeaders.html | 1332 ++ docs/reference/html/SoupMultipart.html | 386 + docs/reference/html/SoupProxyResolverDefault.html | 101 + docs/reference/html/SoupProxyURIResolver.html | 237 + docs/reference/html/SoupRequest.html | 359 + docs/reference/html/SoupRequestData.html | 83 + docs/reference/html/SoupRequestFile.html | 108 + docs/reference/html/SoupRequestHTTP.html | 107 + docs/reference/html/SoupRequester.html | 222 + 
docs/reference/html/SoupServer.html | 1188 ++ docs/reference/html/SoupSession.html | 1509 ++ docs/reference/html/SoupSessionAsync.html | 122 + docs/reference/html/SoupSessionFeature.html | 179 + docs/reference/html/SoupSessionSync.html | 140 + docs/reference/html/SoupSocket.html | 1119 ++ docs/reference/html/SoupURI.html | 1119 ++ docs/reference/html/annotation-glossary.html | 95 + docs/reference/html/ch01.html | 41 + docs/reference/html/ch02.html | 84 + docs/reference/html/ch03.html | 56 + docs/reference/html/ch04.html | 39 + docs/reference/html/ch05.html | 38 + docs/reference/html/ch06.html | 38 + docs/reference/html/ch07.html | 50 + docs/reference/html/home.png | Bin 0 -> 654 bytes docs/reference/html/index.html | 169 + docs/reference/html/index.sgml | 1032 ++ docs/reference/html/ix01.html | 1325 ++ docs/reference/html/left.png | Bin 0 -> 459 bytes .../reference/html/libsoup-2.4-GValue-Support.html | 659 + .../libsoup-2.4-Soup-Miscellaneous-Utilities.html | 1422 ++ .../reference/html/libsoup-2.4-XMLRPC-Support.html | 600 + .../html/libsoup-2.4-soup-gnome-features.html | 94 + docs/reference/html/libsoup-2.4-soup-method.html | 240 + docs/reference/html/libsoup-2.4-soup-status.html | 698 + docs/reference/html/libsoup-2.4.devhelp2 | 844 + docs/reference/html/libsoup-build-howto.html | 161 + docs/reference/html/libsoup-client-howto.html | 550 + docs/reference/html/libsoup-server-howto.html | 441 + docs/reference/html/right.png | Bin 0 -> 472 bytes docs/reference/html/style.css | 266 + docs/reference/html/up.png | Bin 0 -> 406 bytes docs/reference/libsoup-2.4-docs.sgml | 13 +- docs/reference/libsoup-2.4-sections.txt | 291 +- docs/reference/libsoup-2.4.types | 41 + docs/reference/porting-2.2-2.4.xml | 878 -- docs/reference/server-howto.xml | 12 +- .../reference/tmpl/libsoup-2.4-unused.sgml | 0 docs/reference/tmpl/soup-address.sgml | 259 + docs/reference/tmpl/soup-auth-domain-basic.sgml | 86 + docs/reference/tmpl/soup-auth-domain-digest.sgml | 96 + 
docs/reference/tmpl/soup-auth-domain.sgml | 237 + docs/reference/tmpl/soup-auth.sgml | 233 + docs/reference/tmpl/soup-cache.sgml | 106 + docs/reference/tmpl/soup-content-decoder.sgml | 28 + docs/reference/tmpl/soup-content-sniffer.sgml | 49 + docs/reference/tmpl/soup-cookie-jar-sqlite.sgml | 50 + docs/reference/tmpl/soup-cookie-jar-text.sgml | 50 + docs/reference/tmpl/soup-cookie-jar.sgml | 156 + docs/reference/tmpl/soup-cookie.sgml | 322 + docs/reference/tmpl/soup-form.sgml | 140 + docs/reference/tmpl/soup-gnome-features.sgml | 36 + docs/reference/tmpl/soup-logger.sgml | 122 + docs/reference/tmpl/soup-message-body.sgml | 242 + docs/reference/tmpl/soup-message-headers.sgml | 362 + docs/reference/tmpl/soup-message.sgml | 551 + docs/reference/tmpl/soup-method.sgml | 127 + docs/reference/tmpl/soup-misc.sgml | 405 + docs/reference/tmpl/soup-multipart.sgml | 118 + .../tmpl/soup-proxy-resolver-default.sgml | 33 + docs/reference/tmpl/soup-proxy-uri-resolver.sgml | 64 + docs/reference/tmpl/soup-request-data.sgml | 28 + docs/reference/tmpl/soup-request-file.sgml | 37 + docs/reference/tmpl/soup-request-http.sgml | 37 + docs/reference/tmpl/soup-request.sgml | 121 + docs/reference/tmpl/soup-requester.sgml | 74 + docs/reference/tmpl/soup-server.sgml | 352 + docs/reference/tmpl/soup-session-async.sgml | 47 + docs/reference/tmpl/soup-session-feature.sgml | 43 + docs/reference/tmpl/soup-session-sync.sgml | 47 + docs/reference/tmpl/soup-session.sgml | 479 + docs/reference/tmpl/soup-socket.sgml | 390 + docs/reference/tmpl/soup-status.sgml | 164 + docs/reference/tmpl/soup-uri.sgml | 356 + docs/reference/tmpl/soup-value-utils.sgml | 203 + docs/reference/tmpl/soup-xmlrpc.sgml | 157 + docs/specs/README | 13 - docs/specs/rfc1945.txt | 3363 ---- docs/specs/rfc2068.txt | 9075 ----------- docs/specs/rfc2109.txt | 1179 -- docs/specs/rfc2145.txt | 395 - docs/specs/rfc2324.txt | 563 - docs/specs/rfc2388.txt | 507 - docs/specs/rfc2518.txt | 5267 ------- docs/specs/rfc2616.txt | 9934 
------------ docs/specs/rfc2617.txt | 1909 --- docs/specs/rfc2817.txt | 731 - docs/specs/rfc2818.txt | 395 - docs/specs/rfc2965.txt | 1459 -- docs/specs/rfc3986.txt | 3419 ---- gtk-doc.make | 280 + install-sh | 527 + libsoup.doap | 23 - libsoup/Makefile.am | 68 +- libsoup/Makefile.in | 1028 ++ libsoup/TIZEN.h | 16 - libsoup/soup-address.c | 26 +- libsoup/soup-auth-basic.c | 9 +- libsoup/soup-auth-domain-basic.c | 2 +- libsoup/soup-auth-domain-digest.c | 2 +- libsoup/soup-auth-domain.c | 4 +- libsoup/soup-auth-manager-ntlm.c | 62 +- libsoup/soup-auth-manager.c | 96 +- libsoup/soup-auth-manager.h | 11 +- libsoup/soup-auth.c | 4 +- libsoup/soup-cache.c | 67 +- libsoup/soup-connection.c | 204 +- libsoup/soup-connection.h | 2 + libsoup/soup-content-decoder.c | 13 +- libsoup/soup-content-sniffer.c | 263 - libsoup/soup-cookie-jar.c | 14 +- libsoup/soup-cookie.c | 12 + libsoup/soup-directory-input-stream.c | 41 +- libsoup/soup-enum-types.c | 516 + libsoup/soup-enum-types.c.tmpl | 36 - libsoup/soup-enum-types.h | 55 + libsoup/soup-enum-types.h.tmpl | 24 - libsoup/soup-form.c | 17 +- libsoup/soup-gnome-features.c | 2 +- libsoup/soup-headers.c | 44 +- libsoup/soup-http-input-stream.c | 374 +- libsoup/soup-http-input-stream.h | 36 +- libsoup/soup-logger.c | 33 +- libsoup/soup-marshal.c | 282 + libsoup/soup-marshal.h | 78 + libsoup/soup-marshal.list | 10 - libsoup/soup-message-body.c | 38 +- libsoup/soup-message-headers.c | 24 +- libsoup/soup-message-io.c | 81 +- libsoup/soup-message-private.h | 8 + libsoup/soup-message-queue.c | 73 +- libsoup/soup-message-queue.h | 11 +- libsoup/soup-message-server-io.c | 24 +- libsoup/soup-message.c | 240 +- libsoup/soup-message.h | 7 +- libsoup/soup-misc-private.h | 4 + libsoup/soup-misc.c | 2 + libsoup/soup-misc.h | 11 +- libsoup/soup-proxy-resolver.c | 1 + libsoup/soup-request-file.c | 49 +- libsoup/soup-request-http.c | 224 +- libsoup/soup-request.c | 98 +- libsoup/soup-requester.c | 55 + libsoup/soup-server.c | 103 +- 
libsoup/soup-server.h | 15 +- libsoup/soup-session-async.c | 71 +- libsoup/soup-session-feature.c | 3 + libsoup/soup-session-sync.c | 72 +- libsoup/soup-session.c | 954 +- libsoup/soup-session.h | 20 +- libsoup/soup-socket.c | 214 +- libsoup/soup-socket.h | 1 + libsoup/soup-ssl.c | 145 - libsoup/soup-ssl.h | 29 - libsoup/soup-status.c | 14 +- libsoup/soup-uri.c | 166 +- libsoup/soup-uri.h | 1 + libsoup/soup-value-utils.c | 7 + libsoup/soup-xmlrpc.c | 74 +- libsoup/soup-xmlrpc.h | 4 - ltmain.sh | 9636 ++++++++++++ m4/gtk-doc.m4 | 67 + m4/libgcrypt.m4 | 108 - m4/libtool.m4 | 7835 ++++++++++ m4/ltoptions.m4 | 369 + m4/ltsugar.m4 | 123 + m4/ltversion.m4 | 23 + m4/lt~obsolete.m4 | 98 + missing | 331 + packaging/libsoup2.4.changes | 2 + packaging/libsoup2.4.spec | 12 +- tests/Makefile.am | 14 +- tests/Makefile.in | 979 ++ tests/auth-test.c | 360 +- tests/chunk-test.c | 60 +- tests/coding-test.c | 371 +- tests/connection-test.c | 592 + tests/context-test.c | 211 +- tests/dns.c | 1 - tests/forms-test.c | 4 +- tests/get.c | 18 +- tests/getbug.c | 7 +- tests/header-parsing.c | 147 +- tests/libsoup.supp | 207 +- tests/misc-test.c | 597 +- tests/ntlm-test.c | 134 +- tests/redirect-test.c | 150 +- tests/requester-test.c | 232 +- tests/resources/mbox.raw | Bin 0 -> 287 bytes tests/resources/mbox.zlib | Bin 0 -> 293 bytes tests/simple-httpd.c | 13 +- tests/simple-proxy.c | 1 - tests/sniffing-test.c | 2 +- tests/ssl-test.c | 187 + tests/test-cert.pem | 35 +- tests/test-key.pem | 38 +- tests/test-utils.c | 5 +- tests/timeout-test.c | 1 + tests/uri-parsing.c | 488 +- tests/xmlrpc-server-test.c | 4 + tests/xmlrpc-test.c | 4 + 252 files changed, 84113 insertions(+), 42751 deletions(-) delete mode 100644 HACKING create mode 100644 Makefile.glib create mode 100644 Makefile.in create mode 100644 aclocal.m4 delete mode 100755 autogen.sh create mode 100755 config.guess create mode 100644 config.h.in create mode 100755 config.sub create mode 100755 configure delete mode 100644 
debian/changelog delete mode 100644 debian/compat delete mode 100644 debian/control delete mode 100644 debian/copyright delete mode 100644 debian/docs delete mode 100644 debian/libsoup2.4-dev.install.in delete mode 100644 debian/libsoup2.4.install.in delete mode 100755 debian/rules create mode 100755 depcomp create mode 100644 docs/Makefile.in create mode 100644 docs/reference/Makefile.in create mode 100644 docs/reference/html/SoupAddress.html create mode 100644 docs/reference/html/SoupAuth.html create mode 100644 docs/reference/html/SoupAuthDomain.html create mode 100644 docs/reference/html/SoupAuthDomainBasic.html create mode 100644 docs/reference/html/SoupAuthDomainDigest.html create mode 100644 docs/reference/html/SoupCache.html create mode 100644 docs/reference/html/SoupContentDecoder.html create mode 100644 docs/reference/html/SoupContentSniffer.html create mode 100644 docs/reference/html/SoupCookie.html create mode 100644 docs/reference/html/SoupCookieJar.html create mode 100644 docs/reference/html/SoupCookieJarSqlite.html create mode 100644 docs/reference/html/SoupCookieJarText.html create mode 100644 docs/reference/html/SoupLogger.html create mode 100644 docs/reference/html/SoupMessage.html create mode 100644 docs/reference/html/SoupMessageBody.html create mode 100644 docs/reference/html/SoupMessageHeaders.html create mode 100644 docs/reference/html/SoupMultipart.html create mode 100644 docs/reference/html/SoupProxyResolverDefault.html create mode 100644 docs/reference/html/SoupProxyURIResolver.html create mode 100644 docs/reference/html/SoupRequest.html create mode 100644 docs/reference/html/SoupRequestData.html create mode 100644 docs/reference/html/SoupRequestFile.html create mode 100644 docs/reference/html/SoupRequestHTTP.html create mode 100644 docs/reference/html/SoupRequester.html create mode 100644 docs/reference/html/SoupServer.html create mode 100644 docs/reference/html/SoupSession.html create mode 100644 docs/reference/html/SoupSessionAsync.html 
create mode 100644 docs/reference/html/SoupSessionFeature.html create mode 100644 docs/reference/html/SoupSessionSync.html create mode 100644 docs/reference/html/SoupSocket.html create mode 100644 docs/reference/html/SoupURI.html create mode 100644 docs/reference/html/annotation-glossary.html create mode 100644 docs/reference/html/ch01.html create mode 100644 docs/reference/html/ch02.html create mode 100644 docs/reference/html/ch03.html create mode 100644 docs/reference/html/ch04.html create mode 100644 docs/reference/html/ch05.html create mode 100644 docs/reference/html/ch06.html create mode 100644 docs/reference/html/ch07.html create mode 100644 docs/reference/html/home.png create mode 100644 docs/reference/html/index.html create mode 100644 docs/reference/html/index.sgml create mode 100644 docs/reference/html/ix01.html create mode 100644 docs/reference/html/left.png create mode 100644 docs/reference/html/libsoup-2.4-GValue-Support.html create mode 100644 docs/reference/html/libsoup-2.4-Soup-Miscellaneous-Utilities.html create mode 100644 docs/reference/html/libsoup-2.4-XMLRPC-Support.html create mode 100644 docs/reference/html/libsoup-2.4-soup-gnome-features.html create mode 100644 docs/reference/html/libsoup-2.4-soup-method.html create mode 100644 docs/reference/html/libsoup-2.4-soup-status.html create mode 100644 docs/reference/html/libsoup-2.4.devhelp2 create mode 100644 docs/reference/html/libsoup-build-howto.html create mode 100644 docs/reference/html/libsoup-client-howto.html create mode 100644 docs/reference/html/libsoup-server-howto.html create mode 100644 docs/reference/html/right.png create mode 100644 docs/reference/html/style.css create mode 100644 docs/reference/html/up.png create mode 100644 docs/reference/libsoup-2.4.types delete mode 100644 docs/reference/porting-2.2-2.4.xml rename debian/dirs => docs/reference/tmpl/libsoup-2.4-unused.sgml (100%) create mode 100644 docs/reference/tmpl/soup-address.sgml create mode 100644 
docs/reference/tmpl/soup-auth-domain-basic.sgml create mode 100644 docs/reference/tmpl/soup-auth-domain-digest.sgml create mode 100644 docs/reference/tmpl/soup-auth-domain.sgml create mode 100644 docs/reference/tmpl/soup-auth.sgml create mode 100644 docs/reference/tmpl/soup-cache.sgml create mode 100644 docs/reference/tmpl/soup-content-decoder.sgml create mode 100644 docs/reference/tmpl/soup-content-sniffer.sgml create mode 100644 docs/reference/tmpl/soup-cookie-jar-sqlite.sgml create mode 100644 docs/reference/tmpl/soup-cookie-jar-text.sgml create mode 100644 docs/reference/tmpl/soup-cookie-jar.sgml create mode 100644 docs/reference/tmpl/soup-cookie.sgml create mode 100644 docs/reference/tmpl/soup-form.sgml create mode 100644 docs/reference/tmpl/soup-gnome-features.sgml create mode 100644 docs/reference/tmpl/soup-logger.sgml create mode 100644 docs/reference/tmpl/soup-message-body.sgml create mode 100644 docs/reference/tmpl/soup-message-headers.sgml create mode 100644 docs/reference/tmpl/soup-message.sgml create mode 100644 docs/reference/tmpl/soup-method.sgml create mode 100644 docs/reference/tmpl/soup-misc.sgml create mode 100644 docs/reference/tmpl/soup-multipart.sgml create mode 100644 docs/reference/tmpl/soup-proxy-resolver-default.sgml create mode 100644 docs/reference/tmpl/soup-proxy-uri-resolver.sgml create mode 100644 docs/reference/tmpl/soup-request-data.sgml create mode 100644 docs/reference/tmpl/soup-request-file.sgml create mode 100644 docs/reference/tmpl/soup-request-http.sgml create mode 100644 docs/reference/tmpl/soup-request.sgml create mode 100644 docs/reference/tmpl/soup-requester.sgml create mode 100644 docs/reference/tmpl/soup-server.sgml create mode 100644 docs/reference/tmpl/soup-session-async.sgml create mode 100644 docs/reference/tmpl/soup-session-feature.sgml create mode 100644 docs/reference/tmpl/soup-session-sync.sgml create mode 100644 docs/reference/tmpl/soup-session.sgml create mode 100644 docs/reference/tmpl/soup-socket.sgml create 
mode 100644 docs/reference/tmpl/soup-status.sgml create mode 100644 docs/reference/tmpl/soup-uri.sgml create mode 100644 docs/reference/tmpl/soup-value-utils.sgml create mode 100644 docs/reference/tmpl/soup-xmlrpc.sgml delete mode 100644 docs/specs/README delete mode 100644 docs/specs/rfc1945.txt delete mode 100644 docs/specs/rfc2068.txt delete mode 100644 docs/specs/rfc2109.txt delete mode 100644 docs/specs/rfc2145.txt delete mode 100644 docs/specs/rfc2324.txt delete mode 100644 docs/specs/rfc2388.txt delete mode 100644 docs/specs/rfc2518.txt delete mode 100644 docs/specs/rfc2616.txt delete mode 100644 docs/specs/rfc2617.txt delete mode 100644 docs/specs/rfc2817.txt delete mode 100644 docs/specs/rfc2818.txt delete mode 100644 docs/specs/rfc2965.txt delete mode 100644 docs/specs/rfc3986.txt create mode 100644 gtk-doc.make create mode 100755 install-sh delete mode 100644 libsoup.doap create mode 100644 libsoup/Makefile.in delete mode 100644 libsoup/TIZEN.h create mode 100644 libsoup/soup-enum-types.c delete mode 100644 libsoup/soup-enum-types.c.tmpl create mode 100644 libsoup/soup-enum-types.h delete mode 100644 libsoup/soup-enum-types.h.tmpl create mode 100644 libsoup/soup-marshal.c create mode 100644 libsoup/soup-marshal.h delete mode 100644 libsoup/soup-marshal.list delete mode 100644 libsoup/soup-ssl.c delete mode 100644 libsoup/soup-ssl.h create mode 100755 ltmain.sh create mode 100644 m4/gtk-doc.m4 delete mode 100644 m4/libgcrypt.m4 create mode 100644 m4/libtool.m4 create mode 100644 m4/ltoptions.m4 create mode 100644 m4/ltsugar.m4 create mode 100644 m4/ltversion.m4 create mode 100644 m4/lt~obsolete.m4 create mode 100755 missing create mode 100644 packaging/libsoup2.4.changes create mode 100644 tests/Makefile.in create mode 100644 tests/connection-test.c create mode 100644 tests/resources/mbox.raw create mode 100644 tests/resources/mbox.zlib create mode 100644 tests/ssl-test.c diff --git a/COPYING b/COPYING index eb685a5..5bc8fb2 100644 --- a/COPYING +++ 
b/COPYING @@ -1,15 +1,15 @@ - GNU LIBRARY GENERAL PUBLIC LICENSE - Version 2, June 1991 + GNU LIBRARY GENERAL PUBLIC LICENSE + Version 2, June 1991 Copyright (C) 1991 Free Software Foundation, Inc. - 675 Mass Ave, Cambridge, MA 02139, USA + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the library GPL. It is numbered 2 because it goes with version 2 of the ordinary GPL.] - Preamble + Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public @@ -99,7 +99,7 @@ works together with the library. Note that it is possible for a library to be covered by the ordinary General Public License rather than by this special one. - GNU LIBRARY GENERAL PUBLIC LICENSE + GNU LIBRARY GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library which @@ -411,7 +411,7 @@ decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - NO WARRANTY + NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. @@ -434,9 +434,9 @@ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
- END OF TERMS AND CONDITIONS + END OF TERMS AND CONDITIONS - Appendix: How to Apply These Terms to Your New Libraries + How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that @@ -463,8 +463,8 @@ convey the exclusion of warranty; and each file should have at least the Library General Public License for more details. You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. diff --git a/HACKING b/HACKING deleted file mode 100644 index 51269e5..0000000 --- a/HACKING +++ /dev/null @@ -1,66 +0,0 @@ -CODE STYLE ----------- - -Please use the style used by the rest of the code. Among other things, -this means: - - * Tabs, not spaces, for indentation - - * Put spaces: - * around binary operators - * between if/while/for/switch and "(" - * between function name and "(" - * between ")" and "{" - * after "," - - * if/for/while bodies: - - * Single-line bodies should (a) be on their own line, and (b) - not have braces around them - - * Multi-line bodies should have braces around them, even if - the body is only a single statement and the braces are not - syntactically necessary. - - * Eg: - - for (i = 0; i < len; i++) { - if (find (i, something)) - break; - else { - function_with_big_name (i, something, - something_else); - } - } - - * C89, not C99. (In particular, don't declare variables in the - middle of blocks.) - - * Do not use gint, gchar, glong, and gshort. (Other g-types, such - as gpointer and the unsigned types are fine.) 
- -CORRECTNESS ------------ - - * libsoup builds with lots of -W options by default, and should - not print any warnings while compiling (unless they're caused by - #included files from other projects, eg, proxy.h). You can use - "make > /dev/null" to do a full compile showing only the - warnings/errors, to make sure your patch does not introduce any - more. - - * There are a number of regression tests in the tests/ directory. - Running "make check" will run all of them (or at least, all of - the ones that it can run based on what software you have - installed. Eg, some tests require apache to be installed.) You - should run "make check" before submitting a patch that could - potentially change libsoup's behavior. ("make check" will warn - you if it was not able to run all of the tests. If you are - making extensive changes, or changing very low-level functions, - you may want to install all of the optional pieces so you can - run all of the regression tests.) - - * libsoup ought to build correctly from outside its source tree, - so if you make large changes to the Makefiles, try a "make - distcheck" to verify that an out-of-source-tree build still - works. 
diff --git a/Makefile.am b/Makefile.am index 861daff..8b86fb7 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,5 @@ ## Process this file with automake to produce Makefile.in -ACLOCAL_AMFLAGS = -I m4 +ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS} SUBDIRS = libsoup tests docs @@ -8,7 +8,8 @@ EXTRA_DIST = \ libsoup-gnome-2.4.pc.in \ gtk-doc.make \ libsoup-zip.in \ - m4/introspection.m4 + m4/introspection.m4 \ + Makefile.glib DISTCHECK_CONFIGURE_FLAGS = --enable-gtk-doc --enable-introspection diff --git a/Makefile.glib b/Makefile.glib new file mode 100644 index 0000000..78eb07d --- /dev/null +++ b/Makefile.glib @@ -0,0 +1,302 @@ +# -*- Mode: makefile -*- +# +# To use: +# +# In configure.ac: +# add -Wno-portability to AM_INIT_AUTOMAKE +# add GLIB_CONFIG([min-version[, required-modules]]) +# (remove AM_PATH_GLIB_2_0 and GLIB_GSETTINGS) +# +# Add to Makefile.am where your library/program is built: +# include $(GLIB_MAKEFILE) +# +# BUILT_SOURCES = $(GLIB_GENERATED) +# +# Add *.stamp to .gitignore +# +# Add a GLIB_GENERATED variable with the files you want to generate, +# as described below. (The examples below use filenames with hyphens, +# eg foo-marshal.h, but you can omit the hyphens if that matches your +# file naming scheme better.) +# +# You do not need to modify CLEANFILES or EXTRA_DIST for any of these +# macros. + + +# glib-genmarshal +# +# To generate signal marshallers, add files with names ending in +# "marshal.h" and "marshal.c" to GLIB_GENERATED: +# +# GLIB_GENERATED += foo-marshal.h foo-marshal.c +# foo_marshal_sources = aaa.c bbb.c ccc.c ddd.c +# +# Makefile.glib will then generate a foo-marshal.list file containing +# all _foo_marshal_* functions referenced by $(foo_marshal_sources), +# and will rebuild foo-marshal.c/foo-marshal.h whenever the list +# changes. +# +# For your convenience, any .h files or $(GLIB_GENERATED) files in +# $(foo_marshal_sources) will be ignored. 
This means you can usually just +# set foo_marshal_sources to the value of your library/program's +# _SOURCES variable, even if that variable contains foo-marshal.c. +# +# You can set GLIB_GENMARSHAL_H_FLAGS and GLIB_GENMARSHAL_C_FLAGS (or +# an appropriate file-specific variable, eg +# foo_marshal_GENMARSHAL_H_FLAGS) to set/override glib-genmarshal +# options. + + +# glib-mkenums +# +# To generate enum type registrations, add files with names ending +# in "-enum-types.[ch]" or "enumtypes.[ch]" to GLIB_GENERATED: +# +# GLIB_GENERATED += foo-enum-types.h foo-enum-types.c +# foo_enum_types_sources = aaa.h bbb.h ccc.h ddd.h +# +# Makefile.glib will create a list all of the enum/flags types +# declared in $(foo_enum_type_sources), and will rebuild +# foo-enum-types.c/foo-enum-types.h whenever that list changes. (No +# template files are required.) +# +# For your convenience, any .c files or $(GLIB_GENERATED) files in +# $(foo_enum_types_sources) will be ignored. This means you can +# usually set foo_enum_types_sources to the value of your +# library/program's _HEADERS and/or _SOURCES variables, even if that +# contains foo-enum-types.h. +# +# You can set GLIB_MKENUMS_H_FLAGS and GLIB_MKENUMS_C_FLAGS (or an +# appropriate file-specific variable, eg +# foo_enum_types_MKENUMS_H_FLAGS) to set/override glib-mkenums +# options. In particular, you can do: +# +# GLIB_MKENUMS_C_FLAGS = --fhead "\#define FOO_I_KNOW_THIS_IS_UNSTABLE" +# +# (The backslash is necessary to keep make from thinking the "#" is +# the start of a comment.) + + +# glib-compile-schemas +# +# Any foo.gschemas.xml files listed in gsettingsschema_DATA will be +# validated before installation, and (if --disable-schemas-compile was +# not passed) compiled after installation. 
+# +# To build an enums file, add it to GLIB_GENERATED (in addition to +# gsettingsschema_DATA): +# +# GLIB_GENERATED += org.gnome.foo.enums.xml +# org_gnome_foo_enums_xml_sources = aaa.h bbb.h ccc.h ddd.h +# +# All enums files will be built before any schema files are validated. + + +######## + +# Notes on Makefile.glib hacking: +# +# - The exact rules that automake generates for a Makefile vary +# depending on what sorts of things were done in the Makefile.am, +# so we have to be careful with what rules we assume are there. +# In particular, (a) the glue to handle BUILT_SOURCES and the +# various hooks won't be output unless those things were +# referenced in the Makefile.am, and (b) a Makefile.am with +# SUBDIRS will get different rules than one without. +# +# - Build rules should always refer to their dependencies via $^, +# not by reusing a variable that is listed in the rule's +# dependencies. This is needed to make srcdir!=builddir builds +# work. You can use $(filter)/$(filter-out) if $^ has things +# you don't want in it. +# +# - When using a filename as something other than a filename, +# consider whether you need to wrap it in $(notdir) to get the +# right result when that file is being pulled out of a +# subdirectory. +# +# - All private variables should be prefixed with _glib or _GLIB +# +# - "make -qp > makefile.out" will give you a copy of the +# Makefile after all macros are expanded. +# +# The genmarshal code is commented; the mkenums and schema code is +# generally similar. + +_GLIB_CLEANFILES = +_GLIB_DISTCLEANFILES = + +_GLIB_V_GEN = $(_glib_v_gen_$(V)) +_glib_v_gen_ = $(_glib_v_gen_$(AM_DEFAULT_VERBOSITY)) +_glib_v_gen_0 = @echo " GEN " $(subst .stamp,,$@); + + +### glib-genmarshal + +# _GLIB_MARSHAL_GENERATED contains the basenames (eg, "foo-marshal") +# of all the marshal-related files to be generated. 
+_GLIB_MARSHAL_GENERATED = $(subst .h,,$(filter %marshal.h,$(GLIB_GENERATED))) + +# These are used as macros (with the value of $(1) inherited from the "caller") +# _glib_marshal_prefix("foo-marshal") = "foo" (used in the C marshal names) +# _glib_marshal_sources_var("foo-marshal") = "foo_marshal_sources" +# _glib_marshal_sources = the filtered value of $(foo_marshal_sources) +_glib_marshal_prefix = $(subst marshal,,$(subst _marshal,,$(subst -,_,$(notdir $(1)))))_marshal +_glib_marshal_sources_var = $(subst -,_,$(notdir $(1)))_sources +_glib_marshal_sources = $(filter-out %.h,$(filter-out $(GLIB_GENERATED),$($(_glib_marshal_sources_var)))) + +# This is a multi-line macro (ending with the "endef" below) that +# outputs a set of rules for a single .h/.c pair (whose basename is +# $(1)). The initial $(if) line makes make error out if +# foo_marshal_sources wasn't set. Note that single-$ variables are +# expanded when the macro is called, and double-$ variables are +# expanded when the rule is invoked. 
+define _glib_make_genmarshal_rules +$(if $(_glib_marshal_sources),,$(error Need to define $(_glib_marshal_sources_var) for $(1).[ch])) + +$(1).list.stamp: $(_glib_marshal_sources) + $$(_GLIB_V_GEN) LC_ALL=C sed -ne 's/.*_$(_glib_marshal_prefix)_\([_A-Z]*\).*/\1/p' $$^ | sort -u | sed -e 's/__/:/' -e 's/_/,/g' > $(1).list.tmp && \ + (cmp -s $(1).list.tmp $(1).list || cp $(1).list.tmp $(1).list) && \ + rm -f $(1).list.tmp && \ + echo timestamp > $$@ + +$(1).list: $(1).list.stamp + @true + +$(1).h: $(1).list + $$(_GLIB_V_GEN) $$(GLIB_GENMARSHAL) \ + --prefix=_$(_glib_marshal_prefix) --header \ + $$(GLIB_GENMARSHAL_H_FLAGS) \ + $$($(_glib_marshal_prefix)_GENMARSHAL_H_FLAGS) \ + $$< > $$@.tmp && \ + mv $$@.tmp $$@ + +$(1).c: $(1).list + $$(_GLIB_V_GEN) (echo "#include \"$$(subst .c,.h,$$(@F))\""; $$(GLIB_GENMARSHAL) \ + --prefix=_$(_glib_marshal_prefix) --body \ + $$(GLIB_GENMARSHAL_C_FLAGS) \ + $$($(_glib_marshal_prefix)_GENMARSHAL_C_FLAGS) \ + $$< ) > $$@.tmp && \ + mv $$@.tmp $$@ + +_GLIB_CLEANFILES += $(1).list.stamp $(1).list +_GLIB_DISTCLEANFILES += $(1).h $(1).c +endef + +# Run _glib_make_genmarshal_rules for each set of generated files +$(foreach f,$(_GLIB_MARSHAL_GENERATED),$(eval $(call _glib_make_genmarshal_rules,$f))) + + +### glib-mkenums + +_GLIB_ENUM_TYPES_GENERATED = $(subst .h,,$(filter %enum-types.h %enumtypes.h,$(GLIB_GENERATED))) + +_glib_enum_types_prefix = $(subst -,_,$(notdir $(1))) +_glib_enum_types_guard = __$(shell LC_ALL=C echo $(_glib_enum_types_prefix) | tr 'a-z' 'A-Z')_H__ +_glib_enum_types_sources_var = $(_glib_enum_types_prefix)_sources +_glib_enum_types_sources = $(filter-out $(GLIB_GENERATED),$($(_glib_enum_types_sources_var))) +_glib_enum_types_h_sources = $(filter %.h,$(_glib_enum_types_sources)) + +define _glib_make_mkenums_rules +$(if $(_glib_enum_types_sources),,$(error Need to define $(_glib_enum_types_sources_var) for $(1).[ch])) + +$(1).h.stamp: $(_glib_enum_types_h_sources) + $$(_GLIB_V_GEN) $$(GLIB_MKENUMS) \ + --fhead "/* 
Generated by glib-mkenums. Do not edit */\n\n#ifndef $(_glib_enum_types_guard)\n#define $(_glib_enum_types_guard)\n\n" \ + $$(GLIB_MKENUMS_H_FLAGS) \ + $$($(_glib_enum_types_prefix)_MKENUMS_H_FLAGS) \ + --fhead "#include \n\nG_BEGIN_DECLS\n" \ + --vhead "GType @enum_name@_get_type (void) G_GNUC_CONST;\n#define @ENUMPREFIX@_TYPE_@ENUMSHORT@ (@enum_name@_get_type ())\n" \ + --ftail "G_END_DECLS\n\n#endif /* $(_glib_enum_types_guard) */" \ + $$^ > $(1).h.tmp && \ + (cmp -s $(1).h.tmp $(1).h || cp $(1).h.tmp $(1).h) && \ + rm -f $(1).h.tmp && \ + echo timestamp > $$@ + +$(1).h: $(1).h.stamp + @true + +$(1).c.stamp: $(_glib_enum_types_h_sources) + $$(_GLIB_V_GEN) $$(GLIB_MKENUMS) \ + --fhead "/* Generated by glib-mkenums. Do not edit */\n\n#include \"$(notdir $(1)).h\"\n" \ + $$(GLIB_MKENUMS_C_FLAGS) \ + $$($(_glib_enum_types_prefix)_MKENUMS_C_FLAGS) \ + --fhead "$$(foreach f,$$(^F),\n#include \"$$(f)\")\n\n" \ + --vhead "GType\n@enum_name@_get_type (void)\n{\n static volatile gsize g_define_type_id__volatile = 0;\n\n if (g_once_init_enter (&g_define_type_id__volatile))\n {\n static const G@Type@Value values[] = {\n" \ + --vprod " { @VALUENAME@, \"@VALUENAME@\", \"@valuenick@\" },\n" \ + --vtail " { 0, NULL, NULL }\n };\n GType g_define_type_id =\n g_@type@_register_static (g_intern_static_string (\"@EnumName@\"), values);\n g_once_init_leave (&g_define_type_id__volatile, g_define_type_id);\n }\n\n return g_define_type_id__volatile;\n}\n" \ + $$^ > $(1).c.tmp && \ + (cmp -s $(1).c.tmp $(1).c || cp $(1).c.tmp $(1).c) && \ + rm -f $(1).c.tmp && \ + echo timestamp > $$@ + +$(1).c: $(1).c.stamp + @true + +_GLIB_CLEANFILES += $(1).h.stamp $(1).c.stamp +_GLIB_DISTCLEANFILES += $(1).h $(1).c $(1).h.stamp $(1).c.stamp +endef + +$(foreach f,$(_GLIB_ENUM_TYPES_GENERATED),$(eval $(call _glib_make_mkenums_rules,$f))) + + +### glib-compile-schemas + +_GLIB_ENUMS_XML_GENERATED = $(filter %.enums.xml,$(GLIB_GENERATED)) +_GLIB_GSETTINGS_SCHEMA_FILES = $(filter 
%.gschema.xml,$(gsettingsschema_DATA)) +_GLIB_GSETTINGS_VALID_FILES = $(subst .xml,.valid,$(_GLIB_GSETTINGS_SCHEMA_FILES)) + +_glib_enums_xml_prefix = $(subst .,_,$(notdir $(1))) +_glib_enums_xml_sources_var = $(_glib_enums_xml_prefix)_sources +_glib_enums_xml_sources = $(filter-out $(GLIB_GENERATED),$($(_glib_enums_xml_sources_var))) +_glib_enums_xml_namespace = $(subst .enums.xml,,$(notdir $(1))) + +define _glib_make_enums_xml_rule +$(if $(_glib_enums_xml_sources),,$(error Need to define $(_glib_enums_xml_sources_var) for $(1))) + +$(1): $(_glib_enums_xml_sources) + $$(_GLIB_V_GEN) $$(GLIB_MKENUMS) --comments '<!-- @comment@ -->' --fhead "<schemalist>" --vhead " <@type@ id='$(_glib_enums_xml_namespace).@EnumName@'>" --vprod " <value nick='@valuenick@' value='@valueindex@'/>" --vtail " </@type@>" --ftail "</schemalist>" $$^ > $$@.tmp && mv $$@.tmp $$@ +endef + +_GLIB_V_CHECK = $(_glib_v_check_$(V)) +_glib_v_check_ = $(_glib_v_check_$(AM_DEFAULT_VERBOSITY)) +_glib_v_check_0 = @echo " CHECK " $(subst .valid,.xml,$@); + +define _glib_make_schema_validate_rule +$(subst .xml,.valid,$(1)): $(_GLIB_ENUMS_XML_GENERATED) $(1) + $$(_GLIB_V_CHECK) $$(GLIB_COMPILE_SCHEMAS) --strict --dry-run $$(addprefix --schema-file=,$$^) && touch $$@ +endef + +define _glib_make_schema_rules +all-am: $(_GLIB_GSETTINGS_VALID_FILES) + +install-data-am: glib-install-schemas-hook + +glib-install-schemas-hook: install-gsettingsschemaDATA + @test -n "$(GSETTINGS_DISABLE_SCHEMAS_COMPILE)$(DESTDIR)" || (echo $(GLIB_COMPILE_SCHEMAS) $(gsettingsschemadir); $(GLIB_COMPILE_SCHEMAS) $(gsettingsschemadir)) + +uninstall-am: glib-uninstall-schemas-hook + +glib-uninstall-schemas-hook: uninstall-gsettingsschemaDATA + @test -n "$(GSETTINGS_DISABLE_SCHEMAS_COMPILE)$(DESTDIR)" || (echo $(GLIB_COMPILE_SCHEMAS) $(gsettingsschemadir); $(GLIB_COMPILE_SCHEMAS) $(gsettingsschemadir)) + +.PHONY: glib-install-schemas-hook glib-uninstall-schemas-hook +endef + +_GLIB_CLEANFILES += $(_GLIB_ENUMS_XML_GENERATED) $(_GLIB_GSETTINGS_VALID_FILES) + +$(foreach f,$(_GLIB_ENUMS_XML_GENERATED),$(eval $(call 
_glib_make_enums_xml_rule,$f))) +$(foreach f,$(_GLIB_GSETTINGS_SCHEMA_FILES),$(eval $(call _glib_make_schema_validate_rule,$f))) +$(if $(_GLIB_GSETTINGS_SCHEMA_FILES),$(eval $(_glib_make_schema_rules))) + + +### Cleanup +.PHONY: clean-glib distclean-glib + +clean-am: clean-glib +clean-glib: + $(if $(strip $(_GLIB_CLEANFILES)),-rm -f $(_GLIB_CLEANFILES)) + +distclean-am: distclean-glib +distclean-glib: + $(if $(strip $(_GLIB_DISTCLEANFILES)),-rm -f $(_GLIB_DISTCLEANFILES)) diff --git a/Makefile.in b/Makefile.in new file mode 100644 index 0000000..97f649d --- /dev/null +++ b/Makefile.in @@ -0,0 +1,868 @@ +# Makefile.in generated by automake 1.11.3 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +@BUILD_LIBSOUP_GNOME_TRUE@am__append_1 = libsoup-gnome-2.4.pc +subdir = . 
+DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in $(srcdir)/config.h.in \ + $(srcdir)/libsoup-2.4.pc.in $(srcdir)/libsoup-gnome-2.4.pc.in \ + $(srcdir)/libsoup-zip.in $(top_srcdir)/configure AUTHORS \ + COPYING NEWS config.guess config.sub depcomp install-sh \ + ltmain.sh missing +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/gtk-doc.m4 \ + $(top_srcdir)/m4/introspection.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = config.h +CONFIG_CLEAN_FILES = libsoup-2.4.pc libsoup-gnome-2.4.pc libsoup-zip +CONFIG_CLEAN_VPATH_FILES = +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ + html-recursive info-recursive install-data-recursive \ + install-dvi-recursive install-exec-recursive \ + install-html-recursive install-info-recursive \ + install-pdf-recursive install-ps-recursive install-recursive \ + installcheck-recursive installdirs-recursive pdf-recursive \ + ps-recursive uninstall-recursive +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do 
echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pkgconfigdir)" +DATA = $(pkgconfig_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ + $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ + distdir dist dist-all distcheck +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! 
-perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +GZIP_ENV = --best +DIST_ARCHIVES = $(distdir).tar.xz +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . 
-type f -print +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +APACHE_HTTPD = @APACHE_HTTPD@ +APACHE_MODULE_DIR = @APACHE_MODULE_DIR@ +APACHE_PHP_MODULE = @APACHE_PHP_MODULE@ +APACHE_PHP_MODULE_DIR = @APACHE_PHP_MODULE_DIR@ +APACHE_SSL_MODULE_DIR = @APACHE_SSL_MODULE_DIR@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL = @CURL@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GLIB_CFLAGS = @GLIB_CFLAGS@ +GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@ +GLIB_GENMARSHAL = @GLIB_GENMARSHAL@ +GLIB_LIBS = @GLIB_LIBS@ +GLIB_MAKEFILE = @GLIB_MAKEFILE@ +GLIB_MKENUMS = @GLIB_MKENUMS@ +GNOME_KEYRING_CFLAGS = @GNOME_KEYRING_CFLAGS@ +GNOME_KEYRING_LIBS = @GNOME_KEYRING_LIBS@ +GOBJECT_QUERY = @GOBJECT_QUERY@ +GREP = @GREP@ +GTKDOC_CHECK = @GTKDOC_CHECK@ +GTKDOC_DEPS_CFLAGS = @GTKDOC_DEPS_CFLAGS@ +GTKDOC_DEPS_LIBS = @GTKDOC_DEPS_LIBS@ +GTKDOC_MKPDF = @GTKDOC_MKPDF@ +GTKDOC_REBASE = @GTKDOC_REBASE@ +HAVE_GNOME = @HAVE_GNOME@ +HTML_DIR = @HTML_DIR@ +IF_HAVE_PHP = @IF_HAVE_PHP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +INTROSPECTION_CFLAGS = @INTROSPECTION_CFLAGS@ +INTROSPECTION_COMPILER = @INTROSPECTION_COMPILER@ +INTROSPECTION_GENERATE = @INTROSPECTION_GENERATE@ +INTROSPECTION_GIRDIR = @INTROSPECTION_GIRDIR@ +INTROSPECTION_LIBS = @INTROSPECTION_LIBS@ +INTROSPECTION_MAKEFILE = @INTROSPECTION_MAKEFILE@ +INTROSPECTION_SCANNER = @INTROSPECTION_SCANNER@ +INTROSPECTION_TYPELIBDIR = @INTROSPECTION_TYPELIBDIR@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ 
+LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MISSING_REGRESSION_TEST_PACKAGES = @MISSING_REGRESSION_TEST_PACKAGES@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PHP = @PHP@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SOUP_AGE = @SOUP_AGE@ +SOUP_API_VERSION = @SOUP_API_VERSION@ +SOUP_CURRENT = @SOUP_CURRENT@ +SOUP_DEBUG_FLAGS = @SOUP_DEBUG_FLAGS@ +SOUP_MAINTAINER_FLAGS = @SOUP_MAINTAINER_FLAGS@ +SOUP_REVISION = @SOUP_REVISION@ +SQLITE_CFLAGS = @SQLITE_CFLAGS@ +SQLITE_LIBS = @SQLITE_LIBS@ +STRIP = @STRIP@ +VERSION = @VERSION@ +XML_CFLAGS = @XML_CFLAGS@ +XML_LIBS = @XML_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ 
+libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +ntlm_auth = @ntlm_auth@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS} +SUBDIRS = libsoup tests docs +EXTRA_DIST = \ + libsoup-2.4.pc.in \ + libsoup-gnome-2.4.pc.in \ + gtk-doc.make \ + libsoup-zip.in \ + m4/introspection.m4 \ + Makefile.glib + +DISTCHECK_CONFIGURE_FLAGS = --enable-gtk-doc --enable-introspection +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libsoup-2.4.pc $(am__append_1) +all: config.h + $(MAKE) $(AM_MAKEFLAGS) all-recursive + +.SUFFIXES: +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): + +config.h: stamp-h1 + @if test ! -f $@; then rm -f stamp-h1; else :; fi + @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi + +stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status config.h +$(srcdir)/config.h.in: $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f stamp-h1 + touch $@ + +distclean-hdr: + -rm -f config.h stamp-h1 +libsoup-2.4.pc: $(top_builddir)/config.status $(srcdir)/libsoup-2.4.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ +libsoup-gnome-2.4.pc: $(top_builddir)/config.status $(srcdir)/libsoup-gnome-2.4.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ +libsoup-zip: $(top_builddir)/config.status $(srcdir)/libsoup-zip.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool config.lt +install-pkgconfigDATA: $(pkgconfig_DATA) + @$(NORMAL_INSTALL) + test -z "$(pkgconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + 
$(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run `make' without going through this Makefile. +# To change the values of `make' variables: instead of editing Makefiles, +# (1) if the variable is set in `config.status', edit `config.status' +# (which will cause the Makefiles to be regenerated when you run `make'); +# (2) otherwise, pass the desired values on the `make' command line. +$(RECURSIVE_TARGETS): + @fail= failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +$(RECURSIVE_CLEAN_TARGETS): + @fail= failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + rev=''; for subdir in $$list; do \ + if test "$$subdir" = "."; then :; else \ + rev="$$subdir $$rev"; \ + fi; \ + done; \ + rev="$$rev ."; \ + target=`echo $@ | sed s/-recursive//`; \ + for subdir in $$rev; do \ + echo "Making $$target in $$subdir"; \ + if test 
"$$subdir" = "."; then \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done && test -z "$$fail" +tags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ + done +ctags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ + done + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test 
-d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__remove_distdir) + +dist-lzma: distdir + tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma + $(am__remove_distdir) +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__remove_distdir) + +dist-tarZ: distdir + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__remove_distdir) + +dist-shar: distdir + shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz + $(am__remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__remove_distdir) + +dist dist-all: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. 
+distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lzma*) \ + lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir); chmod a+w $(distdir) + mkdir $(distdir)/_build + mkdir $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build \ + && ../configure --srcdir=.. --prefix="$$dc_install_base" \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . 
; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile $(DATA) config.h +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(pkgconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-hdr \ + distclean-libtool distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-pkgconfigDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-pkgconfigDATA + +.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \ + ctags-recursive install-am install-strip tags-recursive + +.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ + all all-am am--refresh check check-am clean clean-generic \ + clean-libtool ctags ctags-recursive dist dist-all dist-bzip2 \ + dist-gzip dist-lzip dist-lzma dist-shar dist-tarZ dist-xz \ + dist-zip distcheck distclean distclean-generic distclean-hdr \ + distclean-libtool distclean-tags distcleancheck distdir \ + distuninstallcheck dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkgconfigDATA install-ps \ + install-ps-am 
install-strip installcheck installcheck-am \ + installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ + uninstall uninstall-am uninstall-pkgconfigDATA + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/NEWS b/NEWS index b5da4ec..aba9f69 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,243 @@ +Changes in libsoup from 2.37.91 to 2.37.92: + + * Added soup_session_prefetch_dns() and deprecated + soup_session_prepare_for_uri(). The new method takes a + completion callback, allowing you to know how many DNS + resolutions are currently outstanding, so you don't spam the + resolver. [Sergio, WebKit bug #41630] + +Changes in libsoup from 2.37.90 to 2.37.91: + + * Fixed an out-of-bounds memory read that could occur when + parsing malformed requests/responses. + + * Fixed a build-related bug in Makefile.glib that affected + locales such as Estonian where "Z" is not the last (ASCII) + letter of the alphabet. [#654395, Priit Laes] + + * Fixed the handling of file: URIs with query components, + which got broken in 2.37.90. + + * Fixed SoupAuthManagerNTLM to not fall back to allowing Basic + auth when NTLM failed. + + * Further tweaked the warnings/fallback in SoupURI, so that + soup_uri_to_string() on an http URI with a NULL path would + translate that to "/" like it used to. [#670431] + + * Fixed a warning when cancelling the load of a page with many + subresources. [#667245] + + * Use G_GNUC_BEGIN/END_IGNORE_DEPRECATIONS if GLib is new + enough, to avoid getting warned about the use of + GValueArray. + +Changes in libsoup from 2.37.5 to 2.37.90: + + * libsoup is now beta-testing Makefile.glib from bug 654395. + If 2.37.90 fails to build in some strange way that 2.37.5 + did not, this is probably why. 
+ + * Replaced some of the SoupURI g_return_if_fail()s that were + added in 2.37.5 with g_warn_if_fail()s. Although it had + always been documented that SoupURIs must have a non-NULL + path, most SoupURI functions treated NULL the same as "", + and various apps (eg, rhythmbox, midori) were accidentally + relying on this. [Dan] + + * Added various return-if-fails and other sanity checks to + various functions. [#669479, Robert Swain, Mark Nauwelaerts, + Simon McVittie] + + * Updated docs/annotation of soup_form_decode_multipart() to + note that all of the out parameters are (allow-none). Fixed + the file_control_name parameter to actually allow NULL like + the docs already claimed. [#669479, Simon McVittie] + + * Fixed a minor URI parsing bug. (It was allowing URI schemes + to contain numbers.) [Dan] + + * Fixed a few memory leaks introduced in the 2.37 cycle. [Dan] + + * Fixed SoupServer to be able to correctly respond to HTTP/1.0 + requests over IPv6 (which previously would always have + returned "400 Bad Request"). [#666399, Dan] + + * Changed SoupSessionAsync to make it possible to finalize it + from the "wrong" thread. [#667364, Dan]. + + * Fixed SoupCache to not cache resources whose URIs have query + components (unless they have explicit cache headers); to not + generate broken conditional requests for resources that + cannot be conditionally validated; and to not spew warnings + when receiving invalid Cache-Control headers. [#668865, + Sergio]. + +Changes in libsoup from 2.37.4 to 2.37.5: + + * Fixed a bug in SoupSession:use-thread-context [Dan] + + * Fixed the case of cancelling a message from + SoupSession::request-started [#668098, Raphael Kubo da + Costa] + + * Fixed a crash in epiphany when loading a page with more than + 1000 or so images. [#668508, Dan] + + * Fixed a bunch of cases involving invalid URLs found while + testing SoupServer against an HTTP protocol fuzzer. Also + fixed up some documentation/annotations and added some new + test cases. 
[#667637, Simon McVittie] + + * Fixed SoupRequestFile to work on Windows. [Paweł Forysiuk] + +Changes in libsoup from 2.37.3 to 2.37.4: + + * SoupMessage now has a "network-event" signal that can be + monitored for information about DNS, proxy lookup, TCP + connections, and TLS handshakes. [Dan] + + * The HTTP header parsing code now avoids hitting + g_return_if_fails() (and returns an error instead) in a few + cases of seriously-invalid headers. [#666316, Simon + McVittie, Dan] + + * POSTs and other non-idempotent requests are now always sent + on newly-created connections. You can also force this + behavior on other messages by setting the + SOUP_MESSAGE_NEW_CONNECTION flag. [#578990, Dan] + + * Server-closed idle connections are now detected ahead of + time on Windows, preventing occasional spurious "Connection + terminated unexpectedly" errors. (This had been fixed on + UNIX since 2.28, but the earlier fix didn't work with + WinSock.) [#578990, Dan] + + * Plugged a leak in SoupRequestHTTP. [#667099, Xan] + +Changes in libsoup from 2.37.2 to 2.37.3: + + * Now requires glib 2.31 + + * Simplified SoupHTTPInputStream and SoupRequestHTTP, allowing + related simplifications in WebKit's ResourceHandleSoup. This + is an ABI-incompatible change, but SoupHTTPInputStream is an + unstable API. [#663451, Dan] + + * Fixed a bug that caused the SOUP_MESSAGE_CERTIFICATE_TRUSTED + flag to always be cleared, causing epiphany to claim all + https pages were untrusted. [#665182, Dan] + + * Fixed some bugs in the handling of SoupSession:http-aliases + and SoupSession:https-aliases. + + * Fixed handling of ACLOCAL_FLAGS [#641470, Craig Keogh] + + * Fixed SoupContentDecoder's "ignore Content-Encoding: gzip + because the server didn't actually mean it" hack to handle + x-gzip too. 
+ + * Clarified the documentation on SoupSession:ssl-strict + [#666280] + +Changes in libsoup from 2.37.1 to 2.37.2: + + * Fixed up the output of SoupDirectoryInputStream, thus + improving the display of local directories in WebKit-based + browsers. [#662266, Sergio] + + * Fixed a bug introduced in 2.37.1 that caused some cancelled + SoupMessages to be leaked. [#662847, Sergio] + + * Added new SoupSession properties "http-aliases" and + "https-aliases" that allow you to configure more explicitly + what URL protocols are treated as aliases for http (eg, + "dav:", "webcal:", etc), and which should be recognized as + meaning something else, (eg, "ftp:") [Dan] + + * Added soup_session_would_redirect() and + soup_session_redirect_message(), to help users that want to + handle some or all redirects themselves. Added + soup_message_set_redirect() to make it easier to return + redirection responses from a SoupServer. [Dan] + + * Added the SoupSession "use-thread-context" property, which + tells it to use GMainContexts in a gio-compliant way (and in + particular, allows having different messages running in + different GMainContexts on the same SoupSession, though only + to a limited extent since SoupSessionAsync is still not + thread-safe). In particular, this was added in order to + address WebKit bug 68238. [Dan] + + * Made SoupURI %-encode non-ASCII characters when parsing + URIs, in particular to fix a problem with certain servers + sending syntactically invalid redirects that they would then + only interpret correctly if you fixed the syntax for them. + (@$!@#! Although the new code is probably more correct than + the old code anyway, so...) [#662806, Dan] + + * Fixed a connection-handling bug that could cause problems + with servers that requested authentication and then timed + out the connection while the application was waiting for the + user to enter a password. 
[#660057, Dan] + + * Made NTLM and Basic authentication handle some non-ASCII + usernames and passwords. (NTLM should handle most. It's + impossible to fix Basic in the general case.) [#576838, + Joachim Breitner, "sponsored by ITOMIG GmbH and the City of + Böblingen"] + + * Added support for "deflate" Content-Encoding, so that we can + work with broken servers that insisted on using it even + though we explicitly indicated in the request headers that + we didn't support it. (@$#!#) [#661682, Sergio] + +Changes in libsoup from 2.36.0 to 2.37.1: + + * Fixed a problem with connections being assigned to multiple + requests at once after a redirection [#651146, Sergio]. Made + soup_session_pause/unpause_message() work in any state, not + just while the HTTP I/O is occurring, and used this to add a + test case for that bug. [Dan] + + * Ported SoupSession to use GTlsDatabase internally, and added + two new properties, SoupSession:use-system-ca-file (to + specify that the session should use the default system + tlsdb) and SoupSession:tlsdb (to specify a specific tlsdb to + use). [Dan] + + * Likewise, added SoupServer:tls-certificate, for specifying a + certificate/key for an https server to use. + + * Made SoupHTTPInputStream use memory more efficiently. + [#659255, Sergio] + + * Fixed soup_message_get_https_status() to return information + more reliably (with latest glib-networking). + + * Bumped the glib requirement to 2.30.0; although libsoup does + not use any new glib 2.30 APIs, there are several important + bugfixes in glib between 2.28 and 2.30 that libsoup needs. + Currently libsoup builds against either glib 2.30.x or glib + 2.31.x. + +Changes in libsoup from 2.35.92 to 2.36.0: + + * Improvements to gtk-doc documentation. [Dan] + +Changes in libsoup from 2.35.90 to 2.35.92: + + * Fixed a problem where SoupHTTPRequest response bodies could + be truncated. 
[#659256, Sergio] + + * Fixed a bug in copying TLS information from SoupSocket to + SoupMessage, causing all https connections to be reported as + "untrusted" in epiphany. [Dan] + + * Made SoupSession remove items from its host cache after a + while, so that if a host changes IP address, it will + eventually try to re-resolve it. [#646959, Sergio] + Changes in libsoup from 2.35.5 to 2.35.90: * Added SOUP_MESSAGE_CAN_REBUILD flag, to use with diff --git a/aclocal.m4 b/aclocal.m4 new file mode 100644 index 0000000..b4be406 --- /dev/null +++ b/aclocal.m4 @@ -0,0 +1,1408 @@ +# generated automatically by aclocal 1.11.3 -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, +# Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.68],, +[m4_warning([this file was generated for autoconf 2.68. +You have another version of autoconf. It may work, but is not guaranteed to. +If you have problems, you may need to regenerate the build system entirely. +To do so, use the procedure documented by the package, typically `autoreconf'.])]) + +# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software +# Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# serial 1 + +# AM_AUTOMAKE_VERSION(VERSION) +# ---------------------------- +# Automake X.Y traces this macro to ensure aclocal.m4 has been +# generated from the m4 files accompanying Automake X.Y. +# (This private macro should not be called outside this file.) +AC_DEFUN([AM_AUTOMAKE_VERSION], +[am__api_version='1.11' +dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to +dnl require some minimum version. Point them to the right macro. +m4_if([$1], [1.11.3], [], + [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl +]) + +# _AM_AUTOCONF_VERSION(VERSION) +# ----------------------------- +# aclocal traces this macro to find the Autoconf version. +# This is a private macro too. Using m4_define simplifies +# the logic in aclocal, which can simply ignore this definition. +m4_define([_AM_AUTOCONF_VERSION], []) + +# AM_SET_CURRENT_AUTOMAKE_VERSION +# ------------------------------- +# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. +# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. +AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], +[AM_AUTOMAKE_VERSION([1.11.3])dnl +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) + +# AM_AUX_DIR_EXPAND -*- Autoconf -*- + +# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to +# `$srcdir', `$srcdir/..', or `$srcdir/../..'. +# +# Of course, Automake must honor this variable whenever it calls a +# tool from the auxiliary directory. 
The problem is that $srcdir (and +# therefore $ac_aux_dir as well) can be either absolute or relative, +# depending on how configure is run. This is pretty annoying, since +# it makes $ac_aux_dir quite unusable in subdirectories: in the top +# source directory, any form will work fine, but in subdirectories a +# relative path needs to be adjusted first. +# +# $ac_aux_dir/missing +# fails when called from a subdirectory if $ac_aux_dir is relative +# $top_srcdir/$ac_aux_dir/missing +# fails if $ac_aux_dir is absolute, +# fails when called from a subdirectory in a VPATH build with +# a relative $ac_aux_dir +# +# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +# are both prefixed by $srcdir. In an in-source build this is usually +# harmless because $srcdir is `.', but things will broke when you +# start a VPATH build or use an absolute $srcdir. +# +# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +# and then we would define $MISSING as +# MISSING="\${SHELL} $am_aux_dir/missing" +# This will work as long as MISSING is not called from configure, because +# unfortunately $(top_srcdir) has no meaning in configure. +# However there are other variables, like CC, which are often used in +# configure, and could therefore not use this "fixed" $ac_aux_dir. +# +# Another solution, used here, is to always expand $ac_aux_dir to an +# absolute PATH. The drawback is that using absolute paths prevent a +# configured tree to be moved without reconfiguration. + +AC_DEFUN([AM_AUX_DIR_EXPAND], +[dnl Rely on autoconf to set up CDPATH properly. +AC_PREREQ([2.50])dnl +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` +]) + +# AM_CONDITIONAL -*- Autoconf -*- + +# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008 +# Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 9 + +# AM_CONDITIONAL(NAME, SHELL-CONDITION) +# ------------------------------------- +# Define a conditional. +AC_DEFUN([AM_CONDITIONAL], +[AC_PREREQ(2.52)dnl + ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], + [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +AC_SUBST([$1_TRUE])dnl +AC_SUBST([$1_FALSE])dnl +_AM_SUBST_NOTMAKE([$1_TRUE])dnl +_AM_SUBST_NOTMAKE([$1_FALSE])dnl +m4_define([_AM_COND_VALUE_$1], [$2])dnl +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi +AC_CONFIG_COMMANDS_PRE( +[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then + AC_MSG_ERROR([[conditional "$1" was never defined. +Usually this means the macro was only invoked conditionally.]]) +fi])]) + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009, +# 2010, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 12 + +# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be +# written in clear, in which case automake, when reading aclocal.m4, +# will think it sees a *use*, and therefore will trigger all it's +# C support machinery. Also note that it means that autoscan, seeing +# CC etc. in the Makefile, will ask for an AC_PROG_CC use... + + +# _AM_DEPENDENCIES(NAME) +# ---------------------- +# See how the compiler implements dependency checking. +# NAME is "CC", "CXX", "GCJ", or "OBJC". +# We try a few techniques and use that to set a single cache variable. 
+# +# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +# dependency, and given that the user is not expected to run this macro, +# just rely on AC_PROG_CC. +AC_DEFUN([_AM_DEPENDENCIES], +[AC_REQUIRE([AM_SET_DEPDIR])dnl +AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +AC_REQUIRE([AM_MAKE_INCLUDE])dnl +AC_REQUIRE([AM_DEP_TRACK])dnl + +ifelse([$1], CC, [depcc="$CC" am_compiler_list=], + [$1], CXX, [depcc="$CXX" am_compiler_list=], + [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'], + [$1], UPC, [depcc="$UPC" am_compiler_list=], + [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'], + [depcc="$$1" am_compiler_list=]) + +AC_CACHE_CHECK([dependency style of $depcc], + [am_cv_$1_dependencies_compiler_type], +[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. 
+ mkdir sub + + am_cv_$1_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` + fi + am__universal=false + m4_case([$1], [CC], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac], + [CXX], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac]) + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok `-c -o', but also, the minuso test has + # not run yet. 
These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_$1_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_$1_dependencies_compiler_type=none +fi +]) +AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +AM_CONDITIONAL([am__fastdep$1], [ + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +]) + + +# AM_SET_DEPDIR +# ------------- +# Choose a directory name for dependency files. 
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES +AC_DEFUN([AM_SET_DEPDIR], +[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +]) + + +# AM_DEP_TRACK +# ------------ +AC_DEFUN([AM_DEP_TRACK], +[AC_ARG_ENABLE(dependency-tracking, +[ --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors]) +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi +AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +AC_SUBST([AMDEPBACKSLASH])dnl +_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl +AC_SUBST([am__nodep])dnl +_AM_SUBST_NOTMAKE([am__nodep])dnl +]) + +# Generate code to set up dependency tracking. -*- Autoconf -*- + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +#serial 5 + +# _AM_OUTPUT_DEPENDENCY_COMMANDS +# ------------------------------ +AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +[{ + # Autoconf 2.62 quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. 
+ # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`AS_DIRNAME("$mf")` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`AS_DIRNAME(["$file"])` + AS_MKDIR_P([$dirpart/$fdir]) + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} +])# _AM_OUTPUT_DEPENDENCY_COMMANDS + + +# AM_OUTPUT_DEPENDENCY_COMMANDS +# ----------------------------- +# This macro should only be invoked once -- use via AC_REQUIRE. +# +# This code is only required when automatic dependency tracking +# is enabled. FIXME. This creates each `.P' file that we will +# need in order to bootstrap the dependency handling code. +AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +[AC_CONFIG_COMMANDS([depfiles], + [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], + [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) +]) + +# Do all the work for Automake. 
-*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005, 2006, 2008, 2009 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 16 + +# This macro actually does too much. Some checks are only needed if +# your package does certain things. But this isn't really a big deal. + +# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +# AM_INIT_AUTOMAKE([OPTIONS]) +# ----------------------------------------------- +# The call with PACKAGE and VERSION arguments is the old style +# call (pre autoconf-2.50), which is being phased out. PACKAGE +# and VERSION should now be passed to AC_INIT and removed from +# the call to AM_INIT_AUTOMAKE. +# We support both call styles for the transition. After +# the next Automake release, Autoconf can make the AC_INIT +# arguments mandatory, and then we can depend on a new Autoconf +# release and drop the old call support. +AC_DEFUN([AM_INIT_AUTOMAKE], +[AC_PREREQ([2.62])dnl +dnl Autoconf wants to disallow AM_ names. We explicitly allow +dnl the ones we care about. +m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +AC_REQUIRE([AC_PROG_INSTALL])dnl +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." 
+ AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi +AC_SUBST([CYGPATH_W]) + +# Define the identity of the package. +dnl Distinguish between old-style and new-style calls. +m4_ifval([$2], +[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + AC_SUBST([PACKAGE], [$1])dnl + AC_SUBST([VERSION], [$2])], +[_AM_SET_OPTIONS([$1])dnl +dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. +m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,, + [m4_fatal([AC_INIT should be called with package and version arguments])])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl + AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl + +_AM_IF_OPTION([no-define],, +[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) + AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl + +# Some tools Automake needs. +AC_REQUIRE([AM_SANITY_CHECK])dnl +AC_REQUIRE([AC_ARG_PROGRAM])dnl +AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) +AM_MISSING_PROG(AUTOCONF, autoconf) +AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) +AM_MISSING_PROG(AUTOHEADER, autoheader) +AM_MISSING_PROG(MAKEINFO, makeinfo) +AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl +AC_REQUIRE([AM_PROG_MKDIR_P])dnl +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. 
+AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([AC_PROG_MAKE_SET])dnl +AC_REQUIRE([AM_SET_LEADING_DOT])dnl +_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], + [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], + [_AM_PROG_TAR([v7])])]) +_AM_IF_OPTION([no-dependencies],, +[AC_PROVIDE_IFELSE([AC_PROG_CC], + [_AM_DEPENDENCIES(CC)], + [define([AC_PROG_CC], + defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl +AC_PROVIDE_IFELSE([AC_PROG_CXX], + [_AM_DEPENDENCIES(CXX)], + [define([AC_PROG_CXX], + defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJC], + [_AM_DEPENDENCIES(OBJC)], + [define([AC_PROG_OBJC], + defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl +]) +_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl +dnl The `parallel-tests' driver may need to know about EXEEXT, so add the +dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro +dnl is hooked onto _AC_COMPILER_EXEEXT early, see below. +AC_CONFIG_COMMANDS_PRE(dnl +[m4_provide_if([_AM_COMPILER_EXEEXT], + [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl +]) + +dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not +dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further +dnl mangled by Autoconf and run in a shell conditional statement. +m4_define([_AC_COMPILER_EXEEXT], +m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) + + +# When config.status generates a header, we must update the stamp-h file. +# This file resides in the same directory as the config header +# that is generated. The stamp files are numbered to have different names. + +# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +# loop where config.status creates the headers, so we can generate +# our stamp files there. +AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +[# Compute $1's index in $config_headers. 
+_am_arg=$1 +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) + +# Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation, +# Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_PROG_INSTALL_SH +# ------------------ +# Define $install_sh. +AC_DEFUN([AM_PROG_INSTALL_SH], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +if test x"${install_sh}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi +AC_SUBST(install_sh)]) + +# Copyright (C) 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# Check whether the underlying file-system supports filenames +# with a leading dot. For instance MS-DOS doesn't. +AC_DEFUN([AM_SET_LEADING_DOT], +[rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null +AC_SUBST([am__leading_dot])]) + +# Check to see how 'make' treats includes. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 4 + +# AM_MAKE_INCLUDE() +# ----------------- +# Check to see how make treats includes. 
+AC_DEFUN([AM_MAKE_INCLUDE], +[am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +AC_MSG_CHECKING([for style of include used by $am_make]) +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from `make'. +case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi +AC_SUBST([am__include]) +AC_SUBST([am__quote]) +AC_MSG_RESULT([$_am_result]) +rm -f confinc confmf +]) + +# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 6 + +# AM_MISSING_PROG(NAME, PROGRAM) +# ------------------------------ +AC_DEFUN([AM_MISSING_PROG], +[AC_REQUIRE([AM_MISSING_HAS_RUN]) +$1=${$1-"${am_missing_run}$2"} +AC_SUBST($1)]) + + +# AM_MISSING_HAS_RUN +# ------------------ +# Define MISSING if not defined so far and test if it supports --run. +# If it does, set am_missing_run to use it, otherwise, to nothing. 
+AC_DEFUN([AM_MISSING_HAS_RUN], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([missing])dnl +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + AC_MSG_WARN([`missing' script is too old or missing]) +fi +]) + +# Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation, +# Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_PROG_MKDIR_P +# --------------- +# Check for `mkdir -p'. +AC_DEFUN([AM_PROG_MKDIR_P], +[AC_PREREQ([2.60])dnl +AC_REQUIRE([AC_PROG_MKDIR_P])dnl +dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, +dnl while keeping a definition of mkdir_p for backward compatibility. +dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. +dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of +dnl Makefile.ins that do not define MKDIR_P, so we do our own +dnl adjustment using top_builddir (which is defined more often than +dnl MKDIR_P). +AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl +case $mkdir_p in + [[\\/$]]* | ?:[[\\/]]*) ;; + */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; +esac +]) + +# Helper functions for option handling. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software +# Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# serial 5 + +# _AM_MANGLE_OPTION(NAME) +# ----------------------- +AC_DEFUN([_AM_MANGLE_OPTION], +[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) + +# _AM_SET_OPTION(NAME) +# -------------------- +# Set option NAME. Presently that only means defining a flag for this option. +AC_DEFUN([_AM_SET_OPTION], +[m4_define(_AM_MANGLE_OPTION([$1]), 1)]) + +# _AM_SET_OPTIONS(OPTIONS) +# ------------------------ +# OPTIONS is a space-separated list of Automake options. +AC_DEFUN([_AM_SET_OPTIONS], +[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) + +# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +# ------------------------------------------- +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +AC_DEFUN([_AM_IF_OPTION], +[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +# Check to make sure that the build environment is sane. -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 5 + +# AM_SANITY_CHECK +# --------------- +AC_DEFUN([AM_SANITY_CHECK], +[AC_MSG_CHECKING([whether build environment is sane]) +# Just in case +sleep 1 +echo timestamp > conftest.file +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[[\\\"\#\$\&\'\`$am_lf]]*) + AC_MSG_ERROR([unsafe absolute working directory name]);; +esac +case $srcdir in + *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) + AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);; +esac + +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). 
+if ( + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$[*]" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + rm -f conftest.file + if test "$[*]" != "X $srcdir/configure conftest.file" \ + && test "$[*]" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken +alias in your environment]) + fi + + test "$[2]" = conftest.file + ) +then + # Ok. + : +else + AC_MSG_ERROR([newly created file is older than distributed files! +Check your system clock]) +fi +AC_MSG_RESULT(yes)]) + +# Copyright (C) 2009, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# AM_SILENT_RULES([DEFAULT]) +# -------------------------- +# Enable less verbose build rules; with the default set to DEFAULT +# (`yes' being less verbose, `no' or empty being verbose). +AC_DEFUN([AM_SILENT_RULES], +[AC_ARG_ENABLE([silent-rules], +[ --enable-silent-rules less verbose build output (undo: `make V=1') + --disable-silent-rules verbose build output (undo: `make V=0')]) +case $enable_silent_rules in +yes) AM_DEFAULT_VERBOSITY=0;; +no) AM_DEFAULT_VERBOSITY=1;; +*) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; +esac +dnl +dnl A few `make' implementations (e.g., NonStop OS and NextStep) +dnl do not support nested variable expansions. +dnl See automake bug#9928 and bug#10237. 
+am_make=${MAKE-make} +AC_CACHE_CHECK([whether $am_make supports nested variables], + [am_cv_make_support_nested_variables], + [if AS_ECHO([['TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi]) +if test $am_cv_make_support_nested_variables = yes; then + dnl Using `$V' instead of `$(V)' breaks IRIX make. + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AC_SUBST([AM_V])dnl +AM_SUBST_NOTMAKE([AM_V])dnl +AC_SUBST([AM_DEFAULT_V])dnl +AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl +AC_SUBST([AM_DEFAULT_VERBOSITY])dnl +AM_BACKSLASH='\' +AC_SUBST([AM_BACKSLASH])dnl +_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl +]) + +# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_PROG_INSTALL_STRIP +# --------------------- +# One issue with vendor `install' (even GNU) is that you can't +# specify the program used to strip binaries. This is especially +# annoying in cross-compiling environments, where the build's strip +# is unlikely to handle the host's binaries. +# Fortunately install-sh will honor a STRIPPROG variable, so we +# always use install-sh in `make install-strip', and initialize +# STRIPPROG with the value of the STRIP variable (set by the user). +AC_DEFUN([AM_PROG_INSTALL_STRIP], +[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. 
+dnl Don't test for $cross_compiling = yes, because it might be `maybe'. +if test "$cross_compiling" != no; then + AC_CHECK_TOOL([STRIP], [strip], :) +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" +AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +# Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 3 + +# _AM_SUBST_NOTMAKE(VARIABLE) +# --------------------------- +# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. +# This macro is traced by Automake. +AC_DEFUN([_AM_SUBST_NOTMAKE]) + +# AM_SUBST_NOTMAKE(VARIABLE) +# -------------------------- +# Public sister of _AM_SUBST_NOTMAKE. +AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) + +# Check how to create a tarball. -*- Autoconf -*- + +# Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# _AM_PROG_TAR(FORMAT) +# -------------------- +# Check how to create a tarball in format FORMAT. +# FORMAT should be one of `v7', `ustar', or `pax'. +# +# Substitute a variable $(am__tar) that is a command +# writing to stdout a FORMAT-tarball containing the directory +# $tardir. +# tardir=directory && $(am__tar) > result.tar +# +# Substitute a variable $(am__untar) that extract such +# a tarball read from stdin. +# $(am__untar) < result.tar +AC_DEFUN([_AM_PROG_TAR], +[# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... 
+AC_SUBST([AMTAR], ['$${TAR-tar}']) +m4_if([$1], [v7], + [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], + [m4_case([$1], [ustar],, [pax],, + [m4_fatal([Unknown tar format])]) +AC_MSG_CHECKING([how to create a $1 tar archive]) +# Loop over all known methods to create a tar archive until one works. +_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' +_am_tools=${am_cv_prog_tar_$1-$_am_tools} +# Do not fold the above two line into one, because Tru64 sh and +# Solaris sh will not grok spaces in the rhs of `-'. +for _am_tool in $_am_tools +do + case $_am_tool in + gnutar) + for _am_tar in tar gnutar gtar; + do + AM_RUN_LOG([$_am_tar --version]) && break + done + am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' + am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' + am__untar="$_am_tar -xf -" + ;; + plaintar) + # Must skip GNU tar: if it does not support --format= it doesn't create + # ustar tarball either. + (tar --version) >/dev/null 2>&1 && continue + am__tar='tar chf - "$$tardir"' + am__tar_='tar chf - "$tardir"' + am__untar='tar xf -' + ;; + pax) + am__tar='pax -L -x $1 -w "$$tardir"' + am__tar_='pax -L -x $1 -w "$tardir"' + am__untar='pax -r' + ;; + cpio) + am__tar='find "$$tardir" -print | cpio -o -H $1 -L' + am__tar_='find "$tardir" -print | cpio -o -H $1 -L' + am__untar='cpio -i -H $1 -d' + ;; + none) + am__tar=false + am__tar_=false + am__untar=false + ;; + esac + + # If the value was cached, stop now. We just wanted to have am__tar + # and am__untar set. 
+ test -n "${am_cv_prog_tar_$1}" && break + + # tar/untar a dummy directory, and stop if the command works + rm -rf conftest.dir + mkdir conftest.dir + echo GrepMe > conftest.dir/file + AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) + rm -rf conftest.dir + if test -s conftest.tar; then + AM_RUN_LOG([$am__untar /dev/null 2>&1 && break + fi +done +rm -rf conftest.dir + +AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) +AC_MSG_RESULT([$am_cv_prog_tar_$1])]) +AC_SUBST([am__tar]) +AC_SUBST([am__untar]) +]) # _AM_PROG_TAR + +# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- +# serial 1 (pkg-config-0.24) +# +# Copyright © 2004 Scott James Remnant . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. 
+ +# PKG_PROG_PKG_CONFIG([MIN-VERSION]) +# ---------------------------------- +AC_DEFUN([PKG_PROG_PKG_CONFIG], +[m4_pattern_forbid([^_?PKG_[A-Z_]+$]) +m4_pattern_allow([^PKG_CONFIG(_PATH)?$]) +AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) +AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) +AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) + +if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then + AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) +fi +if test -n "$PKG_CONFIG"; then + _pkg_min_version=m4_default([$1], [0.9.0]) + AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) + if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + PKG_CONFIG="" + fi +fi[]dnl +])# PKG_PROG_PKG_CONFIG + +# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# +# Check to see whether a particular set of modules exists. Similar +# to PKG_CHECK_MODULES(), but does not set variables or print errors. 
+# +# Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) +# only at the first occurence in configure.ac, so if the first place +# it's called might be skipped (such as if it is within an "if", you +# have to call PKG_CHECK_EXISTS manually +# -------------------------------------------------------------- +AC_DEFUN([PKG_CHECK_EXISTS], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl +if test -n "$PKG_CONFIG" && \ + AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then + m4_default([$2], [:]) +m4_ifvaln([$3], [else + $3])dnl +fi]) + +# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) +# --------------------------------------------- +m4_define([_PKG_CONFIG], +[if test -n "$$1"; then + pkg_cv_[]$1="$$1" + elif test -n "$PKG_CONFIG"; then + PKG_CHECK_EXISTS([$3], + [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`], + [pkg_failed=yes]) + else + pkg_failed=untried +fi[]dnl +])# _PKG_CONFIG + +# _PKG_SHORT_ERRORS_SUPPORTED +# ----------------------------- +AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG]) +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi[]dnl +])# _PKG_SHORT_ERRORS_SUPPORTED + + +# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], +# [ACTION-IF-NOT-FOUND]) +# +# +# Note that if there is a possibility the first call to +# PKG_CHECK_MODULES might not happen, you should be sure to include an +# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac +# +# +# -------------------------------------------------------------- +AC_DEFUN([PKG_CHECK_MODULES], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl +AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl +AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl + +pkg_failed=no +AC_MSG_CHECKING([for $1]) + +_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) +_PKG_CONFIG([$1][_LIBS], [libs], [$2]) + +m4_define([_PKG_TEXT], [Alternatively, you may set the 
environment variables $1[]_CFLAGS +and $1[]_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details.]) + +if test $pkg_failed = yes; then + AC_MSG_RESULT([no]) + _PKG_SHORT_ERRORS_SUPPORTED + if test $_pkg_short_errors_supported = yes; then + $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$2" 2>&1` + else + $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors "$2" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD + + m4_default([$4], [AC_MSG_ERROR( +[Package requirements ($2) were not met: + +$$1_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +_PKG_TEXT]) + ]) +elif test $pkg_failed = untried; then + AC_MSG_RESULT([no]) + m4_default([$4], [AC_MSG_FAILURE( +[The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +_PKG_TEXT + +To get pkg-config, see .]) + ]) +else + $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS + $1[]_LIBS=$pkg_cv_[]$1[]_LIBS + AC_MSG_RESULT([yes]) + $3 +fi[]dnl +])# PKG_CHECK_MODULES + +# Configure paths for GLIB +# Owen Taylor 1997-2001 + +dnl AM_PATH_GLIB_2_0([MINIMUM-VERSION, [ACTION-IF-FOUND [, ACTION-IF-NOT-FOUND [, MODULES]]]]) +dnl Test for GLIB, and define GLIB_CFLAGS and GLIB_LIBS, if gmodule, gobject, +dnl gthread, or gio is specified in MODULES, pass to pkg-config +dnl +AC_DEFUN([AM_PATH_GLIB_2_0], +[dnl +dnl Get the cflags and libraries from pkg-config +dnl +AC_ARG_ENABLE(glibtest, [ --disable-glibtest do not try to compile and run a test GLIB program], + , enable_glibtest=yes) + + pkg_config_args=glib-2.0 + for module in . 
$4 + do + case "$module" in + gmodule) + pkg_config_args="$pkg_config_args gmodule-2.0" + ;; + gmodule-no-export) + pkg_config_args="$pkg_config_args gmodule-no-export-2.0" + ;; + gobject) + pkg_config_args="$pkg_config_args gobject-2.0" + ;; + gthread) + pkg_config_args="$pkg_config_args gthread-2.0" + ;; + gio*) + pkg_config_args="$pkg_config_args $module-2.0" + ;; + esac + done + + PKG_PROG_PKG_CONFIG([0.16]) + + no_glib="" + + if test "x$PKG_CONFIG" = x ; then + no_glib=yes + PKG_CONFIG=no + fi + + min_glib_version=ifelse([$1], ,2.0.0,$1) + AC_MSG_CHECKING(for GLIB - version >= $min_glib_version) + + if test x$PKG_CONFIG != xno ; then + ## don't try to run the test against uninstalled libtool libs + if $PKG_CONFIG --uninstalled $pkg_config_args; then + echo "Will use uninstalled version of GLib found in PKG_CONFIG_PATH" + enable_glibtest=no + fi + + if $PKG_CONFIG --atleast-version $min_glib_version $pkg_config_args; then + : + else + no_glib=yes + fi + fi + + if test x"$no_glib" = x ; then + GLIB_GENMARSHAL=`$PKG_CONFIG --variable=glib_genmarshal glib-2.0` + GOBJECT_QUERY=`$PKG_CONFIG --variable=gobject_query glib-2.0` + GLIB_MKENUMS=`$PKG_CONFIG --variable=glib_mkenums glib-2.0` + GLIB_COMPILE_RESOURCES=`$PKG_CONFIG --variable=glib_compile_resources gio-2.0` + + GLIB_CFLAGS=`$PKG_CONFIG --cflags $pkg_config_args` + GLIB_LIBS=`$PKG_CONFIG --libs $pkg_config_args` + glib_config_major_version=`$PKG_CONFIG --modversion glib-2.0 | \ + sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[0-9]]*\)/\1/'` + glib_config_minor_version=`$PKG_CONFIG --modversion glib-2.0 | \ + sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[0-9]]*\)/\2/'` + glib_config_micro_version=`$PKG_CONFIG --modversion glib-2.0 | \ + sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[0-9]]*\)/\3/'` + if test "x$enable_glibtest" = "xyes" ; then + ac_save_CFLAGS="$CFLAGS" + ac_save_LIBS="$LIBS" + CFLAGS="$CFLAGS $GLIB_CFLAGS" + LIBS="$GLIB_LIBS $LIBS" +dnl +dnl Now check if the installed GLIB is sufficiently new. 
(Also sanity +dnl checks the results of pkg-config to some extent) +dnl + rm -f conf.glibtest + AC_TRY_RUN([ +#include +#include +#include + +int +main () +{ + unsigned int major, minor, micro; + char *tmp_version; + + fclose (fopen ("conf.glibtest", "w")); + + /* HP/UX 9 (%@#!) writes to sscanf strings */ + tmp_version = g_strdup("$min_glib_version"); + if (sscanf(tmp_version, "%u.%u.%u", &major, &minor, µ) != 3) { + printf("%s, bad version string\n", "$min_glib_version"); + exit(1); + } + + if ((glib_major_version != $glib_config_major_version) || + (glib_minor_version != $glib_config_minor_version) || + (glib_micro_version != $glib_config_micro_version)) + { + printf("\n*** 'pkg-config --modversion glib-2.0' returned %d.%d.%d, but GLIB (%d.%d.%d)\n", + $glib_config_major_version, $glib_config_minor_version, $glib_config_micro_version, + glib_major_version, glib_minor_version, glib_micro_version); + printf ("*** was found! If pkg-config was correct, then it is best\n"); + printf ("*** to remove the old version of GLib. You may also be able to fix the error\n"); + printf("*** by modifying your LD_LIBRARY_PATH enviroment variable, or by editing\n"); + printf("*** /etc/ld.so.conf. 
Make sure you have run ldconfig if that is\n"); + printf("*** required on your system.\n"); + printf("*** If pkg-config was wrong, set the environment variable PKG_CONFIG_PATH\n"); + printf("*** to point to the correct configuration files\n"); + } + else if ((glib_major_version != GLIB_MAJOR_VERSION) || + (glib_minor_version != GLIB_MINOR_VERSION) || + (glib_micro_version != GLIB_MICRO_VERSION)) + { + printf("*** GLIB header files (version %d.%d.%d) do not match\n", + GLIB_MAJOR_VERSION, GLIB_MINOR_VERSION, GLIB_MICRO_VERSION); + printf("*** library (version %d.%d.%d)\n", + glib_major_version, glib_minor_version, glib_micro_version); + } + else + { + if ((glib_major_version > major) || + ((glib_major_version == major) && (glib_minor_version > minor)) || + ((glib_major_version == major) && (glib_minor_version == minor) && (glib_micro_version >= micro))) + { + return 0; + } + else + { + printf("\n*** An old version of GLIB (%u.%u.%u) was found.\n", + glib_major_version, glib_minor_version, glib_micro_version); + printf("*** You need a version of GLIB newer than %u.%u.%u. The latest version of\n", + major, minor, micro); + printf("*** GLIB is always available from ftp://ftp.gtk.org.\n"); + printf("***\n"); + printf("*** If you have already installed a sufficiently new version, this error\n"); + printf("*** probably means that the wrong copy of the pkg-config shell script is\n"); + printf("*** being found. The easiest way to fix this is to remove the old version\n"); + printf("*** of GLIB, but you can also set the PKG_CONFIG environment to point to the\n"); + printf("*** correct copy of pkg-config. (In this case, you will have to\n"); + printf("*** modify your LD_LIBRARY_PATH enviroment variable, or edit /etc/ld.so.conf\n"); + printf("*** so that the correct libraries are found at run-time))\n"); + } + } + return 1; +} +],, no_glib=yes,[echo $ac_n "cross compiling; assumed OK... 
$ac_c"]) + CFLAGS="$ac_save_CFLAGS" + LIBS="$ac_save_LIBS" + fi + fi + if test "x$no_glib" = x ; then + AC_MSG_RESULT(yes (version $glib_config_major_version.$glib_config_minor_version.$glib_config_micro_version)) + ifelse([$2], , :, [$2]) + else + AC_MSG_RESULT(no) + if test "$PKG_CONFIG" = "no" ; then + echo "*** A new enough version of pkg-config was not found." + echo "*** See http://www.freedesktop.org/software/pkgconfig/" + else + if test -f conf.glibtest ; then + : + else + echo "*** Could not run GLIB test program, checking why..." + ac_save_CFLAGS="$CFLAGS" + ac_save_LIBS="$LIBS" + CFLAGS="$CFLAGS $GLIB_CFLAGS" + LIBS="$LIBS $GLIB_LIBS" + AC_TRY_LINK([ +#include +#include +], [ return ((glib_major_version) || (glib_minor_version) || (glib_micro_version)); ], + [ echo "*** The test program compiled, but did not run. This usually means" + echo "*** that the run-time linker is not finding GLIB or finding the wrong" + echo "*** version of GLIB. If it is not finding GLIB, you'll need to set your" + echo "*** LD_LIBRARY_PATH environment variable, or edit /etc/ld.so.conf to point" + echo "*** to the installed location Also, make sure you have run ldconfig if that" + echo "*** is required on your system" + echo "***" + echo "*** If you have an old version installed, it is best to remove it, although" + echo "*** you may also be able to get things to work by modifying LD_LIBRARY_PATH" ], + [ echo "*** The test program failed to compile or link. See the file config.log for the" + echo "*** exact error that occured. 
This usually means GLIB is incorrectly installed."]) + CFLAGS="$ac_save_CFLAGS" + LIBS="$ac_save_LIBS" + fi + fi + GLIB_CFLAGS="" + GLIB_LIBS="" + GLIB_GENMARSHAL="" + GOBJECT_QUERY="" + GLIB_MKENUMS="" + GLIB_COMPILE_RESOURCES="" + ifelse([$3], , :, [$3]) + fi + AC_SUBST(GLIB_CFLAGS) + AC_SUBST(GLIB_LIBS) + AC_SUBST(GLIB_GENMARSHAL) + AC_SUBST(GOBJECT_QUERY) + AC_SUBST(GLIB_MKENUMS) + AC_SUBST(GLIB_COMPILE_RESOURCES) + rm -f conf.glibtest +]) + +m4_include([m4/gtk-doc.m4]) +m4_include([m4/introspection.m4]) +m4_include([m4/libtool.m4]) +m4_include([m4/ltoptions.m4]) +m4_include([m4/ltsugar.m4]) +m4_include([m4/ltversion.m4]) +m4_include([m4/lt~obsolete.m4]) diff --git a/autogen.sh b/autogen.sh deleted file mode 100755 index 90a6488..0000000 --- a/autogen.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -# Run this to generate all the initial makefiles, etc. -REQUIRED_AUTOMAKE_VERSION=1.9 - -srcdir=`dirname $0` -test -z "$srcdir" && srcdir=. - -PKG_NAME="libsoup" - -(test -f $srcdir/configure.ac \ - && test -f $srcdir/libsoup.doap \ - && test -d $srcdir/libsoup) || { - echo -n "**Error**: Directory "\`$srcdir\'" does not look like the" - echo " top-level $PKG_NAME directory" - exit 1 -} - -which gnome-autogen.sh || { - echo "You need to install gnome-common from the GNOME CVS" - exit 1 -} -USE_GNOME2_MACROS=1 . gnome-autogen.sh diff --git a/config.guess b/config.guess new file mode 100755 index 0000000..49ba16f --- /dev/null +++ b/config.guess @@ -0,0 +1,1522 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012 Free Software Foundation, Inc. + +timestamp='2012-01-01' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA +# 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + + +# Originally written by Per Bothner. Please send patches (context +# diff format) to and include a ChangeLog +# entry. +# +# This script attempts to guess a canonical system name similar to +# config.sub. If it succeeds, it prints the system name on stdout, and +# exits with 0. Otherwise, it exits with 1. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." 
+ +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. 
+ +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. 
This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
+ echo "${machine}-${os}${release}" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. 
+ # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm:riscos:*:*|arm:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH="i386" + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH="x86_64" + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. 
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo 
clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # 
Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo 
m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? ) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. 
GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | 
convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} 
+ exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + i*:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? 
+ echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi + echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-gnu + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-gnueabi + else + echo ${UNAME_MACHINE}-unknown-linux-gnueabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:Linux:*:*) + LIBC=gnu + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` + echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } + ;; + or32:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-gnu + 
exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-gnu + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-gnu ;; + PA8*) echo hppa2.0-unknown-linux-gnu ;; + *) echo hppa-unknown-linux-gnu ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-gnu + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-gnu + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-gnu + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. 
+ echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configury will decide that + # this is a cross-build. 
+ echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + 
TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in + i386) + eval $set_cc_for_build + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + UNAME_PROCESSOR="x86_64" + fi + fi ;; + unknown) UNAME_PROCESSOR=powerpc ;; + esac + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-?:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; +esac + +#echo '(No uname command or uname output not recognized.)' 1>&2 +#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 + +eval $set_cc_for_build +cat >$dummy.c < +# include +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... 
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix\n"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + c34*) + echo c34-convex-bsd + exit ;; + c38*) + echo c38-convex-bsd + exit ;; + c4*) + echo c4-convex-bsd + exit ;; + esac +fi + +cat >&2 < in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/config.h.in b/config.h.in new file mode 100644 index 0000000..5f77dcb --- /dev/null +++ b/config.h.in @@ -0,0 +1,91 @@ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Apache httpd */ +#undef APACHE_HTTPD + +/* Whether or not apache can be used for tests */ +#undef HAVE_APACHE + +/* Whether or not curl can be used for tests */ +#undef HAVE_CURL + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the `gmtime_r' function. */ +#undef HAVE_GMTIME_R + +/* Defined if GNOME support is enabled */ +#undef HAVE_GNOME + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `socket' library (-lsocket). */ +#undef HAVE_LIBSOCKET + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the `mmap' function. */ +#undef HAVE_MMAP + +/* Define to 1 if you have the header file. 
*/ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#undef LT_OBJDIR + +/* Samba's 'winbind' daemon helper 'ntlm_auth' which can be used for NTLM + single-sign-on */ +#undef NTLM_AUTH + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Whether or not use Samba's 'winbind' daemon helper 'ntlm_auth' for NTLM + single-sign-on */ +#undef USE_NTLM_AUTH + +/* Version number of package */ +#undef VERSION diff --git a/config.sub b/config.sub new file mode 100755 index 0000000..d6b6b3c --- /dev/null +++ b/config.sub @@ -0,0 +1,1766 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012 Free Software Foundation, Inc. + +timestamp='2012-01-01' + +# This file is (in principle) common to ALL GNU software. 
+# The presence of a machine in this file suggests that SOME GNU software +# can handle that machine. It does not imply ALL GNU software can. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA +# 02110-1301, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + + +# Please send patches to . Submit a context +# diff and a properly formatted GNU ChangeLog entry. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. 
The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. 
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | \ + kopensolaris*-gnu* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 
's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ + | be32 | be64 \ + | bfin \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | epiphany \ + | fido | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 \ + | ns16k | ns32k \ + | open8 \ + | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pyramid \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + 
basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12 | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. + 580-* \ + | a29k-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | 
mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pyramid-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + 
os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + 
;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze) + basic_machine=microblaze-xilinx + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + 
basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i386-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + 
os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + 
rdos) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + 
tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. 
+ # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. + -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -openbsd* | -solidbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) + # Remember, each alternative MUST END IN *, to match a version number. 
+ ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. 
+# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. 
+ os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/configure b/configure new file mode 100755 index 0000000..06deae4 --- /dev/null +++ b/configure @@ -0,0 +1,15651 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.68 for libsoup 2.37.92. +# +# Report bugs to . +# +# +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software +# Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. 
## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. 
+if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. 
Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. +fi +test x\$exitcode = x0 || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR 
+as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + # We cannot yet assume a decent shell, so we have to provide a + # neutralization value for shells without unset; and this also + # works around shells that cannot unset nonexistent variables. + # Preserve -v and -x to the replacement shell. + BASH_ENV=/dev/null + ENV=/dev/null + (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV + export CONFIG_SHELL + case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; + esac + exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"} +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." 
+ else + $as_echo "$0: Please tell bug-autoconf@gnu.org and +$0: http://bugzilla.gnome.org/enter_bug.cgi?product=libsoup +$0: about your system, including any error possibly output +$0: before this message. Then install a modern shell, or +$0: manually run the script under such a shell if you do +$0: have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. 
+as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in #( + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + +SHELL=${CONFIG_SHELL-/bin/sh} + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='libsoup' +PACKAGE_TARNAME='libsoup' +PACKAGE_VERSION='2.37.92' +PACKAGE_STRING='libsoup 2.37.92' +PACKAGE_BUGREPORT='http://bugzilla.gnome.org/enter_bug.cgi?product=libsoup' +PACKAGE_URL='' + +ac_unique_file="libsoup-2.4.pc.in" +# Factoring default headers for most tests. 
+ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='am__EXEEXT_FALSE +am__EXEEXT_TRUE +LTLIBOBJS +LIBOBJS +ntlm_auth +MISSING_REGRESSION_TEST_PACKAGES_FALSE +MISSING_REGRESSION_TEST_PACKAGES_TRUE +MISSING_REGRESSION_TEST_PACKAGES +HAVE_CURL_FALSE +HAVE_CURL_TRUE +CURL +HAVE_XMLRPC_EPI_PHP_FALSE +HAVE_XMLRPC_EPI_PHP_TRUE +IF_HAVE_PHP +PHP +HAVE_APACHE_FALSE +HAVE_APACHE_TRUE +APACHE_PHP_MODULE +APACHE_PHP_MODULE_DIR +APACHE_SSL_MODULE_DIR +APACHE_MODULE_DIR +APACHE_HTTPD +HAVE_INTROSPECTION_FALSE +HAVE_INTROSPECTION_TRUE +INTROSPECTION_MAKEFILE +INTROSPECTION_LIBS +INTROSPECTION_CFLAGS +INTROSPECTION_TYPELIBDIR +INTROSPECTION_GIRDIR +INTROSPECTION_GENERATE +INTROSPECTION_COMPILER +INTROSPECTION_SCANNER +GTK_DOC_USE_REBASE_FALSE +GTK_DOC_USE_REBASE_TRUE +GTK_DOC_USE_LIBTOOL_FALSE +GTK_DOC_USE_LIBTOOL_TRUE +GTK_DOC_BUILD_PDF_FALSE +GTK_DOC_BUILD_PDF_TRUE +GTK_DOC_BUILD_HTML_FALSE +GTK_DOC_BUILD_HTML_TRUE +ENABLE_GTK_DOC_FALSE +ENABLE_GTK_DOC_TRUE +GTKDOC_DEPS_LIBS +GTKDOC_DEPS_CFLAGS +HTML_DIR +GTKDOC_MKPDF +GTKDOC_REBASE +GTKDOC_CHECK +HAVE_GNOME +SQLITE_LIBS +SQLITE_CFLAGS +BUILD_LIBSOUP_GNOME_FALSE +BUILD_LIBSOUP_GNOME_TRUE +GNOME_KEYRING_LIBS +GNOME_KEYRING_CFLAGS +OS_WIN32_FALSE +OS_WIN32_TRUE +XML_LIBS +XML_CFLAGS +GLIB_MAKEFILE +GLIB_COMPILE_RESOURCES +GLIB_MKENUMS +GOBJECT_QUERY +GLIB_GENMARSHAL +GLIB_LIBS +GLIB_CFLAGS +PKG_CONFIG_LIBDIR +PKG_CONFIG_PATH +PKG_CONFIG +CPP +OTOOL64 +OTOOL +LIPO +NMEDIT +DSYMUTIL +MANIFEST_TOOL +RANLIB +ac_ct_AR +AR +LN_S +NM +ac_ct_DUMPBIN +DUMPBIN +LD +FGREP +EGREP 
+GREP +SED +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +LIBTOOL +OBJDUMP +DLLTOOL +AS +am__fastdepCC_FALSE +am__fastdepCC_TRUE +CCDEPMODE +am__nodep +AMDEPBACKSLASH +AMDEP_FALSE +AMDEP_TRUE +am__quote +am__include +DEPDIR +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +SOUP_MAINTAINER_FLAGS +SOUP_DEBUG_FLAGS +SOUP_AGE +SOUP_REVISION +SOUP_CURRENT +SOUP_API_VERSION +AM_BACKSLASH +AM_DEFAULT_VERBOSITY +AM_DEFAULT_V +AM_V +am__untar +am__tar +AMTAR +am__leading_dot +SET_MAKE +AWK +mkdir_p +MKDIR_P +INSTALL_STRIP_PROGRAM +STRIP +install_sh +MAKEINFO +AUTOHEADER +AUTOMAKE +AUTOCONF +ACLOCAL +VERSION +PACKAGE +CYGPATH_W +am__isrc +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +enable_silent_rules +enable_debug +enable_dependency_tracking +enable_shared +enable_static +with_pic +enable_fast_install +with_gnu_ld +with_sysroot +enable_libtool_lock +enable_glibtest +with_gnome +with_html_dir +enable_gtk_doc +enable_gtk_doc_html +enable_gtk_doc_pdf +enable_introspection +enable_tls_check +with_apache_httpd +with_apache_module_dir +with_ntlm_auth +enable_more_warnings +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CPP +PKG_CONFIG +PKG_CONFIG_PATH +PKG_CONFIG_LIBDIR +XML_CFLAGS +XML_LIBS +GNOME_KEYRING_CFLAGS +GNOME_KEYRING_LIBS +SQLITE_CFLAGS +SQLITE_LIBS +GTKDOC_DEPS_CFLAGS +GTKDOC_DEPS_LIBS' + + +# Initialize some variables set by options. 
+ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. 
+ + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. + with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | 
--program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. 
+ with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. 
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used" >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. 
+ ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures libsoup 2.37.92 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. 
+ +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/libsoup] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +Program names: + --program-prefix=PREFIX prepend PREFIX to installed program names + --program-suffix=SUFFIX append SUFFIX to installed program names + --program-transform-name=PROGRAM run sed PROGRAM on installed program names + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of libsoup 2.37.92:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-silent-rules less verbose build output (undo: `make V=1') + --disable-silent-rules verbose 
build output (undo: `make V=0') + --enable-debug=no/minimum/yes + turn on debugging (default=$debug_default) + --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors + --enable-shared[=PKGS] build shared libraries [default=yes] + --enable-static[=PKGS] build static libraries [default=yes] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + --disable-glibtest do not try to compile and run a test GLIB program + --enable-gtk-doc use gtk-doc to build documentation [[default=no]] + --enable-gtk-doc-html build documentation in html format [[default=yes]] + --enable-gtk-doc-pdf build documentation in pdf format [[default=no]] + --enable-introspection=[no/auto/yes] + Enable introspection for this build + --disable-tls-check Don't error out if glib-networking is unavailable + --disable-more-warnings Inhibit compiler warnings + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-sysroot=DIR Search for dependent libraries within DIR + (or the compiler's sysroot if not specified). + --without-gnome Do not build libsoup-gnome + --with-html-dir=PATH path to installed docs + --with-apache-httpd Path to apache httpd (for tests) + --with-apache-module-dir + Apache modules dirs (for tests) + --with-ntlm-auth=PATH Where to look for ntlm_auth, path points to + ntlm_auth installation (default: /usr/bin/ntlm_auth) + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if + you have headers in a nonstandard directory + CPP C preprocessor + PKG_CONFIG path to pkg-config utility + PKG_CONFIG_PATH + directories to add to pkg-config's search path + PKG_CONFIG_LIBDIR + path overriding pkg-config's built-in search path + XML_CFLAGS C compiler flags for XML, overriding pkg-config + XML_LIBS linker flags for XML, overriding pkg-config + GNOME_KEYRING_CFLAGS + C compiler flags for GNOME_KEYRING, overriding pkg-config + GNOME_KEYRING_LIBS + linker flags for GNOME_KEYRING, overriding pkg-config + SQLITE_CFLAGS + C compiler flags for SQLITE, overriding pkg-config + SQLITE_LIBS linker flags for SQLITE, overriding pkg-config + GTKDOC_DEPS_CFLAGS + C compiler flags for GTKDOC_DEPS, overriding pkg-config + GTKDOC_DEPS_LIBS + linker flags for GTKDOC_DEPS, overriding pkg-config + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to . +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. 
+ ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +libsoup configure 2.37.92 +generated by GNU Autoconf 2.68 + +Copyright (C) 2010 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? 
+ if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. 
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_run + +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case declares $2. + For example, HP-UX 11i declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. 
*/ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libsoup $as_me 2.37.92, which was +generated by GNU Autoconf 2.68. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. 
+trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. ## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. 
## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. 
+ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. 
Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + +am__api_version='1.11' + +ac_aux_dir= +for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +# Find a good install program. 
We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. 
+ : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +$as_echo_n "checking whether build environment is sane... " >&6; } +# Just in case +sleep 1 +echo timestamp > conftest.file +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[\\\"\#\$\&\'\`$am_lf]*) + as_fn_error $? 
"unsafe absolute working directory name" "$LINENO" 5;; +esac +case $srcdir in + *[\\\"\#\$\&\'\`$am_lf\ \ ]*) + as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;; +esac + +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + rm -f conftest.file + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + as_fn_error $? "ls -t appears to fail. Make sure there is not a broken +alias in your environment" "$LINENO" 5 + fi + + test "$2" = conftest.file + ) +then + # Ok. + : +else + as_fn_error $? "newly created file is older than distributed files! +Check your system clock" "$LINENO" 5 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +test "$program_prefix" != NONE && + program_transform_name="s&^&$program_prefix&;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s&\$&$program_suffix&;$program_transform_name" +# Double any \ or $. +# By default was `s,x,x', remove it if useless. 
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` + +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` + +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5 +$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} +fi + +if test x"${install_sh}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi + +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 +$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } +if test -z "$MKDIR_P"; then + if ${ac_cv_path_mkdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do + { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue + case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir (GNU coreutils) '* | \ + 'mkdir (coreutils) '* | \ + 'mkdir (fileutils) '4.1*) + ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + break 3;; + esac + done + done + done +IFS=$as_save_IFS + +fi + + test -d ./--version && rmdir ./--version + if test "${ac_cv_path_mkdir+set}" = set; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. Don't cache a + # value for MKDIR_P within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + MKDIR_P="$ac_install_sh -d" + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +$as_echo "$MKDIR_P" >&6; } + +mkdir_p="$MKDIR_P" +case $mkdir_p in + [\\/$]* | ?:[\\/]*) ;; + */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; +esac + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + am__isrc=' -I$(srcdir)' + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + as_fn_error $? 
"source directory already configured; run \"make distclean\" there first" "$LINENO" 5 + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='libsoup' + VERSION='2.37.92' + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "$PACKAGE" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define VERSION "$VERSION" +_ACEOF + +# Some tools Automake needs. + +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AMTAR='$${TAR-tar}' + +am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' + + + + + +# Check whether --enable-silent-rules was given. +if test "${enable_silent_rules+set}" = set; then : + enableval=$enable_silent_rules; +fi + +case $enable_silent_rules in +yes) AM_DEFAULT_VERBOSITY=0;; +no) AM_DEFAULT_VERBOSITY=1;; +*) AM_DEFAULT_VERBOSITY=0;; +esac +am_make=${MAKE-make} +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +$as_echo_n "checking whether $am_make supports nested variables... 
" >&6; } +if ${am_cv_make_support_nested_variables+:} false; then : + $as_echo_n "(cached) " >&6 +else + if $as_echo 'TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +$as_echo "$am_cv_make_support_nested_variables" >&6; } +if test $am_cv_make_support_nested_variables = yes; then + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AM_BACKSLASH='\' + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + + +SOUP_API_VERSION=2.4 + + +# Increment on interface addition. Reset on removal. +SOUP_AGE=5 + +# Increment on interface add, remove, or change. +SOUP_CURRENT=6 + +# Increment on source change. Reset when CURRENT changes. 
+SOUP_REVISION=0 + + + + + + +debug_default=minimum + +# Declare --enable-* args and collect ac_help strings +# Check whether --enable-debug was given. +if test "${enable_debug+set}" = set; then : + enableval=$enable_debug; +else + enable_debug=$debug_default +fi + + +# Set the debug flags +if test "x$enable_debug" = "xyes"; then + test "$cflags_set" = set || CFLAGS="$CFLAGS -g" + SOUP_DEBUG_FLAGS="-DG_ENABLE_DEBUG" +else + if test "x$enable_debug" = "xno"; then + SOUP_DEBUG_FLAGS="-DG_DISABLE_ASSERT -DG_DISABLE_CHECKS" + fi +fi + + + +# Set the maintainer flags +#if test -d .git; then +# SOUP_MAINTAINER_FLAGS="-DG_DISABLE_DEPRECATED" +#fi + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... 
+ 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. 
+for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... 
" >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... 
" >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? 
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +DEPDIR="${am__leading_dot}deps" + +ac_config_commands="$ac_config_commands depfiles" + + +am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 +$as_echo_n "checking for style of include used by $am_make... " >&6; } +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from `make'. 
+case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 +$as_echo "$_am_result" >&6; } +rm -f confinc confmf + +# Check whether --enable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then : + enableval=$enable_dependency_tracking; +fi + +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi + if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + +depcc="$CC" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CC_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. 
For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. 
+ test "$am__universal" = false || continue + ;; + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok `-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. 
+ rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + + + +# Initialize libtool +case `pwd` in + *\ * | *\ *) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.4' +macro_revision='1.3293' + + + + + + + + + + + + + +ltmain="$ac_aux_dir/ltmain.sh" + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. 
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +$as_echo_n "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "" +} + +case "$ECHO" in + printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +$as_echo "printf" >&6; } ;; + print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +$as_echo "print -r" >&6; } ;; + *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +$as_echo "cat" >&6; } ;; +esac + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... 
" >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue +# Check for GNU ac_path_SED and select it if it is found. + # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? 
"no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue +# Check for GNU ac_path_GREP and select it if it is found. 
+ # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +$as_echo_n "checking for fgrep... 
" >&6; } +if ${ac_cv_path_FGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue +# Check for GNU ac_path_FGREP and select it if it is found. + # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_FGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + as_fn_error $? 
"no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_FGREP=$FGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +$as_echo "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +test -z "$GREP" && GREP=grep + + + + + + + + + + + + + + + + + + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then : + withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +$as_echo_n "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +$as_echo_n "checking for GNU ld... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +$as_echo_n "checking for non-GNU ld... " >&6; } +fi +if ${lt_cv_path_LD+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +$as_echo "$LD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } +if ${lt_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 +$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } +if ${lt_cv_path_NM+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. 
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +$as_echo "$lt_cv_path_NM" >&6; } +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +$as_echo "$DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +$as_echo "$ac_ct_DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_DUMPBIN" && break +done + + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN + fi +fi + + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +$as_echo_n "checking the name lister ($NM) interface... 
" >&6; } +if ${lt_cv_nm_interface+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +$as_echo "$lt_cv_nm_interface" >&6; } + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +$as_echo_n "checking whether ln -s works... " >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +$as_echo "no, using $LN_S" >&6; } +fi + +# find the maximum length of command line arguments +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +$as_echo_n "checking the maximum length of command line arguments... " >&6; } +if ${lt_cv_sys_max_cmd_len+:} false; then : + $as_echo_n "(cached) " >&6 +else + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. 
+ # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. 
+ lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`func_fallback_echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n $lt_cv_sys_max_cmd_len ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +$as_echo "$lt_cv_sys_max_cmd_len" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 +$as_echo "none" >&6; } +fi +max_cmd_len=$lt_cv_sys_max_cmd_len + + + + + + +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 +$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 +$as_echo "$xsi_shell" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 +$as_echo_n "checking whether the shell understands \"+=\"... 
" >&6; } +lt_shell_append=no +( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 +$as_echo "$lt_shell_append" >&6; } + + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +$as_echo_n "checking how to convert $build file names to $host format... " >&6; } +if ${lt_cv_to_host_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +$as_echo "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +$as_echo_n "checking 
how to convert $build file names to toolchain format... " >&6; } +if ${lt_cv_to_tool_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + #assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +$as_echo "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +$as_echo_n "checking for $LD option to reload object files... " >&6; } +if ${lt_cv_ld_reload_flag+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_reload_flag='-r' +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +$as_echo "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test "$GCC" != yes; then + reload_cmds=false + fi + ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +$as_echo_n "checking how to recognize dependent libraries... " >&6; } +if ${lt_cv_deplibs_check_method+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. 
+# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. + +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. + if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. 
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ 
$as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +$as_echo "$lt_cv_deplibs_check_method" >&6; } + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + + + + + + + + + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +$as_echo "$DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. 
+set dummy dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +$as_echo "$ac_ct_DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +$as_echo_n "checking how to associate runtime and link libraries... 
" >&6; } +if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh + # decide which to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd="$ECHO" + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + + + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} +: ${AR_FLAGS=cru} + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +$as_echo_n "checking for archiver @FILE support... " >&6; } +if ${lt_cv_ar_at_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? 
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +$as_echo "$lt_cv_ar_at_file" >&6; } + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +test -z "$STRIP" && STRIP=: + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: + + + + + + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check for command to grab the raw symbol name followed by C symbol from nm. 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } +if ${lt_cv_sys_global_symbol_pipe+:} false; then : + $as_echo_n "(cached) " >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. 
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. 
*/ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +$as_echo_n "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. +if test "${with_sysroot+set}" = set; then : + withval=$with_sysroot; +else + with_sysroot=no +fi + + +lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5 +$as_echo "${with_sysroot}" >&6; } + as_fn_error $? "The sysroot must be an absolute path." 
"$LINENO" 5 + ;; +esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +$as_echo "${lt_sysroot:-no}" >&6; } + + + + + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then : + enableval=$enable_libtool_lock; +fi + +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? 
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +$as_echo_n "checking whether the C compiler needs -belf... " >&6; } +if ${lt_cv_cc_needs_belf+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_cc_needs_belf=yes +else + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +$as_echo "$lt_cv_cc_needs_belf" >&6; } + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +$as_echo "$MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi + +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if ${lt_cv_path_mainfest_tool+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +$as_echo "$lt_cv_path_mainfest_tool" >&6; } +if test "x$lt_cv_path_mainfest_tool" != xyes; then + MANIFEST_TOOL=: +fi + + + + + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +$as_echo "$DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +$as_echo "$ac_ct_DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +$as_echo "$NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +$as_echo "$ac_ct_NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +$as_echo "$LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_LIPO="lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +$as_echo "$ac_ct_LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_LIPO" = x; then + LIPO=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO + fi +else + LIPO="$ac_cv_prog_LIPO" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +$as_echo "$OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OTOOL="otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +$as_echo "$ac_ct_OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +$as_echo "$OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +$as_echo "$ac_ct_OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +$as_echo_n "checking for -single_module linker flag... " >&6; } +if ${lt_cv_apple_cc_single_mod+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + if test -f libconftest.dylib && test ! 
-s conftest.err && test $_lt_result = 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +$as_echo "$lt_cv_apple_cc_single_mod" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } +if ${lt_cv_ld_exported_symbols_list+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_ld_exported_symbols_list=yes +else + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +$as_echo_n "checking for -force_load linker flag... 
" >&6; } +if ${lt_cv_ld_force_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -f conftest && test ! -s conftest.err && test $_lt_result = 0 && $GREP forced_load conftest 2>&1 >/dev/null; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +$as_echo "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? 
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[91]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[012]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. 
+ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. 
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +for ac_header in dlfcn.h +do : + ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLFCN_H 1 +_ACEOF + +fi + +done + + + + + +# Set options +enable_win32_dll=yes + +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}as", so it can be a program name with args. +set dummy ${ac_tool_prefix}as; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AS+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AS"; then + ac_cv_prog_AS="$AS" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AS="${ac_tool_prefix}as" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AS=$ac_cv_prog_AS +if test -n "$AS"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AS" >&5 +$as_echo "$AS" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_AS"; then + ac_ct_AS=$AS + # Extract the first word of "as", so it can be a program name with args. +set dummy as; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AS+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AS"; then + ac_cv_prog_ac_ct_AS="$ac_ct_AS" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_AS="as" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AS=$ac_cv_prog_ac_ct_AS +if test -n "$ac_ct_AS"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AS" >&5 +$as_echo "$ac_ct_AS" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_AS" = x; then + AS="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AS=$ac_ct_AS + fi +else + AS="$ac_cv_prog_AS" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +$as_echo "$DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +$as_echo "$ac_ct_DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + + ;; +esac + +test -z "$AS" && AS=as + + + + + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + + + enable_dlopen=no + + + + # Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_shared=yes +fi + + + + + + + + + + # Check whether --enable-static was given. 
+if test "${enable_static+set}" = set; then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_static=yes +fi + + + + + + + + + + +# Check whether --with-pic was given. +if test "${with_pic+set}" = set; then : + withval=$with_pic; pic_mode="$withval" +else + pic_mode=default +fi + + +test -z "$pic_mode" && pic_mode=default + + + + + + + + # Check whether --enable-fast-install was given. +if test "${enable_fast_install+set}" = set; then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_fast_install=yes +fi + + + + + + + + + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + + + + + + + + + + + + + + + + + + + + + + + + +test -z "$LN_S" && LN_S="ln -s" + + + + + + + + + + + + + + +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +$as_echo_n "checking for objdir... 
" >&6; } +if ${lt_cv_objdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +$as_echo "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +cat >>confdefs.h <<_ACEOF +#define LT_OBJDIR "$lt_cv_objdir/" +_ACEOF + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` + + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +$as_echo_n "checking for ${ac_tool_prefix}file... 
" >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +$as_echo_n "checking for file... 
" >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +# Use C for the default configuration in the libtool script + +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
+compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + +lt_prog_compiler_no_builtin_flag= + +if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } +if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + + + + + + + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. 
+ ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + lt_prog_compiler_pic='-Xcompiler -fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. 
+ case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. 
+ lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ F* | *Sun*Fortran*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + esac + ;; + esac + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. 
+ lt_prog_compiler_static='-non_shared' + ;; + + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +$as_echo "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + +# +# Check to make sure the PIC flag actually works. 
+# +if test -n "$lt_prog_compiler_pic"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } +if ${lt_cv_prog_compiler_pic_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } + +if test x"$lt_cv_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi + + + + + + + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +$as_echo "$lt_cv_prog_compiler_static_works" >&6; } + +if test x"$lt_cv_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... 
+ *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='${wl}--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. 
Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + 
tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script 
$output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. 
+ hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + export_dynamic_flag_spec='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. 
+ if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. 
+ old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + freebsd1*) + ld_shlibs=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # 
support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_flag_spec_ld='+b $libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +$as_echo_n "checking if $CC understands -b... 
" >&6; } +if ${lt_cv_prog_compiler__b+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler__b=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +$as_echo "$lt_cv_prog_compiler__b" >&6; } + +if test x"$lt_cv_prog_compiler__b" = xyes; then + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if ${lt_cv_irix_exported_symbol+:} false; then : + $as_echo_n "(cached) " >&6 +else + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_irix_exported_symbol=yes +else + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +$as_echo "$lt_cv_irix_exported_symbol" >&6; } + if test "$lt_cv_irix_exported_symbol" = yes; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + 
hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags 
-soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M 
${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | 
sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + no_undefined_flag='${wl}-z,text' + allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='${wl}-Blargedynsym' + ;; + esac + fi + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +$as_echo "$ld_shlibs" >&6; } +test "$ld_shlibs" = no && can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. 
+ ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; + *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. 
+ lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. + case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([A-Za-z]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. 
+ soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[4-9]*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) 
so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[123]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. 
+ version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Add ABI-specific directories to the system library path. 
+ sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" + + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix 
${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. + case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + 
shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + 
esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existent directories. 
+ if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +$as_echo "$hardcode_action" >&6; } + +if test "$hardcode_action" = relink || + test "$inherit_rpath" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if ${ac_cv_lib_dld_shl_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +else + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" +else + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... 
" >&6; } +if ${ac_cv_lib_svld_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +else + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if ${ac_cv_lib_dld_dld_link+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char dld_link (); +int +main () +{ +return dld_link (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_dld_link=yes +else + ac_cv_lib_dld_dld_link=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +$as_echo "$ac_cv_lib_dld_dld_link" >&6; } +if test "x$ac_cv_lib_dld_dld_link" = xyes; then : + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 +$as_echo_n "checking whether a program can dlopen itself... " >&6; } +if ${lt_cv_dlopen_self+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. 
*/ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 +$as_echo "$lt_cv_dlopen_self" >&6; } + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 +$as_echo_n "checking whether a statically linked program can dlopen itself... 
" >&6; } +if ${lt_cv_dlopen_self_static+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +$as_echo_n "checking whether stripping libraries is possible... " >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +fi + + + + + + + + + + + + + # Report which library types will actually be built + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +$as_echo_n "checking if libtool supports shared libraries... 
" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +$as_echo "$can_build_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +$as_echo_n "checking whether to build shared libraries... " >&6; } + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +$as_echo "$enable_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +$as_echo "$enable_static" >&6; } + + + + +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + + + + + + + + + + + + + ac_config_commands="$ac_config_commands libtool" + + + + +# Only expand once: + + + + +GLIB_REQUIRED=2.31.7 +# Check whether --enable-glibtest was given. +if test "${enable_glibtest+set}" = set; then : + enableval=$enable_glibtest; +else + enable_glibtest=yes +fi + + + pkg_config_args=glib-2.0 + for module in . 
gobject gio + do + case "$module" in + gmodule) + pkg_config_args="$pkg_config_args gmodule-2.0" + ;; + gmodule-no-export) + pkg_config_args="$pkg_config_args gmodule-no-export-2.0" + ;; + gobject) + pkg_config_args="$pkg_config_args gobject-2.0" + ;; + gthread) + pkg_config_args="$pkg_config_args gthread-2.0" + ;; + gio*) + pkg_config_args="$pkg_config_args $module-2.0" + ;; + esac + done + + + + + + + +if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. +set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +PKG_CONFIG=$ac_cv_path_PKG_CONFIG +if test -n "$PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 +$as_echo "$PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_path_PKG_CONFIG"; then + ac_pt_PKG_CONFIG=$PKG_CONFIG + # Extract the first word of "pkg-config", so it can be a program name with args. 
+set dummy pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $ac_pt_PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG +if test -n "$ac_pt_PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 +$as_echo "$ac_pt_PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_pt_PKG_CONFIG" = x; then + PKG_CONFIG="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + PKG_CONFIG=$ac_pt_PKG_CONFIG + fi +else + PKG_CONFIG="$ac_cv_path_PKG_CONFIG" +fi + +fi +if test -n "$PKG_CONFIG"; then + _pkg_min_version=0.16 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 +$as_echo_n "checking pkg-config is at least version $_pkg_min_version... 
" >&6; } + if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + PKG_CONFIG="" + fi +fi + + no_glib="" + + if test "x$PKG_CONFIG" = x ; then + no_glib=yes + PKG_CONFIG=no + fi + + min_glib_version=$GLIB_REQUIRED + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GLIB - version >= $min_glib_version" >&5 +$as_echo_n "checking for GLIB - version >= $min_glib_version... " >&6; } + + if test x$PKG_CONFIG != xno ; then + ## don't try to run the test against uninstalled libtool libs + if $PKG_CONFIG --uninstalled $pkg_config_args; then + echo "Will use uninstalled version of GLib found in PKG_CONFIG_PATH" + enable_glibtest=no + fi + + if $PKG_CONFIG --atleast-version $min_glib_version $pkg_config_args; then + : + else + no_glib=yes + fi + fi + + if test x"$no_glib" = x ; then + GLIB_GENMARSHAL=`$PKG_CONFIG --variable=glib_genmarshal glib-2.0` + GOBJECT_QUERY=`$PKG_CONFIG --variable=gobject_query glib-2.0` + GLIB_MKENUMS=`$PKG_CONFIG --variable=glib_mkenums glib-2.0` + GLIB_COMPILE_RESOURCES=`$PKG_CONFIG --variable=glib_compile_resources gio-2.0` + + GLIB_CFLAGS=`$PKG_CONFIG --cflags $pkg_config_args` + GLIB_LIBS=`$PKG_CONFIG --libs $pkg_config_args` + glib_config_major_version=`$PKG_CONFIG --modversion glib-2.0 | \ + sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\1/'` + glib_config_minor_version=`$PKG_CONFIG --modversion glib-2.0 | \ + sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\2/'` + glib_config_micro_version=`$PKG_CONFIG --modversion glib-2.0 | \ + sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\3/'` + if test "x$enable_glibtest" = "xyes" ; then + ac_save_CFLAGS="$CFLAGS" + ac_save_LIBS="$LIBS" + CFLAGS="$CFLAGS $GLIB_CFLAGS" + LIBS="$GLIB_LIBS $LIBS" + rm -f conf.glibtest + if test "$cross_compiling" = yes; then : + echo $ac_n "cross compiling; assumed OK... 
$ac_c" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +#include + +int +main () +{ + unsigned int major, minor, micro; + char *tmp_version; + + fclose (fopen ("conf.glibtest", "w")); + + /* HP/UX 9 (%@#!) writes to sscanf strings */ + tmp_version = g_strdup("$min_glib_version"); + if (sscanf(tmp_version, "%u.%u.%u", &major, &minor, µ) != 3) { + printf("%s, bad version string\n", "$min_glib_version"); + exit(1); + } + + if ((glib_major_version != $glib_config_major_version) || + (glib_minor_version != $glib_config_minor_version) || + (glib_micro_version != $glib_config_micro_version)) + { + printf("\n*** 'pkg-config --modversion glib-2.0' returned %d.%d.%d, but GLIB (%d.%d.%d)\n", + $glib_config_major_version, $glib_config_minor_version, $glib_config_micro_version, + glib_major_version, glib_minor_version, glib_micro_version); + printf ("*** was found! If pkg-config was correct, then it is best\n"); + printf ("*** to remove the old version of GLib. You may also be able to fix the error\n"); + printf("*** by modifying your LD_LIBRARY_PATH enviroment variable, or by editing\n"); + printf("*** /etc/ld.so.conf. 
Make sure you have run ldconfig if that is\n"); + printf("*** required on your system.\n"); + printf("*** If pkg-config was wrong, set the environment variable PKG_CONFIG_PATH\n"); + printf("*** to point to the correct configuration files\n"); + } + else if ((glib_major_version != GLIB_MAJOR_VERSION) || + (glib_minor_version != GLIB_MINOR_VERSION) || + (glib_micro_version != GLIB_MICRO_VERSION)) + { + printf("*** GLIB header files (version %d.%d.%d) do not match\n", + GLIB_MAJOR_VERSION, GLIB_MINOR_VERSION, GLIB_MICRO_VERSION); + printf("*** library (version %d.%d.%d)\n", + glib_major_version, glib_minor_version, glib_micro_version); + } + else + { + if ((glib_major_version > major) || + ((glib_major_version == major) && (glib_minor_version > minor)) || + ((glib_major_version == major) && (glib_minor_version == minor) && (glib_micro_version >= micro))) + { + return 0; + } + else + { + printf("\n*** An old version of GLIB (%u.%u.%u) was found.\n", + glib_major_version, glib_minor_version, glib_micro_version); + printf("*** You need a version of GLIB newer than %u.%u.%u. The latest version of\n", + major, minor, micro); + printf("*** GLIB is always available from ftp://ftp.gtk.org.\n"); + printf("***\n"); + printf("*** If you have already installed a sufficiently new version, this error\n"); + printf("*** probably means that the wrong copy of the pkg-config shell script is\n"); + printf("*** being found. The easiest way to fix this is to remove the old version\n"); + printf("*** of GLIB, but you can also set the PKG_CONFIG environment to point to the\n"); + printf("*** correct copy of pkg-config. 
(In this case, you will have to\n"); + printf("*** modify your LD_LIBRARY_PATH enviroment variable, or edit /etc/ld.so.conf\n"); + printf("*** so that the correct libraries are found at run-time))\n"); + } + } + return 1; +} + +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + no_glib=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + CFLAGS="$ac_save_CFLAGS" + LIBS="$ac_save_LIBS" + fi + fi + if test "x$no_glib" = x ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes (version $glib_config_major_version.$glib_config_minor_version.$glib_config_micro_version)" >&5 +$as_echo "yes (version $glib_config_major_version.$glib_config_minor_version.$glib_config_micro_version)" >&6; } + : + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + if test "$PKG_CONFIG" = "no" ; then + echo "*** A new enough version of pkg-config was not found." + echo "*** See http://www.freedesktop.org/software/pkgconfig/" + else + if test -f conf.glibtest ; then + : + else + echo "*** Could not run GLIB test program, checking why..." + ac_save_CFLAGS="$CFLAGS" + ac_save_LIBS="$LIBS" + CFLAGS="$CFLAGS $GLIB_CFLAGS" + LIBS="$LIBS $GLIB_LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include + +int +main () +{ + return ((glib_major_version) || (glib_minor_version) || (glib_micro_version)); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + echo "*** The test program compiled, but did not run. This usually means" + echo "*** that the run-time linker is not finding GLIB or finding the wrong" + echo "*** version of GLIB. 
If it is not finding GLIB, you'll need to set your" + echo "*** LD_LIBRARY_PATH environment variable, or edit /etc/ld.so.conf to point" + echo "*** to the installed location Also, make sure you have run ldconfig if that" + echo "*** is required on your system" + echo "***" + echo "*** If you have an old version installed, it is best to remove it, although" + echo "*** you may also be able to get things to work by modifying LD_LIBRARY_PATH" +else + echo "*** The test program failed to compile or link. See the file config.log for the" + echo "*** exact error that occured. This usually means GLIB is incorrectly installed." +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS="$ac_save_CFLAGS" + LIBS="$ac_save_LIBS" + fi + fi + GLIB_CFLAGS="" + GLIB_LIBS="" + GLIB_GENMARSHAL="" + GOBJECT_QUERY="" + GLIB_MKENUMS="" + GLIB_COMPILE_RESOURCES="" + : + fi + + + + + + + rm -f conf.glibtest + +if test "$GLIB_LIBS" = ""; then + as_fn_error $? "GLIB $GLIB_REQUIRED or later is required to build libsoup" "$LINENO" 5 +fi +GLIB_CFLAGS="$GLIB_CFLAGS -DG_DISABLE_SINGLE_INCLUDES" + +GLIB_MAKEFILE='$(top_srcdir)/Makefile.glib' + + + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for XML" >&5 +$as_echo_n "checking for XML... " >&6; } + +if test -n "$XML_CFLAGS"; then + pkg_cv_XML_CFLAGS="$XML_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libxml-2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libxml-2.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_XML_CFLAGS=`$PKG_CONFIG --cflags "libxml-2.0" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$XML_LIBS"; then + pkg_cv_XML_LIBS="$XML_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libxml-2.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libxml-2.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_XML_LIBS=`$PKG_CONFIG --libs "libxml-2.0" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + XML_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "libxml-2.0" 2>&1` + else + XML_PKG_ERRORS=`$PKG_CONFIG --print-errors "libxml-2.0" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$XML_PKG_ERRORS" >&5 + + as_fn_error $? "Package requirements (libxml-2.0) were not met: + +$XML_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +Alternatively, you may set the environment variables XML_CFLAGS +and XML_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details." "$LINENO" 5 + +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "The pkg-config script could not be found or is too old. 
Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +Alternatively, you may set the environment variables XML_CFLAGS +and XML_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details. + +To get pkg-config, see . +See \`config.log' for more details" "$LINENO" 5; } + +else + XML_CFLAGS=$pkg_cv_XML_CFLAGS + XML_LIBS=$pkg_cv_XML_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Win32" >&5 +$as_echo_n "checking for Win32... " >&6; } +case "$host" in + *-*-mingw*) + os_win32=yes + CFLAGS="$CFLAGS -D_REENTRANT" + ;; + *) + os_win32=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $os_win32" >&5 +$as_echo "$os_win32" >&6; } + if test $os_win32 = yes; then + OS_WIN32_TRUE= + OS_WIN32_FALSE='#' +else + OS_WIN32_TRUE='#' + OS_WIN32_FALSE= +fi + + +for ac_func in gmtime_r +do : + ac_fn_c_check_func "$LINENO" "gmtime_r" "ac_cv_func_gmtime_r" +if test "x$ac_cv_func_gmtime_r" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GMTIME_R 1 +_ACEOF + +fi +done + +for ac_func in mmap +do : + ac_fn_c_check_func "$LINENO" "mmap" "ac_cv_func_mmap" +if test "x$ac_cv_func_mmap" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_MMAP 1 +_ACEOF + +fi +done + +ac_fn_c_check_func "$LINENO" "socket" "ac_cv_func_socket" +if test "x$ac_cv_func_socket" = xyes; then : + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lsocket" >&5 +$as_echo_n "checking for socket in -lsocket... " >&6; } +if ${ac_cv_lib_socket_socket+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsocket $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char socket (); +int +main () +{ +return socket (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_socket_socket=yes +else + ac_cv_lib_socket_socket=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_socket" >&5 +$as_echo "$ac_cv_lib_socket_socket" >&6; } +if test "x$ac_cv_lib_socket_socket" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSOCKET 1 +_ACEOF + + LIBS="-lsocket $LIBS" + +fi + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build libsoup-gnome" >&5 +$as_echo_n "checking whether to build libsoup-gnome... " >&6; } + +# Check whether --with-gnome was given. +if test "${with_gnome+set}" = set; then : + withval=$with_gnome; : +else + if test $os_win32 = yes; then with_gnome=no; else with_gnome=yes; fi +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_gnome" >&5 +$as_echo "$with_gnome" >&6; } + +if test $with_gnome != no -a $os_win32 != yes; then + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNOME_KEYRING" >&5 +$as_echo_n "checking for GNOME_KEYRING... " >&6; } + +if test -n "$GNOME_KEYRING_CFLAGS"; then + pkg_cv_GNOME_KEYRING_CFLAGS="$GNOME_KEYRING_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gnome-keyring-1\""; } >&5 + ($PKG_CONFIG --exists --print-errors "gnome-keyring-1") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_GNOME_KEYRING_CFLAGS=`$PKG_CONFIG --cflags "gnome-keyring-1" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$GNOME_KEYRING_LIBS"; then + pkg_cv_GNOME_KEYRING_LIBS="$GNOME_KEYRING_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gnome-keyring-1\""; } >&5 + ($PKG_CONFIG --exists --print-errors "gnome-keyring-1") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_GNOME_KEYRING_LIBS=`$PKG_CONFIG --libs "gnome-keyring-1" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + GNOME_KEYRING_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "gnome-keyring-1" 2>&1` + else + GNOME_KEYRING_PKG_ERRORS=`$PKG_CONFIG --print-errors "gnome-keyring-1" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$GNOME_KEYRING_PKG_ERRORS" >&5 + + as_fn_error $? "Could not find gnome-keyring devel files. +Configure with --without-gnome if you wish to build only libsoup +without GNOME-specific features." "$LINENO" 5 +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Could not find gnome-keyring devel files. +Configure with --without-gnome if you wish to build only libsoup +without GNOME-specific features." 
"$LINENO" 5 +else + GNOME_KEYRING_CFLAGS=$pkg_cv_GNOME_KEYRING_CFLAGS + GNOME_KEYRING_LIBS=$pkg_cv_GNOME_KEYRING_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + : +fi +fi + + + + if test $with_gnome != no; then + BUILD_LIBSOUP_GNOME_TRUE= + BUILD_LIBSOUP_GNOME_FALSE='#' +else + BUILD_LIBSOUP_GNOME_TRUE='#' + BUILD_LIBSOUP_GNOME_FALSE= +fi + + +if test $with_gnome != no; then + +$as_echo "#define HAVE_GNOME 1" >>confdefs.h + + + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for SQLITE" >&5 +$as_echo_n "checking for SQLITE... " >&6; } + +if test -n "$SQLITE_CFLAGS"; then + pkg_cv_SQLITE_CFLAGS="$SQLITE_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"sqlite3\""; } >&5 + ($PKG_CONFIG --exists --print-errors "sqlite3") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_SQLITE_CFLAGS=`$PKG_CONFIG --cflags "sqlite3" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$SQLITE_LIBS"; then + pkg_cv_SQLITE_LIBS="$SQLITE_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"sqlite3\""; } >&5 + ($PKG_CONFIG --exists --print-errors "sqlite3") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_SQLITE_LIBS=`$PKG_CONFIG --libs "sqlite3" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + SQLITE_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "sqlite3" 2>&1` + else + SQLITE_PKG_ERRORS=`$PKG_CONFIG --print-errors "sqlite3" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$SQLITE_PKG_ERRORS" >&5 + + as_fn_error $? "Could not find sqlite3 devel files: + +$SQLITE_PKG_ERRORS + +Pass \"--without-gnome\" to configure if you want to build libsoup +without GNOME support." "$LINENO" 5 +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? "Could not find sqlite3 devel files: + +$SQLITE_PKG_ERRORS + +Pass \"--without-gnome\" to configure if you want to build libsoup +without GNOME support." "$LINENO" 5 +else + SQLITE_CFLAGS=$pkg_cv_SQLITE_CFLAGS + SQLITE_LIBS=$pkg_cv_SQLITE_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + : +fi +fi + + + + + + + + + # Extract the first word of "gtkdoc-check", so it can be a program name with args. +set dummy gtkdoc-check; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_GTKDOC_CHECK+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $GTKDOC_CHECK in + [\\/]* | ?:[\\/]*) + ac_cv_path_GTKDOC_CHECK="$GTKDOC_CHECK" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_GTKDOC_CHECK="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +GTKDOC_CHECK=$ac_cv_path_GTKDOC_CHECK +if test -n "$GTKDOC_CHECK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GTKDOC_CHECK" >&5 +$as_echo "$GTKDOC_CHECK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + for ac_prog in gtkdoc-rebase +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_GTKDOC_REBASE+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $GTKDOC_REBASE in + [\\/]* | ?:[\\/]*) + ac_cv_path_GTKDOC_REBASE="$GTKDOC_REBASE" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_GTKDOC_REBASE="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +GTKDOC_REBASE=$ac_cv_path_GTKDOC_REBASE +if test -n "$GTKDOC_REBASE"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GTKDOC_REBASE" >&5 +$as_echo "$GTKDOC_REBASE" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$GTKDOC_REBASE" && break +done +test -n "$GTKDOC_REBASE" || GTKDOC_REBASE="true" + + # Extract the first word of "gtkdoc-mkpdf", so it can be a program name with args. +set dummy gtkdoc-mkpdf; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_GTKDOC_MKPDF+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $GTKDOC_MKPDF in + [\\/]* | ?:[\\/]*) + ac_cv_path_GTKDOC_MKPDF="$GTKDOC_MKPDF" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_GTKDOC_MKPDF="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +GTKDOC_MKPDF=$ac_cv_path_GTKDOC_MKPDF +if test -n "$GTKDOC_MKPDF"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $GTKDOC_MKPDF" >&5 +$as_echo "$GTKDOC_MKPDF" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + +# Check whether --with-html-dir was given. 
+if test "${with_html_dir+set}" = set; then : + withval=$with_html_dir; +else + with_html_dir='${datadir}/gtk-doc/html' +fi + + HTML_DIR="$with_html_dir" + + + # Check whether --enable-gtk-doc was given. +if test "${enable_gtk_doc+set}" = set; then : + enableval=$enable_gtk_doc; +else + enable_gtk_doc=no +fi + + + if test x$enable_gtk_doc = xyes; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gtk-doc >= 1.10\""; } >&5 + ($PKG_CONFIG --exists --print-errors "gtk-doc >= 1.10") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + : +else + as_fn_error $? "You need to have gtk-doc >= 1.10 installed to build $PACKAGE_NAME" "$LINENO" 5 +fi + if test "x$PACKAGE_NAME" != "xglib"; then + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GTKDOC_DEPS" >&5 +$as_echo_n "checking for GTKDOC_DEPS... " >&6; } + +if test -n "$GTKDOC_DEPS_CFLAGS"; then + pkg_cv_GTKDOC_DEPS_CFLAGS="$GTKDOC_DEPS_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_GTKDOC_DEPS_CFLAGS=`$PKG_CONFIG --cflags "glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$GTKDOC_DEPS_LIBS"; then + pkg_cv_GTKDOC_DEPS_LIBS="$GTKDOC_DEPS_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_GTKDOC_DEPS_LIBS=`$PKG_CONFIG --libs "glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0" 2>/dev/null` +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + GTKDOC_DEPS_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0" 2>&1` + else + GTKDOC_DEPS_PKG_ERRORS=`$PKG_CONFIG --print-errors "glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$GTKDOC_DEPS_PKG_ERRORS" >&5 + + as_fn_error $? "Package requirements (glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0) were not met: + +$GTKDOC_DEPS_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +Alternatively, you may set the environment variables GTKDOC_DEPS_CFLAGS +and GTKDOC_DEPS_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details." 
"$LINENO" 5 + +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +Alternatively, you may set the environment variables GTKDOC_DEPS_CFLAGS +and GTKDOC_DEPS_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details. + +To get pkg-config, see . +See \`config.log' for more details" "$LINENO" 5; } + +else + GTKDOC_DEPS_CFLAGS=$pkg_cv_GTKDOC_DEPS_CFLAGS + GTKDOC_DEPS_LIBS=$pkg_cv_GTKDOC_DEPS_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +fi + fi + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build gtk-doc documentation" >&5 +$as_echo_n "checking whether to build gtk-doc documentation... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_gtk_doc" >&5 +$as_echo "$enable_gtk_doc" >&6; } + + # Check whether --enable-gtk-doc-html was given. +if test "${enable_gtk_doc_html+set}" = set; then : + enableval=$enable_gtk_doc_html; +else + enable_gtk_doc_html=yes +fi + + # Check whether --enable-gtk-doc-pdf was given. 
+if test "${enable_gtk_doc_pdf+set}" = set; then : + enableval=$enable_gtk_doc_pdf; +else + enable_gtk_doc_pdf=no +fi + + + if test -z "$GTKDOC_MKPDF"; then + enable_gtk_doc_pdf=no + fi + + + if test x$enable_gtk_doc = xyes; then + ENABLE_GTK_DOC_TRUE= + ENABLE_GTK_DOC_FALSE='#' +else + ENABLE_GTK_DOC_TRUE='#' + ENABLE_GTK_DOC_FALSE= +fi + + if test x$enable_gtk_doc_html = xyes; then + GTK_DOC_BUILD_HTML_TRUE= + GTK_DOC_BUILD_HTML_FALSE='#' +else + GTK_DOC_BUILD_HTML_TRUE='#' + GTK_DOC_BUILD_HTML_FALSE= +fi + + if test x$enable_gtk_doc_pdf = xyes; then + GTK_DOC_BUILD_PDF_TRUE= + GTK_DOC_BUILD_PDF_FALSE='#' +else + GTK_DOC_BUILD_PDF_TRUE='#' + GTK_DOC_BUILD_PDF_FALSE= +fi + + if test -n "$LIBTOOL"; then + GTK_DOC_USE_LIBTOOL_TRUE= + GTK_DOC_USE_LIBTOOL_FALSE='#' +else + GTK_DOC_USE_LIBTOOL_TRUE='#' + GTK_DOC_USE_LIBTOOL_FALSE= +fi + + if test -n "$GTKDOC_REBASE"; then + GTK_DOC_USE_REBASE_TRUE= + GTK_DOC_USE_REBASE_FALSE='#' +else + GTK_DOC_USE_REBASE_TRUE='#' + GTK_DOC_USE_REBASE_FALSE= +fi + + + + + + + # Check whether --enable-introspection was given. +if test "${enable_introspection+set}" = set; then : + enableval=$enable_introspection; +else + enable_introspection=auto +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gobject-introspection" >&5 +$as_echo_n "checking for gobject-introspection... " >&6; } + + case $enable_introspection in #( + no) : + found_introspection="no (disabled, use --enable-introspection to enable)" + ;; #( + yes) : + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gobject-introspection-1.0\""; } >&5 + ($PKG_CONFIG --exists --print-errors "gobject-introspection-1.0") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + : +else + as_fn_error $? 
"gobject-introspection-1.0 is not installed" "$LINENO" 5 +fi + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gobject-introspection-1.0 >= 0.9.5\""; } >&5 + ($PKG_CONFIG --exists --print-errors "gobject-introspection-1.0 >= 0.9.5") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + found_introspection=yes +else + as_fn_error $? "You need to have gobject-introspection >= 0.9.5 installed to build libsoup" "$LINENO" 5 +fi + ;; #( + auto) : + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"gobject-introspection-1.0 >= 0.9.5\""; } >&5 + ($PKG_CONFIG --exists --print-errors "gobject-introspection-1.0 >= 0.9.5") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + found_introspection=yes +else + found_introspection=no +fi + ;; #( + *) : + as_fn_error $? 
"invalid argument passed to --enable-introspection, should be one of [no/auto/yes]" "$LINENO" 5 + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $found_introspection" >&5 +$as_echo "$found_introspection" >&6; } + + INTROSPECTION_SCANNER= + INTROSPECTION_COMPILER= + INTROSPECTION_GENERATE= + INTROSPECTION_GIRDIR= + INTROSPECTION_TYPELIBDIR= + if test "x$found_introspection" = "xyes"; then + INTROSPECTION_SCANNER=`$PKG_CONFIG --variable=g_ir_scanner gobject-introspection-1.0` + INTROSPECTION_COMPILER=`$PKG_CONFIG --variable=g_ir_compiler gobject-introspection-1.0` + INTROSPECTION_GENERATE=`$PKG_CONFIG --variable=g_ir_generate gobject-introspection-1.0` + INTROSPECTION_GIRDIR=`$PKG_CONFIG --variable=girdir gobject-introspection-1.0` + INTROSPECTION_TYPELIBDIR="$($PKG_CONFIG --variable=typelibdir gobject-introspection-1.0)" + INTROSPECTION_CFLAGS=`$PKG_CONFIG --cflags gobject-introspection-1.0` + INTROSPECTION_LIBS=`$PKG_CONFIG --libs gobject-introspection-1.0` + INTROSPECTION_MAKEFILE=`$PKG_CONFIG --variable=datadir gobject-introspection-1.0`/gobject-introspection-1.0/Makefile.introspection + fi + + + + + + + + + + if test "x$found_introspection" = "xyes"; then + HAVE_INTROSPECTION_TRUE= + HAVE_INTROSPECTION_FALSE='#' +else + HAVE_INTROSPECTION_TRUE='#' + HAVE_INTROSPECTION_FALSE= +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for glib-networking (glib TLS implementation)" >&5 +$as_echo_n "checking for glib-networking (glib TLS implementation)... " >&6; } +save_CFLAGS="$CFLAGS" +save_LIBS="$LIBS" +CFLAGS="$CFLAGS $GLIB_CFLAGS" +LIBS="$LIBS $GLIB_LIBS" +if test "$cross_compiling" = yes; then : + have_glib_networking="unknown (cross-compiling)" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ +g_type_init (); return !g_tls_backend_supports_tls (g_tls_backend_get_default ()); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + have_glib_networking=yes +else + have_glib_networking=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +CFLAGS="$save_CFLAGS" +LIBS="$save_LIBS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_glib_networking" >&5 +$as_echo "$have_glib_networking" >&6; } + +# Check whether --enable-tls-check was given. +if test "${enable_tls_check+set}" = set; then : + enableval=$enable_tls_check; +fi + +if test "$enable_tls_check" != "no" -a "$have_glib_networking" = "no"; then + as_fn_error $? "libsoup requires glib-networking for TLS support. + +If you are building a package, you can pass --disable-tls-check to +allow building libsoup anyway (since glib-networking is not actually +required at compile time), but you should be sure to add a runtime +dependency on it." "$LINENO" 5 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for programs needed for regression tests" >&5 +$as_echo "$as_me: checking for programs needed for regression tests" >&6;} +MISSING_REGRESSION_TEST_PACKAGES="" + +if test "$have_glib_networking" = "no"; then + MISSING_REGRESSION_TEST_PACKAGES="$MISSING_REGRESSION_TEST_PACKAGES glib-networking" +fi + + +# Check whether --with-apache-httpd was given. +if test "${with_apache_httpd+set}" = set; then : + withval=$with_apache_httpd; APACHE_HTTPD="$withval" +else + for ac_prog in httpd2 httpd apache2 apache +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_path_APACHE_HTTPD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $APACHE_HTTPD in + [\\/]* | ?:[\\/]*) + ac_cv_path_APACHE_HTTPD="$APACHE_HTTPD" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_dummy="${PATH}:/usr/sbin" +for as_dir in $as_dummy +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_APACHE_HTTPD="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +APACHE_HTTPD=$ac_cv_path_APACHE_HTTPD +if test -n "$APACHE_HTTPD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $APACHE_HTTPD" >&5 +$as_echo "$APACHE_HTTPD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$APACHE_HTTPD" && break +done +test -n "$APACHE_HTTPD" || APACHE_HTTPD="no" + +fi + +if test "$APACHE_HTTPD" != "no"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking Apache version" >&5 +$as_echo_n "checking Apache version... " >&6; } + apache_version=`$APACHE_HTTPD -v 2>/dev/null | sed -ne 's/Server version: Apache\///p'` + case $apache_version in + 2.2.*) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $apache_version (ok)" >&5 +$as_echo "$apache_version (ok)" >&6; } + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $apache_version (ignoring)" >&5 +$as_echo "$apache_version (ignoring)" >&6; } + APACHE_HTTPD="no" + ;; + esac +fi + + +cat >>confdefs.h <<_ACEOF +#define APACHE_HTTPD "$APACHE_HTTPD" +_ACEOF + + +if test "$APACHE_HTTPD" != "no"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Apache module directory" >&5 +$as_echo_n "checking for Apache module directory... 
" >&6; } + +# Check whether --with-apache-module-dir was given. +if test "${with_apache_module_dir+set}" = set; then : + withval=$with_apache_module_dir; apache_module_dirs="$withval" +else + apache_prefix=`dirname \`dirname $APACHE_HTTPD\`` + mpm=`$APACHE_HTTPD -V | sed -ne 's/^Server MPM: */-/p' | tr 'A-Z' 'a-z'` + # This only works with bash, but should fail harmlessly in sh + apache_module_dirs=`echo $apache_prefix/lib{64,}/{apache,apache2,http,http2,httpd}{$mpm,}{/modules,}` +fi + + for dir in $apache_module_dirs; do + if test -f $dir/mod_auth_digest.so; then + APACHE_MODULE_DIR="$dir" + fi + if test -f $dir/mod_ssl.so; then + APACHE_SSL_MODULE_DIR="$dir" + IF_HAVE_APACHE_SSL="" + fi + if test -f $dir/mod_php5.so; then + APACHE_PHP_MODULE_DIR="$dir" + APACHE_PHP_MODULE=mod_php5.so + fi + if test -f $dir/libphp5.so; then + APACHE_PHP_MODULE_DIR="$dir" + APACHE_PHP_MODULE=libphp5.so + fi + done + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $APACHE_MODULE_DIR" >&5 +$as_echo "$APACHE_MODULE_DIR" >&6; } + + + + +fi + +if test "$APACHE_HTTPD" != "no" -a -n "$APACHE_MODULE_DIR" -a -n "$APACHE_SSL_MODULE_DIR"; then + +$as_echo "#define HAVE_APACHE 1" >>confdefs.h + + have_apache=1 + if test -z "$APACHE_PHP_MODULE_DIR"; then + MISSING_REGRESSION_TEST_PACKAGES="$MISSING_REGRESSION_TEST_PACKAGES mod_php5" + fi +else + have_apache=0 + if test "$APACHE_HTTPD" = "no" -o -z "$APACHE_MODULE_DIR"; then + MISSING_REGRESSION_TEST_PACKAGES="$MISSING_REGRESSION_TEST_PACKAGES apache" + else + MISSING_REGRESSION_TEST_PACKAGES="$MISSING_REGRESSION_TEST_PACKAGES mod_ssl" + fi +fi + if test $have_apache = 1; then + HAVE_APACHE_TRUE= + HAVE_APACHE_FALSE='#' +else + HAVE_APACHE_TRUE='#' + HAVE_APACHE_FALSE= +fi + + +if test "$have_apache" = 1; then + for ac_prog in php php5 +do + # Extract the first word of "$ac_prog", so it can be a program name with args. 
+set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_PHP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$PHP"; then + ac_cv_prog_PHP="$PHP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_PHP="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +PHP=$ac_cv_prog_PHP +if test -n "$PHP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PHP" >&5 +$as_echo "$PHP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$PHP" && break +done + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Apache PHP module" >&5 +$as_echo_n "checking for Apache PHP module... " >&6; } + if test -f $APACHE_PHP_MODULE_DIR/$APACHE_PHP_MODULE; then + have_php=yes + IF_HAVE_PHP="" + else + have_php=no + IF_HAVE_PHP="#" + MISSING_REGRESSION_TEST_PACKAGES="$MISSING_REGRESSION_TEST_PACKAGES php5" + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_php" >&5 +$as_echo "$have_php" >&6; } + + if test "$have_php" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xmlrpc-epi-php" >&5 +$as_echo_n "checking for xmlrpc-epi-php... 
" >&6; } + if $PHP --rf xmlrpc_server_create | grep -q "does not exist"; then + have_xmlrpc_epi_php=no + MISSING_REGRESSION_TEST_PACKAGES="$MISSING_REGRESSION_TEST_PACKAGES php-xmlrpc" + else + have_xmlrpc_epi_php=yes + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_xmlrpc_epi_php" >&5 +$as_echo "$have_xmlrpc_epi_php" >&6; } + fi +fi + + + if test "$have_xmlrpc_epi_php" = yes; then + HAVE_XMLRPC_EPI_PHP_TRUE= + HAVE_XMLRPC_EPI_PHP_FALSE='#' +else + HAVE_XMLRPC_EPI_PHP_TRUE='#' + HAVE_XMLRPC_EPI_PHP_FALSE= +fi + + +# Extract the first word of "curl", so it can be a program name with args. +set dummy curl; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_CURL+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $CURL in + [\\/]* | ?:[\\/]*) + ac_cv_path_CURL="$CURL" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_CURL="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_CURL" && ac_cv_path_CURL="no" + ;; +esac +fi +CURL=$ac_cv_path_CURL +if test -n "$CURL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CURL" >&5 +$as_echo "$CURL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test "$CURL" != no; then + +$as_echo "#define HAVE_CURL 1" >>confdefs.h + +else + MISSING_REGRESSION_TEST_PACKAGES="$MISSING_REGRESSION_TEST_PACKAGES curl" +fi + if test "$CURL" != no; then + HAVE_CURL_TRUE= + HAVE_CURL_FALSE='#' +else + HAVE_CURL_TRUE='#' + HAVE_CURL_FALSE= +fi + + + + if test -n "$MISSING_REGRESSION_TEST_PACKAGES"; then + MISSING_REGRESSION_TEST_PACKAGES_TRUE= + MISSING_REGRESSION_TEST_PACKAGES_FALSE='#' +else + MISSING_REGRESSION_TEST_PACKAGES_TRUE='#' + MISSING_REGRESSION_TEST_PACKAGES_FALSE= +fi + + + +# Check whether --with-ntlm-auth was given. +if test "${with_ntlm_auth+set}" = set; then : + withval=$with_ntlm_auth; ntlm_auth="$withval" +else + if test $os_win32 = yes; then ntlm_auth="no"; else ntlm_auth="/usr/bin/ntlm_auth"; fi +fi + +if test "$ntlm_auth" != "no"; then + +$as_echo "#define USE_NTLM_AUTH 1" >>confdefs.h + + if test "$ntlm_auth" = "yes"; then + ntlm_auth="/usr/bin/ntlm_auth" + fi +fi + + +cat >>confdefs.h <<_ACEOF +#define NTLM_AUTH "$ntlm_auth" +_ACEOF + + + +# Check whether --enable-more-warnings was given. 
+if test "${enable_more_warnings+set}" = set; then : + enableval=$enable_more_warnings; set_more_warnings=no +fi + + +if test "$GCC" = "yes" -a "$set_more_warnings" != "no"; then + CFLAGS="$CFLAGS \ + -Wall -Wstrict-prototypes -Werror=missing-prototypes \ + -Werror=implicit-function-declaration \ + -Werror=pointer-arith -Werror=init-self -Werror=format=2 \ + -Werror=missing-include-dirs -Werror=aggregate-return \ + -Werror=declaration-after-statement" +fi + + +ac_config_headers="$ac_config_headers config.h" + +ac_config_files="$ac_config_files libsoup-2.4.pc libsoup-gnome-2.4.pc Makefile libsoup-zip libsoup/Makefile tests/Makefile tests/httpd.conf docs/Makefile docs/reference/Makefile" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. 
+( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. 
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +else + am__EXEEXT_TRUE='#' + am__EXEEXT_FALSE= +fi + +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + as_fn_error $? "conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${OS_WIN32_TRUE}" && test -z "${OS_WIN32_FALSE}"; then + as_fn_error $? "conditional \"OS_WIN32\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_LIBSOUP_GNOME_TRUE}" && test -z "${BUILD_LIBSOUP_GNOME_FALSE}"; then + as_fn_error $? "conditional \"BUILD_LIBSOUP_GNOME\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${ENABLE_GTK_DOC_TRUE}" && test -z "${ENABLE_GTK_DOC_FALSE}"; then + as_fn_error $? "conditional \"ENABLE_GTK_DOC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${GTK_DOC_BUILD_HTML_TRUE}" && test -z "${GTK_DOC_BUILD_HTML_FALSE}"; then + as_fn_error $? 
"conditional \"GTK_DOC_BUILD_HTML\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${GTK_DOC_BUILD_PDF_TRUE}" && test -z "${GTK_DOC_BUILD_PDF_FALSE}"; then + as_fn_error $? "conditional \"GTK_DOC_BUILD_PDF\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${GTK_DOC_USE_LIBTOOL_TRUE}" && test -z "${GTK_DOC_USE_LIBTOOL_FALSE}"; then + as_fn_error $? "conditional \"GTK_DOC_USE_LIBTOOL\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${GTK_DOC_USE_REBASE_TRUE}" && test -z "${GTK_DOC_USE_REBASE_FALSE}"; then + as_fn_error $? "conditional \"GTK_DOC_USE_REBASE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_INTROSPECTION_TRUE}" && test -z "${HAVE_INTROSPECTION_FALSE}"; then + as_fn_error $? "conditional \"HAVE_INTROSPECTION\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_APACHE_TRUE}" && test -z "${HAVE_APACHE_FALSE}"; then + as_fn_error $? "conditional \"HAVE_APACHE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_XMLRPC_EPI_PHP_TRUE}" && test -z "${HAVE_XMLRPC_EPI_PHP_FALSE}"; then + as_fn_error $? "conditional \"HAVE_XMLRPC_EPI_PHP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_CURL_TRUE}" && test -z "${HAVE_CURL_FALSE}"; then + as_fn_error $? "conditional \"HAVE_CURL\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${MISSING_REGRESSION_TEST_PACKAGES_TRUE}" && test -z "${MISSING_REGRESSION_TEST_PACKAGES_FALSE}"; then + as_fn_error $? "conditional \"MISSING_REGRESSION_TEST_PACKAGES\" was never defined. 
+Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! 
-f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. 
Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
+ xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in #( + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libsoup $as_me 2.37.92, which was +generated by GNU Autoconf 2.68. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. 
+config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +libsoup config.status 2.37.92 +configured by $0, generated by GNU Autoconf 2.68, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2010 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +MKDIR_P='$MKDIR_P' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. 
+ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." 
;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# +AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' +macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' +AS='`$ECHO "$AS" | $SED "$delay_single_quote_subst"`' +DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' +enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' +enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' +pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' +ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' 
+host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' +host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' +build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' +build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' +build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' +SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' +Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' +GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' +EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' +FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' +LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' +NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' +LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' +max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' +ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' +exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' +lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' +lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' +reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' +AR='`$ECHO "$AR" | $SED 
"$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' +STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' +RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' +lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' +CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' +compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' +GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED 
"$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' +need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' +DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' +LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' +libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' +module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED 
"$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_ld='`$ECHO "$hardcode_libdir_flag_spec_ld" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' +file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' +version_type='`$ECHO "$version_type" | $SED 
"$delay_single_quote_subst"`' +runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' +libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' +soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' +install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' +finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' +old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' +striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + +# Quote evaled strings. 
+for var in AS \ +DLLTOOL \ +OBJDUMP \ +SHELL \ +ECHO \ +SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +deplibs_check_method \ +file_magic_cmd \ +file_magic_glob \ +want_nocaseglob \ +sharedlib_from_linklib_cmd \ +AR \ +AR_FLAGS \ +archiver_list_spec \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +nm_file_list_spec \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_pic \ +lt_prog_compiler_wl \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +MANIFEST_TOOL \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_flag_spec_ld \ +hardcode_libdir_separator \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ +libname_spec \ +library_names_spec \ +soname_spec \ +install_override_mode \ +finish_eval \ +old_striplib \ +striplib; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. 
+for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postlink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +sys_lib_dlsearch_path_spec; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +ac_aux_dir='$ac_aux_dir' +xsi_shell='$xsi_shell' +lt_shell_append='$lt_shell_append' + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile' + + + + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; + "libsoup-2.4.pc") CONFIG_FILES="$CONFIG_FILES libsoup-2.4.pc" ;; + "libsoup-gnome-2.4.pc") CONFIG_FILES="$CONFIG_FILES libsoup-gnome-2.4.pc" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "libsoup-zip") CONFIG_FILES="$CONFIG_FILES libsoup-zip" ;; + "libsoup/Makefile") CONFIG_FILES="$CONFIG_FILES libsoup/Makefile" ;; + "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; + "tests/httpd.conf") CONFIG_FILES="$CONFIG_FILES tests/httpd.conf" ;; + "docs/Makefile") CONFIG_FILES="$CONFIG_FILES docs/Makefile" ;; + "docs/reference/Makefile") CONFIG_FILES="$CONFIG_FILES docs/reference/Makefile" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! 
-d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. 
+ +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. 
+_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Autoconf 2.62 quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. 
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`$as_dirname -- "$mf" || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`$as_dirname -- "$file" || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir=$dirpart/$fdir; as_fn_mkdir_p + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, +# Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + +# The names of the tagged configurations supported by this script. +available_tags="" + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Assembler program. +AS=$lt_AS + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Object dumper program. +OBJDUMP=$lt_OBJDUMP + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. 
+SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. 
+global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and in which our libraries should be installed. +lt_sysroot=$lt_sysroot + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. 
+libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. 
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. 
+# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# If ld is used when linking, flag to hardcode \$libdir into a binary +# during linking. This must work even if \$libdir does not exist. +hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. 
+include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain="$ac_aux_dir/ltmain.sh" + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + if test x"$xsi_shell" = xyes; then + sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ +func_dirname ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_basename ()$/,/^} # func_basename /c\ +func_basename ()\ +{\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + + + sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ +func_dirname_and_basename ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ +func_stripname ()\ +{\ +\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ +\ # positional parameters, so assign one to ordinary parameter first.\ +\ func_stripname_result=${3}\ +\ func_stripname_result=${func_stripname_result#"${1}"}\ +\ func_stripname_result=${func_stripname_result%"${2}"}\ +} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ +func_split_long_opt ()\ +{\ +\ func_split_long_opt_name=${1%%=*}\ +\ func_split_long_opt_arg=${1#*=}\ +} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + + + sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ +func_split_short_opt ()\ +{\ +\ func_split_short_opt_arg=${1#??}\ +\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ +} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ +func_lo2o ()\ +{\ +\ case ${1} in\ +\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ +\ *) func_lo2o_result=${1} ;;\ +\ esac\ +} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_xform ()$/,/^} # func_xform /c\ +func_xform ()\ +{\ + func_xform_result=${1%.*}.lo\ +} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_arith ()$/,/^} # func_arith /c\ +func_arith ()\ +{\ + func_arith_result=$(( $* ))\ +} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_len ()$/,/^} # func_len /c\ +func_len ()\ +{\ + func_len_result=${#1}\ +} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + +fi + +if test x"$lt_shell_append" = xyes; then + sed -e '/^func_append ()$/,/^} # func_append /c\ +func_append ()\ +{\ + eval "${1}+=\\${2}"\ +} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ +func_append_quoted ()\ +{\ +\ func_quote_for_eval "${2}"\ +\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ +} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? 
|| _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 +$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} +fi + + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. 
+ $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + +if test -n "$MISSING_REGRESSION_TEST_PACKAGES"; then + echo "" + echo Some regression tests will not be run due to missing packages: + echo $MISSING_REGRESSION_TEST_PACKAGES + echo "" +fi diff --git a/configure.ac b/configure.ac index a016ba2..550a12b 100644 --- a/configure.ac +++ b/configure.ac @@ -3,11 +3,11 @@ dnl *** Initialize automake and set version *** dnl ******************************************* AC_PREREQ(2.63) -AC_INIT([libsoup],[2.35.90],[http://bugzilla.gnome.org/enter_bug.cgi?product=libsoup]) +AC_INIT([libsoup],[2.37.92],[http://bugzilla.gnome.org/enter_bug.cgi?product=libsoup]) AC_CONFIG_SRCDIR([libsoup-2.4.pc.in]) AC_CONFIG_MACRO_DIR([m4]) -AM_INIT_AUTOMAKE([foreign 1.9]) +AM_INIT_AUTOMAKE([1.11 foreign no-dist-gzip dist-xz -Wno-portability]) m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])],) AC_PROG_MAKE_SET @@ -16,10 +16,10 @@ SOUP_API_VERSION=2.4 AC_SUBST(SOUP_API_VERSION) # Increment on interface addition. Reset on removal. -SOUP_AGE=4 +SOUP_AGE=5 # Increment on interface add, remove, or change. -SOUP_CURRENT=5 +SOUP_CURRENT=6 # Increment on source change. Reset when CURRENT changes. 
SOUP_REVISION=0 @@ -53,9 +53,9 @@ fi AC_SUBST(SOUP_DEBUG_FLAGS) # Set the maintainer flags -if test -d .git; then - SOUP_MAINTAINER_FLAGS="-DG_DISABLE_DEPRECATED" -fi +#if test -d .git; then +# SOUP_MAINTAINER_FLAGS="-DG_DISABLE_DEPRECATED" +#fi AC_SUBST(SOUP_MAINTAINER_FLAGS) dnl *************************** @@ -72,12 +72,16 @@ dnl *********************** dnl *** Checks for glib *** dnl *********************** -AM_PATH_GLIB_2_0(2.27.5,,,gobject gthread gio) +GLIB_REQUIRED=2.31.7 +AM_PATH_GLIB_2_0($GLIB_REQUIRED,,,gobject gio) if test "$GLIB_LIBS" = ""; then - AC_MSG_ERROR(GLIB 2.27.5 or later is required to build libsoup) + AC_MSG_ERROR(GLIB $GLIB_REQUIRED or later is required to build libsoup) fi GLIB_CFLAGS="$GLIB_CFLAGS -DG_DISABLE_SINGLE_INCLUDES" +GLIB_MAKEFILE='$(top_srcdir)/Makefile.glib' +AC_SUBST(GLIB_MAKEFILE) + PKG_CHECK_MODULES(XML, libxml-2.0) AC_SUBST(XML_CFLAGS) AC_SUBST(XML_LIBS) @@ -106,30 +110,6 @@ AC_CHECK_FUNCS(gmtime_r) AC_CHECK_FUNCS(mmap) AC_CHECK_FUNC(socket, , AC_CHECK_LIB(socket, socket)) -dnl ************************* -dnl *** SQL Lite support *** -dnl ************************* - -AC_ARG_ENABLE(sqllite, - AS_HELP_STRING([--enable-sqllite], [Enable SQL lite support ]), , - enable_sqllite=no) - -if test "$enable_sqllite" != "no"; then - PKG_CHECK_MODULES(SQLITE, sqlite3, :, [AC_MSG_ERROR(dnl -[Could not find sqlite3 devel files: - -$SQLITE_PKG_ERRORS - -Pass "--without-sqlite" to configure if you want to build libsoup -without sql lite support.])]) - -fi - -AC_SUBST(SQLITE_CFLAGS) -AC_SUBST(SQLITE_LIBS) - -AM_CONDITIONAL(SQLLITE_SUPPORT, [test $enable_sqllite = yes]) - dnl ********************* dnl *** GNOME support *** dnl ********************* @@ -177,35 +157,6 @@ dnl *** gobject-introspection *** dnl ***************************** GOBJECT_INTROSPECTION_CHECK([0.9.5]) -dnl ************************************* -dnl *** Warnings to show if using GCC *** -dnl ************************************* - -AC_ARG_ENABLE(more-warnings, - 
AS_HELP_STRING([--disable-more-warnings], [Inhibit compiler warnings]), - set_more_warnings=no) - -if test "$GCC" = "yes" -a "$set_more_warnings" != "no"; then - CFLAGS="$CFLAGS \ - -Wall -Wstrict-prototypes -Wmissing-declarations \ - -Wmissing-prototypes -Wnested-externs -Wpointer-arith \ - -Wdeclaration-after-statement -Wformat=2 -Winit-self \ - -Waggregate-return -Wmissing-format-attribute" - - for option in -Wmissing-include-dirs -Wundef; do - SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $option" - AC_MSG_CHECKING([whether gcc understands $option]) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], - [has_option=yes], - [has_option=no]) - AC_MSG_RESULT($has_option) - if test $has_option = no; then - CFLAGS="$SAVE_CFLAGS" - fi - done -fi - AC_MSG_CHECKING([for glib-networking (glib TLS implementation)]) save_CFLAGS="$CFLAGS" save_LIBS="$LIBS" @@ -367,6 +318,24 @@ fi AC_SUBST(ntlm_auth) AC_DEFINE_UNQUOTED(NTLM_AUTH, "$ntlm_auth", [Samba's 'winbind' daemon helper 'ntlm_auth' which can be used for NTLM single-sign-on]) +dnl **************************************************** +dnl *** Warnings to show if using GCC *** +dnl *** (do this last so -Werror won't mess up tests *** +dnl **************************************************** + +AC_ARG_ENABLE(more-warnings, + AS_HELP_STRING([--disable-more-warnings], [Inhibit compiler warnings]), + set_more_warnings=no) + +if test "$GCC" = "yes" -a "$set_more_warnings" != "no"; then + CFLAGS="$CFLAGS \ + -Wall -Wstrict-prototypes -Werror=missing-prototypes \ + -Werror=implicit-function-declaration \ + -Werror=pointer-arith -Werror=init-self -Werror=format=2 \ + -Werror=missing-include-dirs -Werror=aggregate-return \ + -Werror=declaration-after-statement" +fi + dnl ************************* dnl *** Output Everything *** dnl ************************* diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index e450d47..0000000 --- a/debian/changelog +++ /dev/null @@ -1,483 +0,0 @@ -libsoup2.4 (2.35.90-0) unstable; 
urgency=low - - * Git : pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.35.90-0 - - * [Kwangtae Ko] Change OS(SLP) with OS(TIZEN) - - -- seung hak lee Tue, 27 Dec 2011 16:24:57 +0900 - -libsoup2.4 (2.35.90-1slp2+5) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.35.90-1slp2+5 - - * [DongJae KIM] Change 'use-ssl3' param value to TRUE :: Rollback open source patch to fix paypal.com issue. - * [Kwangtae Ko] Add null checking code in soup_socket_is_ssl() to fix timesofindia.indiatimes.com issue. - - -- seung hak lee Wed, 12 Oct 2011 21:31:02 +0900 - -libsoup2.4 (2.35.90-1slp2+4) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.35.90-1slp2+4 - - * Remove SLP.h frome Makefile.am - * Add samsung specific private function declarations to SLP.h - - -- seung hak lee Fri, 30 Sep 2011 10:30:09 +0900 - -libsoup2.4 (2.35.90-1slp2+3) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.35.90-1slp2+3 - - * ADD SLP.h in makefile.am - - -- seung hak lee Thu, 29 Sep 2011 10:00:09 +0900 - -libsoup2.4 (2.35.90-1slp2+2) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.35.90-1slp2+2 - - * libsoup version up 2.23.92 >> 2.25.90 - * Samsung patchs are merged - - -- seung hak lee Wed, 28 Sep 2011 21:32:27 +0900 - -libsoup2.4 (2.35.90-1slp2+1) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.35.90-1slp2+1 - - * libsoup version up 2.23.92 >> 2.25.90 - - -- seung hak lee Thu, 08 Sep 2011 14:20:57 +0900 - -libsoup2.4 (2.33.92-1slp2+12) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+12 - - * DonJae KIM FIX: do not append data to existing files - * DonJae KIM FIX: Change soup-date-is-past checking value, using Current System Year - - -- seung hak lee Wed, 07 Sep 2011 15:58:10 +0900 - -libsoup2.4 (2.33.92-1slp2+11) 
unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+11 - - * Remove package dependency on libgnutlsxx27 and libgnutls-iopenssl27 - - -- Dongwook Lee Tue, 6 Sep 2011 16:38:30 +0900 - -libsoup2.4 (2.33.92-1slp2+10) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+10 - - * Kwangtae Ko Fix to disable limiting the number of pending connections - * Kwangtae Ko Fix to play a PluginStream Video - - -- seung hak lee Wed, 10 Aug 2011 17:59:30 +0900 - -libsoup2.4 (2.33.92-1slp2+9) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+9 - - * DongJae KIM Rollback not to accept all certificate which has unknown CA - * Kwangtae Ko Fix to a crash problem when the browser is closed - - -- seung hak lee Wed, 13 Jul 2011 15:17:32 +0900 - -libsoup2.4 (2.33.92-1slp2+8) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+8 - - * Keunsoon Lee Fix to accept all certificate which has unknown CA - * Jaehun Lim Fix to the encoding probolm of data protocol - - -- seung hak lee Mon, 13 Jun 2011 17:31:57 +0900 - -libsoup2.4 (2.33.92-1slp2+7) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+7 - - * DongJae KIM Fix to the soup redirection when using proxy and https. - * Kwangtae Ko Fix to the browser touch freezing issue on http://news.mt.co.kr. 
- - -- seung hak lee Mon, 30 May 2011 15:39:25 +0900 - -libsoup2.4 (2.33.92-1slp2+6) unstable; urgency=low - - * lucid --> unstable - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+6 - - -- Taesoo Jun Wed, 25 May 2011 15:39:01 +0900 - -libsoup2.4 (2.33.92-1slp2+5) lucid; urgency=low - - * patch for UTF-8 encoding patch for cachekey - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+5 - - -- Taesoo Jun Wed, 25 May 2011 15:20:56 +0900 - -libsoup2.4 (2.33.92-1slp2+4) unstable; urgency=low - - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+4 - - * Kwangtae Ko Fix to a long-delay and overcharge CPU when closing... - * Taesoo Jun patch for contents sniff by SLP - - -- Jaehun Lim Mon, 23 May 2011 17:30:54 +0900 - -libsoup2.4 (2.33.92-1slp2+3) unstable; urgency=low - - * patch for cache entry UTF-8 encoding in pack_entry() - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+3 - - -- Taesoo Jun Mon, 16 May 2011 20:53:22 +0900 - -libsoup2.4 (2.33.92-1slp2+2) unstable; urgency=low - - * add gnutls and glib-networking dependency - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+2 - - -- JaeHyun Kim Mon, 25 Apr 2011 17:14:29 +0900 - -libsoup2.4 (2.33.92-1slp2+1) unstable; urgency=low - - * Upgrade to 2.33.92 - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.33.92-1slp2+1 - - -- JaeHyun Kim Tue, 04 Jan 2011 14:27:51 +0900 - -libsoup2.4 (2.31.6-1slp2+2) unstable; urgency=low - - * Apply SEC_CERTIFICATE patch from browser part - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.31.6-1slp2+2 - - -- Taesoo Jun Tue, 04 Jan 2011 14:27:51 +0900 - -libsoup2.4 (2.31.6-1slp2+1) unstable; urgency=low - - * Roll back to version, 2.31.6 - * Apply the ActiveSync crash patch - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.31.6-1slp2+1 - - -- Taesoo Jun Thu, 30 Dec 2010 
11:21:36 +0900 - -libsoup2.4 (2.32.2-1slp2+6) unstable; urgency=low - - * Push patched files by sungwon chung - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.32.2-1slp2+6 - - -- Taesoo Jun Wed, 22 Dec 2010 17:14:22 +0900 - -libsoup2.4 (2.32.2-1slp2+5) unstable; urgency=low - - * Fix build break by certificate patch - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.32.2-1slp2+5 - - -- Taesoo Jun Thu, 16 Dec 2010 16:31:56 +0900 - -libsoup2.4 (2.32.2-1slp2+4) unstable; urgency=low - - * Push patched files - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.32.2-1slp2+4 - - -- Taesoo Jun Thu, 16 Dec 2010 11:37:18 +0900 - -libsoup2.4 (2.32.2-1slp2+3) unstable; urgency=low - - * Apply a patch about a device binding to libsoup - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.32.2-1slp2+3 - - -- Taesoo Jun Wed, 15 Dec 2010 16:53:17 +0900 - -libsoup2.4 (2.32.2-1slp2+2) unstable; urgency=low - - * Add soup-debug.h into dev package - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.32.2-1slp2+2 - - -- Sungwon Chung Fri, 10 Dec 2010 21:09:39 +0900 - -libsoup2.4 (2.32.2-1slp2+1) unstable; urgency=low - - * Upgrade libsoup to 2.32.2 - * Git : 165.213.180.234:slp/pkgs/l/libsoup2.4 - * Tag : libsoup2.4_2.32.2-1slp2+1 - - -- Sungwon Chung Fri, 10 Dec 2010 15:01:52 +0900 - -libsoup2.4 (2.30.0-9slp2+1) unstable; urgency=low - - * Commit fixing dailymotion.com problem with flash applied - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-9slp2+1 - - -- Lukasz Slachciak Mon, 29 Nov 2010 17:24:19 +0100 - -libsoup2.4 (2.30.0-8slp2+2) unstable; urgency=low - - * Fix for crash after sign in on mobile version of twitter.com - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-8slp2+2 - - -- Lukasz Slachciak Tue, 09 Nov 2010 17:36:49 +0100 - -libsoup2.4 (2.30.0-8slp2+1) unstable; urgency=low - - * Fix for problems with 
gnutls_certificate_get_peers - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-8slp2+1 - - -- Lukasz Slachciak Fri, 05 Nov 2010 17:45:11 +0100 - -libsoup2.4 (2.30.0-7slp2+3) unstable; urgency=low - - * Additional gnutls session checks added for certificate handling - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-7slp2+3 - - -- Lukasz Slachciak Fri, 22 Oct 2010 16:55:21 +0200 - -libsoup2.4 (2.30.0-7slp2+2) unstable; urgency=low - - * Added new SoupMessage property with server certificates list - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-7slp2+2 - - -- Lukasz Slachciak Mon, 18 Oct 2010 11:59:34 +0200 - -libsoup2.4 (2.30.0-7slp2+1) unstable; urgency=low - - * Added new SoupMessage property with server certificates list - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-7slp2+1 - - -- Jongmin Lee Mon, 18 Oct 2010 11:37:19 +0200 - -libsoup2.4 (2.30.0-6slp2+3) unstable; urgency=low - - * Add "Replaces" field in debian control file - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-6slp2+3 - - -- Jongmin Lee Thu, 26 Aug 2010 10:41:47 +0900 - -libsoup2.4 (2.30.0-6slp2+2) unstable; urgency=low - - * Remove "Replaces" field in debian control file - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-6slp2+2 - - -- Jongmin Lee Wed, 25 Aug 2010 14:37:06 +0900 - -libsoup2.4 (2.30.0-6slp2+1) unstable; urgency=low - - * fix for livejournal.com incorrect response headers - * Git : 165.213.180.234:/git/slp/pkgs/libsoup2.4 - * Tag : libsoup2.4_2.30.0-6slp2+1 - - -- Lukasz Slachciak Thu, 22 Jul 2010 10:36:13 +0200 - -libsoup2.4 (2.30.0-5slp2+3) unstable; urgency=low - - * Fix flashplayer's build break - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : libsoup2.4_2.30.0-5slp2+3 - - -- Jaehyun Kim Fri, 09 Jul 2010 11:06:53 +0900 - -libsoup2.4 (2.30.0-5slp2+2) unstable; urgency=low - - * 
Maintainer/Uploader updated - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : libsoup2.4_2.30.0-5slp2+2 - - -- Jaehyun Kim Thu, 08 Jul 2010 15:48:05 +0900 - -libsoup2.4 (2.30.0-5slp2+1) unstable; urgency=low - - * unneeded logging removed - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : libsoup2.4_2.30.0-5slp2+1 - - -- Lukasz Slachciak Mon, 07 Jun 2010 10:40:49 +0200 - -libsoup2.4 (2.30.0-4slp2+1) unstable; urgency=low - - * fix for crash when connection address is invalid - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : libsoup2.4_2.30.0-4slp2+1 - - -- Lukasz Slachciak Fri, 28 May 2010 17:07:07 +0200 - -libsoup2.4 (2.30.0-3slp2+2) unstable; urgency=low - - * tag description added - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : libsoup2.4_2.30.0-3slp2+2 - - -- Lukasz Slachciak Mon, 24 May 2010 14:21:57 +0200 - -libsoup2.4 (2.30.0-3slp2+1) unstable; urgency=low - - * changelog updated - - -- Lukasz Slachciak Mon, 24 May 2010 14:04:22 +0200 - -libsoup2.4 (2.30.0-2slp2+4) unstable; urgency=low - - * unneded files removed - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : libsoup2.4_2.30.0-3slp2+1 - - -- Lukasz Slachciak Mon, 24 May 2010 13:58:54 +0200 - -libsoup2.4 (2.30.0-2slp2+3) unstable; urgency=low - - * fixing tag again. 
upsss - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : libsoup2.4_2.30.0-2slp2+3 - - -- Lukasz Slachciak Fri, 21 May 2010 15:56:27 +0900 - -libsoup2.4 (2.30.0-2slp2+2) unstable; urgency=low - - * adding tag description - * Git : 165.213.180.234:/git/slp2.0/slp2.0-pkgs/libsoup-2.4 - * Tag : 2.30.0-2slp2+2 - - -- Lukasz Slachciak Fri, 21 May 2010 15:40:45 +0900 - -libsoup2.4 (2.30.0-2slp2+1) unstable; urgency=low - - * just upversioning due to build problems - - -- Lukasz Slachciak Fri, 21 May 2010 15:19:39 +0900 - -libsoup2.4 (2.30.0-2slp2) unstable; urgency=low - - * Fix for Samsung Proxy problem in following situations: - - when running wikipedia.org from tests/get - - when using Samsung3G and opening abc.go.com site - - when using CJP01 router + proxy value set and abc.go.com - - -- Lukasz Slachciak Fri, 21 May 2010 14:47:58 +0900 - -libsoup2.4 (2.30.0-1slp2) unstable; urgency=low - - * libsoup upversioned to 2.30.0 - - -- Lukasz Slachciak Wed, 14 Apr 2010 16:29:42 +0200 - -libsoup2.4 (2.29.5-12slp2) unstable; urgency=low - - * Added PRAGMA secure_delete, when db is initialized, to clear cookie file content when cookies are deleted - - -- Lukasz Slachciak Thu, 08 Apr 2010 11:53:08 +0200 - -libsoup2.4 (2.29.5-11slp2+14) unstable; urgency=low - - * Added missing dependency - - -- Lukasz Slachciak Thu, 25 Mar 2010 12:36:01 +0100 - -libsoup2.4 (2.29.5-11slp2+13) unstable; urgency=low - - * Recompilation because of toolchain upgrade - - -- Lukasz Slachciak Thu, 25 Mar 2010 09:42:28 +0100 - -libsoup2.4 (2.29.5-11slp2+12) unstable; urgency=low - - * added option --enable-sqllite - - -- Lukasz Slachciak Wed, 24 Mar 2010 16:52:36 +0100 - -libsoup2.4 (2.29.5-11slp2+11) unstable; urgency=low - - * reversion for new package policy - - -- Jaehun Lim Fri, 19 Mar 2010 17:26:50 +0900 - -libsoup2.4 (2.29.5-10) unstable; urgency=low - - * memory leaks and logging fixes - - -- Lukasz Slachciak Thu, 04 Mar 2010 14:40:54 +0100 - -libsoup2.4 (2.29.5-9) unstable; 
urgency=low - - * logging fixes - - -- Lukasz Slachciak Thu, 25 Feb 2010 16:45:08 +0100 - -libsoup2.4 (2.29.5-8) unstable; urgency=low - - * Soup initialization fix for binding property - - -- Lukasz Slachciak Tue, 09 Feb 2010 11:10:05 +0100 - -libsoup2.4 (2.29.5-7) unstable; urgency=low - - * Setting binding property for session and connection - - -- Lukasz Slachciak Wed, 03 Feb 2010 11:37:40 +0100 - -libsoup2.4 (2.29.5-6) unstable; urgency=low - - * Add a gnutle dependency - - -- Gyuyoung Kim Thu, 28 Jan 2010 12:10:23 +0900 - -libsoup2.4 (2.29.5-5) unstable; urgency=low - - * Conflicts removed from debian/control - - -- Lukasz Slachciak Wed, 27 Jan 2010 11:02:11 +0100 - -libsoup2.4 (2.29.5-4) unstable; urgency=low - - * Fixed package files (conflict with 2.4-1): - - -- Lukasz Slachciak Wed, 27 Jan 2010 09:31:56 +0100 - -libsoup2.4 (2.29.5-3) unstable; urgency=low - - * Fixed package files - - -- Lukasz Slachciak Wed, 27 Jan 2010 08:20:40 +0100 - -libsoup2.4 (2.29.5-2) unstable; urgency=low - - * Fixed package files. - - -- Jaroslaw Staniek Tue, 26 Jan 2010 16:49:59 +0100 - -libsoup2.4 (2.29.5-1) UNRELEASED; urgency=low - - * Initial release. 
- - -- Lukasz Slachciak Mon, 25 Jan 2010 15:24:00 +0100 diff --git a/debian/compat b/debian/compat deleted file mode 100644 index 7ed6ff8..0000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -5 diff --git a/debian/control b/debian/control deleted file mode 100644 index 47b40e2..0000000 --- a/debian/control +++ /dev/null @@ -1,26 +0,0 @@ -Source: libsoup2.4 -Section: net -Priority: extra -Maintainer: Jaehyun Kim , Taesoo Jun -Uploaders: Lukasz Slachciak , Jaroslaw Staniek , Lukasz Slachciak , Gyuyoung Kim , Jaehun Lim , Jihye Lim Jongmin Lee , Sungwon Chung , Taesoo Jun , Taeksu Shin , Seonghyeon Kim , Dongwook Lee , seung hak lee -Build-Depends: debhelper (>= 5), autotools-dev, libglib2.0-dev (>= 2.21.3), pkg-config, libxml2-dev, zlib1g-dev, libsqlite3-dev, glib-networking -Standards-Version: 0.1.0 - -Package: libsoup2.4 -Section: libs -Architecture: any -Depends: zlib1g, libsqlite3-0, libglib2.0-0, libxml2, glib-networking -Replaces: libsoup2.4-1 -Description: an HTTP library implementation in C (shared libs) - -Package: libsoup2.4-dev -Section: libdevel -Architecture: any -Depends: libsoup2.4 (= ${binary:Version}), libglib2.0-dev (>= 2.18.1), libxml2-dev, pkg-config, zlib1g-dev, libsqlite3-dev -Description: an HTTP library implementation in C (development files) - -Package: libsoup2.4-dbg -Section: debug -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends}, libsoup2.4 (= ${Source-Version}) -Description: an HTTP library implementation in C (development files) diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index e69de29..0000000 diff --git a/debian/docs b/debian/docs deleted file mode 100644 index 6f12db5..0000000 --- a/debian/docs +++ /dev/null @@ -1,2 +0,0 @@ -AUTHORS -README diff --git a/debian/libsoup2.4-dev.install.in b/debian/libsoup2.4-dev.install.in deleted file mode 100644 index ca2c259..0000000 --- a/debian/libsoup2.4-dev.install.in +++ /dev/null @@ -1,4 +0,0 @@ -@PREFIX@/include/libsoup-2.4/libsoup/* 
-@PREFIX@/lib/libsoup*.la -@PREFIX@/lib/libsoup*.a -@PREFIX@/lib/pkgconfig/libsoup* diff --git a/debian/libsoup2.4.install.in b/debian/libsoup2.4.install.in deleted file mode 100644 index 4bc5b2b..0000000 --- a/debian/libsoup2.4.install.in +++ /dev/null @@ -1 +0,0 @@ -@PREFIX@/lib/libsoup-2.4.so* diff --git a/debian/rules b/debian/rules deleted file mode 100755 index 7d274d7..0000000 --- a/debian/rules +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/make -f -# -*- makefile -*- -# Sample debian/rules that uses debhelper. -# This file was originally written by Joey Hess and Craig Small. -# As a special exception, when this file is copied by dh-make into a -# dh-make output file, you may use that output file without restriction. -# This special exception was added by Craig Small in version 0.37 of dh-make. - -# Uncomment this to turn on verbose mode. -export DH_VERBOSE=1 - - -# These are used for cross-compiling and for saving the configure script -# from having to guess our platform (since we know it already) -DEB_HOST_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE) -DEB_BUILD_GNU_TYPE ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE) - -CFLAGS += -Wall -g -LDFLAGS ?= -PREFIX ?= /usr -DATADIR ?= /opt - -ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS))) - CFLAGS += -O0 -else - CFLAGS += -O2 -endif - -LDFLAGS += -Wl,--rpath=$(PREFIX)/lib -Wl,--as-needed - -config.status: - dh_testdir - # Add here commands to configure the package. -# CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" ./autogen.sh --prefix=$(PREFIX) --without-gnome --enable-sqllite=yes - CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" ./autogen.sh --prefix=$(PREFIX) --without-gnome --enable-sqllite=yes --disable-tls-check - -build: build-stamp - -build-stamp: config.status - dh_testdir - - # Add here commands to compile the package. 
- $(MAKE) - #docbook-to-man debian/ncurses.sgml > ncurses.1 - - for f in `find $(CURDIR)/debian/ -name "*.in"`; do \ - cat $$f > $${f%.in}; \ - sed -i -e "s#@PREFIX@#$(PREFIX)#g" $${f%.in}; \ - sed -i -e "s#@DATADIR@#$(DATADIR)#g" $${f%.in}; \ - done - - touch $@ - -clean: - dh_testdir - dh_testroot - rm -f build-stamp - - # Add here commands to clean up after the build process. - -$(MAKE) distclean -ifneq "$(wildcard /usr/share/misc/config.sub)" "" - cp -f /usr/share/misc/config.sub config.sub -endif -ifneq "$(wildcard /usr/share/misc/config.guess)" "" - cp -f /usr/share/misc/config.guess config.guess -endif - - for f in `find $(CURDIR)/debian/ -name "*.in"`; do \ - rm -f $${f%.in}; \ - done - - dh_clean - -install: build - dh_testdir - dh_testroot - dh_clean -k - dh_installdirs - - # Add here commands to install the package into debian/ncurses. - $(MAKE) DESTDIR=$(CURDIR)/debian/tmp install - - -# Build architecture-independent files here. -binary-indep: build install -# We have nothing to do by default. - -# Build architecture-dependent files here. -binary-arch: build install - dh_testdir - dh_testroot - dh_installchangelogs - dh_installdocs - dh_installexamples - dh_install --sourcedir=debian/tmp -# dh_installmenu -# dh_installdebconf -# dh_installlogrotate -# dh_installemacsen -# dh_installpam -# dh_installmime -# dh_python -# dh_installinit -# dh_installcron -# dh_installinfo - dh_installman - dh_link - dh_strip --dbg-package=libsoup2.4-dbg - dh_compress - dh_fixperms -# dh_perl - dh_makeshlibs - dh_installdeb - dh_shlibdeps - dh_gencontrol - dh_md5sums - dh_builddeb - -binary: binary-indep binary-arch -.PHONY: build clean binary-indep binary-arch binary install diff --git a/depcomp b/depcomp new file mode 100755 index 0000000..bd0ac08 --- /dev/null +++ b/depcomp @@ -0,0 +1,688 @@ +#! 
/bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2011-12-04.11; # UTC + +# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010, +# 2011 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva . + +case $1 in + '') + echo "$0: No command. Try \`$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by `PROGRAMS ARGS'. + object Object file output by `PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputting dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit $? 
+ ;; +esac + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. +depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Some modes work just like other modes, but use different flags. We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. + dashmflag=-xM + depmode=dashmstdout +fi + +cygpath_u="cygpath -u -f -" +if test "$depmode" = msvcmsys; then + # This is just like msvisualcpp but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvisualcpp +fi + +if test "$depmode" = msvc7msys; then + # This is just like msvc7 but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvc7 +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. +## Unfortunately, FreeBSD c89 acceptance of flags depends upon +## the command line argument order; so add the flags where they +## appear in depend2.am. Note that the slowdown incurred here +## affects only configure: in makefiles, %FASTDEP% shortcuts this. 
+ for arg + do + case $arg in + -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; + *) set fnord "$@" "$arg" ;; + esac + shift # fnord + shift # $arg + done + "$@" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. +## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz +## The second -e expression handles DOS-style file names with drive letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the `deleted header file' problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. + tr ' ' ' +' < "$tmpdepfile" | +## Some versions of gcc put a space before the `:'. On the theory +## that the space means something, we add a space to the output as +## well. hp depmode also adds that space, but also prefixes the VPATH +## to the object. Take care to not repeat it in the output. 
+## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like `#:fec' to the end of the + # dependency line. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ + tr ' +' ' ' >> "$depfile" + echo >> "$depfile" + + # The second pass generates a dummy entry for each header file. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts `$object:' at the + # start of each line; $object doesn't have directory information. 
+ # Version 6 uses the directory in both cases. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.u + tmpdepfile2=$base.u + tmpdepfile3=$dir.libs/$base.u + "$@" -Wc,-M + else + tmpdepfile1=$dir$base.u + tmpdepfile2=$dir$base.u + tmpdepfile3=$dir$base.u + "$@" -M + fi + stat=$? + + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + # Each line is of the form `foo.o: dependent.h'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. + sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +icc) + # Intel's C compiler understands `-MD -MF file'. However on + # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c + # ICC 7.0 will fill foo.d with something like + # foo.o: sub/foo.c + # foo.o: sub/foo.h + # which is wrong. We want: + # sub/foo.o: sub/foo.c + # sub/foo.o: sub/foo.h + # sub/foo.c: + # sub/foo.h: + # ICC 7.1 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using \ : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. 
+ # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" | + sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp2) + # The "hp" stanza above does not work with aCC (C++) and HP's ia64 + # compilers, which have integrated preprocessors. The correct option + # to use with these is +Maked; it writes dependencies to a file named + # 'foo.d', which lands next to the object file, wherever that + # happens to be. + # Much of this is similar to the tru64 case; see comments there. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir.libs/$base.d + "$@" -Wc,+Maked + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + "$@" +Maked + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile" + # Add `dependent.h:' lines. + sed -ne '2,${ + s/^ *// + s/ \\*$// + s/$/:/ + p + }' "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" "$tmpdepfile2" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in `foo.d' instead, so we check for that too. + # Subdirectories are respected. 
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + + if test "$libtool" = yes; then + # With Tru64 cc, shared objects can also be used to make a + # static library. This mechanism is used in libtool 1.4 series to + # handle both shared and static libraries in a single compilation. + # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. + # + # With libtool 1.5 this exception was removed, and libtool now + # generates 2 separate objects for the 2 libraries. These two + # compilations output dependencies in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. + tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 + tmpdepfile2=$dir$base.o.d # libtool 1.5 + tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 + tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 + "$@" -Wc,-MD + else + tmpdepfile1=$dir$base.o.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + tmpdepfile4=$dir$base.d + "$@" -MD + fi + + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. 
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +msvc7) + if test "$libtool" = yes; then + showIncludes=-Wc,-showIncludes + else + showIncludes=-showIncludes + fi + "$@" $showIncludes > "$tmpdepfile" + stat=$? + grep -v '^Note: including file: ' "$tmpdepfile" + if test "$stat" = 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The first sed program below extracts the file names and escapes + # backslashes for cygpath. The second sed program outputs the file + # name when reading, but also accumulates all include files in the + # hold buffer in order to output them again at the end. This only + # works with sed implementations that can handle large buffers. + sed < "$tmpdepfile" -n ' +/^Note: including file: *\(.*\)/ { + s//\1/ + s/\\/\\\\/g + p +}' | $cygpath_u | sort -u | sed -n ' +s/ /\\ /g +s/\(.*\)/ \1 \\/p +s/.\(.*\) \\/\1:/ +H +$ { + s/.*/ / + G + p +}' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvc7msys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. 
+ IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for `:' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise. + "$@" $dashmflag | + sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + tr ' ' ' +' < "$tmpdepfile" | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no eat=no + for arg + do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + if test $eat = yes; then + eat=no + continue + fi + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. Remove + # the object too, otherwise makedepend will parse it as a source file. + -arch) + eat=yes ;; + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix=`echo "$object" | sed 's/^.*\././'` + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + # makedepend may prepend the VPATH from the source file name to the object. + # No need to regex-escape $object, excess matching of '.' is harmless. 
+ sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" + sed '1,2d' "$tmpdepfile" | tr ' ' ' +' | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E | + sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | + sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. 
+ if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + IFS=" " + for arg + do + case "$arg" in + -o) + shift + ;; + $object) + shift + ;; + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E 2>/dev/null | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile" + echo " " >> "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvcmsys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/docs/Makefile.in b/docs/Makefile.in new file mode 100644 index 0000000..4be14e6 --- /dev/null +++ b/docs/Makefile.in @@ -0,0 +1,616 @@ +# Makefile.in generated by automake 1.11.3 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = docs +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/gtk-doc.m4 \ + $(top_srcdir)/m4/introspection.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ + html-recursive info-recursive install-data-recursive \ + install-dvi-recursive install-exec-recursive \ + install-html-recursive install-info-recursive \ + install-pdf-recursive install-ps-recursive install-recursive \ + installcheck-recursive installdirs-recursive pdf-recursive \ 
+ ps-recursive uninstall-recursive +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ + $(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \ + distdir +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +APACHE_HTTPD = @APACHE_HTTPD@ +APACHE_MODULE_DIR = @APACHE_MODULE_DIR@ +APACHE_PHP_MODULE = @APACHE_PHP_MODULE@ +APACHE_PHP_MODULE_DIR = @APACHE_PHP_MODULE_DIR@ +APACHE_SSL_MODULE_DIR = @APACHE_SSL_MODULE_DIR@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL = @CURL@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GLIB_CFLAGS = @GLIB_CFLAGS@ +GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@ +GLIB_GENMARSHAL = 
@GLIB_GENMARSHAL@ +GLIB_LIBS = @GLIB_LIBS@ +GLIB_MAKEFILE = @GLIB_MAKEFILE@ +GLIB_MKENUMS = @GLIB_MKENUMS@ +GNOME_KEYRING_CFLAGS = @GNOME_KEYRING_CFLAGS@ +GNOME_KEYRING_LIBS = @GNOME_KEYRING_LIBS@ +GOBJECT_QUERY = @GOBJECT_QUERY@ +GREP = @GREP@ +GTKDOC_CHECK = @GTKDOC_CHECK@ +GTKDOC_DEPS_CFLAGS = @GTKDOC_DEPS_CFLAGS@ +GTKDOC_DEPS_LIBS = @GTKDOC_DEPS_LIBS@ +GTKDOC_MKPDF = @GTKDOC_MKPDF@ +GTKDOC_REBASE = @GTKDOC_REBASE@ +HAVE_GNOME = @HAVE_GNOME@ +HTML_DIR = @HTML_DIR@ +IF_HAVE_PHP = @IF_HAVE_PHP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +INTROSPECTION_CFLAGS = @INTROSPECTION_CFLAGS@ +INTROSPECTION_COMPILER = @INTROSPECTION_COMPILER@ +INTROSPECTION_GENERATE = @INTROSPECTION_GENERATE@ +INTROSPECTION_GIRDIR = @INTROSPECTION_GIRDIR@ +INTROSPECTION_LIBS = @INTROSPECTION_LIBS@ +INTROSPECTION_MAKEFILE = @INTROSPECTION_MAKEFILE@ +INTROSPECTION_SCANNER = @INTROSPECTION_SCANNER@ +INTROSPECTION_TYPELIBDIR = @INTROSPECTION_TYPELIBDIR@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MISSING_REGRESSION_TEST_PACKAGES = @MISSING_REGRESSION_TEST_PACKAGES@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PHP = @PHP@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SOUP_AGE = @SOUP_AGE@ +SOUP_API_VERSION = @SOUP_API_VERSION@ +SOUP_CURRENT 
= @SOUP_CURRENT@ +SOUP_DEBUG_FLAGS = @SOUP_DEBUG_FLAGS@ +SOUP_MAINTAINER_FLAGS = @SOUP_MAINTAINER_FLAGS@ +SOUP_REVISION = @SOUP_REVISION@ +SQLITE_CFLAGS = @SQLITE_CFLAGS@ +SQLITE_LIBS = @SQLITE_LIBS@ +STRIP = @STRIP@ +VERSION = @VERSION@ +XML_CFLAGS = @XML_CFLAGS@ +XML_LIBS = @XML_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +ntlm_auth = @ntlm_auth@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +SUBDIRS = reference +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ 
+ esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign docs/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign docs/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +# This directory's subdirectories are mostly independent; you can cd +# into them and run `make' without going through this Makefile. +# To change the values of `make' variables: instead of editing Makefiles, +# (1) if the variable is set in `config.status', edit `config.status' +# (which will cause the Makefiles to be regenerated when you run `make'); +# (2) otherwise, pass the desired values on the `make' command line. 
+$(RECURSIVE_TARGETS): + @fail= failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +$(RECURSIVE_CLEAN_TARGETS): + @fail= failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + rev=''; for subdir in $$list; do \ + if test "$$subdir" = "."; then :; else \ + rev="$$subdir $$rev"; \ + fi; \ + done; \ + rev="$$rev ."; \ + target=`echo $@ | sed s/-recursive//`; \ + for subdir in $$rev; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done && test -z "$$fail" +tags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ + done +ctags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . 
|| ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ + done + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + 
here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z 
"$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \ + install-am install-strip tags-recursive + +.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ + all all-am check check-am clean clean-generic clean-libtool \ + ctags ctags-recursive distclean distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am 
maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ + uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/docs/reference/Makefile.am b/docs/reference/Makefile.am index 43b568d..f21e710 100644 --- a/docs/reference/Makefile.am +++ b/docs/reference/Makefile.am @@ -16,7 +16,7 @@ DOC_SOURCE_DIR=../../libsoup SCAN_OPTIONS=--deprecated-guards=LIBSOUP_DISABLE_DEPRECATED --rebuild-types # Extra options to supply to gtkdoc-scangobj. -SCANGOBJ_OPTIONS=--type-init-func 'g_type_init(); g_thread_init(NULL)' +SCANGOBJ_OPTIONS= # Extra options to supply to gtkdoc-mkdb. MKDB_OPTIONS=--sgml-mode --output-format=xml @@ -36,7 +36,9 @@ IGNORE_HFILES= soup.h soup-marshal.h soup-enum-types.h \ soup-dns.h soup-auth-manager.h soup-auth-manager-ntlm.h \ soup-message-queue.h soup-path-map.h soup-ssl.h \ soup-proxy-resolver.h soup-proxy-resolver-gnome.h \ - soup-proxy-resolver-static.h + soup-proxy-resolver-static.h soup-directory-input-stream.h \ + soup-http-input-stream.h soup-password-manager.h \ + soup-password-manager-gnome.h # Images to copy into HTML directory. HTML_IMAGES = @@ -45,8 +47,7 @@ HTML_IMAGES = content_files = \ build-howto.xml \ client-howto.xml \ - server-howto.xml \ - porting-2.2-2.4.xml + server-howto.xml # Other files to distribute. extra_files = @@ -61,7 +62,10 @@ GTKDOC_CFLAGS = \ $(XML_CFLAGS) \ $(GNUTLS_CFLAGS) -GTKDOC_LIBS = $(top_builddir)/libsoup/libsoup-gnome-2.4.la +GTKDOC_LIBS = \ + $(top_builddir)/libsoup/libsoup-2.4.la \ + $(top_builddir)/libsoup/libsoup-gnome-2.4.la \ + $(GLIB_LIBS) # include common portion ... 
include $(top_srcdir)/gtk-doc.make diff --git a/docs/reference/Makefile.in b/docs/reference/Makefile.in new file mode 100644 index 0000000..1b58719 --- /dev/null +++ b/docs/reference/Makefile.in @@ -0,0 +1,760 @@ +# Makefile.in generated by automake 1.11.3 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# -*- mode: makefile -*- + +#################################### +# Everything below here is generic # +#################################### +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ + $(top_srcdir)/gtk-doc.make +subdir = docs/reference +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/gtk-doc.m4 \ + $(top_srcdir)/m4/introspection.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 
$(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +SOURCES = +DIST_SOURCES = +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +APACHE_HTTPD = @APACHE_HTTPD@ +APACHE_MODULE_DIR = @APACHE_MODULE_DIR@ +APACHE_PHP_MODULE = @APACHE_PHP_MODULE@ +APACHE_PHP_MODULE_DIR = @APACHE_PHP_MODULE_DIR@ +APACHE_SSL_MODULE_DIR = @APACHE_SSL_MODULE_DIR@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL = @CURL@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GLIB_CFLAGS = @GLIB_CFLAGS@ +GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@ +GLIB_GENMARSHAL = @GLIB_GENMARSHAL@ +GLIB_LIBS = @GLIB_LIBS@ +GLIB_MAKEFILE = @GLIB_MAKEFILE@ +GLIB_MKENUMS = @GLIB_MKENUMS@ +GNOME_KEYRING_CFLAGS = @GNOME_KEYRING_CFLAGS@ +GNOME_KEYRING_LIBS = @GNOME_KEYRING_LIBS@ +GOBJECT_QUERY = @GOBJECT_QUERY@ +GREP = @GREP@ +GTKDOC_CHECK = @GTKDOC_CHECK@ +GTKDOC_DEPS_CFLAGS = @GTKDOC_DEPS_CFLAGS@ +GTKDOC_DEPS_LIBS = @GTKDOC_DEPS_LIBS@ +GTKDOC_MKPDF = @GTKDOC_MKPDF@ +GTKDOC_REBASE = @GTKDOC_REBASE@ +HAVE_GNOME = @HAVE_GNOME@ +HTML_DIR = @HTML_DIR@ +IF_HAVE_PHP = @IF_HAVE_PHP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = 
@INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +INTROSPECTION_CFLAGS = @INTROSPECTION_CFLAGS@ +INTROSPECTION_COMPILER = @INTROSPECTION_COMPILER@ +INTROSPECTION_GENERATE = @INTROSPECTION_GENERATE@ +INTROSPECTION_GIRDIR = @INTROSPECTION_GIRDIR@ +INTROSPECTION_LIBS = @INTROSPECTION_LIBS@ +INTROSPECTION_MAKEFILE = @INTROSPECTION_MAKEFILE@ +INTROSPECTION_SCANNER = @INTROSPECTION_SCANNER@ +INTROSPECTION_TYPELIBDIR = @INTROSPECTION_TYPELIBDIR@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MISSING_REGRESSION_TEST_PACKAGES = @MISSING_REGRESSION_TEST_PACKAGES@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PHP = @PHP@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SOUP_AGE = @SOUP_AGE@ +SOUP_API_VERSION = @SOUP_API_VERSION@ +SOUP_CURRENT = @SOUP_CURRENT@ +SOUP_DEBUG_FLAGS = @SOUP_DEBUG_FLAGS@ +SOUP_MAINTAINER_FLAGS = @SOUP_MAINTAINER_FLAGS@ +SOUP_REVISION = @SOUP_REVISION@ +SQLITE_CFLAGS = @SQLITE_CFLAGS@ +SQLITE_LIBS = @SQLITE_LIBS@ +STRIP = @STRIP@ +VERSION = @VERSION@ +XML_CFLAGS = @XML_CFLAGS@ +XML_LIBS = @XML_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ 
+am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +ntlm_auth = @ntlm_auth@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = 1.6 + +# The name of the module +DOC_MODULE = libsoup-2.4 + +# The top-level SGML file. +DOC_MAIN_SGML_FILE = $(DOC_MODULE)-docs.sgml + +# The directory containing the source code. Relative to $(srcdir). +# gtk-doc will search all .c & .h files beneath here for inline comments +# documenting functions and macros. +DOC_SOURCE_DIR = ../../libsoup + +# Extra options to supply to gtkdoc-scan. +SCAN_OPTIONS = --deprecated-guards=LIBSOUP_DISABLE_DEPRECATED --rebuild-types + +# Extra options to supply to gtkdoc-scangobj. +SCANGOBJ_OPTIONS = + +# Extra options to supply to gtkdoc-mkdb. +MKDB_OPTIONS = --sgml-mode --output-format=xml + +# Extra options to supply to gtkdoc-fixref. +FIXXREF_OPTIONS = + +# Used for dependencies. +HFILE_GLOB = +CFILE_GLOB = + +# Header files to ignore when scanning. 
+IGNORE_HFILES = soup.h soup-marshal.h soup-enum-types.h \ + soup-message-private.h soup-session-private.h \ + soup-auth-basic.h soup-auth-digest.h soup-auth-ntlm.h \ + soup-connection.h soup-connection-ntlm.h \ + soup-dns.h soup-auth-manager.h soup-auth-manager-ntlm.h \ + soup-message-queue.h soup-path-map.h soup-ssl.h \ + soup-proxy-resolver.h soup-proxy-resolver-gnome.h \ + soup-proxy-resolver-static.h soup-directory-input-stream.h \ + soup-http-input-stream.h soup-password-manager.h \ + soup-password-manager-gnome.h + + +# Images to copy into HTML directory. +HTML_IMAGES = + +# Extra XML files that are included by $(DOC_MAIN_SGML_FILE). +content_files = \ + build-howto.xml \ + client-howto.xml \ + server-howto.xml + + +# Other files to distribute. +extra_files = + +# CFLAGS and LDFLAGS for compiling scan program. Only needed +# if $(DOC_MODULE).types is non-empty. +GTKDOC_CFLAGS = \ + -I$(top_srcdir) \ + -I$(top_builddir) \ + $(GLIB_CFLAGS) \ + $(XML_CFLAGS) \ + $(GNUTLS_CFLAGS) + +GTKDOC_LIBS = \ + $(top_builddir)/libsoup/libsoup-2.4.la \ + $(top_builddir)/libsoup/libsoup-gnome-2.4.la \ + $(GLIB_LIBS) + +@GTK_DOC_USE_LIBTOOL_FALSE@GTKDOC_CC = $(CC) $(INCLUDES) $(GTKDOC_DEPS_CFLAGS) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +@GTK_DOC_USE_LIBTOOL_TRUE@GTKDOC_CC = $(LIBTOOL) --tag=CC --mode=compile $(CC) $(INCLUDES) $(GTKDOC_DEPS_CFLAGS) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +@GTK_DOC_USE_LIBTOOL_FALSE@GTKDOC_LD = $(CC) $(GTKDOC_DEPS_LIBS) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) +@GTK_DOC_USE_LIBTOOL_TRUE@GTKDOC_LD = $(LIBTOOL) --tag=CC --mode=link $(CC) $(GTKDOC_DEPS_LIBS) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) +@GTK_DOC_USE_LIBTOOL_FALSE@GTKDOC_RUN = +@GTK_DOC_USE_LIBTOOL_TRUE@GTKDOC_RUN = $(LIBTOOL) --mode=execute + +# We set GPATH here; this gives us semantics for GNU make +# which are more like other make's VPATH, when it comes to +# whether a source that is a target of one rule is then +# searched for in VPATH/GPATH. 
+# +GPATH = $(srcdir) +TARGET_DIR = $(HTML_DIR)/$(DOC_MODULE) +SETUP_FILES = \ + $(content_files) \ + $(DOC_MAIN_SGML_FILE) \ + $(DOC_MODULE)-sections.txt \ + $(DOC_MODULE)-overrides.txt + +EXTRA_DIST = \ + $(HTML_IMAGES) \ + $(SETUP_FILES) + +DOC_STAMPS = setup-build.stamp scan-build.stamp tmpl-build.stamp sgml-build.stamp \ + html-build.stamp pdf-build.stamp \ + tmpl.stamp sgml.stamp html.stamp pdf.stamp + +SCANOBJ_FILES = \ + $(DOC_MODULE).args \ + $(DOC_MODULE).hierarchy \ + $(DOC_MODULE).interfaces \ + $(DOC_MODULE).prerequisites \ + $(DOC_MODULE).signals + +REPORT_FILES = \ + $(DOC_MODULE)-undocumented.txt \ + $(DOC_MODULE)-undeclared.txt \ + $(DOC_MODULE)-unused.txt + +CLEANFILES = $(SCANOBJ_FILES) $(REPORT_FILES) $(DOC_STAMPS) +@ENABLE_GTK_DOC_TRUE@@GTK_DOC_BUILD_HTML_FALSE@HTML_BUILD_STAMP = +@ENABLE_GTK_DOC_TRUE@@GTK_DOC_BUILD_HTML_TRUE@HTML_BUILD_STAMP = html-build.stamp +@ENABLE_GTK_DOC_TRUE@@GTK_DOC_BUILD_PDF_FALSE@PDF_BUILD_STAMP = +@ENABLE_GTK_DOC_TRUE@@GTK_DOC_BUILD_PDF_TRUE@PDF_BUILD_STAMP = pdf-build.stamp +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(top_srcdir)/gtk-doc.make $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign docs/reference/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign docs/reference/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; +$(top_srcdir)/gtk-doc.make: + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags: TAGS +TAGS: + +ctags: CTAGS +CTAGS: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook +check-am: all-am +check: check-am +all-am: Makefile all-local +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean-am: clean-generic clean-libtool clean-local mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-local + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-data-local + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic \ + maintainer-clean-local + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-local + +.MAKE: install-am install-strip + +.PHONY: all all-am all-local check check-am clean clean-generic \ + clean-libtool clean-local dist-hook distclean \ + distclean-generic distclean-libtool distclean-local distdir \ + dvi dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-data-local install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic \ + maintainer-clean-local mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am \ + uninstall-local + + +@ENABLE_GTK_DOC_TRUE@all-local: $(HTML_BUILD_STAMP) $(PDF_BUILD_STAMP) +@ENABLE_GTK_DOC_FALSE@all-local: + +docs: $(HTML_BUILD_STAMP) $(PDF_BUILD_STAMP) + +$(REPORT_FILES): sgml-build.stamp + +#### setup #### + +setup-build.stamp: + -@if test "$(abs_srcdir)" != "$(abs_builddir)" ; then \ + echo ' DOC 
Preparing build'; \ + files=`echo $(SETUP_FILES) $(expand_content_files) $(DOC_MODULE).types`; \ + if test "x$$files" != "x" ; then \ + for file in $$files ; do \ + test -f $(abs_srcdir)/$$file && \ + cp -pu $(abs_srcdir)/$$file $(abs_builddir)/ || true; \ + done; \ + fi; \ + test -d $(abs_srcdir)/tmpl && \ + { cp -rp $(abs_srcdir)/tmpl $(abs_builddir)/; \ + chmod -R u+w $(abs_builddir)/tmpl; } \ + fi + @touch setup-build.stamp + +#### scan #### + +scan-build.stamp: $(HFILE_GLOB) $(CFILE_GLOB) + @echo ' DOC Scanning header files' + @_source_dir='' ; \ + for i in $(DOC_SOURCE_DIR) ; do \ + _source_dir="$${_source_dir} --source-dir=$$i" ; \ + done ; \ + gtkdoc-scan --module=$(DOC_MODULE) --ignore-headers="$(IGNORE_HFILES)" $${_source_dir} $(SCAN_OPTIONS) $(EXTRA_HFILES) + @if grep -l '^..*$$' $(DOC_MODULE).types > /dev/null 2>&1 ; then \ + echo " DOC Introspecting gobjects"; \ + scanobj_options=""; \ + gtkdoc-scangobj 2>&1 --help | grep >/dev/null "\-\-verbose"; \ + if test "$(?)" = "0"; then \ + if test "x$(V)" = "x1"; then \ + scanobj_options="--verbose"; \ + fi; \ + fi; \ + CC="$(GTKDOC_CC)" LD="$(GTKDOC_LD)" RUN="$(GTKDOC_RUN)" CFLAGS="$(GTKDOC_CFLAGS) $(CFLAGS)" LDFLAGS="$(GTKDOC_LIBS) $(LDFLAGS)" \ + gtkdoc-scangobj $(SCANGOBJ_OPTIONS) $$scanobj_options --module=$(DOC_MODULE); \ + else \ + for i in $(SCANOBJ_FILES) ; do \ + test -f $$i || touch $$i ; \ + done \ + fi + @touch scan-build.stamp + +$(DOC_MODULE)-decl.txt $(SCANOBJ_FILES) $(DOC_MODULE)-sections.txt $(DOC_MODULE)-overrides.txt: scan-build.stamp + @true + +#### templates #### + +tmpl-build.stamp: setup-build.stamp $(DOC_MODULE)-decl.txt $(SCANOBJ_FILES) $(DOC_MODULE)-sections.txt $(DOC_MODULE)-overrides.txt + @echo ' DOC Rebuilding template files' + @gtkdoc-mktmpl --module=$(DOC_MODULE) $(MKTMPL_OPTIONS) + @if test "$(abs_srcdir)" != "$(abs_builddir)" ; then \ + if test -w $(abs_srcdir) ; then \ + cp -rp $(abs_builddir)/tmpl $(abs_srcdir)/; \ + fi \ + fi + @touch tmpl-build.stamp + +tmpl.stamp: 
tmpl-build.stamp + @true + +$(srcdir)/tmpl/*.sgml: + @true + +#### xml #### + +sgml-build.stamp: tmpl.stamp $(DOC_MODULE)-sections.txt $(srcdir)/tmpl/*.sgml $(expand_content_files) + @echo ' DOC Building XML' + @-chmod -R u+w $(srcdir) + @_source_dir='' ; \ + for i in $(DOC_SOURCE_DIR) ; do \ + _source_dir="$${_source_dir} --source-dir=$$i" ; \ + done ; \ + gtkdoc-mkdb --module=$(DOC_MODULE) --output-format=xml --expand-content-files="$(expand_content_files)" --main-sgml-file=$(DOC_MAIN_SGML_FILE) $${_source_dir} $(MKDB_OPTIONS) + @touch sgml-build.stamp + +sgml.stamp: sgml-build.stamp + @true + +#### html #### + +html-build.stamp: sgml.stamp $(DOC_MAIN_SGML_FILE) $(content_files) + @echo ' DOC Building HTML' + @rm -rf html + @mkdir html + @mkhtml_options=""; \ + gtkdoc-mkhtml 2>&1 --help | grep >/dev/null "\-\-verbose"; \ + if test "$(?)" = "0"; then \ + if test "x$(V)" = "x1"; then \ + mkhtml_options="$$mkhtml_options --verbose"; \ + fi; \ + fi; \ + gtkdoc-mkhtml 2>&1 --help | grep >/dev/null "\-\-path"; \ + if test "$(?)" = "0"; then \ + mkhtml_options="$$mkhtml_options --path=\"$(abs_srcdir)\""; \ + fi; \ + cd html && gtkdoc-mkhtml $$mkhtml_options $(MKHTML_OPTIONS) $(DOC_MODULE) ../$(DOC_MAIN_SGML_FILE) + -@test "x$(HTML_IMAGES)" = "x" || \ + for file in $(HTML_IMAGES) ; do \ + if test -f $(abs_srcdir)/$$file ; then \ + cp $(abs_srcdir)/$$file $(abs_builddir)/html; \ + fi; \ + if test -f $(abs_builddir)/$$file ; then \ + cp $(abs_builddir)/$$file $(abs_builddir)/html; \ + fi; \ + done; + @echo ' DOC Fixing cross-references' + @gtkdoc-fixxref --module=$(DOC_MODULE) --module-dir=html --html-dir=$(HTML_DIR) $(FIXXREF_OPTIONS) + @touch html-build.stamp + +#### pdf #### + +pdf-build.stamp: sgml.stamp $(DOC_MAIN_SGML_FILE) $(content_files) + @echo ' DOC Building PDF' + @rm -f $(DOC_MODULE).pdf + @mkpdf_options=""; \ + gtkdoc-mkpdf 2>&1 --help | grep >/dev/null "\-\-verbose"; \ + if test "$(?)" = "0"; then \ + if test "x$(V)" = "x1"; then \ + 
mkpdf_options="$$mkpdf_options --verbose"; \ + fi; \ + fi; \ + if test "x$(HTML_IMAGES)" != "x"; then \ + for img in $(HTML_IMAGES); do \ + part=`dirname $$img`; \ + echo $$mkpdf_options | grep >/dev/null "\-\-imgdir=$$part "; \ + if test $$? != 0; then \ + mkpdf_options="$$mkpdf_options --imgdir=$$part"; \ + fi; \ + done; \ + fi; \ + gtkdoc-mkpdf --path="$(abs_srcdir)" $$mkpdf_options $(DOC_MODULE) $(DOC_MAIN_SGML_FILE) $(MKPDF_OPTIONS) + @touch pdf-build.stamp + +############## + +clean-local: + @rm -f *~ *.bak + @rm -rf .libs + +distclean-local: + @rm -rf xml html $(REPORT_FILES) $(DOC_MODULE).pdf \ + $(DOC_MODULE)-decl-list.txt $(DOC_MODULE)-decl.txt + @if test "$(abs_srcdir)" != "$(abs_builddir)" ; then \ + rm -f $(SETUP_FILES) $(expand_content_files) $(DOC_MODULE).types; \ + rm -rf tmpl; \ + fi + +maintainer-clean-local: clean + @rm -rf xml html + +install-data-local: + @installfiles=`echo $(builddir)/html/*`; \ + if test "$$installfiles" = '$(builddir)/html/*'; \ + then echo 1>&2 'Nothing to install' ; \ + else \ + if test -n "$(DOC_MODULE_VERSION)"; then \ + installdir="$(DESTDIR)$(TARGET_DIR)-$(DOC_MODULE_VERSION)"; \ + else \ + installdir="$(DESTDIR)$(TARGET_DIR)"; \ + fi; \ + $(mkinstalldirs) $${installdir} ; \ + for i in $$installfiles; do \ + echo ' $(INSTALL_DATA) '$$i ; \ + $(INSTALL_DATA) $$i $${installdir}; \ + done; \ + if test -n "$(DOC_MODULE_VERSION)"; then \ + mv -f $${installdir}/$(DOC_MODULE).devhelp2 \ + $${installdir}/$(DOC_MODULE)-$(DOC_MODULE_VERSION).devhelp2; \ + fi; \ + $(GTKDOC_REBASE) --relative --dest-dir=$(DESTDIR) --html-dir=$${installdir}; \ + fi + +uninstall-local: + @if test -n "$(DOC_MODULE_VERSION)"; then \ + installdir="$(DESTDIR)$(TARGET_DIR)-$(DOC_MODULE_VERSION)"; \ + else \ + installdir="$(DESTDIR)$(TARGET_DIR)"; \ + fi; \ + rm -rf $${installdir} + +# +# Require gtk-doc when making dist +# +@ENABLE_GTK_DOC_TRUE@dist-check-gtkdoc: +@ENABLE_GTK_DOC_FALSE@dist-check-gtkdoc: +@ENABLE_GTK_DOC_FALSE@ @echo "*** gtk-doc must 
be installed and enabled in order to make dist" +@ENABLE_GTK_DOC_FALSE@ @false + +dist-hook: dist-check-gtkdoc dist-hook-local + @mkdir $(distdir)/tmpl + @mkdir $(distdir)/html + @-cp ./tmpl/*.sgml $(distdir)/tmpl + @cp ./html/* $(distdir)/html + @-cp ./$(DOC_MODULE).pdf $(distdir)/ + @-cp ./$(DOC_MODULE).types $(distdir)/ + @-cp ./$(DOC_MODULE)-sections.txt $(distdir)/ + @cd $(distdir) && rm -f $(DISTCLEANFILES) + @$(GTKDOC_REBASE) --online --relative --html-dir=$(distdir)/html + +.PHONY : dist-hook-local docs + +# include common portion ... + +# kludges +$(srcdir)/tmpl/*.sgml: + +clean: clean-am + rm -rf tmpl + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/docs/reference/client-howto.xml b/docs/reference/client-howto.xml index e87f901..a53f2ac 100644 --- a/docs/reference/client-howto.xml +++ b/docs/reference/client-howto.xml @@ -58,7 +58,7 @@ you can specify various additional options: - SOUP_SESSION_MAX_CONNS + SOUP_SESSION_MAX_CONNS Allows you to set the maximum total number of connections the session will have open at one time. (Once it reaches @@ -68,7 +68,7 @@ you can specify various additional options: - SOUP_SESSION_MAX_CONNS_PER_HOST + SOUP_SESSION_MAX_CONNS_PER_HOST Allows you to set the maximum total number of connections the session will have open to a single @@ -76,7 +76,7 @@ you can specify various additional options: - SOUP_SESSION_USE_NTLM + SOUP_SESSION_USE_NTLM If TRUE, then Microsoft NTLM authentication will be used if available (and will be @@ -89,7 +89,7 @@ you can specify various additional options: - SOUP_SESSION_SSL_CA_FILE + SOUP_SESSION_SSL_CA_FILE Points to a file containing certificates for recognized SSL Certificate Authorities. 
If this is set, then HTTPS @@ -99,7 +99,7 @@ you can specify various additional options: - SOUP_SESSION_ASYNC_CONTEXT + SOUP_SESSION_ASYNC_CONTEXT A GMainContext @@ -110,7 +110,7 @@ you can specify various additional options: - SOUP_SESSION_ADD_FEATURE and SOUP_SESSION_ADD_FEATURE_BY_TYPE + SOUP_SESSION_ADD_FEATURE and SOUP_SESSION_ADD_FEATURE_BY_TYPE These allow you to specify SoupSessionFeatures @@ -136,9 +136,9 @@ which take no arguments. Additional session functionality is provided as SoupSessionFeatures, which can be added to a session, via the SOUP_SESSION_ADD_FEATURE +linkend="SOUP-SESSION-ADD-FEATURE:CAPS">SOUP_SESSION_ADD_FEATURE and SOUP_SESSION_ADD_FEATURE_BY_TYPE +linkend="SOUP-SESSION-ADD-FEATURE-BY-TYPE:CAPS">SOUP_SESSION_ADD_FEATURE_BY_TYPE options at session-construction-time, or afterward via the soup_session_add_feature and libsoup-gnome: - SoupProxyResolverGNOME + SoupProxyResolverGNOME A feature that automatically determines the correct HTTP proxy to use for requests. @@ -190,7 +190,7 @@ And in libsoup-gnome: Use the "add_feature_by_type" property/function to add features that don't require any configuration (such as SoupProxyResolverGNOME), +linkend="SOUP-TYPE-PROXY-RESOLVER-GNOME:CAPS">SoupProxyResolverGNOME), and the "add_feature" property/function to add features that must be constructed first (such as SoupLogger). For example, an @@ -249,9 +249,9 @@ request headers and body of the message: (Although this is a bad example, because libsoup actually has convenience methods -for dealing with HTML +for dealing with HTML forms, as well as XML-RPC.) +linkend="libsoup-2.4-XMLRPC-Support">XML-RPC.) @@ -260,7 +260,7 @@ linkend="soup-message-set-flags">soup_message_set_flagsSoupSession automatically handles responses from the server that redirect to another URL. If you would like to handle these -yourself, you can set the SOUP_MESSAGE_NO_REDIRECT +yourself, you can set the SOUP_MESSAGE_NO_REDIRECT flag. 
@@ -286,7 +286,7 @@ it will run the main loop itself until the message is complete.) The return value from soup_session_send_message -is a libsoup status code, +is a libsoup status code, indicating either a transport error that prevented the message from being sent, or the HTTP status that was returned by the server in response to the message. (The status is also available as @@ -344,7 +344,7 @@ linkend="soup-session-queue-message">soup_session_queue_messageSoupSessionSync, the message will be sent in another thread, with the callback eventually -being invoked in the session's SOUP_SESSION_ASYNC_CONTEXT.) +being invoked in the session's SOUP_SESSION_ASYNC_CONTEXT.) @@ -527,7 +527,7 @@ A few sample programs are available in the getbug is a trivial demonstration of the XMLRPC interface. + linkend="libsoup-2.4-XMLRPC-Support">XMLRPC interface. (xmlrpc-test provides a slightly more complicated example.) diff --git a/docs/reference/html/SoupAddress.html b/docs/reference/html/SoupAddress.html new file mode 100644 index 0000000..2980343 --- /dev/null +++ b/docs/reference/html/SoupAddress.html @@ -0,0 +1,800 @@ + + + + +SoupAddress + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupAddress

+

SoupAddress — DNS support

+
+ +
+

Object Hierarchy

+
+  GObject
+   +----SoupAddress
+
+
+
+

Implemented Interfaces

+

+SoupAddress implements + GSocketConnectable.

+
+
+

Properties

+
+  "family"                   SoupAddressFamily     : Read / Write / Construct Only
+  "name"                     gchar*                : Read / Write / Construct Only
+  "physical"                 gchar*                : Read
+  "port"                     gint                  : Read / Write / Construct Only
+  "sockaddr"                 gpointer              : Read / Write / Construct Only
+
+
+
+

Description

+

+SoupAddress represents the address of a TCP connection endpoint: +both the IP address and the port. (It is somewhat like an +object-oriented version of struct sockaddr.) +

+
+
+

Details

+
+

SoupAddress

+
typedef struct _SoupAddress SoupAddress;
+

+

+
+
+
+

enum SoupAddressFamily

+
typedef enum {
+	SOUP_ADDRESS_FAMILY_INVALID = -1,
+
+	SOUP_ADDRESS_FAMILY_IPV4 = G_SOCKET_FAMILY_IPV4,
+	SOUP_ADDRESS_FAMILY_IPV6 = G_SOCKET_FAMILY_IPV6
+} SoupAddressFamily;
+
+

+The supported address families. +

+
++ + + + + + + + + + + + + + +

SOUP_ADDRESS_FAMILY_INVALID

an invalid SoupAddress +

SOUP_ADDRESS_FAMILY_IPV4

an IPv4 address +

SOUP_ADDRESS_FAMILY_IPV6

an IPv6 address +
+
+
+
+

SOUP_ADDRESS_ANY_PORT

+
#define SOUP_ADDRESS_ANY_PORT 0
+
+

+This can be passed to any SoupAddress method that expects a port, +to indicate that you don't care what port is used. +

+
+
+
+

soup_address_new ()

+
SoupAddress *       soup_address_new                    (const char *name,
+                                                         guint port);
+

+Creates a SoupAddress from name and port. The SoupAddress's IP +address may not be available right away; the caller can call +soup_address_resolve_async() or soup_address_resolve_sync() to +force a DNS resolution. +

+
++ + + + + + + + + + + + + + +

name :

a hostname or physical address

port :

a port number

Returns :

a SoupAddress +
+
+
+
+

soup_address_new_from_sockaddr ()

+
SoupAddress *       soup_address_new_from_sockaddr      (struct sockaddr *sa,
+                                                         int len);
+

+Returns a SoupAddress equivalent to sa (or NULL if sa's +address family isn't supported) +

+
++ + + + + + + + + + + + + + +

sa :

a pointer to a sockaddr

len :

size of sa +

Returns :

the new SoupAddress. [allow-none] +
+
+
+
+

soup_address_new_any ()

+
SoupAddress *       soup_address_new_any                (SoupAddressFamily family,
+                                                         guint port);
+

+Returns a SoupAddress corresponding to the "any" address +for family (or NULL if family isn't supported), suitable for +using as a listening SoupSocket. +

+
++ + + + + + + + + + + + + + +

family :

the address family

port :

the port number (usually SOUP_ADDRESS_ANY_PORT)

Returns :

the new SoupAddress. [allow-none] +
+
+
+
+

SoupAddressCallback ()

+
void                (*SoupAddressCallback)              (SoupAddress *addr,
+                                                         guint status,
+                                                         gpointer user_data);
+

+The callback function passed to soup_address_resolve_async(). +

+
++ + + + + + + + + + + + + + +

addr :

the SoupAddress that was resolved

status :

+SOUP_STATUS_OK, SOUP_STATUS_CANT_RESOLVE, or +SOUP_STATUS_CANCELLED +

user_data :

the user data that was passed to +soup_address_resolve_async() +
+
+
+
+

soup_address_resolve_async ()

+
void                soup_address_resolve_async          (SoupAddress *addr,
+                                                         GMainContext *async_context,
+                                                         GCancellable *cancellable,
+                                                         SoupAddressCallback callback,
+                                                         gpointer user_data);
+

+Asynchronously resolves the missing half of addr (its IP address +if it was created with soup_address_new(), or its hostname if it +was created with soup_address_new_from_sockaddr() or +soup_address_new_any().) +

+

+If cancellable is non-NULL, it can be used to cancel the +resolution. callback will still be invoked in this case, with a +status of SOUP_STATUS_CANCELLED. +

+

+It is safe to call this more than once on a given address, from the +same thread, with the same async_context (and doing so will not +result in redundant DNS queries being made). But it is not safe to +call from multiple threads, or with different async_contexts, or +mixed with calls to soup_address_resolve_sync(). +

+
++ + + + + + + + + + + + + + + + + + + + + + +

addr :

a SoupAddress +

async_context :

the GMainContext to call callback from. [allow-none] +

cancellable :

a GCancellable object, or NULL +

callback :

callback to call with the result. [scope async] +

user_data :

data for callback +
+
+
+
+

soup_address_resolve_sync ()

+
guint               soup_address_resolve_sync           (SoupAddress *addr,
+                                                         GCancellable *cancellable);
+

+Synchronously resolves the missing half of addr, as with +soup_address_resolve_async(). +

+

+If cancellable is non-NULL, it can be used to cancel the +resolution. soup_address_resolve_sync() will then return a status +of SOUP_STATUS_CANCELLED. +

+

+It is safe to call this more than once, even from different +threads, but it is not safe to mix calls to +soup_address_resolve_sync() with calls to +soup_address_resolve_async() on the same address. +

+
++ + + + + + + + + + + + + + +

addr :

a SoupAddress +

cancellable :

a GCancellable object, or NULL +

Returns :

+SOUP_STATUS_OK, SOUP_STATUS_CANT_RESOLVE, or +SOUP_STATUS_CANCELLED.
+
+
+
+

soup_address_is_resolved ()

+
gboolean            soup_address_is_resolved            (SoupAddress *addr);
+

+Tests if addr has already been resolved. Unlike the other +SoupAddress "get" methods, this is safe to call when addr might +be being resolved in another thread. +

+
++ + + + + + + + + + +

addr :

a SoupAddress +

Returns :

+TRUE if addr has been resolved.
+
+
+
+

soup_address_get_name ()

+
const char *        soup_address_get_name               (SoupAddress *addr);
+

+Returns the hostname associated with addr. +

+

+This method is not thread-safe; if you call it while addr is being +resolved in another thread, it may return garbage. You can use +soup_address_is_resolved() to safely test whether or not an address +is resolved before fetching its name or address. +

+
++ + + + + + + + + + +

addr :

a SoupAddress +

Returns :

the hostname, or NULL if it is not known. [allow-none] +
+
+
+
+

soup_address_get_sockaddr ()

+
struct sockaddr *   soup_address_get_sockaddr           (SoupAddress *addr,
+                                                         int *len);
+

+Returns the sockaddr associated with addr, with its length in +*len. If the sockaddr is not yet known, returns NULL. +

+

+This method is not thread-safe; if you call it while addr is being +resolved in another thread, it may return garbage. You can use +soup_address_is_resolved() to safely test whether or not an address +is resolved before fetching its name or address. +

+
++ + + + + + + + + + + + + + +

addr :

a SoupAddress +

len :

return location for sockaddr length

Returns :

the sockaddr, or NULL. [allow-none][transfer none] +
+
+
+
+

soup_address_get_gsockaddr ()

+
GSocketAddress *    soup_address_get_gsockaddr          (SoupAddress *addr);
+

+Creates a new GSocketAddress corresponding to addr (which is assumed +to only have one socket address associated with it). +

+
++ + + + + + + + + + +

addr :

a SoupAddress +

Returns :

a new GSocketAddress. [transfer full] +
+

Since 2.32

+
+
+
+

soup_address_get_physical ()

+
const char *        soup_address_get_physical           (SoupAddress *addr);
+

+Returns the physical address associated with addr as a string. +(Eg, "127.0.0.1"). If the address is not yet known, returns NULL. +

+

+This method is not thread-safe; if you call it while addr is being +resolved in another thread, it may return garbage. You can use +soup_address_is_resolved() to safely test whether or not an address +is resolved before fetching its name or address. +

+
++ + + + + + + + + + +

addr :

a SoupAddress +

Returns :

the physical address, or NULL. [allow-none] +
+
+
+
+

soup_address_get_port ()

+
guint               soup_address_get_port               (SoupAddress *addr);
+

+Returns the port associated with addr. +

+
++ + + + + + + + + + +

addr :

a SoupAddress +

Returns :

the port
+
+
+
+

soup_address_equal_by_name ()

+
gboolean            soup_address_equal_by_name          (gconstpointer addr1,
+                                                         gconstpointer addr2);
+

+Tests if addr1 and addr2 have the same "name". This method can be +used with soup_address_hash_by_name() to create a GHashTable that +hashes on address "names". +

+

+Comparing by name normally means comparing the addresses by their +hostnames. But if the address was originally created using an IP +address literal, then it will be compared by that instead. +

+

+In particular, if "www.example.com" has the IP address 10.0.0.1, +and addr1 was created with the name "www.example.com" and addr2 +was created with the name "10.0.0.1", then they will compare as +unequal for purposes of soup_address_equal_by_name(). +

+

+This would be used to distinguish hosts in situations where +different virtual hosts on the same IP address should be considered +different. Eg, for purposes of HTTP authentication or cookies, two +hosts with the same IP address but different names are considered +to be different hosts. +

+

+See also soup_address_equal_by_ip(), which compares by IP address +rather than by name. +

+
++ + + + + + + + + + + + + + +

addr1 :

a SoupAddress with a resolved name. [type Soup.Address] +

addr2 :

another SoupAddress with a resolved +name. [type Soup.Address] +

Returns :

whether or not addr1 and addr2 have the same name
+

Since 2.26

+
+
+
+

soup_address_hash_by_name ()

+
guint               soup_address_hash_by_name           (gconstpointer addr);
+

+A hash function (for GHashTable) that corresponds to +soup_address_equal_by_name(), qv +

+
++ + + + + + + + + + +

addr :

a SoupAddress. [type Soup.Address] +

Returns :

the named-based hash value for addr.
+

Since 2.26

+
+
+
+

soup_address_equal_by_ip ()

+
gboolean            soup_address_equal_by_ip            (gconstpointer addr1,
+                                                         gconstpointer addr2);
+

+Tests if addr1 and addr2 have the same IP address. This method +can be used with soup_address_hash_by_ip() to create a +GHashTable that hashes on IP address. +

+

+This would be used to distinguish hosts in situations where +different virtual hosts on the same IP address should be considered +the same. Eg, if "www.example.com" and "www.example.net" have the +same IP address, then a single connection can be used to talk +to either of them. +

+

+See also soup_address_equal_by_name(), which compares by name +rather than by IP address. +

+
++ + + + + + + + + + + + + + +

addr1 :

a SoupAddress with a resolved IP +address. [type Soup.Address] +

addr2 :

another SoupAddress with a resolved +IP address. [type Soup.Address] +

Returns :

whether or not addr1 and addr2 have the same IP +address.
+

Since 2.26

+
+
+
+

soup_address_hash_by_ip ()

+
guint               soup_address_hash_by_ip             (gconstpointer addr);
+

+A hash function (for GHashTable) that corresponds to +soup_address_equal_by_ip(), qv +

+
++ + + + + + + + + + +

addr :

a SoupAddress. [type Soup.Address] +

Returns :

the IP-based hash value for addr.
+

Since 2.26

+
+
+
+

SOUP_ADDRESS_FAMILY

+
#define SOUP_ADDRESS_FAMILY   "family"
+
+

+Alias for the "family" property. (The +SoupAddressFamily for this address.) +

+
+
+
+

SOUP_ADDRESS_NAME

+
#define SOUP_ADDRESS_NAME     "name"
+
+

+Alias for the "name" property. (The hostname for +this address.) +

+
+
+
+

SOUP_ADDRESS_PHYSICAL

+
#define SOUP_ADDRESS_PHYSICAL "physical"
+
+

+An alias for the "physical" property. (The +stringified IP address for this address.) +

+
+
+
+

SOUP_ADDRESS_PORT

+
#define SOUP_ADDRESS_PORT     "port"
+
+

+An alias for the "port" property. (The port for +this address.) +

+
+
+
+

SOUP_ADDRESS_SOCKADDR

+
#define SOUP_ADDRESS_SOCKADDR "sockaddr"
+
+

+An alias for the "sockaddr" property. (A pointer +to the struct sockaddr for this address.) +

+
+
+
+

Property Details

+
+

The "family" property

+
  "family"                   SoupAddressFamily     : Read / Write / Construct Only
+

Address family for this address.

+

Default value: SOUP_ADDRESS_FAMILY_INVALID

+
+
+
+

The "name" property

+
  "name"                     gchar*                : Read / Write / Construct Only
+

Hostname for this address.

+

Default value: NULL

+
+
+
+

The "physical" property

+
  "physical"                 gchar*                : Read
+

IP address for this address.

+

Default value: NULL

+
+
+
+

The "port" property

+
  "port"                     gint                  : Read / Write / Construct Only
+

Port for this address.

+

Allowed values: [G_MAXULONG,65535]

+

Default value: -1

+
+
+
+

The "sockaddr" property

+
  "sockaddr"                 gpointer              : Read / Write / Construct Only
+

struct sockaddr for this address.

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupAuth.html b/docs/reference/html/SoupAuth.html new file mode 100644 index 0000000..f6c8d55 --- /dev/null +++ b/docs/reference/html/SoupAuth.html @@ -0,0 +1,624 @@ + + + + +SoupAuth + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupAuth

+

SoupAuth — HTTP client-side authentication support

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupAuth;
+SoupAuth *          soup_auth_new                       (GType type,
+                                                         SoupMessage *msg,
+                                                         const char *auth_header);
+gboolean            soup_auth_update                    (SoupAuth *auth,
+                                                         SoupMessage *msg,
+                                                         const char *auth_header);
+
+#define             SOUP_TYPE_AUTH_BASIC
+#define             SOUP_TYPE_AUTH_DIGEST
+#define             SOUP_TYPE_AUTH_NTLM
+
+gboolean            soup_auth_is_for_proxy              (SoupAuth *auth);
+const char *        soup_auth_get_scheme_name           (SoupAuth *auth);
+const char *        soup_auth_get_host                  (SoupAuth *auth);
+const char *        soup_auth_get_realm                 (SoupAuth *auth);
+char *              soup_auth_get_info                  (SoupAuth *auth);
+
+void                soup_auth_authenticate              (SoupAuth *auth,
+                                                         const char *username,
+                                                         const char *password);
+gboolean            soup_auth_is_authenticated          (SoupAuth *auth);
+
+char *              soup_auth_get_authorization         (SoupAuth *auth,
+                                                         SoupMessage *msg);
+GSList *            soup_auth_get_protection_space      (SoupAuth *auth,
+                                                         SoupURI *source_uri);
+void                soup_auth_free_protection_space     (SoupAuth *auth,
+                                                         GSList *space);
+
+#define             SOUP_AUTH_SCHEME_NAME
+#define             SOUP_AUTH_REALM
+#define             SOUP_AUTH_HOST
+#define             SOUP_AUTH_IS_FOR_PROXY
+#define             SOUP_AUTH_IS_AUTHENTICATED
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupAuth
+
+
+
+

Properties

+
+  "host"                     gchar*                : Read / Write / Construct Only
+  "is-authenticated"         gboolean              : Read
+  "is-for-proxy"             gboolean              : Read / Write / Construct Only
+  "realm"                    gchar*                : Read / Write / Construct Only
+  "scheme-name"              gchar*                : Read
+
+
+
+

Signals

+
+  "save-password"                                  : Run First
+
+
+
+

Description

+

+SoupAuth objects store the authentication data associated with a +given bit of web space. They are created automatically by +SoupSession. +

+
+
+

Details

+
+

SoupAuth

+
typedef struct _SoupAuth SoupAuth;
+

+The abstract base class for handling authentication. Specific HTTP +Authentication mechanisms are implemented by its subclasses, but +applications never need to be aware of the specific subclasses +being used. +

+
+
+
+

soup_auth_new ()

+
SoupAuth *          soup_auth_new                       (GType type,
+                                                         SoupMessage *msg,
+                                                         const char *auth_header);
+

+Creates a new SoupAuth of type type with the information from +msg and auth_header. +

+

+This is called by SoupSession; you will normally not create auths +yourself. +

+
++ + + + + + + + + + + + + + + + + + +

type :

the type of auth to create (a subtype of SoupAuth)

msg :

the SoupMessage the auth is being created for

auth_header :

the WWW-Authenticate/Proxy-Authenticate header

Returns :

the new SoupAuth, or NULL if it could not be +created
+
+
+
+

soup_auth_update ()

+
gboolean            soup_auth_update                    (SoupAuth *auth,
+                                                         SoupMessage *msg,
+                                                         const char *auth_header);
+

+Updates auth with the information from msg and auth_header, +possibly un-authenticating it. As with soup_auth_new(), this is +normally only used by SoupSession. +

+
++ + + + + + + + + + + + + + + + + + +

auth :

a SoupAuth +

msg :

the SoupMessage auth is being updated for

auth_header :

the WWW-Authenticate/Proxy-Authenticate header

Returns :

+TRUE if auth is still a valid (but potentially +unauthenticated) SoupAuth. FALSE if something about auth_params +could not be parsed or incorporated into auth at all.
+
+
+
+

SOUP_TYPE_AUTH_BASIC

+
#define SOUP_TYPE_AUTH_BASIC  (soup_auth_basic_get_type ())
+
+

+

+
+
+
+

SOUP_TYPE_AUTH_DIGEST

+
#define SOUP_TYPE_AUTH_DIGEST (soup_auth_digest_get_type ())
+
+

+

+
+
+
+

SOUP_TYPE_AUTH_NTLM

+
#define SOUP_TYPE_AUTH_NTLM   (soup_auth_ntlm_get_type ())
+
+

+

+
+
+
+

soup_auth_is_for_proxy ()

+
gboolean            soup_auth_is_for_proxy              (SoupAuth *auth);
+

+Tests whether or not auth is associated with a proxy server rather +than an "origin" server. +

+
++ + + + + + + + + + +

auth :

a SoupAuth +

Returns :

+TRUE or FALSE +
+
+
+
+

soup_auth_get_scheme_name ()

+
const char *        soup_auth_get_scheme_name           (SoupAuth *auth);
+

+Returns auth's scheme name. (Eg, "Basic", "Digest", or "NTLM") +

+
++ + + + + + + + + + +

auth :

a SoupAuth +

Returns :

the scheme name
+
+
+
+

soup_auth_get_host ()

+
const char *        soup_auth_get_host                  (SoupAuth *auth);
+

+Returns the host that auth is associated with. +

+
++ + + + + + + + + + +

auth :

a SoupAuth +

Returns :

the hostname
+
+
+
+

soup_auth_get_realm ()

+
const char *        soup_auth_get_realm                 (SoupAuth *auth);
+

+Returns auth's realm. This is an identifier that distinguishes +separate authentication spaces on a given server, and may be some +string that is meaningful to the user. (Although it is probably not +localized.) +

+
++ + + + + + + + + + +

auth :

a SoupAuth +

Returns :

the realm name
+
+
+
+

soup_auth_get_info ()

+
char *              soup_auth_get_info                  (SoupAuth *auth);
+

+Gets an opaque identifier for auth, for use as a hash key or the +like. SoupAuth objects from the same server with the same +identifier refer to the same authentication domain (eg, the URLs +associated with them take the same usernames and passwords). +

+
++ + + + + + + + + + +

auth :

a SoupAuth +

Returns :

the identifier
+
+
+
+

soup_auth_authenticate ()

+
void                soup_auth_authenticate              (SoupAuth *auth,
+                                                         const char *username,
+                                                         const char *password);
+

+Call this on an auth to authenticate it; normally this will cause +the auth's message to be requeued with the new authentication info. +

+

+This does not cause the password to be saved to persistent storage; +see soup_auth_save_password() for that. +

+
++ + + + + + + + + + + + + + +

auth :

a SoupAuth +

username :

the username provided by the user or client

password :

the password provided by the user or client
+
+
+
+

soup_auth_is_authenticated ()

+
gboolean            soup_auth_is_authenticated          (SoupAuth *auth);
+

+Tests if auth has been given a username and password +

+
++ + + + + + + + + + +

auth :

a SoupAuth +

Returns :

+TRUE if auth has been given a username and password
+
+
+
+

soup_auth_get_authorization ()

+
char *              soup_auth_get_authorization         (SoupAuth *auth,
+                                                         SoupMessage *msg);
+

+Generates an appropriate "Authorization" header for msg. (The +session will only call this if soup_auth_is_authenticated() +returned TRUE.) +

+
++ + + + + + + + + + + + + + +

auth :

a SoupAuth +

msg :

the SoupMessage to be authorized

Returns :

the "Authorization" header, which must be freed.
+
+
+
+

soup_auth_get_protection_space ()

+
GSList *            soup_auth_get_protection_space      (SoupAuth *auth,
+                                                         SoupURI *source_uri);
+

+Returns a list of paths on the server which auth extends over. +(All subdirectories of these paths are also assumed to be part +of auth's protection space, unless otherwise discovered not to +be.) +

+
++ + + + + + + + + + + + + + +

auth :

a SoupAuth +

source_uri :

the URI of the request that auth was generated in +response to.

Returns :

the list of +paths, which can be freed with soup_auth_free_protection_space(). [element-type utf8][transfer full] +
+
+
+
+

soup_auth_free_protection_space ()

+
void                soup_auth_free_protection_space     (SoupAuth *auth,
+                                                         GSList *space);
+

+Frees space. +

+
++ + + + + + + + + + +

auth :

a SoupAuth +

space :

the return value from soup_auth_get_protection_space() +
+
+
+
+

SOUP_AUTH_SCHEME_NAME

+
#define SOUP_AUTH_SCHEME_NAME      "scheme-name"
+
+

+An alias for the "scheme-name" property. (The +authentication scheme name.) +

+
+
+
+

SOUP_AUTH_REALM

+
#define SOUP_AUTH_REALM            "realm"
+
+

+An alias for the "realm" property. (The +authentication realm.) +

+
+
+
+

SOUP_AUTH_HOST

+
#define SOUP_AUTH_HOST             "host"
+
+

+An alias for the "host" property. (The +host being authenticated to.) +

+
+
+
+

SOUP_AUTH_IS_FOR_PROXY

+
#define SOUP_AUTH_IS_FOR_PROXY     "is-for-proxy"
+
+

+An alias for the "is-for-proxy" property. (Whether +or not the auth is for a proxy server.) +

+
+
+
+

SOUP_AUTH_IS_AUTHENTICATED

+
#define SOUP_AUTH_IS_AUTHENTICATED "is-authenticated"
+
+

+An alias for the "is-authenticated" property. +(Whether or not the auth has been authenticated.) +

+
+
+
+

Property Details

+
+

The "host" property

+
  "host"                     gchar*                : Read / Write / Construct Only
+

Authentication host.

+

Default value: NULL

+
+
+
+

The "is-authenticated" property

+
  "is-authenticated"         gboolean              : Read
+

Whether or not the auth is authenticated.

+

Default value: FALSE

+
+
+
+

The "is-for-proxy" property

+
  "is-for-proxy"             gboolean              : Read / Write / Construct Only
+

Whether or not the auth is for a proxy server.

+

Default value: FALSE

+
+
+
+

The "realm" property

+
  "realm"                    gchar*                : Read / Write / Construct Only
+

Authentication realm.

+

Default value: NULL

+
+
+
+

The "scheme-name" property

+
  "scheme-name"              gchar*                : Read
+

Authentication scheme name.

+

Default value: NULL

+
+
+
+

Signal Details

+
+

The "save-password" signal

+
void                user_function                      (SoupAuth *auth,
+                                                        gchar    *username,
+                                                        gchar    *password,
+                                                        gpointer  user_data)      : Run First
+

+Emitted to request that the username/password pair be +saved. If the session supports password-saving, it will +connect to this signal before emitting +"authenticate", so that it can record the password +if requested by the caller. +

+
++ + + + + + + + + + + + + + + + + + +

auth :

the auth

username :

the username to save

password :

the password to save

user_data :

user data set when the signal handler was connected.
+

Since 2.28

+
+
+
+

See Also

+SoupSession +
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupAuthDomain.html b/docs/reference/html/SoupAuthDomain.html new file mode 100644 index 0000000..aa486ea --- /dev/null +++ b/docs/reference/html/SoupAuthDomain.html @@ -0,0 +1,725 @@ + + + + +SoupAuthDomain + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupAuthDomain

+

SoupAuthDomain — Server-side authentication

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupAuthDomain;
+
+void                soup_auth_domain_add_path           (SoupAuthDomain *domain,
+                                                         const char *path);
+void                soup_auth_domain_remove_path        (SoupAuthDomain *domain,
+                                                         const char *path);
+gboolean            (*SoupAuthDomainFilter)             (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         gpointer user_data);
+void                soup_auth_domain_set_filter         (SoupAuthDomain *domain,
+                                                         SoupAuthDomainFilter filter,
+                                                         gpointer filter_data,
+                                                         GDestroyNotify dnotify);
+const char *        soup_auth_domain_get_realm          (SoupAuthDomain *domain);
+
+gboolean            (*SoupAuthDomainGenericAuthCallback)
+                                                        (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         gpointer user_data);
+void                soup_auth_domain_set_generic_auth_callback
+                                                        (SoupAuthDomain *domain,
+                                                         SoupAuthDomainGenericAuthCallback auth_callback,
+                                                         gpointer auth_data,
+                                                         GDestroyNotify dnotify);
+gboolean            soup_auth_domain_check_password     (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         const char *password);
+
+gboolean            soup_auth_domain_covers             (SoupAuthDomain *domain,
+                                                         SoupMessage *msg);
+char *              soup_auth_domain_accepts            (SoupAuthDomain *domain,
+                                                         SoupMessage *msg);
+void                soup_auth_domain_challenge          (SoupAuthDomain *domain,
+                                                         SoupMessage *msg);
+
+#define             SOUP_AUTH_DOMAIN_REALM
+#define             SOUP_AUTH_DOMAIN_PROXY
+#define             SOUP_AUTH_DOMAIN_ADD_PATH
+#define             SOUP_AUTH_DOMAIN_REMOVE_PATH
+#define             SOUP_AUTH_DOMAIN_FILTER
+#define             SOUP_AUTH_DOMAIN_FILTER_DATA
+#define             SOUP_AUTH_DOMAIN_GENERIC_AUTH_CALLBACK
+#define             SOUP_AUTH_DOMAIN_GENERIC_AUTH_DATA
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupAuthDomain
+         +----SoupAuthDomainBasic
+         +----SoupAuthDomainDigest
+
+
+
+

Properties

+
+  "add-path"                 gchar*                : Write
+  "filter"                   gpointer              : Read / Write
+  "filter-data"              gpointer              : Read / Write
+  "generic-auth-callback"    gpointer              : Read / Write
+  "generic-auth-data"        gpointer              : Read / Write
+  "proxy"                    gboolean              : Read / Write / Construct Only
+  "realm"                    gchar*                : Read / Write / Construct Only
+  "remove-path"              gchar*                : Write
+
+
+
+

Description

+

+A SoupAuthDomain manages authentication for all or part of a +SoupServer. To make a server require authentication, first create +an appropriate subclass of SoupAuthDomain, and then add it to the +server with soup_server_add_auth_domain(). +

+

+In order for an auth domain to have any effect, you must add one or +more paths to it (via soup_auth_domain_add_path() or the +SOUP_AUTH_DOMAIN_ADD_PATH property). To require authentication for +all ordinary requests, add the path "/". (Note that this does not +include the special "*" URI (eg, "OPTIONS *"), which must be added +as a separate path if you want to cover it.) +

+

+If you need greater control over which requests should and +shouldn't be authenticated, add paths covering everything you +might want authenticated, and then use a +filter (soup_auth_domain_set_filter()) to bypass authentication for +those requests that don't need it. +

+
+
+

Details

+
+

SoupAuthDomain

+
typedef struct _SoupAuthDomain SoupAuthDomain;
+

+

+
+
+
+

soup_auth_domain_add_path ()

+
void                soup_auth_domain_add_path           (SoupAuthDomain *domain,
+                                                         const char *path);
+

+Adds path to domain, such that requests under path on domain's +server will require authentication (unless overridden by +soup_auth_domain_remove_path() or soup_auth_domain_set_filter()). +

+

+You can also add paths by setting the SOUP_AUTH_DOMAIN_ADD_PATH +property, which can also be used to add one or more paths at +construct time. +

+
++ + + + + + + + + + +

domain :

a SoupAuthDomain +

path :

the path to add to domain +
+
+
+
+

soup_auth_domain_remove_path ()

+
void                soup_auth_domain_remove_path        (SoupAuthDomain *domain,
+                                                         const char *path);
+

+Removes path from domain, such that requests under path on +domain's server will NOT require authentication. +

+

+This is not simply an undo-er for soup_auth_domain_add_path(); it +can be used to "carve out" a subtree that does not require +authentication inside a hierarchy that does. Note also that unlike +with soup_auth_domain_add_path(), this cannot be overridden by +adding a filter, as filters can only bypass authentication that +would otherwise be required, not require it where it would +otherwise be unnecessary. +

+

+You can also remove paths by setting the +SOUP_AUTH_DOMAIN_REMOVE_PATH property, which can also be used to +remove one or more paths at construct time. +

+
++ + + + + + + + + + +

domain :

a SoupAuthDomain +

path :

the path to remove from domain +
+
+
+
+

SoupAuthDomainFilter ()

+
gboolean            (*SoupAuthDomainFilter)             (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         gpointer user_data);
+

+The prototype for a SoupAuthDomain filter; see +soup_auth_domain_set_filter() for details. +

+
++ + + + + + + + + + + + + + + + + + +

domain :

a SoupAuthDomain +

msg :

a SoupMessage +

user_data :

the data passed to soup_auth_domain_set_filter() +

Returns :

+TRUE if msg requires authentication, FALSE if not.
+
+
+
+

soup_auth_domain_set_filter ()

+
void                soup_auth_domain_set_filter         (SoupAuthDomain *domain,
+                                                         SoupAuthDomainFilter filter,
+                                                         gpointer filter_data,
+                                                         GDestroyNotify dnotify);
+

+Adds filter as an authentication filter to domain. The filter +gets a chance to bypass authentication for certain requests that +would otherwise require it. Eg, it might check the message's path +in some way that is too complicated to do via the other methods, or +it might check the message's method, and allow GETs but not PUTs. +

+

+The filter function returns TRUE if the request should still +require authentication, or FALSE if authentication is unnecessary +for this request. +

+

+To help prevent security holes, your filter should return TRUE by +default, and only return FALSE under specifically-tested +circumstances, rather than the other way around. Eg, in the example +above, where you want to authenticate PUTs but not GETs, you should +check if the method is GET and return FALSE in that case, and then +return TRUE for all other methods (rather than returning TRUE for +PUT and FALSE for all other methods). This way if it turned out +(now or later) that some paths supported additional methods besides +GET and PUT, those methods would default to being NOT allowed for +unauthenticated users. +

+

+You can also set the filter by setting the SOUP_AUTH_DOMAIN_FILTER +and SOUP_AUTH_DOMAIN_FILTER_DATA properties, which can also be +used to set the filter at construct time. +

+
++ + + + + + + + + + + + + + + + + + +

domain :

a SoupAuthDomain +

filter :

the auth filter for domain +

filter_data :

data to pass to filter +

dnotify :

destroy notifier to free filter_data when domain +is destroyed
+
+
+
+

soup_auth_domain_get_realm ()

+
const char *        soup_auth_domain_get_realm          (SoupAuthDomain *domain);
+

+Gets the realm name associated with domain +

+
++ + + + + + + + + + +

domain :

a SoupAuthDomain +

Returns :

+domain's realm
+
+
+
+

SoupAuthDomainGenericAuthCallback ()

+
gboolean            (*SoupAuthDomainGenericAuthCallback)
+                                                        (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         gpointer user_data);
+

+The prototype for a SoupAuthDomain generic authentication callback. +

+

+The callback should look up the user's password, call +soup_auth_domain_check_password(), and use the return value from +that method as its own return value. +

+

+In general, for security reasons, it is preferable to use the +auth-domain-specific auth callbacks (eg, +SoupAuthDomainBasicAuthCallback and +SoupAuthDomainDigestAuthCallback), because they don't require +keeping a cleartext password database. Most users will use the same +password for many different sites, meaning if any site with a +cleartext password database is compromised, accounts on other +servers might be compromised as well. For many of the cases where +SoupServer is used, this is not really relevant, but it may still +be worth considering. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

domain :

a SoupAuthDomain +

msg :

the SoupMessage being authenticated

username :

the username from msg +

user_data :

the data passed to +soup_auth_domain_set_generic_auth_callback() +

Returns :

+TRUE if msg is authenticated, FALSE if not.
+
+
+
+

soup_auth_domain_set_generic_auth_callback ()

+
void                soup_auth_domain_set_generic_auth_callback
+                                                        (SoupAuthDomain *domain,
+                                                         SoupAuthDomainGenericAuthCallback auth_callback,
+                                                         gpointer auth_data,
+                                                         GDestroyNotify dnotify);
+

+Sets auth_callback as an authentication-handling callback for +domain. Whenever a request comes in to domain which cannot be +authenticated via a domain-specific auth callback (eg, +SoupAuthDomainDigestAuthCallback), the generic auth callback +will be invoked. See SoupAuthDomainGenericAuthCallback for information +on what the callback should do. +

+
++ + + + + + + + + + + + + + + + + + +

domain :

a SoupAuthDomain +

auth_callback :

the auth callback

auth_data :

data to pass to auth_callback +

dnotify :

destroy notifier to free auth_data when domain +is destroyed
+
+
+
+

soup_auth_domain_check_password ()

+
gboolean            soup_auth_domain_check_password     (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         const char *password);
+

+Checks if msg authenticates to domain via username and +password. This would normally be called from a +SoupAuthDomainGenericAuthCallback. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

domain :

a SoupAuthDomain +

msg :

a SoupMessage +

username :

a username

password :

a password

Returns :

whether or not the message is authenticated
+
+
+
+

soup_auth_domain_covers ()

+
gboolean            soup_auth_domain_covers             (SoupAuthDomain *domain,
+                                                         SoupMessage *msg);
+

+Checks if domain requires msg to be authenticated (according to +its paths and filter function). This does not actually look at +whether msg is authenticated, merely whether +or not it needs to be. +

+

+This is used by SoupServer internally and is probably of no use to +anyone else. +

+
++ + + + + + + + + + + + + + +

domain :

a SoupAuthDomain +

msg :

a SoupMessage +

Returns :

+TRUE if domain requires msg to be authenticated
+
+
+
+

soup_auth_domain_accepts ()

+
char *              soup_auth_domain_accepts            (SoupAuthDomain *domain,
+                                                         SoupMessage *msg);
+

+Checks if msg contains appropriate authorization for domain to +accept it. Mirroring soup_auth_domain_covers(), this does not check +whether or not domain cares if msg is +authorized. +

+

+This is used by SoupServer internally and is probably of no use to +anyone else. +

+
++ + + + + + + + + + + + + + +

domain :

a SoupAuthDomain +

msg :

a SoupMessage +

Returns :

the username that msg has authenticated as, if in +fact it has authenticated. NULL otherwise.
+
+
+
+

soup_auth_domain_challenge ()

+
void                soup_auth_domain_challenge          (SoupAuthDomain *domain,
+                                                         SoupMessage *msg);
+

+Adds a "WWW-Authenticate" or "Proxy-Authenticate" header to msg, +requesting that the client authenticate, and sets msg's status +accordingly. +

+

+This is used by SoupServer internally and is probably of no use to +anyone else. +

+
++ + + + + + + + + + +

domain :

a SoupAuthDomain +

msg :

a SoupMessage +
+
+
+
+

SOUP_AUTH_DOMAIN_REALM

+
#define SOUP_AUTH_DOMAIN_REALM       "realm"
+
+

+Alias for the "realm" property. (The realm of +this auth domain.) +

+
+
+
+

SOUP_AUTH_DOMAIN_PROXY

+
#define SOUP_AUTH_DOMAIN_PROXY       "proxy"
+
+

+Alias for the "proxy" property. (Whether or +not this is a proxy auth domain.) +

+
+
+
+

SOUP_AUTH_DOMAIN_ADD_PATH

+
#define SOUP_AUTH_DOMAIN_ADD_PATH    "add-path"
+
+

+Alias for the "add-path" property. (Shortcut +for calling soup_auth_domain_add_path().) +

+
+
+
+

SOUP_AUTH_DOMAIN_REMOVE_PATH

+
#define SOUP_AUTH_DOMAIN_REMOVE_PATH "remove-path"
+
+

+Alias for the "remove-path" property. +(Shortcut for calling soup_auth_domain_remove_path().) +

+
+
+
+

SOUP_AUTH_DOMAIN_FILTER

+
#define SOUP_AUTH_DOMAIN_FILTER      "filter"
+
+

+Alias for the "filter" property. (The +SoupAuthDomainFilter for the domain.) +

+
+
+
+

SOUP_AUTH_DOMAIN_FILTER_DATA

+
#define SOUP_AUTH_DOMAIN_FILTER_DATA "filter-data"
+
+

+Alias for the "filter-data" property. (Data +to pass to the SoupAuthDomainFilter.) +

+
+
+
+

SOUP_AUTH_DOMAIN_GENERIC_AUTH_CALLBACK

+
#define SOUP_AUTH_DOMAIN_GENERIC_AUTH_CALLBACK "generic-auth-callback"
+
+

+Alias for the "generic-auth-callback" property. +(The SoupAuthDomainGenericAuthCallback.) +

+
+
+
+

SOUP_AUTH_DOMAIN_GENERIC_AUTH_DATA

+
#define SOUP_AUTH_DOMAIN_GENERIC_AUTH_DATA     "generic-auth-data"
+
+

+Alias for the "generic-auth-data" property. +(The data to pass to the SoupAuthDomainGenericAuthCallback.) +

+
+
+
+

Property Details

+
+

The "add-path" property

+
  "add-path"                 gchar*                : Write
+

Add a path covered by this auth domain.

+

Default value: NULL

+
+
+
+

The "filter" property

+
  "filter"                   gpointer              : Read / Write
+

A filter for deciding whether or not to require authentication.

+
+
+
+

The "filter-data" property

+
  "filter-data"              gpointer              : Read / Write
+

Data to pass to filter.

+
+
+
+

The "generic-auth-callback" property

+
  "generic-auth-callback"    gpointer              : Read / Write
+

An authentication callback that can be used with any SoupAuthDomain subclass.

+
+
+
+

The "generic-auth-data" property

+
  "generic-auth-data"        gpointer              : Read / Write
+

Data to pass to auth callback.

+
+
+
+

The "proxy" property

+
  "proxy"                    gboolean              : Read / Write / Construct Only
+

Whether or not this is a proxy auth domain.

+

Default value: FALSE

+
+
+
+

The "realm" property

+
  "realm"                    gchar*                : Read / Write / Construct Only
+

The realm of this auth domain.

+

Default value: NULL

+
+
+
+

The "remove-path" property

+
  "remove-path"              gchar*                : Write
+

Remove a path covered by this auth domain.

+

Default value: NULL

+
+
+
+

See Also

+SoupServer +
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupAuthDomainBasic.html b/docs/reference/html/SoupAuthDomainBasic.html new file mode 100644 index 0000000..0e0dcbb --- /dev/null +++ b/docs/reference/html/SoupAuthDomainBasic.html @@ -0,0 +1,268 @@ + + + + +SoupAuthDomainBasic + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupAuthDomainBasic

+

SoupAuthDomainBasic — Server-side "Basic" authentication

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupAuthDomainBasic;
+SoupAuthDomain *    soup_auth_domain_basic_new          (const char *optname1,
+                                                         ...);
+
+gboolean            (*SoupAuthDomainBasicAuthCallback)  (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         const char *password,
+                                                         gpointer user_data);
+void                soup_auth_domain_basic_set_auth_callback
+                                                        (SoupAuthDomain *domain,
+                                                         SoupAuthDomainBasicAuthCallback callback,
+                                                         gpointer user_data,
+                                                         GDestroyNotify dnotify);
+
+#define             SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK
+#define             SOUP_AUTH_DOMAIN_BASIC_AUTH_DATA
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupAuthDomain
+         +----SoupAuthDomainBasic
+
+
+
+

Properties

+
+  "auth-callback"            gpointer              : Read / Write
+  "auth-data"                gpointer              : Read / Write
+
+
+
+

Description

+

+SoupAuthDomainBasic handles the server side of HTTP "Basic" (ie, +cleartext password) authentication. +

+
+
+

Details

+
+

SoupAuthDomainBasic

+
typedef struct _SoupAuthDomainBasic SoupAuthDomainBasic;
+

+

+
+
+
+

soup_auth_domain_basic_new ()

+
SoupAuthDomain *    soup_auth_domain_basic_new          (const char *optname1,
+                                                         ...);
+

+Creates a SoupAuthDomainBasic. You must set the +SOUP_AUTH_DOMAIN_REALM parameter, to indicate the realm name to be +returned with the authentication challenge to the client. Other +parameters are optional. +

+
++ + + + + + + + + + + + + + +

optname1 :

name of first option, or NULL +

... :

option name/value pairs

Returns :

the new SoupAuthDomain +
+
+
+
+

SoupAuthDomainBasicAuthCallback ()

+
gboolean            (*SoupAuthDomainBasicAuthCallback)  (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         const char *password,
+                                                         gpointer user_data);
+

+Callback used by SoupAuthDomainBasic for authentication purposes. +The application should verify that username and password are valid +and return TRUE or FALSE. +

+

+If you are maintaining your own password database (rather than +using the password to authenticate against some other system like +PAM or a remote server), you should make sure you know what you are +doing. In particular, don't store cleartext passwords, or +easily-computed hashes of cleartext passwords, even if you don't +care that much about the security of your server, because users +will frequently use the same password for multiple sites, and so +compromising any site with a cleartext (or easily-cracked) password +database may give attackers access to other more-interesting sites +as well. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

domain :

the domain

msg :

the message being authenticated

username :

the username provided by the client

password :

the password provided by the client

user_data :

the data passed to soup_auth_domain_basic_set_auth_callback() +

Returns :

+TRUE if username and password are valid
+
+
+
+

soup_auth_domain_basic_set_auth_callback ()

+
void                soup_auth_domain_basic_set_auth_callback
+                                                        (SoupAuthDomain *domain,
+                                                         SoupAuthDomainBasicAuthCallback callback,
+                                                         gpointer user_data,
+                                                         GDestroyNotify dnotify);
+

+Sets the callback that domain will use to authenticate incoming +requests. For each request containing authorization, domain will +invoke the callback, and then either accept or reject the request +based on callback's return value. +

+

+You can also set the auth callback by setting the +SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK and +SOUP_AUTH_DOMAIN_BASIC_AUTH_DATA properties, which can also be +used to set the callback at construct time. +

+
++ + + + + + + + + + + + + + + + + + +

domain :

the domain

callback :

the callback

user_data :

data to pass to auth_callback +

dnotify :

destroy notifier to free user_data when domain +is destroyed
+
+
+
+

SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK

+
#define SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK "auth-callback"
+
+

+Alias for the "auth-callback" property. +(The SoupAuthDomainBasicAuthCallback.) +

+
+
+
+

SOUP_AUTH_DOMAIN_BASIC_AUTH_DATA

+
#define SOUP_AUTH_DOMAIN_BASIC_AUTH_DATA     "auth-data"
+
+

+Alias for the "auth-data" property. +(The data to pass to the SoupAuthDomainBasicAuthCallback.) +

+
+
+
+

Property Details

+
+

The "auth-callback" property

+
  "auth-callback"            gpointer              : Read / Write
+

Password-checking callback.

+
+
+
+

The "auth-data" property

+
  "auth-data"                gpointer              : Read / Write
+

Data to pass to authentication callback.

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupAuthDomainDigest.html b/docs/reference/html/SoupAuthDomainDigest.html new file mode 100644 index 0000000..b07c134 --- /dev/null +++ b/docs/reference/html/SoupAuthDomainDigest.html @@ -0,0 +1,301 @@ + + + + +SoupAuthDomainDigest + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupAuthDomainDigest

+

SoupAuthDomainDigest — Server-side "Digest" authentication

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupAuthDomainDigest;
+SoupAuthDomain *    soup_auth_domain_digest_new         (const char *optname1,
+                                                         ...);
+
+char *              (*SoupAuthDomainDigestAuthCallback) (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         gpointer user_data);
+void                soup_auth_domain_digest_set_auth_callback
+                                                        (SoupAuthDomain *domain,
+                                                         SoupAuthDomainDigestAuthCallback callback,
+                                                         gpointer user_data,
+                                                         GDestroyNotify dnotify);
+char *              soup_auth_domain_digest_encode_password
+                                                        (const char *username,
+                                                         const char *realm,
+                                                         const char *password);
+
+#define             SOUP_AUTH_DOMAIN_DIGEST_AUTH_CALLBACK
+#define             SOUP_AUTH_DOMAIN_DIGEST_AUTH_DATA
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupAuthDomain
+         +----SoupAuthDomainDigest
+
+
+
+

Properties

+
+  "auth-callback"            gpointer              : Read / Write
+  "auth-data"                gpointer              : Read / Write
+
+
+
+

Description

+

+SoupAuthDomainDigest handles the server side of HTTP "Digest" +authentication. +

+
+
+

Details

+
+

SoupAuthDomainDigest

+
typedef struct _SoupAuthDomainDigest SoupAuthDomainDigest;
+

+

+
+
+
+

soup_auth_domain_digest_new ()

+
SoupAuthDomain *    soup_auth_domain_digest_new         (const char *optname1,
+                                                         ...);
+

+Creates a SoupAuthDomainDigest. You must set the +SOUP_AUTH_DOMAIN_REALM parameter, to indicate the realm name to be +returned with the authentication challenge to the client. Other +parameters are optional. +

+
++ + + + + + + + + + + + + + +

optname1 :

name of first option, or NULL +

... :

option name/value pairs

Returns :

the new SoupAuthDomain +
+
+
+
+

SoupAuthDomainDigestAuthCallback ()

+
char *              (*SoupAuthDomainDigestAuthCallback) (SoupAuthDomain *domain,
+                                                         SoupMessage *msg,
+                                                         const char *username,
+                                                         gpointer user_data);
+

+Callback used by SoupAuthDomainDigest for authentication purposes. +The application should look up username in its password database, +and return the corresponding encoded password (see +soup_auth_domain_digest_encode_password()). +

+
++ + + + + + + + + + + + + + + + + + + + + + +

domain :

the domain

msg :

the message being authenticated

username :

the username provided by the client

user_data :

the data passed to soup_auth_domain_digest_set_auth_callback() +

Returns :

the encoded password, or NULL if username is not a +valid user. domain will free the password when it is done with it.
+
+
+
+

soup_auth_domain_digest_set_auth_callback ()

+
void                soup_auth_domain_digest_set_auth_callback
+                                                        (SoupAuthDomain *domain,
+                                                         SoupAuthDomainDigestAuthCallback callback,
+                                                         gpointer user_data,
+                                                         GDestroyNotify dnotify);
+

+Sets the callback that domain will use to authenticate incoming +requests. For each request containing authorization, domain will +invoke the callback, and then either accept or reject the request +based on callback's return value. +

+

+You can also set the auth callback by setting the +SOUP_AUTH_DOMAIN_DIGEST_AUTH_CALLBACK and +SOUP_AUTH_DOMAIN_DIGEST_AUTH_DATA properties, which can also be +used to set the callback at construct time. +

+
++ + + + + + + + + + + + + + + + + + +

domain :

the domain

callback :

the callback

user_data :

data to pass to auth_callback +

dnotify :

destroy notifier to free user_data when domain +is destroyed
+
+
+
+

soup_auth_domain_digest_encode_password ()

+
char *              soup_auth_domain_digest_encode_password
+                                                        (const char *username,
+                                                         const char *realm,
+                                                         const char *password);
+

+Encodes the username/realm/password triplet for Digest +authentication. (That is, it returns a stringified MD5 hash of +username, realm, and password concatenated together). This is +the form that is needed as the return value of +SoupAuthDomainDigest's auth handler. +

+

+For security reasons, you should store the encoded hash, rather +than storing the cleartext password itself and calling this method +only when you need to verify it. This way, if your server is +compromised, the attackers will not gain access to cleartext +passwords which might also be usable at other sites. (Note also +that the encoded password returned by this method is identical to +the encoded password stored in an Apache .htdigest file.) +

+
++ + + + + + + + + + + + + + + + + + +

username :

a username

realm :

an auth realm name

password :

the password for username in realm +

Returns :

the encoded password
+
+
+
+

SOUP_AUTH_DOMAIN_DIGEST_AUTH_CALLBACK

+
#define SOUP_AUTH_DOMAIN_DIGEST_AUTH_CALLBACK "auth-callback"
+
+

+Alias for the "auth-callback" property. +(The SoupAuthDomainDigestAuthCallback.) +

+
+
+
+

SOUP_AUTH_DOMAIN_DIGEST_AUTH_DATA

+
#define SOUP_AUTH_DOMAIN_DIGEST_AUTH_DATA     "auth-data"
+
+

+Alias for the "auth-data" property. +(The data to pass to the SoupAuthDomainDigestAuthCallback.) +

+
+
+
+

Property Details

+
+

The "auth-callback" property

+
  "auth-callback"            gpointer              : Read / Write
+

Password-finding callback.

+
+
+
+

The "auth-data" property

+
  "auth-data"                gpointer              : Read / Write
+

Data to pass to authentication callback.

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupCache.html b/docs/reference/html/SoupCache.html new file mode 100644 index 0000000..ef095d0 --- /dev/null +++ b/docs/reference/html/SoupCache.html @@ -0,0 +1,240 @@ + + + + +SoupCache + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupCache

+

SoupCache

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+struct              SoupCache;
+enum                SoupCacheType;
+SoupCache *         soup_cache_new                      (const char *cache_dir,
+                                                         SoupCacheType cache_type);
+void                soup_cache_flush                    (SoupCache *cache);
+void                soup_cache_clear                    (SoupCache *cache);
+void                soup_cache_dump                     (SoupCache *cache);
+void                soup_cache_load                     (SoupCache *cache);
+guint               soup_cache_get_max_size             (SoupCache *cache);
+void                soup_cache_set_max_size             (SoupCache *cache,
+                                                         guint max_size);
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupCache
+
+
+
+

Implemented Interfaces

+

+SoupCache implements + SoupSessionFeature.

+
+
+

Properties

+
+  "cache-dir"                gchar*                : Read / Write / Construct Only
+  "cache-type"               SoupCacheType         : Read / Write / Construct Only
+
+
+
+

Description

+

+

+
+
+

Details

+
+

struct SoupCache

+
struct SoupCache;
+

+

+
+
+
+

enum SoupCacheType

+
typedef enum {
+	SOUP_CACHE_SINGLE_USER,
+	SOUP_CACHE_SHARED
+} SoupCacheType;
+
+

+The type of cache; this affects what kinds of responses will be +saved. +

+
++ + + + + + + + + + +

SOUP_CACHE_SINGLE_USER

a single-user cache +

SOUP_CACHE_SHARED

a shared cache +
+

Since 2.34

+
+
+
+

soup_cache_new ()

+
SoupCache *         soup_cache_new                      (const char *cache_dir,
+                                                         SoupCacheType cache_type);
+

+Creates a new SoupCache. +

+
++ + + + + + + + + + + + + + +

cache_dir :

the directory to store the cached data, or NULL to use the default one

cache_type :

the SoupCacheType of the cache

Returns :

a new SoupCache +
+

Since 2.34

+
+
+
+

soup_cache_flush ()

+
void                soup_cache_flush                    (SoupCache *cache);
+

+This function will force all pending writes in the cache to be +committed to disk. For doing so it will iterate the GMainContext +associated with cache's session as long as needed. +

+
++ + + + +

cache :

a SoupCache +
+

Since 2.34

+
+
+
+

soup_cache_clear ()

+
void                soup_cache_clear                    (SoupCache *cache);
+

+Will remove all entries in the cache plus all the cache files +associated with them. +

+
++ + + + +

cache :

a SoupCache +
+

Since 2.34

+
+
+
+

soup_cache_dump ()

+
void                soup_cache_dump                     (SoupCache *cache);
+

+

+
+
+
+

soup_cache_load ()

+
void                soup_cache_load                     (SoupCache *cache);
+

+

+
+
+
+

soup_cache_get_max_size ()

+
guint               soup_cache_get_max_size             (SoupCache *cache);
+

+

+
+
+
+

soup_cache_set_max_size ()

+
void                soup_cache_set_max_size             (SoupCache *cache,
+                                                         guint max_size);
+

+

+
+
+
+

Property Details

+
+

The "cache-dir" property

+
  "cache-dir"                gchar*                : Read / Write / Construct Only
+

The directory to store the cache files.

+

Default value: NULL

+
+
+
+

The "cache-type" property

+
  "cache-type"               SoupCacheType         : Read / Write / Construct Only
+

Whether the cache is private or shared.

+

Default value: SOUP_CACHE_SINGLE_USER

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupContentDecoder.html b/docs/reference/html/SoupContentDecoder.html new file mode 100644 index 0000000..167e647 --- /dev/null +++ b/docs/reference/html/SoupContentDecoder.html @@ -0,0 +1,106 @@ + + + + +SoupContentDecoder + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupContentDecoder

+

SoupContentDecoder — Content-Encoding handler

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupContentDecoder;
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupContentDecoder
+
+
+
+

Implemented Interfaces

+

+SoupContentDecoder implements + SoupSessionFeature.

+
+
+

Description

+

+SoupContentDecoder handles the "Accept-Encoding" header on +outgoing messages, and the "Content-Encoding" header on incoming +ones. If you add it to a session with soup_session_add_feature() or +soup_session_add_feature_by_type(), the session will automatically +use Content-Encoding as appropriate. +

+

+(Note that currently there is no way to (automatically) use +Content-Encoding when sending a request body, or to pick specific +encoding types to support.) +

+

+If SoupContentDecoder successfully decodes the Content-Encoding, +it will set the SOUP_MESSAGE_CONTENT_DECODED flag on the message, +and the message body and the chunks in the "got_chunk" +signals will contain the decoded data; however, the message headers +will be unchanged (and so "Content-Encoding" will still be present, +"Content-Length" will describe the original encoded length, etc). +

+

+If "Content-Encoding" contains any encoding types that +SoupContentDecoder doesn't recognize, then none of the encodings +will be decoded (and the SOUP_MESSAGE_CONTENT_DECODED flag will +not be set). +

+
+
+

Details

+
+

SoupContentDecoder

+
typedef struct _SoupContentDecoder SoupContentDecoder;
+

+

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupContentSniffer.html b/docs/reference/html/SoupContentSniffer.html new file mode 100644 index 0000000..d98ee16 --- /dev/null +++ b/docs/reference/html/SoupContentSniffer.html @@ -0,0 +1,152 @@ + + + + +SoupContentSniffer + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupContentSniffer

+

SoupContentSniffer — Content sniffing for SoupSession

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupContentSniffer;
+SoupContentSniffer * soup_content_sniffer_new           (void);
+char *              soup_content_sniffer_sniff          (SoupContentSniffer *sniffer,
+                                                         SoupMessage *msg,
+                                                         SoupBuffer *buffer,
+                                                         GHashTable **params);
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupContentSniffer
+
+
+
+

Implemented Interfaces

+

+SoupContentSniffer implements + SoupSessionFeature.

+
+
+

Description

+

+A SoupContentSniffer tries to detect the actual content type of +the files that are being downloaded by looking at some of the data +before the SoupMessage emits its "got-headers" signal. +SoupContentSniffer implements SoupSessionFeature, so you can add +content sniffing to a session with soup_session_add_feature() or +soup_session_add_feature_by_type(). +

+
+
+

Details

+
+

SoupContentSniffer

+
typedef struct _SoupContentSniffer SoupContentSniffer;
+

+

+
+
+
+

soup_content_sniffer_new ()

+
SoupContentSniffer * soup_content_sniffer_new           (void);
+

+Creates a new SoupContentSniffer. +

+
++ + + + +

Returns :

a new SoupContentSniffer +
+

Since 2.27.3

+
+
+
+

soup_content_sniffer_sniff ()

+
char *              soup_content_sniffer_sniff          (SoupContentSniffer *sniffer,
+                                                         SoupMessage *msg,
+                                                         SoupBuffer *buffer,
+                                                         GHashTable **params);
+

+Sniffs buffer to determine its Content-Type. The result may also +be influenced by the Content-Type declared in msg's response +headers. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

sniffer :

a SoupContentSniffer +

msg :

the message to sniff

buffer :

a buffer containing the start of msg's response body

params :

return +location for Content-Type parameters (eg, "charset"), or NULL. [element-type utf8 utf8][out][transfer full][allow-none] +

Returns :

the sniffed Content-Type of buffer; this will never be NULL, +but may be "application/octet-stream".
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupCookie.html b/docs/reference/html/SoupCookie.html new file mode 100644 index 0000000..c166d5f --- /dev/null +++ b/docs/reference/html/SoupCookie.html @@ -0,0 +1,1046 @@ + + + + +SoupCookie + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupCookie

+

SoupCookie — HTTP Cookies

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupCookie;
+SoupCookie *        soup_cookie_new                     (const char *name,
+                                                         const char *value,
+                                                         const char *domain,
+                                                         const char *path,
+                                                         int max_age);
+SoupCookie *        soup_cookie_parse                   (const char *header,
+                                                         SoupURI *origin);
+SoupCookie *        soup_cookie_copy                    (SoupCookie *cookie);
+void                soup_cookie_free                    (SoupCookie *cookie);
+
+void                soup_cookie_set_name                (SoupCookie *cookie,
+                                                         const char *name);
+const char *        soup_cookie_get_name                (SoupCookie *cookie);
+void                soup_cookie_set_value               (SoupCookie *cookie,
+                                                         const char *value);
+const char *        soup_cookie_get_value               (SoupCookie *cookie);
+void                soup_cookie_set_domain              (SoupCookie *cookie,
+                                                         const char *domain);
+const char *        soup_cookie_get_domain              (SoupCookie *cookie);
+void                soup_cookie_set_path                (SoupCookie *cookie,
+                                                         const char *path);
+const char *        soup_cookie_get_path                (SoupCookie *cookie);
+void                soup_cookie_set_max_age             (SoupCookie *cookie,
+                                                         int max_age);
+#define             SOUP_COOKIE_MAX_AGE_ONE_HOUR
+#define             SOUP_COOKIE_MAX_AGE_ONE_DAY
+#define             SOUP_COOKIE_MAX_AGE_ONE_WEEK
+#define             SOUP_COOKIE_MAX_AGE_ONE_YEAR
+void                soup_cookie_set_expires             (SoupCookie *cookie,
+                                                         SoupDate *expires);
+SoupDate *          soup_cookie_get_expires             (SoupCookie *cookie);
+void                soup_cookie_set_secure              (SoupCookie *cookie,
+                                                         gboolean secure);
+gboolean            soup_cookie_get_secure              (SoupCookie *cookie);
+void                soup_cookie_set_http_only           (SoupCookie *cookie,
+                                                         gboolean http_only);
+gboolean            soup_cookie_get_http_only           (SoupCookie *cookie);
+
+gboolean            soup_cookie_applies_to_uri          (SoupCookie *cookie,
+                                                         SoupURI *uri);
+gboolean            soup_cookie_domain_matches          (SoupCookie *cookie,
+                                                         const char *host);
+
+char *              soup_cookie_to_cookie_header        (SoupCookie *cookie);
+char *              soup_cookie_to_set_cookie_header    (SoupCookie *cookie);
+
+GSList *            soup_cookies_from_request           (SoupMessage *msg);
+GSList *            soup_cookies_from_response          (SoupMessage *msg);
+void                soup_cookies_to_request             (GSList *cookies,
+                                                         SoupMessage *msg);
+void                soup_cookies_to_response            (GSList *cookies,
+                                                         SoupMessage *msg);
+char *              soup_cookies_to_cookie_header       (GSList *cookies);
+void                soup_cookies_free                   (GSList *cookies);
+
+
+
+

Object Hierarchy

+
+  GBoxed
+   +----SoupCookie
+
+
+
+

Description

+

+SoupCookie implements HTTP cookies, primarily as described by +the +original Netscape cookie specification, but with slight +modifications based on RFC 2109, Microsoft's +HttpOnly extension attribute, and observed real-world usage +(and, in particular, based on what Firefox does). +

+

+To have a SoupSession handle cookies for your application +automatically, use a SoupCookieJar. +

+
+
+

Details

+
+

SoupCookie

+
typedef struct {
+	char     *name;
+	char     *value;
+	char     *domain;
+	char     *path;
+	SoupDate *expires;
+	gboolean  secure;
+	gboolean  http_only;
+} SoupCookie;
+
+

+An HTTP cookie. +

+

+name and value will be set for all cookies. If the cookie is +generated from a string that appears to have no name, then name +will be the empty string. +

+

+domain and path give the host or domain, and path within that +host/domain, to restrict this cookie to. If domain starts with +".", that indicates a domain (which matches the string after the +".", or any hostname that has domain as a suffix). Otherwise, it +is a hostname and must match exactly. +

+

+expires will be non-NULL if the cookie uses either the original +"expires" attribute, or the "max-age" attribute specified in RFC +2109. If expires is NULL, it indicates that neither "expires" nor +"max-age" was specified, and the cookie expires at the end of the +session. +

+

+If http_only is set, the cookie should not be exposed to untrusted +code (eg, javascript), so as to minimize the danger posed by +cross-site scripting attacks. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

char *name;

the cookie name

char *value;

the cookie value

char *domain;

the "domain" attribute, or else the hostname that the +cookie came from.

char *path;

the "path" attribute, or NULL +

SoupDate *expires;

the cookie expiration time, or NULL for a session cookie

gboolean secure;

+TRUE if the cookie should only be transferred over SSL

gboolean http_only;

+TRUE if the cookie should not be exposed to scripts
+

Since 2.24

+
+
+
+

soup_cookie_new ()

+
SoupCookie *        soup_cookie_new                     (const char *name,
+                                                         const char *value,
+                                                         const char *domain,
+                                                         const char *path,
+                                                         int max_age);
+

+Creates a new SoupCookie with the given attributes. (Use +soup_cookie_set_secure() and soup_cookie_set_http_only() if you +need to set those attributes on the returned cookie.) +

+

+max_age is used to set the "expires" attribute on the cookie; pass +-1 to not include the attribute (indicating that the cookie expires +with the current session), 0 for an already-expired cookie, or a +lifetime in seconds. You can use the constants +SOUP_COOKIE_MAX_AGE_ONE_HOUR, SOUP_COOKIE_MAX_AGE_ONE_DAY, +SOUP_COOKIE_MAX_AGE_ONE_WEEK and SOUP_COOKIE_MAX_AGE_ONE_YEAR (or +multiples thereof) to calculate this value. (If you really care +about setting the exact time that the cookie will expire, use +soup_cookie_set_expires().) +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

name :

cookie name

value :

cookie value

domain :

cookie domain or hostname

path :

cookie path, or NULL +

max_age :

max age of the cookie, or -1 for a session cookie

Returns :

a new SoupCookie.
+

Since 2.24

+
+
+
+

soup_cookie_parse ()

+
SoupCookie *        soup_cookie_parse                   (const char *header,
+                                                         SoupURI *origin);
+

+Parses header and returns a SoupCookie. (If header contains +multiple cookies, only the first one will be parsed.) +

+

+If header does not have "path" or "domain" attributes, they will +be defaulted from origin. If origin is NULL, path will default +to "/", but domain will be left as NULL. Note that this is not a +valid state for a SoupCookie, and you will need to fill in some +appropriate string for the domain if you want to actually make use +of the cookie. +

+
++ + + + + + + + + + + + + + +

header :

a cookie string (eg, the value of a Set-Cookie header)

origin :

origin of the cookie, or NULL +

Returns :

a new SoupCookie, or NULL if it could not be +parsed, or contained an illegal "domain" attribute for a cookie +originating from origin.
+

Since 2.24

+
+
+
+

soup_cookie_copy ()

+
SoupCookie *        soup_cookie_copy                    (SoupCookie *cookie);
+

+Copies cookie. +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

a copy of cookie +
+

Since 2.24

+
+
+
+

soup_cookie_free ()

+
void                soup_cookie_free                    (SoupCookie *cookie);
+

+Frees cookie +

+
++ + + + +

cookie :

a SoupCookie +
+

Since 2.24

+
+
+
+

soup_cookie_set_name ()

+
void                soup_cookie_set_name                (SoupCookie *cookie,
+                                                         const char *name);
+

+Sets cookie's name to name +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

name :

the new name
+

Since 2.24

+
+
+
+

soup_cookie_get_name ()

+
const char *        soup_cookie_get_name                (SoupCookie *cookie);
+

+Gets cookie's name +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

+cookie's name
+

Since 2.32

+
+
+
+

soup_cookie_set_value ()

+
void                soup_cookie_set_value               (SoupCookie *cookie,
+                                                         const char *value);
+

+Sets cookie's value to value +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

value :

the new value
+

Since 2.24

+
+
+
+

soup_cookie_get_value ()

+
const char *        soup_cookie_get_value               (SoupCookie *cookie);
+

+Gets cookie's value +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

+cookie's value
+

Since 2.32

+
+
+
+

soup_cookie_set_domain ()

+
void                soup_cookie_set_domain              (SoupCookie *cookie,
+                                                         const char *domain);
+

+Sets cookie's domain to domain +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

domain :

the new domain
+

Since 2.24

+
+
+
+

soup_cookie_get_domain ()

+
const char *        soup_cookie_get_domain              (SoupCookie *cookie);
+

+Gets cookie's domain +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

+cookie's domain
+

Since 2.32

+
+
+
+

soup_cookie_set_path ()

+
void                soup_cookie_set_path                (SoupCookie *cookie,
+                                                         const char *path);
+

+Sets cookie's path to path +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

path :

the new path
+

Since 2.24

+
+
+
+

soup_cookie_get_path ()

+
const char *        soup_cookie_get_path                (SoupCookie *cookie);
+

+Gets cookie's path +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

+cookie's path
+

Since 2.32

+
+
+
+

soup_cookie_set_max_age ()

+
void                soup_cookie_set_max_age             (SoupCookie *cookie,
+                                                         int max_age);
+

+Sets cookie's max age to max_age. If max_age is -1, the cookie +is a session cookie, and will expire at the end of the client's +session. Otherwise, it is the number of seconds until the cookie +expires. You can use the constants SOUP_COOKIE_MAX_AGE_ONE_HOUR, +SOUP_COOKIE_MAX_AGE_ONE_DAY, SOUP_COOKIE_MAX_AGE_ONE_WEEK and +SOUP_COOKIE_MAX_AGE_ONE_YEAR (or multiples thereof) to calculate +this value. (A value of 0 indicates that the cookie should be +considered already-expired.) +

+

+(This sets the same property as soup_cookie_set_expires().) +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

max_age :

the new max age
+

Since 2.24

+
+
+
+

SOUP_COOKIE_MAX_AGE_ONE_HOUR

+
#define SOUP_COOKIE_MAX_AGE_ONE_HOUR (60 * 60)
+
+

+A constant corresponding to 1 hour, for use with soup_cookie_new() +and soup_cookie_set_max_age(). +

+

Since 2.24

+
+
+
+

SOUP_COOKIE_MAX_AGE_ONE_DAY

+
#define SOUP_COOKIE_MAX_AGE_ONE_DAY  (SOUP_COOKIE_MAX_AGE_ONE_HOUR * 24)
+
+

+A constant corresponding to 1 day, for use with soup_cookie_new() +and soup_cookie_set_max_age(). +

+

Since 2.24

+
+
+
+

SOUP_COOKIE_MAX_AGE_ONE_WEEK

+
#define SOUP_COOKIE_MAX_AGE_ONE_WEEK (SOUP_COOKIE_MAX_AGE_ONE_DAY * 7)
+
+

+A constant corresponding to 1 week, for use with soup_cookie_new() +and soup_cookie_set_max_age(). +

+

Since 2.24

+
+
+
+

SOUP_COOKIE_MAX_AGE_ONE_YEAR

+
#define SOUP_COOKIE_MAX_AGE_ONE_YEAR (SOUP_COOKIE_MAX_AGE_ONE_DAY * 365.2422)
+
+

+A constant corresponding to 1 year, for use with soup_cookie_new() +and soup_cookie_set_max_age(). +

+

Since 2.24

+
+
+
+

soup_cookie_set_expires ()

+
void                soup_cookie_set_expires             (SoupCookie *cookie,
+                                                         SoupDate *expires);
+

+Sets cookie's expiration time to expires. If expires is NULL, +cookie will be a session cookie and will expire at the end of the +client's session. +

+

+(This sets the same property as soup_cookie_set_max_age().) +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

expires :

the new expiration time, or NULL +
+

Since 2.24

+
+
+
+

soup_cookie_get_expires ()

+
SoupDate *          soup_cookie_get_expires             (SoupCookie *cookie);
+

+Gets cookie's expiration time +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

+cookie's expiration time, which is +owned by cookie and should not be modified or freed. [transfer none] +
+

Since 2.32

+
+
+
+

soup_cookie_set_secure ()

+
void                soup_cookie_set_secure              (SoupCookie *cookie,
+                                                         gboolean secure);
+

+Sets cookie's secure attribute to secure. If TRUE, cookie will +only be transmitted from the client to the server over secure +(https) connections. +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

secure :

the new value for the secure attribute
+

Since 2.24

+
+
+
+

soup_cookie_get_secure ()

+
gboolean            soup_cookie_get_secure              (SoupCookie *cookie);
+

+Gets cookie's secure attribute +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

+cookie's secure attribute
+

Since 2.32

+
+
+
+

soup_cookie_set_http_only ()

+
void                soup_cookie_set_http_only           (SoupCookie *cookie,
+                                                         gboolean http_only);
+

+Sets cookie's HttpOnly attribute to http_only. If TRUE, cookie +will be marked as "http only", meaning it should not be exposed to +web page scripts or other untrusted code. +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

http_only :

the new value for the HttpOnly attribute
+

Since 2.24

+
+
+
+

soup_cookie_get_http_only ()

+
gboolean            soup_cookie_get_http_only           (SoupCookie *cookie);
+

+Gets cookie's HttpOnly attribute +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

+cookie's HttpOnly attribute
+

Since 2.32

+
+
+
+

soup_cookie_applies_to_uri ()

+
gboolean            soup_cookie_applies_to_uri          (SoupCookie *cookie,
+                                                         SoupURI *uri);
+

+Tests if cookie should be sent to uri. +

+

+(At the moment, this does not check that cookie's domain matches +uri, because it assumes that the caller has already done that. +But don't rely on that; it may change in the future.) +

+
++ + + + + + + + + + + + + + +

cookie :

a SoupCookie +

uri :

a SoupURI +

Returns :

+TRUE if cookie should be sent to uri, FALSE if +not
+

Since 2.24

+
+
+
+

soup_cookie_domain_matches ()

+
gboolean            soup_cookie_domain_matches          (SoupCookie *cookie,
+                                                         const char *host);
+

+Checks if the cookie's domain and host match in the sense that +cookie should be sent when making a request to host, or that +cookie should be accepted when receiving a response from host. +

+
++ + + + + + + + + + + + + + +

cookie :

a SoupCookie +

host :

a host name

Returns :

+TRUE if the domains match, FALSE otherwise
+

Since 2.30

+
+
+
+

soup_cookie_to_cookie_header ()

+
char *              soup_cookie_to_cookie_header        (SoupCookie *cookie);
+

+Serializes cookie in the format used by the Cookie header (ie, for +returning a cookie from a SoupSession to a server). +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

the header
+

Since 2.24

+
+
+
+

soup_cookie_to_set_cookie_header ()

+
char *              soup_cookie_to_set_cookie_header    (SoupCookie *cookie);
+

+Serializes cookie in the format used by the Set-Cookie header +(ie, for sending a cookie from a SoupServer to a client). +

+
++ + + + + + + + + + +

cookie :

a SoupCookie +

Returns :

the header
+

Since 2.24

+
+
+
+

soup_cookies_from_request ()

+
GSList *            soup_cookies_from_request           (SoupMessage *msg);
+

+Parses msg's Cookie request header and returns a GSList of +SoupCookies. As the "Cookie" header, unlike "Set-Cookie", +only contains cookie names and values, none of the other +SoupCookie fields will be filled in. (Thus, you can't generally +pass a cookie returned from this method directly to +soup_cookies_to_response().) +

+
++ + + + + + + + + + +

msg :

a SoupMessage containing a "Cookie" request header

Returns :

a GSList +of SoupCookies, which can be freed with +soup_cookies_free(). [element-type SoupCookie][transfer full] +
+

Since 2.24

+
+
+
+

soup_cookies_from_response ()

+
GSList *            soup_cookies_from_response          (SoupMessage *msg);
+

+Parses msg's Set-Cookie response headers and returns a GSList of +SoupCookies. Cookies that do not specify "path" or +"domain" attributes will have their values defaulted from msg. +

+
++ + + + + + + + + + +

msg :

a SoupMessage containing a "Set-Cookie" response header

Returns :

a GSList +of SoupCookies, which can be freed with +soup_cookies_free(). [element-type SoupCookie][transfer full] +
+

Since 2.24

+
+
+
+

soup_cookies_to_request ()

+
void                soup_cookies_to_request             (GSList *cookies,
+                                                         SoupMessage *msg);
+

+Adds the name and value of each cookie in cookies to msg's +"Cookie" request. (If msg already has a "Cookie" request header, +these cookies will be appended to the cookies already present. Be +careful that you do not append the same cookies twice, eg, when +requeuing a message.) +

+
++ + + + + + + + + + +

cookies :

a GSList of SoupCookie. [element-type SoupCookie] +

msg :

a SoupMessage +
+

Since 2.24

+
+
+
+

soup_cookies_to_response ()

+
void                soup_cookies_to_response            (GSList *cookies,
+                                                         SoupMessage *msg);
+

+Appends a "Set-Cookie" response header to msg for each cookie in +cookies. (This is in addition to any other "Set-Cookie" headers +msg may already have.) +

+
++ + + + + + + + + + +

cookies :

a GSList of SoupCookie. [element-type SoupCookie] +

msg :

a SoupMessage +
+

Since 2.24

+
+
+
+

soup_cookies_to_cookie_header ()

+
char *              soup_cookies_to_cookie_header       (GSList *cookies);
+

+Serializes a GSList of SoupCookie into a string suitable for +setting as the value of the "Cookie" header. +

+
++ + + + + + + + + + +

cookies :

a GSList of SoupCookie. [element-type SoupCookie] +

Returns :

the serialization of cookies +
+

Since 2.24

+
+
+
+

soup_cookies_free ()

+
void                soup_cookies_free                   (GSList *cookies);
+

+Frees cookies. +

+
++ + + + +

cookies :

a GSList of SoupCookie. [element-type SoupCookie] +
+

Since 2.24

+
+
+
+

See Also

+SoupMessage +
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupCookieJar.html b/docs/reference/html/SoupCookieJar.html new file mode 100644 index 0000000..71d3a17 --- /dev/null +++ b/docs/reference/html/SoupCookieJar.html @@ -0,0 +1,529 @@ + + + + +SoupCookieJar + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupCookieJar

+

SoupCookieJar — Automatic cookie handling for SoupSession

+
+ +
+

Object Hierarchy

+
+  GObject
+   +----SoupCookieJar
+         +----SoupCookieJarSqlite
+         +----SoupCookieJarText
+
+
+
+

Implemented Interfaces

+

+SoupCookieJar implements + SoupSessionFeature.

+
+
+

Properties

+
+  "accept-policy"            SoupCookieJarAcceptPolicy  : Read / Write
+  "read-only"                gboolean              : Read / Write / Construct Only
+
+
+
+

Signals

+
+  "changed"                                        : Run First
+
+
+
+

Description

+

+A SoupCookieJar stores SoupCookies and arrange for them +to be sent with the appropriate SoupMessages. +SoupCookieJar implements SoupSessionFeature, so you can add a +cookie jar to a session with soup_session_add_feature() or +soup_session_add_feature_by_type(). +

+

+Note that the base SoupCookieJar class does not support any form +of long-term cookie persistence. +

+
+
+

Details

+
+

SoupCookieJar

+
typedef struct _SoupCookieJar SoupCookieJar;
+

+

+
+
+
+

soup_cookie_jar_new ()

+
SoupCookieJar *     soup_cookie_jar_new                 (void);
+

+Creates a new SoupCookieJar. The base SoupCookieJar class does +not support persistent storage of cookies; use a subclass for that. +

+
++ + + + +

Returns :

a new SoupCookieJar +
+

Since 2.24

+
+
+
+

soup_cookie_jar_get_cookies ()

+
char *              soup_cookie_jar_get_cookies         (SoupCookieJar *jar,
+                                                         SoupURI *uri,
+                                                         gboolean for_http);
+

+Retrieves (in Cookie-header form) the list of cookies that would +be sent with a request to uri. +

+

+If for_http is TRUE, the return value will include cookies marked +"HttpOnly" (that is, cookies that the server wishes to keep hidden +from client-side scripting operations such as the JavaScript +document.cookies property). Since SoupCookieJar sets the Cookie +header itself when making the actual HTTP request, you should +almost certainly be setting for_http to FALSE if you are calling +this. +

+
++ + + + + + + + + + + + + + + + + + +

jar :

a SoupCookieJar +

uri :

a SoupURI +

for_http :

whether or not the return value is being passed directly +to an HTTP operation

Returns :

the cookies, in string form, or NULL if there are no +cookies for uri.
+

Since 2.24

+
+
+
+

soup_cookie_jar_set_cookie ()

+
void                soup_cookie_jar_set_cookie          (SoupCookieJar *jar,
+                                                         SoupURI *uri,
+                                                         const char *cookie);
+

+Adds cookie to jar, exactly as though it had appeared in a +Set-Cookie header returned from a request to uri. +

+

+Keep in mind that if the SoupCookieJarAcceptPolicy +SOUP_COOKIE_JAR_ACCEPT_NO_THIRD_PARTY is set you'll need to use +soup_cookie_jar_set_cookie_with_first_party(), otherwise the jar +will have no way of knowing if the cookie is being set by a third +party or not. +

+
++ + + + + + + + + + + + + + +

jar :

a SoupCookieJar +

uri :

the URI setting the cookie

cookie :

the stringified cookie to set
+

Since 2.24

+
+
+
+

soup_cookie_jar_set_cookie_with_first_party ()

+
void                soup_cookie_jar_set_cookie_with_first_party
+                                                        (SoupCookieJar *jar,
+                                                         SoupURI *uri,
+                                                         SoupURI *first_party,
+                                                         const char *cookie);
+

+Adds cookie to jar, exactly as though it had appeared in a +Set-Cookie header returned from a request to uri. first_party +will be used to reject cookies coming from third party resources in +case such a security policy is set in the jar. +

+
++ + + + + + + + + + + + + + + + + + +

jar :

a SoupCookieJar +

uri :

the URI setting the cookie

first_party :

the URI for the main document

cookie :

the stringified cookie to set
+

Since 2.30

+
+
+
+

soup_cookie_jar_add_cookie ()

+
void                soup_cookie_jar_add_cookie          (SoupCookieJar *jar,
+                                                         SoupCookie *cookie);
+

+Adds cookie to jar, emitting the 'changed' signal if we are modifying +an existing cookie or adding a valid new cookie ('valid' means +that the cookie's expire date is not in the past). +

+

+cookie will be 'stolen' by the jar, so don't free it afterwards. +

+
++ + + + + + + + + + +

jar :

a SoupCookieJar +

cookie :

a SoupCookie +
+

Since 2.26

+
+
+
+

soup_cookie_jar_delete_cookie ()

+
void                soup_cookie_jar_delete_cookie       (SoupCookieJar *jar,
+                                                         SoupCookie *cookie);
+

+Deletes cookie from jar, emitting the 'changed' signal. +

+
++ + + + + + + + + + +

jar :

a SoupCookieJar +

cookie :

a SoupCookie +
+

Since 2.26

+
+
+
+

soup_cookie_jar_all_cookies ()

+
GSList *            soup_cookie_jar_all_cookies         (SoupCookieJar *jar);
+

+Constructs a GSList with every cookie inside the jar. +The cookies in the list are a copy of the original, so +you have to free them when you are done with them. +

+
++ + + + + + + + + + +

jar :

a SoupCookieJar +

Returns :

a GSList +with all the cookies in the jar. [transfer full][element-type Soup.Cookie] +
+

Since 2.26

+
+
+
+

enum SoupCookieJarAcceptPolicy

+
typedef enum {
+	SOUP_COOKIE_JAR_ACCEPT_ALWAYS,
+	SOUP_COOKIE_JAR_ACCEPT_NEVER,
+	SOUP_COOKIE_JAR_ACCEPT_NO_THIRD_PARTY
+} SoupCookieJarAcceptPolicy;
+
+
++ + + + + + + + + + + + + + +

SOUP_COOKIE_JAR_ACCEPT_ALWAYS

accept all cookies unconditionally. +

SOUP_COOKIE_JAR_ACCEPT_NEVER

reject all cookies unconditionally. +

SOUP_COOKIE_JAR_ACCEPT_NO_THIRD_PARTY

accept all cookies set by +the main document loaded in the application using libsoup. An +example of the most common case, web browsers, would be: If +http://www.example.com is the page loaded, accept all cookies set +by example.com, but if a resource from http://www.third-party.com +is loaded from that page reject any cookie that it could try to +set. For libsoup to be able to tell apart first party cookies from +the rest, the application must call soup_message_set_first_party() +on each outgoing SoupMessage, setting the SoupURI of the main +document. If no first party is set in a message when this policy is +in effect, cookies will be assumed to be third party by default. +
+

Since 2.30

+
+
+
+

soup_cookie_jar_get_accept_policy ()

+
SoupCookieJarAcceptPolicy soup_cookie_jar_get_accept_policy
+                                                        (SoupCookieJar *jar);
+

+Gets jar's SoupCookieJarAcceptPolicy +

+
++ + + + + + + + + + +

jar :

a SoupCookieJar +

Returns :

the SoupCookieJarAcceptPolicy set in the jar +
+

Since 2.30

+
+
+
+

soup_cookie_jar_set_accept_policy ()

+
void                soup_cookie_jar_set_accept_policy   (SoupCookieJar *jar,
+                                                         SoupCookieJarAcceptPolicy policy);
+

+Sets policy as the cookie acceptance policy for jar. +

+
++ + + + + + + + + + +

jar :

a SoupCookieJar +

policy :

a SoupCookieJarAcceptPolicy +
+

Since 2.30

+
+
+
+

SOUP_COOKIE_JAR_READ_ONLY

+
#define SOUP_COOKIE_JAR_READ_ONLY "read-only"
+
+

+Alias for the "read-only" property. (Whether +or not the cookie jar is read-only.) +

+
+
+
+

SOUP_COOKIE_JAR_ACCEPT_POLICY

+
#define SOUP_COOKIE_JAR_ACCEPT_POLICY "accept-policy"
+
+

+Alias for the "accept-policy" property. +

+

Since 2.30

+
+
+
+

Property Details

+
+

The "accept-policy" property

+
  "accept-policy"            SoupCookieJarAcceptPolicy  : Read / Write
+

+The policy the jar should follow to accept or reject cookies +

+

Default value: SOUP_COOKIE_JAR_ACCEPT_ALWAYS

+

Since 2.30

+
+
+
+

The "read-only" property

+
  "read-only"                gboolean              : Read / Write / Construct Only
+

Whether or not the cookie jar is read-only.

+

Default value: FALSE

+
+
+
+

Signal Details

+
+

The "changed" signal

+
void                user_function                      (SoupCookieJar *jar,
+                                                        SoupCookie    *old_cookie,
+                                                        SoupCookie    *new_cookie,
+                                                        gpointer       user_data)       : Run First
+

+Emitted when jar changes. If a cookie has been added, +new_cookie will contain the newly-added cookie and +old_cookie will be NULL. If a cookie has been deleted, +old_cookie will contain the to-be-deleted cookie and +new_cookie will be NULL. If a cookie has been changed, +old_cookie will contain its old value, and new_cookie its +new value. +

+
++ + + + + + + + + + + + + + + + + + +

jar :

the SoupCookieJar +

old_cookie :

the old SoupCookie value

new_cookie :

the new SoupCookie value

user_data :

user data set when the signal handler was connected.
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupCookieJarSqlite.html b/docs/reference/html/SoupCookieJarSqlite.html new file mode 100644 index 0000000..ab79f35 --- /dev/null +++ b/docs/reference/html/SoupCookieJarSqlite.html @@ -0,0 +1,154 @@ + + + + +SoupCookieJarSqlite + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupCookieJarSqlite

+

SoupCookieJarSqlite — SQLite-based Cookie Jar

+
+
+

Synopsis

+
+#include <libsoup/soup-gnome.h>
+
+                    SoupCookieJarSqlite;
+SoupCookieJar *     soup_cookie_jar_sqlite_new          (const char *filename,
+                                                         gboolean read_only);
+
+#define             SOUP_COOKIE_JAR_SQLITE_FILENAME
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupCookieJar
+         +----SoupCookieJarSqlite
+
+
+
+

Implemented Interfaces

+

+SoupCookieJarSqlite implements + SoupSessionFeature.

+
+
+

Properties

+
+  "filename"                 gchar*                : Read / Write / Construct Only
+
+
+
+

Description

+

+SoupCookieJarSqlite is a SoupCookieJar that reads cookies from and +writes them to an SQLite file in the new Mozilla format. +

+
+
+

Details

+
+

SoupCookieJarSqlite

+
typedef struct _SoupCookieJarSqlite SoupCookieJarSqlite;
+

+

+
+
+
+

soup_cookie_jar_sqlite_new ()

+
SoupCookieJar *     soup_cookie_jar_sqlite_new          (const char *filename,
+                                                         gboolean read_only);
+

+Creates a SoupCookieJarSqlite. +

+

+filename will be read in at startup to create an initial set of +cookies. If read_only is FALSE, then the non-session cookies will +be written to filename when the 'changed' signal is emitted from +the jar. (If read_only is TRUE, then the cookie jar will only be +used for this session, and changes made to it will be lost when the +jar is destroyed.) +

+
++ + + + + + + + + + + + + + +

filename :

the filename to read to/write from, or NULL +

read_only :

+TRUE if filename is read-only

Returns :

the new SoupCookieJar +
+

Since 2.26

+
+
+
+

SOUP_COOKIE_JAR_SQLITE_FILENAME

+
#define SOUP_COOKIE_JAR_SQLITE_FILENAME  "filename"
+
+

+Alias for the "filename" property. (The +cookie-storage filename.) +

+
+
+
+

Property Details

+
+

The "filename" property

+
  "filename"                 gchar*                : Read / Write / Construct Only
+

Cookie-storage filename.

+

Default value: NULL

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupCookieJarText.html b/docs/reference/html/SoupCookieJarText.html new file mode 100644 index 0000000..688b352 --- /dev/null +++ b/docs/reference/html/SoupCookieJarText.html @@ -0,0 +1,153 @@ + + + + +SoupCookieJarText + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupCookieJarText

+

SoupCookieJarText — Text-file-based ("cookies.txt") Cookie Jar

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupCookieJarText;
+SoupCookieJar *     soup_cookie_jar_text_new            (const char *filename,
+                                                         gboolean read_only);
+
+#define             SOUP_COOKIE_JAR_TEXT_FILENAME
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupCookieJar
+         +----SoupCookieJarText
+
+
+
+

Implemented Interfaces

+

+SoupCookieJarText implements + SoupSessionFeature.

+
+
+

Properties

+
+  "filename"                 gchar*                : Read / Write / Construct Only
+
+
+
+

Description

+

+SoupCookieJarText is a SoupCookieJar that reads cookies from and +writes them to a text file in the Mozilla "cookies.txt" format. +

+
+
+

Details

+
+

SoupCookieJarText

+
typedef struct _SoupCookieJarText SoupCookieJarText;
+

+

+
+
+
+

soup_cookie_jar_text_new ()

+
SoupCookieJar *     soup_cookie_jar_text_new            (const char *filename,
+                                                         gboolean read_only);
+

+Creates a SoupCookieJarText. +

+

+filename will be read in at startup to create an initial set of +cookies. If read_only is FALSE, then the non-session cookies will +be written to filename when the 'changed' signal is emitted from +the jar. (If read_only is TRUE, then the cookie jar will only be +used for this session, and changes made to it will be lost when the +jar is destroyed.) +

+
++ + + + + + + + + + + + + + +

filename :

the filename to read to/write from

read_only :

+TRUE if filename is read-only

Returns :

the new SoupCookieJar +
+

Since 2.26

+
+
+
+

SOUP_COOKIE_JAR_TEXT_FILENAME

+
#define SOUP_COOKIE_JAR_TEXT_FILENAME  "filename"
+
+

+Alias for the "filename" property. (The +cookie-storage filename.) +

+
+
+
+

Property Details

+
+

The "filename" property

+
  "filename"                 gchar*                : Read / Write / Construct Only
+

Cookie-storage filename.

+

Default value: NULL

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupLogger.html b/docs/reference/html/SoupLogger.html new file mode 100644 index 0000000..d7b61a3 --- /dev/null +++ b/docs/reference/html/SoupLogger.html @@ -0,0 +1,527 @@ + + + + +SoupLogger + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupLogger

+

SoupLogger — Debug logging support

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupLogger;
+enum                SoupLoggerLogLevel;
+SoupLogger *        soup_logger_new                     (SoupLoggerLogLevel level,
+                                                         int max_body_size);
+void                soup_logger_attach                  (SoupLogger *logger,
+                                                         SoupSession *session);
+void                soup_logger_detach                  (SoupLogger *logger,
+                                                         SoupSession *session);
+
+SoupLoggerLogLevel  (*SoupLoggerFilter)                 (SoupLogger *logger,
+                                                         SoupMessage *msg,
+                                                         gpointer user_data);
+void                soup_logger_set_request_filter      (SoupLogger *logger,
+                                                         SoupLoggerFilter request_filter,
+                                                         gpointer filter_data,
+                                                         GDestroyNotify destroy);
+void                soup_logger_set_response_filter     (SoupLogger *logger,
+                                                         SoupLoggerFilter response_filter,
+                                                         gpointer filter_data,
+                                                         GDestroyNotify destroy);
+
+void                (*SoupLoggerPrinter)                (SoupLogger *logger,
+                                                         SoupLoggerLogLevel level,
+                                                         char direction,
+                                                         const char *data,
+                                                         gpointer user_data);
+void                soup_logger_set_printer             (SoupLogger *logger,
+                                                         SoupLoggerPrinter printer,
+                                                         gpointer printer_data,
+                                                         GDestroyNotify destroy);
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupLogger
+
+
+
+

Implemented Interfaces

+

+SoupLogger implements + SoupSessionFeature.

+
+
+

Description

+

+SoupLogger watches a SoupSession and logs the HTTP traffic that +it generates, for debugging purposes. Many applications use an +environment variable to determine whether or not to use +SoupLogger, and to determine the amount of debugging output. +

+

+To use SoupLogger, first create a logger with soup_logger_new(), +optionally configure it with soup_logger_set_request_filter(), +soup_logger_set_response_filter(), and soup_logger_set_printer(), +and then attach it to a session (or multiple sessions) with +soup_session_add_feature(). +

+

+By default, the debugging output is sent to +stdout, and looks something like: +

+

+

+
+> POST /unauth HTTP/1.1
+> Soup-Debug-Timestamp: 1200171744
+> Soup-Debug: SoupSessionAsync 1 (0x612190), SoupMessage 1 (0x617000), SoupSocket 1 (0x612220)
+> Host: localhost
+> Content-Type: text/plain
+> Connection: close
+> 
+> This is a test.
+  
+< HTTP/1.1 201 Created
+< Soup-Debug-Timestamp: 1200171744
+< Soup-Debug: SoupMessage 1 (0x617000)
+< Date: Sun, 12 Jan 2008 21:02:24 GMT
+< Content-Length: 0
+
+

+

+

+The Soup-Debug-Timestamp line gives the time (as +a time_t) when the request was sent, or the response fully +received. +

+

+The Soup-Debug line gives further debugging +information about the SoupSession, SoupMessage, and SoupSocket +involved; the hex numbers are the addresses of the objects in +question (which may be useful if you are running in a debugger). +The decimal IDs are simply counters that uniquely identify objects +across the lifetime of the SoupLogger. In particular, this can be +used to identify when multiple messages are sent across the same +connection. +

+

+Currently, the request half of the message is logged just before +the first byte of the request gets written to the network (from the +"request_started" signal), which means that if you have +not made the complete request body available at that point, it will +not be logged. The response is logged just after the last byte of +the response body is read from the network (from the +"got_body" or "got_informational" signal), +which means that the "got_headers" signal, and anything +triggered off it (such as "authenticate") will be +emitted before the response headers are +actually logged. +

+
+
+

Details

+
+

SoupLogger

+
typedef struct _SoupLogger SoupLogger;
+

+

+
+
+
+

enum SoupLoggerLogLevel

+
typedef enum {
+	SOUP_LOGGER_LOG_NONE,
+	SOUP_LOGGER_LOG_MINIMAL,
+	SOUP_LOGGER_LOG_HEADERS,
+	SOUP_LOGGER_LOG_BODY
+} SoupLoggerLogLevel;
+
+

+Describes the level of logging output to provide. +

+
++ + + + + + + + + + + + + + + + + + +

SOUP_LOGGER_LOG_NONE

No logging +

SOUP_LOGGER_LOG_MINIMAL

Log the Request-Line or Status-Line and +the Soup-Debug pseudo-headers +

SOUP_LOGGER_LOG_HEADERS

Log the full request/response headers +

SOUP_LOGGER_LOG_BODY

Log the full headers and request/response +bodies. +
+
+
+
+

soup_logger_new ()

+
SoupLogger *        soup_logger_new                     (SoupLoggerLogLevel level,
+                                                         int max_body_size);
+

+Creates a new SoupLogger with the given debug level. If level is +SOUP_LOGGER_LOG_BODY, max_body_size gives the maximum number of +bytes of the body that will be logged. (-1 means "no limit".) +

+

+If you need finer control over what message parts are and aren't +logged, use soup_logger_set_request_filter() and +soup_logger_set_response_filter(). +

+
++ + + + + + + + + + + + + + +

level :

the debug level

max_body_size :

the maximum body size to output, or -1

Returns :

a new SoupLogger +
+
+
+
+

soup_logger_attach ()

+
void                soup_logger_attach                  (SoupLogger *logger,
+                                                         SoupSession *session);
+
+

Warning

+

soup_logger_attach is deprecated and should not be used in newly-written code. Use soup_session_add_feature() instead.

+
+

+Sets logger to watch session and print debug information for +its messages. +

+

+(The session will take a reference on logger, which will be +removed when you call soup_logger_detach(), or when the session is +destroyed.) +

+
++ + + + + + + + + + +

logger :

a SoupLogger +

session :

a SoupSession +
+
+
+
+

soup_logger_detach ()

+
void                soup_logger_detach                  (SoupLogger *logger,
+                                                         SoupSession *session);
+
+

Warning

+

soup_logger_detach is deprecated and should not be used in newly-written code. Use soup_session_remove_feature() instead.

+
+

+Stops logger from watching session. +

+
++ + + + + + + + + + +

logger :

a SoupLogger +

session :

a SoupSession +
+
+
+
+

SoupLoggerFilter ()

+
SoupLoggerLogLevel  (*SoupLoggerFilter)                 (SoupLogger *logger,
+                                                         SoupMessage *msg,
+                                                         gpointer user_data);
+

+The prototype for a logging filter. The filter callback will be +invoked for each request or response, and should analyze it and +return a SoupLoggerLogLevel value indicating how much of the +message to log. Eg, it might choose between SOUP_LOGGER_LOG_BODY +and SOUP_LOGGER_LOG_HEADERS depending on the Content-Type. +

+
++ + + + + + + + + + + + + + + + + + +

logger :

the SoupLogger +

msg :

the message being logged

user_data :

the data passed to soup_logger_set_request_filter() +or soup_logger_set_response_filter() +

Returns :

a SoupLoggerLogLevel value indicating how much of +the message to log
+
+
+
+

soup_logger_set_request_filter ()

+
void                soup_logger_set_request_filter      (SoupLogger *logger,
+                                                         SoupLoggerFilter request_filter,
+                                                         gpointer filter_data,
+                                                         GDestroyNotify destroy);
+

+Sets up a filter to determine the log level for a given request. +For each HTTP request logger will invoke request_filter to +determine how much (if any) of that request to log. (If you do not +set a request filter, logger will just always log requests at the +level passed to soup_logger_new().) +

+
++ + + + + + + + + + + + + + + + + + +

logger :

a SoupLogger +

request_filter :

the callback for request debugging

filter_data :

data to pass to the callback

destroy :

a GDestroyNotify to free filter_data +
+
+
+
+

soup_logger_set_response_filter ()

+
void                soup_logger_set_response_filter     (SoupLogger *logger,
+                                                         SoupLoggerFilter response_filter,
+                                                         gpointer filter_data,
+                                                         GDestroyNotify destroy);
+

+Sets up a filter to determine the log level for a given response. +For each HTTP response logger will invoke response_filter to +determine how much (if any) of that response to log. (If you do not +set a response filter, logger will just always log responses at +the level passed to soup_logger_new().) +

+
++ + + + + + + + + + + + + + + + + + +

logger :

a SoupLogger +

response_filter :

the callback for response debugging

filter_data :

data to pass to the callback

destroy :

a GDestroyNotify to free filter_data +
+
+
+
+

SoupLoggerPrinter ()

+
void                (*SoupLoggerPrinter)                (SoupLogger *logger,
+                                                         SoupLoggerLogLevel level,
+                                                         char direction,
+                                                         const char *data,
+                                                         gpointer user_data);
+

+The prototype for a custom printing callback. +

+

+level indicates what kind of information is being printed. Eg, it +will be SOUP_LOGGER_LOG_HEADERS if data is header data. +

+

+direction is either '<', '>', or ' ', and data is the single line +to print; the printer is expected to add a terminating newline. +

+

+To get the effect of the default printer, you would do: +

+

+

+
+ + + + + + + +
1
printf ("%c %s\n", direction, data);
+
+ +

+

+
++ + + + + + + + + + + + + + + + + + + + + + +

logger :

the SoupLogger +

level :

the level of the information being printed.

direction :

a single-character prefix to data +

data :

data to print

user_data :

the data passed to soup_logger_set_printer() +
+
+
+
+

soup_logger_set_printer ()

+
void                soup_logger_set_printer             (SoupLogger *logger,
+                                                         SoupLoggerPrinter printer,
+                                                         gpointer printer_data,
+                                                         GDestroyNotify destroy);
+

+Sets up an alternate log printing routine, if you don't want +the log to go to stdout. +

+
++ + + + + + + + + + + + + + + + + + +

logger :

a SoupLogger +

printer :

the callback for printing logging output

printer_data :

data to pass to the callback

destroy :

a GDestroyNotify to free printer_data +
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupMessage.html b/docs/reference/html/SoupMessage.html new file mode 100644 index 0000000..fe41b8b --- /dev/null +++ b/docs/reference/html/SoupMessage.html @@ -0,0 +1,1832 @@ + + + + +SoupMessage + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupMessage

+

SoupMessage — An HTTP request and response.

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupMessage;
+
+SoupMessage *       soup_message_new                    (const char *method,
+                                                         const char *uri_string);
+SoupMessage *       soup_message_new_from_uri           (const char *method,
+                                                         SoupURI *uri);
+void                soup_message_set_request            (SoupMessage *msg,
+                                                         const char *content_type,
+                                                         SoupMemoryUse req_use,
+                                                         const char *req_body,
+                                                         gsize req_length);
+void                soup_message_set_response           (SoupMessage *msg,
+                                                         const char *content_type,
+                                                         SoupMemoryUse resp_use,
+                                                         const char *resp_body,
+                                                         gsize resp_length);
+
+enum                SoupHTTPVersion;
+void                soup_message_set_http_version       (SoupMessage *msg,
+                                                         SoupHTTPVersion version);
+SoupHTTPVersion     soup_message_get_http_version       (SoupMessage *msg);
+SoupURI *           soup_message_get_uri                (SoupMessage *msg);
+void                soup_message_set_uri                (SoupMessage *msg,
+                                                         SoupURI *uri);
+SoupAddress *       soup_message_get_address            (SoupMessage *msg);
+
+void                soup_message_set_status             (SoupMessage *msg,
+                                                         guint status_code);
+void                soup_message_set_status_full        (SoupMessage *msg,
+                                                         guint status_code,
+                                                         const char *reason_phrase);
+void                soup_message_set_redirect           (SoupMessage *msg,
+                                                         guint status_code,
+                                                         const char *redirect_uri);
+gboolean            soup_message_is_keepalive           (SoupMessage *msg);
+gboolean            soup_message_get_https_status       (SoupMessage *msg,
+                                                         GTlsCertificate **certificate,
+                                                         GTlsCertificateFlags *errors);
+
+void                soup_message_set_first_party        (SoupMessage *msg,
+                                                         SoupURI *first_party);
+SoupURI *           soup_message_get_first_party        (SoupMessage *msg);
+
+guint               soup_message_add_header_handler     (SoupMessage *msg,
+                                                         const char *signal,
+                                                         const char *header,
+                                                         GCallback callback,
+                                                         gpointer user_data);
+guint               soup_message_add_status_code_handler
+                                                        (SoupMessage *msg,
+                                                         const char *signal,
+                                                         guint status_code,
+                                                         GCallback callback,
+                                                         gpointer user_data);
+
+enum                SoupMessageFlags;
+void                soup_message_set_flags              (SoupMessage *msg,
+                                                         SoupMessageFlags flags);
+SoupMessageFlags    soup_message_get_flags              (SoupMessage *msg);
+SoupBuffer *        (*SoupChunkAllocator)               (SoupMessage *msg,
+                                                         gsize max_len,
+                                                         gpointer user_data);
+void                soup_message_set_chunk_allocator    (SoupMessage *msg,
+                                                         SoupChunkAllocator allocator,
+                                                         gpointer user_data,
+                                                         GDestroyNotify destroy_notify);
+
+void                soup_message_disable_feature        (SoupMessage *msg,
+                                                         GType feature_type);
+
+#define             SOUP_MESSAGE_METHOD
+#define             SOUP_MESSAGE_URI
+#define             SOUP_MESSAGE_HTTP_VERSION
+#define             SOUP_MESSAGE_FLAGS
+#define             SOUP_MESSAGE_STATUS_CODE
+#define             SOUP_MESSAGE_REASON_PHRASE
+#define             SOUP_MESSAGE_SERVER_SIDE
+#define             SOUP_MESSAGE_FIRST_PARTY
+#define             SOUP_MESSAGE_REQUEST_BODY
+#define             SOUP_MESSAGE_REQUEST_HEADERS
+#define             SOUP_MESSAGE_RESPONSE_BODY
+#define             SOUP_MESSAGE_RESPONSE_HEADERS
+#define             SOUP_MESSAGE_TLS_CERTIFICATE
+#define             SOUP_MESSAGE_TLS_ERRORS
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupMessage
+
+
+
+

Properties

+
+  "first-party"              SoupURI*              : Read / Write
+  "flags"                    SoupMessageFlags      : Read / Write
+  "http-version"             SoupHTTPVersion       : Read / Write
+  "method"                   gchar*                : Read / Write
+  "reason-phrase"            gchar*                : Read / Write
+  "request-body"             SoupMessageBody*      : Read
+  "request-headers"          SoupMessageHeaders*   : Read
+  "response-body"            SoupMessageBody*      : Read
+  "response-headers"         SoupMessageHeaders*   : Read
+  "server-side"              gboolean              : Read / Write / Construct Only
+  "status-code"              guint                 : Read / Write
+  "tls-certificate"          GTlsCertificate*      : Read / Write
+  "tls-errors"               GTlsCertificateFlags  : Read / Write
+  "uri"                      SoupURI*              : Read / Write
+
+
+ +
+

Description

+

+A SoupMessage represents an HTTP message that is being sent or +received. +

+

+For client-side usage, you would create a SoupMessage with +soup_message_new() or soup_message_new_from_uri(), set up its +fields appropriately, and send it via a SoupSession. +

+

+For server-side usage, SoupServer will create SoupMessages automatically for incoming requests, which your application +will receive via handlers. +

+

+Note that libsoup's terminology here does not quite match the HTTP +specification: in RFC 2616, an "HTTP-message" is +either a Request, or a +Response. In libsoup, a SoupMessage combines both the request and +the response. +

+
+
+

Details

+
+

SoupMessage

+
typedef struct {
+	const char         *method;
+
+	guint               status_code;
+	char               *reason_phrase;
+
+	SoupMessageBody    *request_body;
+	SoupMessageHeaders *request_headers;
+
+	SoupMessageBody    *response_body;
+	SoupMessageHeaders *response_headers;
+} SoupMessage;
+
+

+Represents an HTTP message being sent or received. +

+

+status_code will normally be a SoupKnownStatusCode, eg, +SOUP_STATUS_OK, though of course it might actually be an unknown +status code. reason_phrase is the actual text returned from the +server, which may or may not correspond to the "standard" +description of status_code. At any rate, it is almost certainly +not localized, and not very descriptive even if it is in the user's +language; you should not use reason_phrase in user-visible +messages. Rather, you should look at status_code, and determine an +end-user-appropriate message based on that and on what you were +trying to do. +

+

+As described in the SoupMessageBody documentation, the +request_body and response_body data fields +will not necessarily be filled in at all times. When they are +filled in, they will be terminated with a '\0' byte (which is not +included in the length), so you can use them as +ordinary C strings (assuming that you know that the body doesn't +have any other '\0' bytes). +

+

+For a client-side SoupMessage, request_body's data is usually +filled in right before libsoup writes the request to the network, +but you should not count on this; use soup_message_body_flatten() +if you want to ensure that data is filled in. response_body's +data will be filled in before "finished" is emitted. +

+

+For a server-side SoupMessage, request_body's data will be +filled in before "got_body" is emitted. +

+

+To prevent the data field from being filled in at all (eg, if you +are handling the data from a "got_chunk", and so don't +need to see it all at the end), call +soup_message_body_set_accumulate() on response_body or +request_body as appropriate, passing FALSE. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

const char *method;

the HTTP method

guint status_code;

the HTTP status code

char *reason_phrase;

the status phrase associated with status_code +

SoupMessageBody *request_body;

the request body

SoupMessageHeaders *request_headers;

the request headers

SoupMessageBody *response_body;

the response body

SoupMessageHeaders *response_headers;

the response headers
+
+
+
+

soup_message_new ()

+
SoupMessage *       soup_message_new                    (const char *method,
+                                                         const char *uri_string);
+

+Creates a new empty SoupMessage, which will connect to uri +

+
++ + + + + + + + + + + + + + +

method :

the HTTP method for the created request

uri_string :

the destination endpoint (as a string)

Returns :

the new SoupMessage (or NULL if uri could not +be parsed).
+
+
+
+

soup_message_new_from_uri ()

+
SoupMessage *       soup_message_new_from_uri           (const char *method,
+                                                         SoupURI *uri);
+

+Creates a new empty SoupMessage, which will connect to uri +

+
++ + + + + + + + + + + + + + +

method :

the HTTP method for the created request

uri :

the destination endpoint (as a SoupURI)

Returns :

the new SoupMessage +
+
+
+
+

soup_message_set_request ()

+
void                soup_message_set_request            (SoupMessage *msg,
+                                                         const char *content_type,
+                                                         SoupMemoryUse req_use,
+                                                         const char *req_body,
+                                                         gsize req_length);
+

+Convenience function to set the request body of a SoupMessage. If +content_type is NULL, the request body must be empty as well. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

msg :

the message

content_type :

MIME Content-Type of the body

req_use :

a SoupMemoryUse describing how to handle req_body +

req_body :

a data buffer containing the body of the message request.

req_length :

the byte length of req_body.
+
+
+
+

soup_message_set_response ()

+
void                soup_message_set_response           (SoupMessage *msg,
+                                                         const char *content_type,
+                                                         SoupMemoryUse resp_use,
+                                                         const char *resp_body,
+                                                         gsize resp_length);
+

+Convenience function to set the response body of a SoupMessage. If +content_type is NULL, the response body must be empty as well. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

msg :

the message

content_type :

MIME Content-Type of the body. [allow-none] +

resp_use :

a SoupMemoryUse describing how to handle resp_body +

resp_body :

a data buffer +containing the body of the message response. [array length=resp_length][element-type guint8] +

resp_length :

the byte length of resp_body.
+
+
+
+

enum SoupHTTPVersion

+
typedef enum {
+	SOUP_HTTP_1_0 = 0, /*< nick=http-1-0 >*/
+	SOUP_HTTP_1_1 = 1  /*< nick=http-1-1 >*/
+} SoupHTTPVersion;
+
+

+Indicates the HTTP protocol version being used. +

+
++ + + + + + + + + + +

SOUP_HTTP_1_0

HTTP 1.0 (RFC 1945) +

SOUP_HTTP_1_1

HTTP 1.1 (RFC 2616) +
+
+
+
+

soup_message_set_http_version ()

+
void                soup_message_set_http_version       (SoupMessage *msg,
+                                                         SoupHTTPVersion version);
+

+Sets the HTTP version on msg. The default version is +SOUP_HTTP_1_1. Setting it to SOUP_HTTP_1_0 will prevent certain +functionality from being used. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

version :

the HTTP version
+
+
+
+

soup_message_get_http_version ()

+
SoupHTTPVersion     soup_message_get_http_version       (SoupMessage *msg);
+

+Gets the HTTP version of msg. This is the minimum of the +version from the request and the version from the response. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

Returns :

the HTTP version
+
+
+
+

soup_message_get_uri ()

+
SoupURI *           soup_message_get_uri                (SoupMessage *msg);
+

+Gets msg's URI +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

Returns :

the URI msg is targeted for. [transfer none] +
+
+
+
+

soup_message_set_uri ()

+
void                soup_message_set_uri                (SoupMessage *msg,
+                                                         SoupURI *uri);
+

+Sets msg's URI to uri. If msg has already been sent and you want +to re-send it with the new URI, you need to call +soup_session_requeue_message(). +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

uri :

the new SoupURI +
+
+
+
+

soup_message_get_address ()

+
SoupAddress *       soup_message_get_address            (SoupMessage *msg);
+

+Gets the address msg's URI points to. After first setting the +URI on a message, this will be unresolved, although the message's +session will resolve it before sending the message. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

Returns :

the address msg's URI points to. [transfer none] +
+

Since 2.26

+
+
+
+

soup_message_set_status ()

+
void                soup_message_set_status             (SoupMessage *msg,
+                                                         guint status_code);
+

+Sets msg's status code to status_code. If status_code is a +known value, it will also set msg's reason_phrase. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

status_code :

an HTTP status code
+
+
+
+

soup_message_set_status_full ()

+
void                soup_message_set_status_full        (SoupMessage *msg,
+                                                         guint status_code,
+                                                         const char *reason_phrase);
+

+Sets msg's status code and reason phrase. +

+
++ + + + + + + + + + + + + + +

msg :

a SoupMessage +

status_code :

an HTTP status code

reason_phrase :

a description of the status
+
+
+
+

soup_message_set_redirect ()

+
void                soup_message_set_redirect           (SoupMessage *msg,
+                                                         guint status_code,
+                                                         const char *redirect_uri);
+

+Sets msg's status_code to status_code and adds a Location header +pointing to redirect_uri. Use this from a SoupServer when you +want to redirect the client to another URI. +

+

+redirect_uri can be a relative URI, in which case it is +interpreted relative to msg's current URI. In particular, if +redirect_uri is just a path, it will replace the path +and query of msg's URI. +

+
++ + + + + + + + + + + + + + +

msg :

a SoupMessage +

status_code :

a 3xx status code

redirect_uri :

the URI to redirect msg to
+

Since 2.38

+
+
+
+

soup_message_is_keepalive ()

+
gboolean            soup_message_is_keepalive           (SoupMessage *msg);
+

+Determines whether or not msg's connection can be kept alive for +further requests after processing msg, based on the HTTP version, +Connection header, etc. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

Returns :

+TRUE or FALSE.
+
+
+
+

soup_message_get_https_status ()

+
gboolean            soup_message_get_https_status       (SoupMessage *msg,
+                                                         GTlsCertificate **certificate,
+                                                         GTlsCertificateFlags *errors);
+

+If msg is using https, this retrieves the GTlsCertificate +associated with its connection, and the GTlsCertificateFlags showing +what problems, if any, have been found with that certificate. +

+
++ + + + + + + + + + + + + + + + + + +

msg :

a SoupMessage +

certificate :

+msg's TLS certificate. [out][transfer none] +

errors :

the verification status of certificate. [out] +

Returns :

+TRUE if msg uses https, FALSE if not
+

Since 2.34

+
+
+
+

soup_message_set_first_party ()

+
void                soup_message_set_first_party        (SoupMessage *msg,
+                                                         SoupURI *first_party);
+

+Sets first_party as the main document SoupURI for msg. For +details of when and how this is used refer to the documentation for +SoupCookieJarAcceptPolicy. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

first_party :

the SoupURI for the msg's first party
+

Since 2.30

+
+
+
+

soup_message_get_first_party ()

+
SoupURI *           soup_message_get_first_party        (SoupMessage *msg);
+

+Gets msg's first-party SoupURI +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

Returns :

the msg's first party SoupURI. [transfer none] +
+

Since 2.30

+
+
+
+

soup_message_add_header_handler ()

+
guint               soup_message_add_header_handler     (SoupMessage *msg,
+                                                         const char *signal,
+                                                         const char *header,
+                                                         GCallback callback,
+                                                         gpointer user_data);
+

+Adds a signal handler to msg for signal, as with +g_signal_connect(), but with two differences: the callback will +only be run if msg has a header named header, and it will only be +run if no earlier handler cancelled or requeued the message. +

+

+If signal is one of the "got" signals (eg, "got_headers"), or +"finished" or "restarted", then header is matched against the +incoming message headers (that is, the request_headers for a +client SoupMessage, or the response_headers for a server +SoupMessage). If signal is one of the "wrote" signals, then +header is matched against the outgoing message headers. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

msg :

a SoupMessage +

signal :

signal to connect the handler to.

header :

HTTP response header to match against

callback :

the header handler

user_data :

data to pass to handler_cb +

Returns :

the handler ID from g_signal_connect() +
+
+
+
+

soup_message_add_status_code_handler ()

+
guint               soup_message_add_status_code_handler
+                                                        (SoupMessage *msg,
+                                                         const char *signal,
+                                                         guint status_code,
+                                                         GCallback callback,
+                                                         gpointer user_data);
+

+Adds a signal handler to msg for signal, as with +g_signal_connect() but with two differences: the callback will +only be run if msg has the status status_code, and it will only +be run if no earlier handler cancelled or requeued the message. +

+

+signal must be a signal that will be emitted after msg's status +is set. For a client SoupMessage, this means it can't be a "wrote" +signal. For a server SoupMessage, this means it can't be a "got" +signal. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

msg :

a SoupMessage +

signal :

signal to connect the handler to.

status_code :

status code to match against

callback :

the header handler

user_data :

data to pass to handler_cb +

Returns :

the handler ID from g_signal_connect() +
+
+
+
+

enum SoupMessageFlags

+
typedef enum {
+	SOUP_MESSAGE_NO_REDIRECT          = (1 << 1),
+	SOUP_MESSAGE_CAN_REBUILD          = (1 << 2),
+#ifndef LIBSOUP_DISABLE_DEPRECATED
+	SOUP_MESSAGE_OVERWRITE_CHUNKS     = (1 << 3),
+#endif
+	SOUP_MESSAGE_CONTENT_DECODED      = (1 << 4),
+	SOUP_MESSAGE_CERTIFICATE_TRUSTED  = (1 << 5),
+	SOUP_MESSAGE_NEW_CONNECTION       = (1 << 6)
+} SoupMessageFlags;
+
+

+Various flags that can be set on a SoupMessage to alter its +behavior. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

SOUP_MESSAGE_NO_REDIRECT

The session should not follow redirect + (3xx) responses received by this message. +

SOUP_MESSAGE_CAN_REBUILD

The caller will rebuild the request + body if the message is restarted; see + soup_message_body_set_accumulate() for more details. +

SOUP_MESSAGE_OVERWRITE_CHUNKS

Deprecated: equivalent to calling + soup_message_body_set_accumulate() on the incoming message body + (ie, "response_body" for a client-side request), + passing FALSE. +

SOUP_MESSAGE_CONTENT_DECODED

Set by SoupContentDecoder to + indicate that it has removed the Content-Encoding on a message (and + so headers such as Content-Length may no longer accurately describe + the body). +

SOUP_MESSAGE_CERTIFICATE_TRUSTED

if set after an https response + has been received, indicates that the server's SSL certificate is + trusted according to the session's CA. +

SOUP_MESSAGE_NEW_CONNECTION

The message should be sent on a + newly-created connection, not reusing an existing persistent + connection. Note that messages with non-idempotent + "method"s behave this way by default. +
+
+
+
+

soup_message_set_flags ()

+
void                soup_message_set_flags              (SoupMessage *msg,
+                                                         SoupMessageFlags flags);
+

+Sets the specified flags on msg. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

flags :

a set of SoupMessageFlags values
+
+
+
+

soup_message_get_flags ()

+
SoupMessageFlags    soup_message_get_flags              (SoupMessage *msg);
+

+Gets the flags on msg +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

Returns :

the flags
+
+
+
+

SoupChunkAllocator ()

+
SoupBuffer *        (*SoupChunkAllocator)               (SoupMessage *msg,
+                                                         gsize max_len,
+                                                         gpointer user_data);
+

+The prototype for a chunk allocation callback. This should allocate +a new SoupBuffer and return it for the I/O layer to read message +body data off the network into. +

+

+If max_len is non-0, it indicates the maximum number of bytes that +could be read, based on what is known about the message size. Note +that this might be a very large number, and you should not simply +try to allocate that many bytes blindly. If max_len is 0, that +means that libsoup does not know how many bytes remain to be read, +and the allocator should return a buffer of a size that it finds +convenient. +

+

+If the allocator returns NULL, the message will be paused. It is +up to the application to make sure that it gets unpaused when it +becomes possible to allocate a new buffer. +

+
++ + + + + + + + + + + + + + + + + + +

msg :

the SoupMessage the chunk is being allocated for

max_len :

the maximum length that will be read, or 0.

user_data :

the data passed to soup_message_set_chunk_allocator() +

Returns :

the new buffer (or NULL)
+
+
+
+

soup_message_set_chunk_allocator ()

+
void                soup_message_set_chunk_allocator    (SoupMessage *msg,
+                                                         SoupChunkAllocator allocator,
+                                                         gpointer user_data,
+                                                         GDestroyNotify destroy_notify);
+

+Sets an alternate chunk-allocation function to use when reading +msg's body. Every time data is available to read, libsoup will +call allocator, which should return a SoupBuffer. (See +SoupChunkAllocator for additional details.) Libsoup will then read +data from the network into that buffer, and update the buffer's +length to indicate how much data it read. +

+

+Generally, a custom chunk allocator would be used in conjunction +with soup_message_body_set_accumulate() FALSE and +"got_chunk", as part of a strategy to avoid unnecessary +copying of data. However, you cannot assume that every call to the +allocator will be followed by a call to your +"got_chunk" handler; if an I/O error occurs, then the +buffer will be unreffed without ever having been used. If your +buffer-allocation strategy requires special cleanup, use +soup_buffer_new_with_owner() rather than doing the cleanup from the +"got_chunk" handler. +

+

+The other thing to remember when using non-accumulating message +bodies is that the buffer passed to the "got_chunk" +handler will be unreffed after the handler returns, just as it +would be in the non-custom-allocated case. If you want to hand the +chunk data off to some other part of your program to use later, +you'll need to ref the SoupBuffer (or its owner, in the +soup_buffer_new_with_owner() case) to ensure that the data remains +valid. +

+
++ + + + + + + + + + + + + + + + + + +

msg :

a SoupMessage +

allocator :

the chunk allocator callback

user_data :

data to pass to allocator +

destroy_notify :

destroy notifier to free user_data when msg is +destroyed
+
+
+
+

soup_message_disable_feature ()

+
void                soup_message_disable_feature        (SoupMessage *msg,
+                                                         GType feature_type);
+

+This disables the actions of SoupSessionFeatures with the +given feature_type (or a subclass of that type) on msg, so that +msg is processed as though the feature(s) hadn't been added to the +session. Eg, passing SOUP_TYPE_PROXY_URI_RESOLVER for feature_type +will disable proxy handling and cause msg to be sent directly to +the indicated origin server, regardless of system proxy +configuration. +

+

+You must call this before queueing msg on a session; calling it on +a message that has already been queued is undefined. In particular, +you cannot call this on a message that is being requeued after a +redirect or authentication. +

+
++ + + + + + + + + + +

msg :

a SoupMessage +

feature_type :

the GType of a SoupSessionFeature +
+

Since 2.28

+
+
+
+

SOUP_MESSAGE_METHOD

+
#define SOUP_MESSAGE_METHOD           "method"
+
+

+Alias for the "method" property. (The message's +HTTP method.) +

+
+
+
+

SOUP_MESSAGE_URI

+
#define SOUP_MESSAGE_URI              "uri"
+
+

+Alias for the "uri" property. (The message's +SoupURI.) +

+
+
+
+

SOUP_MESSAGE_HTTP_VERSION

+
#define SOUP_MESSAGE_HTTP_VERSION     "http-version"
+
+

+Alias for the "http-version" property. (The +message's SoupHTTPVersion.) +

+
+
+
+

SOUP_MESSAGE_FLAGS

+
#define SOUP_MESSAGE_FLAGS            "flags"
+
+

+Alias for the "flags" property. (The message's +SoupMessageFlags.) +

+
+
+
+

SOUP_MESSAGE_STATUS_CODE

+
#define SOUP_MESSAGE_STATUS_CODE      "status-code"
+
+

+Alias for the "status-code" property. (The +message's HTTP response status code.) +

+
+
+
+

SOUP_MESSAGE_REASON_PHRASE

+
#define SOUP_MESSAGE_REASON_PHRASE    "reason-phrase"
+
+

+Alias for the "reason-phrase" property. (The +message's HTTP response reason phrase.) +

+
+
+
+

SOUP_MESSAGE_SERVER_SIDE

+
#define SOUP_MESSAGE_SERVER_SIDE      "server-side"
+
+

+Alias for the "server-side" property. (TRUE if +the message was created by SoupServer.) +

+
+
+
+

SOUP_MESSAGE_FIRST_PARTY

+
#define SOUP_MESSAGE_FIRST_PARTY      "first-party"
+
+

+Alias for the "first-party" property. (The +SoupURI loaded in the application when the message was +queued.) +

+

Since 2.30

+
+
+
+

SOUP_MESSAGE_REQUEST_BODY

+
#define SOUP_MESSAGE_REQUEST_BODY     "request-body"
+
+

+Alias for the "request-body" property. (The +message's HTTP request body.) +

+
+
+
+

SOUP_MESSAGE_REQUEST_HEADERS

+
#define SOUP_MESSAGE_REQUEST_HEADERS  "request-headers"
+
+

+Alias for the "request-headers" property. (The +message's HTTP request headers.) +

+
+
+
+

SOUP_MESSAGE_RESPONSE_BODY

+
#define SOUP_MESSAGE_RESPONSE_BODY    "response-body"
+
+

+Alias for the "response-body" property. (The +message's HTTP response body.) +

+
+
+
+

SOUP_MESSAGE_RESPONSE_HEADERS

+
#define SOUP_MESSAGE_RESPONSE_HEADERS "response-headers"
+
+

+Alias for the "response-headers" property. (The +message's HTTP response headers.) +

+
+
+
+

SOUP_MESSAGE_TLS_CERTIFICATE

+
#define SOUP_MESSAGE_TLS_CERTIFICATE  "tls-certificate"
+
+

+Alias for the "tls-certificate" property. (The +TLS certificate associated with the message, if any.) +

+

Since 2.34

+
+
+
+

SOUP_MESSAGE_TLS_ERRORS

+
#define SOUP_MESSAGE_TLS_ERRORS       "tls-errors"
+
+

+Alias for the "tls-errors" property. (The +verification errors on "tls-certificate".) +

+

Since 2.34

+
+
+
+

Property Details

+
+

The "first-party" property

+
  "first-party"              SoupURI*              : Read / Write
+

+The SoupURI loaded in the application when the message was +queued. +

+

Since 2.30

+
+
+
+

The "flags" property

+
  "flags"                    SoupMessageFlags      : Read / Write
+

Various message options.

+
+
+
+

The "http-version" property

+
  "http-version"             SoupHTTPVersion       : Read / Write
+

The HTTP protocol version to use.

+

Default value: SOUP_HTTP_1_1

+
+
+
+

The "method" property

+
  "method"                   gchar*                : Read / Write
+

The message's HTTP method.

+

Default value: "GET"

+
+
+
+

The "reason-phrase" property

+
  "reason-phrase"            gchar*                : Read / Write
+

The HTTP response reason phrase.

+

Default value: NULL

+
+
+
+

The "request-body" property

+
  "request-body"             SoupMessageBody*      : Read
+

The HTTP request content.

+
+
+
+

The "request-headers" property

+
  "request-headers"          SoupMessageHeaders*   : Read
+

The HTTP request headers.

+
+
+
+

The "response-body" property

+
  "response-body"            SoupMessageBody*      : Read
+

The HTTP response content.

+
+
+
+

The "response-headers" property

+
  "response-headers"         SoupMessageHeaders*   : Read
+

The HTTP response headers.

+
+
+
+

The "server-side" property

+
  "server-side"              gboolean              : Read / Write / Construct Only
+

Whether or not the message is server-side rather than client-side.

+

Default value: FALSE

+
+
+
+

The "status-code" property

+
  "status-code"              guint                 : Read / Write
+

The HTTP response status code.

+

Allowed values: <= 599

+

Default value: 0

+
+
+
+

The "tls-certificate" property

+
  "tls-certificate"          GTlsCertificate*      : Read / Write
+

+The TLS certificate associated with the message, if any. +

+

Since 2.34

+
+
+
+

The "tls-errors" property

+
  "tls-errors"               GTlsCertificateFlags  : Read / Write
+

The verification errors on the message's TLS certificate.

+
+
+
+

The "uri" property

+
  "uri"                      SoupURI*              : Read / Write
+

The message's Request-URI.

+
+
+
+

Signal Details

+
+

The "content-sniffed" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gchar       *type,
+                                                        GHashTable  *params,
+                                                        gpointer     user_data)      : Run First
+

+This signal is emitted after "got-headers", and +before the first "got-chunk". If content +sniffing is disabled, or no content sniffing will be +performed, due to the sniffer deciding to trust the +Content-Type sent by the server, this signal is emitted +immediately after "got-headers", and type is +NULL. +

+

+If the SoupContentSniffer feature is enabled, and the +sniffer decided to perform sniffing, the first +"got-chunk" emission may be delayed, so that the +sniffer has enough data to correctly sniff the content. This signal +notifies the library user that the content has been +sniffed, and allows it to change the header contents in the +message, if desired. +

+

+After this signal is emitted, the data that was spooled so +that sniffing could be done is delivered on the first +emission of "got-chunk". +

+
++ + + + + + + + + + + + + + + + + + +

msg :

the message

type :

the content type that we got from sniffing

params :

a GHashTable with the parameters. [element-type utf8 utf8] +

user_data :

user data set when the signal handler was connected.
+

Since 2.27.3

+
+
+
+

The "finished" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted when all HTTP processing is finished for a message. +(After "got_body" for client-side messages, or +after "wrote_body" for server-side messages.) +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "got-body" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted after receiving the complete message body. (For a +server-side message, this means it has received the request +body. For a client-side message, this means it has received +the response body and is nearly done with the message.) +

+

+See also soup_message_add_header_handler() and +soup_message_add_status_code_handler(), which can be used +to connect to a subset of emissions of this signal. +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "got-chunk" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        SoupBuffer  *chunk,
+                                                        gpointer     user_data)      : Run First
+

+Emitted after receiving a chunk of a message body. Note +that "chunk" in this context means any subpiece of the +body, not necessarily the specific HTTP 1.1 chunks sent by +the other side. +

+

+If you cancel or requeue msg while processing this signal, +then the current HTTP I/O will be stopped after this signal +emission finished, and msg's connection will be closed. +

+
++ + + + + + + + + + + + + + +

msg :

the message

chunk :

the just-read chunk

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "got-headers" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted after receiving all message headers for a message. +(For a client-side message, this is after receiving the +Status-Line and response headers; for a server-side +message, it is after receiving the Request-Line and request +headers.) +

+

+See also soup_message_add_header_handler() and +soup_message_add_status_code_handler(), which can be used +to connect to a subset of emissions of this signal. +

+

+If you cancel or requeue msg while processing this signal, +then the current HTTP I/O will be stopped after this signal +emission finished, and msg's connection will be closed. +(If you need to requeue a message--eg, after handling +authentication or redirection--it is usually better to +requeue it from a "got_body" handler rather +than a "got_headers" handler, so that the +existing HTTP connection can be reused.) +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "got-informational" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted after receiving a 1xx (Informational) response for +a (client-side) message. The response_headers will be +filled in with the headers associated with the +informational response; however, those header values will +be erased after this signal is done. +

+

+If you cancel or requeue msg while processing this signal, +then the current HTTP I/O will be stopped after this signal +emission finished, and msg's connection will be closed. +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "network-event" signal

+
void                user_function                      (SoupMessage       *msg,
+                                                        GSocketClientEvent event,
+                                                        GIOStream         *connection,
+                                                        gpointer           user_data)       : Run First
+

+Emitted to indicate that some network-related event +related to msg has occurred. This essentially proxies the +"event" signal, but only for events that +occur while msg "owns" the connection; if msg is sent on +an existing persistent connection, then this signal will +not be emitted. (If you want to force the message to be +sent on a new connection, set the +SOUP_MESSAGE_NEW_CONNECTION flag on it.) +

+

+See "event" for more information on what +the different values of event correspond to, and what +connection will be in each case. +

+
++ + + + + + + + + + + + + + + + + + +

msg :

the message

event :

the network event

connection :

the current state of the network connection

user_data :

user data set when the signal handler was connected.
+

Since 2.38

+
+
+
+

The "restarted" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted when a request that was already sent once is now +being sent again (eg, because the first attempt received a +redirection response, or because we needed to use +authentication). +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "wrote-body" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted immediately after writing the complete body for a +message. (For a client-side message, this means that +libsoup is done writing and is now waiting for the response +from the server. For a server-side message, this means that +libsoup has finished writing the response and is nearly +done with the message.) +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "wrote-body-data" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        SoupBuffer  *chunk,
+                                                        gpointer     user_data)      : Run First
+

+Emitted immediately after writing a portion of the message +body to the network. +

+

+Unlike "wrote_chunk", this is emitted after +every successful write() call, not only after finishing a +complete "chunk". +

+
++ + + + + + + + + + + + + + +

msg :

the message

chunk :

the data written

user_data :

user data set when the signal handler was connected.
+

Since 2.4.1

+
+
+
+

The "wrote-chunk" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted immediately after writing a body chunk for a message. +

+

+Note that this signal is not parallel to +"got_chunk"; it is emitted only when a complete +chunk (added with soup_message_body_append() or +soup_message_body_append_buffer()) has been written. To get +more useful continuous progress information, use +"wrote_body_data". +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "wrote-headers" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted immediately after writing the headers for a +message. (For a client-side message, this is after writing +the request headers; for a server-side message, it is after +writing the response headers.) +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "wrote-informational" signal

+
void                user_function                      (SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted immediately after writing a 1xx (Informational) +response for a (server-side) message. +

+
++ + + + + + + + + + +

msg :

the message

user_data :

user data set when the signal handler was connected.
+
+
+ +
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupMessageBody.html b/docs/reference/html/SoupMessageBody.html new file mode 100644 index 0000000..e6874d6 --- /dev/null +++ b/docs/reference/html/SoupMessageBody.html @@ -0,0 +1,912 @@ + + + + +SoupMessageBody + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupMessageBody

+

SoupMessageBody — HTTP message body

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupBuffer;
+enum                SoupMemoryUse;
+SoupBuffer *        soup_buffer_new                     (SoupMemoryUse use,
+                                                         gconstpointer data,
+                                                         gsize length);
+SoupBuffer *        soup_buffer_new_subbuffer           (SoupBuffer *parent,
+                                                         gsize offset,
+                                                         gsize length);
+SoupBuffer *        soup_buffer_new_with_owner          (gconstpointer data,
+                                                         gsize length,
+                                                         gpointer owner,
+                                                         GDestroyNotify owner_dnotify);
+SoupBuffer *        soup_buffer_new_take                (guchar *data,
+                                                         gsize length);
+gpointer            soup_buffer_get_owner               (SoupBuffer *buffer);
+void                soup_buffer_get_data                (SoupBuffer *buffer,
+                                                         const guint8 **data,
+                                                         gsize *length);
+SoupBuffer *        soup_buffer_copy                    (SoupBuffer *buffer);
+void                soup_buffer_free                    (SoupBuffer *buffer);
+
+                    SoupMessageBody;
+SoupMessageBody *   soup_message_body_new               (void);
+void                soup_message_body_free              (SoupMessageBody *body);
+
+void                soup_message_body_set_accumulate    (SoupMessageBody *body,
+                                                         gboolean accumulate);
+gboolean            soup_message_body_get_accumulate    (SoupMessageBody *body);
+
+void                soup_message_body_append            (SoupMessageBody *body,
+                                                         SoupMemoryUse use,
+                                                         gconstpointer data,
+                                                         gsize length);
+void                soup_message_body_append_buffer     (SoupMessageBody *body,
+                                                         SoupBuffer *buffer);
+void                soup_message_body_append_take       (SoupMessageBody *body,
+                                                         guchar *data,
+                                                         gsize length);
+void                soup_message_body_truncate          (SoupMessageBody *body);
+void                soup_message_body_complete          (SoupMessageBody *body);
+SoupBuffer *        soup_message_body_flatten           (SoupMessageBody *body);
+SoupBuffer *        soup_message_body_get_chunk         (SoupMessageBody *body,
+                                                         goffset offset);
+
+void                soup_message_body_got_chunk         (SoupMessageBody *body,
+                                                         SoupBuffer *chunk);
+void                soup_message_body_wrote_chunk       (SoupMessageBody *body,
+                                                         SoupBuffer *chunk);
+
+
+
+

Object Hierarchy

+
+  GBoxed
+   +----SoupBuffer
+
+
+  GBoxed
+   +----SoupMessageBody
+
+
+
+

Description

+

+SoupMessageBody represents the request or response body of a +SoupMessage. +

+

+In addition to SoupMessageBody, libsoup also defines a "smaller" +data buffer type, SoupBuffer, which is primarily used as a +component of SoupMessageBody. In particular, when using chunked +encoding to transmit or receive a message, each chunk is +represented as a SoupBuffer. +

+
+
+

Details

+
+

SoupBuffer

+
typedef struct {
+	const char *data;
+	gsize       length;
+} SoupBuffer;
+
+

+A data buffer, generally used to represent a chunk of a +SoupMessageBody. +

+

+data is a char because that's generally convenient; in some +situations you may need to cast it to guchar or another type. +

+
++ + + + + + + + + + +

const char *data;

the data. [type gpointer] +

gsize length;

length of data +
+
+
+
+

enum SoupMemoryUse

+
typedef enum {
+	SOUP_MEMORY_STATIC,
+	SOUP_MEMORY_TAKE,
+	SOUP_MEMORY_COPY,
+	SOUP_MEMORY_TEMPORARY
+} SoupMemoryUse;
+
+

+Describes how SoupBuffer should use the data passed in by the +caller. +

+

+See also soup_buffer_new_with_owner(), which allows to you create a +buffer containing data which is owned by another object. +

+
++ + + + + + + + + + + + + + + + + + +

SOUP_MEMORY_STATIC

The memory is statically allocated and +constant; libsoup can use the passed-in buffer directly and not +need to worry about it being modified or freed. +

SOUP_MEMORY_TAKE

The caller has allocated the memory for the +SoupBuffer's use; libsoup will assume ownership of it and free it +(with g_free()) when it is done with it. +

SOUP_MEMORY_COPY

The passed-in data belongs to the caller; the +SoupBuffer will copy it into new memory, leaving the caller free +to reuse the original memory. +

SOUP_MEMORY_TEMPORARY

The passed-in data belongs to the caller, +but will remain valid for the lifetime of the SoupBuffer. The +difference between this and SOUP_MEMORY_STATIC is that if you copy +a SOUP_MEMORY_TEMPORARY buffer, it will make a copy of the memory +as well, rather than reusing the original memory. +
+
+
+
+

soup_buffer_new ()

+
SoupBuffer *        soup_buffer_new                     (SoupMemoryUse use,
+                                                         gconstpointer data,
+                                                         gsize length);
+

+Creates a new SoupBuffer containing length bytes from data. +

+
++ + + + + + + + + + + + + + + + + + +

use :

how data is to be used by the buffer

data :

data

length :

length of data +

Returns :

the new SoupBuffer.
+
+
+
+

soup_buffer_new_subbuffer ()

+
SoupBuffer *        soup_buffer_new_subbuffer           (SoupBuffer *parent,
+                                                         gsize offset,
+                                                         gsize length);
+

+Creates a new SoupBuffer containing length bytes "copied" from +parent starting at offset. (Normally this will not actually copy +any data, but will instead simply reference the same data as +parent does.) +

+
++ + + + + + + + + + + + + + + + + + +

parent :

the parent SoupBuffer +

offset :

offset within parent to start at

length :

number of bytes to copy from parent +

Returns :

the new SoupBuffer.
+
+
+
+

soup_buffer_new_with_owner ()

+
SoupBuffer *        soup_buffer_new_with_owner          (gconstpointer data,
+                                                         gsize length,
+                                                         gpointer owner,
+                                                         GDestroyNotify owner_dnotify);
+

+Creates a new SoupBuffer containing length bytes from data. When +the SoupBuffer is freed, it will call owner_dnotify, passing +owner to it. You must ensure that data will remain valid until +owner_dnotify is called. +

+

+For example, you could use this to create a buffer containing data +returned from libxml without needing to do an extra copy: +

+

+

+
+ + + + + + + +
1
+2
+3
xmlDocDumpMemory (doc, &xmlbody, &len);
+return soup_buffer_new_with_owner (xmlbody, len, xmlbody,
+                                   (GDestroyNotify)xmlFree);
+
+ +

+

+

+In this example, data and owner are the same, but in other cases +they would be different (eg, owner would be a object, and data +would be a pointer to one of the object's fields). +

+
++ + + + + + + + + + + + + + + + + + + + + + +

data :

data

length :

length of data +

owner :

pointer to an object that owns data +

owner_dnotify :

a function to free/unref owner when +the buffer is freed. [allow-none] +

Returns :

the new SoupBuffer.
+
+
+
+

soup_buffer_new_take ()

+
SoupBuffer *        soup_buffer_new_take                (guchar *data,
+                                                         gsize length);
+

+Creates a new SoupBuffer containing length bytes from data. +

+

+This function is exactly equivalent to soup_buffer_new() with +SOUP_MEMORY_TAKE as first argument; it exists mainly for +convenience and simplifying language bindings. +

+
++ + + + + + + + + + + + + + +

data :

data. [array length=length][transfer full] +

length :

length of data +

Returns :

the new SoupBuffer. +Rename to: soup_buffer_new
+

Since 2.32

+
+
+
+

soup_buffer_get_owner ()

+
gpointer            soup_buffer_get_owner               (SoupBuffer *buffer);
+

+Gets the "owner" object for a buffer created with +soup_buffer_new_with_owner(). +

+
++ + + + + + + + + + +

buffer :

a SoupBuffer created with soup_buffer_new_with_owner() +

Returns :

the owner pointer. [transfer none] +
+
+
+
+

soup_buffer_get_data ()

+
void                soup_buffer_get_data                (SoupBuffer *buffer,
+                                                         const guint8 **data,
+                                                         gsize *length);
+

+This function exists for use by language bindings, because it's not +currently possible to get the right effect by annotating the fields +of SoupBuffer. +

+
++ + + + + + + + + + + + + + +

buffer :

a SoupBuffer +

data :

the pointer +to the buffer data is stored here. [out][array length=length][transfer none] +

length :

the length of the buffer data is stored here. [out] +
+

Since 2.32

+
+
+
+

soup_buffer_copy ()

+
SoupBuffer *        soup_buffer_copy                    (SoupBuffer *buffer);
+

+Makes a copy of buffer. In reality, SoupBuffer is a refcounted +type, and calling soup_buffer_copy() will normally just increment +the refcount on buffer and return it. However, if buffer was +created with SOUP_MEMORY_TEMPORARY memory, then soup_buffer_copy() +will actually return a copy of it, so that the data in the copy +will remain valid after the temporary buffer is freed. +

+
++ + + + + + + + + + +

buffer :

a SoupBuffer +

Returns :

the new (or newly-reffed) buffer
+
+
+
+

soup_buffer_free ()

+
void                soup_buffer_free                    (SoupBuffer *buffer);
+

+Frees buffer. (In reality, as described in the documentation for +soup_buffer_copy(), this is actually an "unref" operation, and may +or may not actually free buffer.) +

+
++ + + + +

buffer :

a SoupBuffer +
+
+
+
+

SoupMessageBody

+
typedef struct {
+	const char *data;
+	goffset     length;
+} SoupMessageBody;
+
+

+A SoupMessage request or response body. +

+

+Note that while length always reflects the full length of the +message body, data is normally NULL, and will only be filled in +after soup_message_body_flatten() is called. For client-side +messages, this automatically happens for the response body after it +has been fully read, unless you set the +SOUP_MESSAGE_OVERWRITE_CHUNKS flags. Likewise, for server-side +messages, the request body is automatically filled in after being +read. +

+

+As an added bonus, when data is filled in, it is always terminated +with a '\0' byte (which is not reflected in length). +

+
++ + + + + + + + + + +

const char *data;

the data

goffset length;

length of data +
+
+
+
+

soup_message_body_new ()

+
SoupMessageBody *   soup_message_body_new               (void);
+

+Creates a new SoupMessageBody. SoupMessage uses this internally; you +will not normally need to call it yourself. +

+
++ + + + +

Returns :

a new SoupMessageBody.
+
+
+
+

soup_message_body_free ()

+
void                soup_message_body_free              (SoupMessageBody *body);
+

+Frees body. You will not normally need to use this, as +SoupMessage frees its associated message bodies automatically. +

+
++ + + + +

body :

a SoupMessageBody +
+
+
+
+

soup_message_body_set_accumulate ()

+
void                soup_message_body_set_accumulate    (SoupMessageBody *body,
+                                                         gboolean accumulate);
+

+Sets or clears the accumulate flag on body. (The default value is +TRUE.) If set to FALSE, body's data field will not be filled in +after the body is fully sent/received, and the chunks that make up +body may be discarded when they are no longer needed. +

+

+In particular, if you set this flag to FALSE on an "incoming" +message body (that is, the "response_body" of a +client-side message, or "request_body" of a server-side +message), this will cause each chunk of the body to be discarded +after its corresponding "got_chunk" signal is emitted. +(This is equivalent to setting the deprecated +SOUP_MESSAGE_OVERWRITE_CHUNKS flag on the message.) +

+

+If you set this flag to FALSE on the "response_body" of +a server-side message, it will cause each chunk of the body to be +discarded after its corresponding "wrote_chunk" signal +is emitted. +

+

+If you set the flag to FALSE on the "request_body" of a +client-side message, it will block the accumulation of chunks into +body's data field, but it will not normally cause the chunks to +be discarded after being written like in the server-side +"response_body" case, because the request body needs to +be kept around in case the request needs to be sent a second time +due to redirection or authentication. However, if you set the +SOUP_MESSAGE_CAN_REBUILD flag on the message, then the chunks will +be discarded, and you will be responsible for recreating the +request body after the "restarted" signal is emitted. +

+
++ + + + + + + + + + +

body :

a SoupMessageBody +

accumulate :

whether or not to accumulate body chunks in body +
+

Since 2.4.1

+
+
+
+

soup_message_body_get_accumulate ()

+
gboolean            soup_message_body_get_accumulate    (SoupMessageBody *body);
+

+Gets the accumulate flag on body; see +soup_message_body_set_accumulate() for details. +

+
++ + + + + + + + + + +

body :

a SoupMessageBody +

Returns :

the accumulate flag for body.
+

Since 2.4.1

+
+
+
+

soup_message_body_append ()

+
void                soup_message_body_append            (SoupMessageBody *body,
+                                                         SoupMemoryUse use,
+                                                         gconstpointer data,
+                                                         gsize length);
+

+Appends length bytes from data to body according to use. +

+
++ + + + + + + + + + + + + + + + + + +

body :

a SoupMessageBody +

use :

how to use data +

data :

data to append. [array length=length][element-type guint8] +

length :

length of data +
+
+
+
+

soup_message_body_append_buffer ()

+
void                soup_message_body_append_buffer     (SoupMessageBody *body,
+                                                         SoupBuffer *buffer);
+

+Appends the data from buffer to body. (SoupMessageBody uses +SoupBuffers internally, so this is normally a constant-time +operation that doesn't actually require copying the data in +buffer.) +

+
++ + + + + + + + + + +

body :

a SoupMessageBody +

buffer :

a SoupBuffer +
+
+
+
+

soup_message_body_append_take ()

+
void                soup_message_body_append_take       (SoupMessageBody *body,
+                                                         guchar *data,
+                                                         gsize length);
+

+Appends length bytes from data to body. +

+

+This function is exactly equivalent to soup_message_body_append() +with SOUP_MEMORY_TAKE as second argument; it exists mainly for +convenience and simplifying language bindings. +

+

+Rename to: soup_message_body_append +

+
++ + + + + + + + + + + + + + +

body :

a SoupMessageBody +

data :

data to append. [array length=length][transfer full] +

length :

length of data +
+

Since 2.32

+
+
+
+

soup_message_body_truncate ()

+
void                soup_message_body_truncate          (SoupMessageBody *body);
+

+Deletes all of the data in body. +

+
++ + + + +

body :

a SoupMessageBody +
+
+
+
+

soup_message_body_complete ()

+
void                soup_message_body_complete          (SoupMessageBody *body);
+

+Tags body as being complete; Call this when using chunked encoding +after you have appended the last chunk. +

+
++ + + + +

body :

a SoupMessageBody +
+
+
+
+

soup_message_body_flatten ()

+
SoupBuffer *        soup_message_body_flatten           (SoupMessageBody *body);
+

+Fills in body's data field with a buffer containing all of the +data in body (plus an additional '\0' byte not counted by body's +length field). +

+
++ + + + + + + + + + +

body :

a SoupMessageBody +

Returns :

a SoupBuffer containing the same data as body. +(You must free this buffer if you do not want it.)
+
+
+
+

soup_message_body_get_chunk ()

+
SoupBuffer *        soup_message_body_get_chunk         (SoupMessageBody *body,
+                                                         goffset offset);
+

+Gets a SoupBuffer containing data from body starting at offset. +The size of the returned chunk is unspecified. You can iterate +through the entire body by first calling +soup_message_body_get_chunk() with an offset of 0, and then on each +successive call, increment the offset by the length of the +previously-returned chunk. +

+

+If offset is greater than or equal to the total length of body, +then the return value depends on whether or not +soup_message_body_complete() has been called or not; if it has, +then soup_message_body_get_chunk() will return a 0-length chunk +(indicating the end of body). If it has not, then +soup_message_body_get_chunk() will return NULL (indicating that +body may still potentially have more data, but that data is not +currently available). +

+
++ + + + + + + + + + + + + + +

body :

a SoupMessageBody +

offset :

an offset

Returns :

a SoupBuffer, or NULL.
+
+
+
+

soup_message_body_got_chunk ()

+
void                soup_message_body_got_chunk         (SoupMessageBody *body,
+                                                         SoupBuffer *chunk);
+

+Handles the SoupMessageBody part of receiving a chunk of data from +the network. Normally this means appending chunk to body, exactly +as with soup_message_body_append_buffer(), but if you have set +body's accumulate flag to FALSE, then that will not happen. +

+

+This is a low-level method which you should not normally need to +use. +

+
++ + + + + + + + + + +

body :

a SoupMessageBody +

chunk :

a SoupBuffer received from the network
+

Since 2.4.1

+
+
+
+

soup_message_body_wrote_chunk ()

+
void                soup_message_body_wrote_chunk       (SoupMessageBody *body,
+                                                         SoupBuffer *chunk);
+

+Handles the SoupMessageBody part of writing a chunk of data to the +network. Normally this is a no-op, but if you have set body's +accumulate flag to FALSE, then this will cause chunk to be +discarded to free up memory. +

+

+This is a low-level method which you should not need to use, and +there are further restrictions on its proper use which are not +documented here. +

+
++ + + + + + + + + + +

body :

a SoupMessageBody +

chunk :

a SoupBuffer returned from soup_message_body_get_chunk() +
+

Since 2.4.1

+
+
+
+

See Also

+SoupMessage +
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupMessageHeaders.html b/docs/reference/html/SoupMessageHeaders.html new file mode 100644 index 0000000..3e5b658 --- /dev/null +++ b/docs/reference/html/SoupMessageHeaders.html @@ -0,0 +1,1332 @@ + + + + +SoupMessageHeaders + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupMessageHeaders

+

SoupMessageHeaders — HTTP message headers

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+typedef             SoupMessageHeaders;
+enum                SoupMessageHeadersType;
+SoupMessageHeaders * soup_message_headers_new           (SoupMessageHeadersType type);
+void                soup_message_headers_free           (SoupMessageHeaders *hdrs);
+
+void                soup_message_headers_append         (SoupMessageHeaders *hdrs,
+                                                         const char *name,
+                                                         const char *value);
+void                soup_message_headers_replace        (SoupMessageHeaders *hdrs,
+                                                         const char *name,
+                                                         const char *value);
+void                soup_message_headers_remove         (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+void                soup_message_headers_clear          (SoupMessageHeaders *hdrs);
+void                soup_message_headers_clean_connection_headers
+                                                        (SoupMessageHeaders *hdrs);
+const char *        soup_message_headers_get_one        (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+const char *        soup_message_headers_get_list       (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+const char *        soup_message_headers_get            (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+
+void                (*SoupMessageHeadersForeachFunc)    (const char *name,
+                                                         const char *value,
+                                                         gpointer user_data);
+void                soup_message_headers_foreach        (SoupMessageHeaders *hdrs,
+                                                         SoupMessageHeadersForeachFunc func,
+                                                         gpointer user_data);
+
+                    SoupMessageHeadersIter;
+void                soup_message_headers_iter_init      (SoupMessageHeadersIter *iter,
+                                                         SoupMessageHeaders *hdrs);
+gboolean            soup_message_headers_iter_next      (SoupMessageHeadersIter *iter,
+                                                         const char **name,
+                                                         const char **value);
+
+enum                SoupEncoding;
+SoupEncoding        soup_message_headers_get_encoding   (SoupMessageHeaders *hdrs);
+void                soup_message_headers_set_encoding   (SoupMessageHeaders *hdrs,
+                                                         SoupEncoding encoding);
+goffset             soup_message_headers_get_content_length
+                                                        (SoupMessageHeaders *hdrs);
+void                soup_message_headers_set_content_length
+                                                        (SoupMessageHeaders *hdrs,
+                                                         goffset content_length);
+
+enum                SoupExpectation;
+SoupExpectation     soup_message_headers_get_expectations
+                                                        (SoupMessageHeaders *hdrs);
+void                soup_message_headers_set_expectations
+                                                        (SoupMessageHeaders *hdrs,
+                                                         SoupExpectation expectations);
+
+const char *        soup_message_headers_get_content_type
+                                                        (SoupMessageHeaders *hdrs,
+                                                         GHashTable **params);
+void                soup_message_headers_set_content_type
+                                                        (SoupMessageHeaders *hdrs,
+                                                         const char *content_type,
+                                                         GHashTable *params);
+
+gboolean            soup_message_headers_get_content_disposition
+                                                        (SoupMessageHeaders *hdrs,
+                                                         char **disposition,
+                                                         GHashTable **params);
+void                soup_message_headers_set_content_disposition
+                                                        (SoupMessageHeaders *hdrs,
+                                                         const char *disposition,
+                                                         GHashTable *params);
+
+                    SoupRange;
+gboolean            soup_message_headers_get_ranges     (SoupMessageHeaders *hdrs,
+                                                         goffset total_length,
+                                                         SoupRange **ranges,
+                                                         int *length);
+void                soup_message_headers_set_ranges     (SoupMessageHeaders *hdrs,
+                                                         SoupRange *ranges,
+                                                         int length);
+void                soup_message_headers_set_range      (SoupMessageHeaders *hdrs,
+                                                         goffset start,
+                                                         goffset end);
+void                soup_message_headers_free_ranges    (SoupMessageHeaders *hdrs,
+                                                         SoupRange *ranges);
+gboolean            soup_message_headers_get_content_range
+                                                        (SoupMessageHeaders *hdrs,
+                                                         goffset *start,
+                                                         goffset *end,
+                                                         goffset *total_length);
+void                soup_message_headers_set_content_range
+                                                        (SoupMessageHeaders *hdrs,
+                                                         goffset start,
+                                                         goffset end,
+                                                         goffset total_length);
+
+
+
+

Object Hierarchy

+
+  GBoxed
+   +----SoupMessageHeaders
+
+
+
+

Description

+

+SoupMessageHeaders represents the HTTP message headers associated +with a request or response. +

+
+
+

Details

+
+

SoupMessageHeaders

+
typedef struct SoupMessageHeaders SoupMessageHeaders;
+
+

+The HTTP message headers associated with a request or response. +

+
+
+
+

enum SoupMessageHeadersType

+
typedef enum {
+	SOUP_MESSAGE_HEADERS_REQUEST,
+	SOUP_MESSAGE_HEADERS_RESPONSE,
+	SOUP_MESSAGE_HEADERS_MULTIPART
+} SoupMessageHeadersType;
+
+

+Value passed to soup_message_headers_new() to set certain default +behaviors. +

+
++ + + + + + + + + + + + + + +

SOUP_MESSAGE_HEADERS_REQUEST

request headers +

SOUP_MESSAGE_HEADERS_RESPONSE

response headers +

SOUP_MESSAGE_HEADERS_MULTIPART

multipart body part headers +
+
+
+
+

soup_message_headers_new ()

+
SoupMessageHeaders * soup_message_headers_new           (SoupMessageHeadersType type);
+

+Creates a SoupMessageHeaders. (SoupMessage does this +automatically for its own headers. You would only need to use this +method if you are manually parsing or generating message headers.) +

+
++ + + + + + + + + + +

type :

the type of headers

Returns :

a new SoupMessageHeaders +
+
+
+
+

soup_message_headers_free ()

+
void                soup_message_headers_free           (SoupMessageHeaders *hdrs);
+

+Frees hdrs. +

+
++ + + + +

hdrs :

a SoupMessageHeaders +
+
+
+
+

soup_message_headers_append ()

+
void                soup_message_headers_append         (SoupMessageHeaders *hdrs,
+                                                         const char *name,
+                                                         const char *value);
+

+Appends a new header with name name and value value to hdrs. (If +there is an existing header with name name, then this creates a +second one, which is only allowed for list-valued headers; see also +soup_message_headers_replace().) +

+

+The caller is expected to make sure that name and value are +syntactically correct. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

name :

the header name to add

value :

the new value of name +
+
+
+
+

soup_message_headers_replace ()

+
void                soup_message_headers_replace        (SoupMessageHeaders *hdrs,
+                                                         const char *name,
+                                                         const char *value);
+

+Replaces the value of the header name in hdrs with value. (See +also soup_message_headers_append().) +

+

+The caller is expected to make sure that name and value are +syntactically correct. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

name :

the header name to replace

value :

the new value of name +
+
+
+
+

soup_message_headers_remove ()

+
void                soup_message_headers_remove         (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+

+Removes name from hdrs. If there are multiple values for name, +they are all removed. +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

name :

the header name to remove
+
+
+
+

soup_message_headers_clear ()

+
void                soup_message_headers_clear          (SoupMessageHeaders *hdrs);
+

+Clears hdrs. +

+
++ + + + +

hdrs :

a SoupMessageHeaders +
+
+
+
+

soup_message_headers_clean_connection_headers ()

+
void                soup_message_headers_clean_connection_headers
+                                                        (SoupMessageHeaders *hdrs);
+

+Removes all the headers listed in the Connection header. +

+
++ + + + +

hdrs :

a SoupMessageHeaders +
+

Since 2.36

+
+
+
+

soup_message_headers_get_one ()

+
const char *        soup_message_headers_get_one        (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+

+Gets the value of header name in hdrs. Use this for headers whose +values are not comma-delimited lists, and +which therefore can only appear at most once in the headers. For +list-valued headers, use soup_message_headers_get_list(). +

+

+If hdrs does erroneously contain multiple copies of the header, it +is not defined which one will be returned. (Ideally, it will return +whichever one makes libsoup most compatible with other HTTP +implementations.) +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

name :

header name

Returns :

the header's value or NULL if not found.
+

Since 2.26.1

+
+
+
+

soup_message_headers_get_list ()

+
const char *        soup_message_headers_get_list       (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+

+Gets the value of header name in hdrs. Use this for headers whose +values are comma-delimited lists, and which are therefore allowed +to appear multiple times in the headers. For non-list-valued +headers, use soup_message_headers_get_one(). +

+

+If name appears multiple times in hdrs, +soup_message_headers_get_list() will concatenate all of the values +together, separated by commas. This is sometimes awkward to parse +(eg, WWW-Authenticate, Set-Cookie), but you have to be able to deal +with it anyway, because the HTTP spec explicitly states that this +transformation is allowed, and so an upstream proxy could do the +same thing. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

name :

header name

Returns :

the header's value or NULL if not found.
+

Since 2.26.1

+
+
+
+

soup_message_headers_get ()

+
const char *        soup_message_headers_get            (SoupMessageHeaders *hdrs,
+                                                         const char *name);
+
+

Warning

+

soup_message_headers_get is deprecated and should not be used in newly-written code. Use soup_message_headers_get_one() or +soup_message_headers_get_list() instead.

+
+

+Gets the value of header name in hdrs. +

+

+This method was supposed to work correctly for both single-valued +and list-valued headers, but because some HTTP clients/servers +mistakenly send multiple copies of headers that are supposed to be +single-valued, it sometimes returns incorrect results. To fix this, +the methods soup_message_headers_get_one() and +soup_message_headers_get_list() were introduced, so callers can +explicitly state which behavior they are expecting. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

name :

header name

Returns :

as with soup_message_headers_get_list().
+
+
+
+

SoupMessageHeadersForeachFunc ()

+
void                (*SoupMessageHeadersForeachFunc)    (const char *name,
+                                                         const char *value,
+                                                         gpointer user_data);
+

+The callback passed to soup_message_headers_foreach(). +

+
++ + + + + + + + + + + + + + +

name :

the header name

value :

the header value

user_data :

the data passed to soup_message_headers_foreach() +
+
+
+
+

soup_message_headers_foreach ()

+
void                soup_message_headers_foreach        (SoupMessageHeaders *hdrs,
+                                                         SoupMessageHeadersForeachFunc func,
+                                                         gpointer user_data);
+

+Calls func once for each header value in hdrs. +

+

+Beware that unlike soup_message_headers_get(), this processes the +headers in exactly the way they were added, rather than +concatenating multiple same-named headers into a single value. +(This is intentional; it ensures that if you call +soup_message_headers_append() multiple times with the same name, +then the I/O code will output multiple copies of the header when +sending the message to the remote implementation, which may be +required for interoperability in some cases.) +

+

+You may not modify the headers from func. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

func :

callback function to run for each header. [scope call] +

user_data :

data to pass to func +
+
+
+
+

SoupMessageHeadersIter

+
typedef struct {
+} SoupMessageHeadersIter;
+
+

+An opaque type used to iterate over a SoupMessageHeaders +structure. +

+

+After initializing the iterator with +soup_message_headers_iter_init(), call +soup_message_headers_iter_next() to fetch data from it. +

+

+You may not modify the headers while iterating over them. +

+
+
+
+

soup_message_headers_iter_init ()

+
void                soup_message_headers_iter_init      (SoupMessageHeadersIter *iter,
+                                                         SoupMessageHeaders *hdrs);
+

+Initializes iter for iterating hdrs. +

+
++ + + + + + + + + + +

iter :

a pointer to a SoupMessageHeadersIter +structure. [out][transfer none] +

hdrs :

a SoupMessageHeaders +
+
+
+
+

soup_message_headers_iter_next ()

+
gboolean            soup_message_headers_iter_next      (SoupMessageHeadersIter *iter,
+                                                         const char **name,
+                                                         const char **value);
+

+Yields the next name/value pair in the SoupMessageHeaders being +iterated by iter. If iter has already yielded the last header, +then soup_message_headers_iter_next() will return FALSE and name +and value will be unchanged. +

+
++ + + + + + + + + + + + + + + + + + +

iter :

a SoupMessageHeadersIter. [inout][transfer none] +

name :

pointer to a variable to return +the header name in. [out][transfer none] +

value :

pointer to a variable to return +the header value in. [out][transfer none] +

Returns :

+TRUE if another name and value were returned, FALSE +if the end of the headers has been reached.
+
+
+
+

enum SoupEncoding

+
typedef enum {
+	SOUP_ENCODING_UNRECOGNIZED,
+	SOUP_ENCODING_NONE,
+	SOUP_ENCODING_CONTENT_LENGTH,
+	SOUP_ENCODING_EOF,
+	SOUP_ENCODING_CHUNKED,
+	SOUP_ENCODING_BYTERANGES
+} SoupEncoding;
+
+

+How a message body is encoded for transport +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

SOUP_ENCODING_UNRECOGNIZED

unknown / error +

SOUP_ENCODING_NONE

no body is present (which is not the same as a +0-length body, and only occurs in certain places) +

SOUP_ENCODING_CONTENT_LENGTH

Content-Length encoding +

SOUP_ENCODING_EOF

Response body ends when the connection is closed +

SOUP_ENCODING_CHUNKED

chunked encoding (currently only supported +for response) +

SOUP_ENCODING_BYTERANGES

multipart/byteranges (Reserved for future +use: NOT CURRENTLY IMPLEMENTED) +
+
+
+
+

soup_message_headers_get_encoding ()

+
SoupEncoding        soup_message_headers_get_encoding   (SoupMessageHeaders *hdrs);
+

+Gets the message body encoding that hdrs declare. This may not +always correspond to the encoding used on the wire; eg, a HEAD +response may declare a Content-Length or Transfer-Encoding, but +it will never actually include a body. +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

Returns :

the encoding declared by hdrs.
+
+
+
+

soup_message_headers_set_encoding ()

+
void                soup_message_headers_set_encoding   (SoupMessageHeaders *hdrs,
+                                                         SoupEncoding encoding);
+

+Sets the message body encoding that hdrs will declare. In particular, +you should use this if you are going to send a request or response in +chunked encoding. +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

encoding :

a SoupEncoding +
+
+
+
+

soup_message_headers_get_content_length ()

+
goffset             soup_message_headers_get_content_length
+                                                        (SoupMessageHeaders *hdrs);
+

+Gets the message body length that hdrs declare. This will only +be non-0 if soup_message_headers_get_encoding() returns +SOUP_ENCODING_CONTENT_LENGTH. +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

Returns :

the message body length declared by hdrs.
+
+
+
+

soup_message_headers_set_content_length ()

+
void                soup_message_headers_set_content_length
+                                                        (SoupMessageHeaders *hdrs,
+                                                         goffset content_length);
+

+Sets the message body length that hdrs will declare, and sets +hdrs's encoding to SOUP_ENCODING_CONTENT_LENGTH. +

+

+You do not normally need to call this; if hdrs is set to use +Content-Length encoding, libsoup will automatically set its +Content-Length header for you immediately before sending the +headers. One situation in which this method is useful is when +generating the response to a HEAD request; calling +soup_message_headers_set_content_length() allows you to put the +correct content length into the response without needing to waste +memory by filling in a response body which won't actually be sent. +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

content_length :

the message body length
+
+
+
+

enum SoupExpectation

+
typedef enum {
+	SOUP_EXPECTATION_UNRECOGNIZED = (1 << 0),
+	SOUP_EXPECTATION_CONTINUE     = (1 << 1)
+} SoupExpectation;
+
+

+Represents the parsed value of the "Expect" header. +

+
++ + + + + + + + + + +

SOUP_EXPECTATION_UNRECOGNIZED

any unrecognized expectation +

SOUP_EXPECTATION_CONTINUE

"100-continue" +
+
+
+
+

soup_message_headers_get_expectations ()

+
SoupExpectation     soup_message_headers_get_expectations
+                                                        (SoupMessageHeaders *hdrs);
+

+Gets the expectations declared by hdrs's "Expect" header. +Currently this will either be SOUP_EXPECTATION_CONTINUE or +SOUP_EXPECTATION_UNRECOGNIZED. +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

Returns :

the contents of hdrs's "Expect" header
+
+
+
+

soup_message_headers_set_expectations ()

+
void                soup_message_headers_set_expectations
+                                                        (SoupMessageHeaders *hdrs,
+                                                         SoupExpectation expectations);
+

+Sets hdrs's "Expect" header according to expectations. +

+

+Currently SOUP_EXPECTATION_CONTINUE is the only known expectation +value. You should set this value on a request if you are sending a +large message body (eg, via POST or PUT), and want to give the +server a chance to reject the request after seeing just the headers +(eg, because it will require authentication before allowing you to +post, or because you're POSTing to a URL that doesn't exist). This +saves you from having to transmit the large request body when the +server is just going to ignore it anyway. +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

expectations :

the expectations to set
+
+
+
+

soup_message_headers_get_content_type ()

+
const char *        soup_message_headers_get_content_type
+                                                        (SoupMessageHeaders *hdrs,
+                                                         GHashTable **params);
+

+Looks up the "Content-Type" header in hdrs, parses it, and returns +its value in *content_type and *params. params can be NULL if you +are only interested in the content type itself. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

params :

return location for +the Content-Type parameters (eg, "charset"), or NULL. [out][allow-none][transfer full] +

Returns :

a string with the value of the "Content-Type" header +or NULL if hdrs does not contain that header or it cannot be +parsed (in which case *params will be unchanged).
+

Since 2.26

+
+
+
+

soup_message_headers_set_content_type ()

+
void                soup_message_headers_set_content_type
+                                                        (SoupMessageHeaders *hdrs,
+                                                         const char *content_type,
+                                                         GHashTable *params);
+

+Sets the "Content-Type" header in hdrs to content_type, +optionally with additional parameters specified in params. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

content_type :

the MIME type

params :

additional +parameters, or NULL. [allow-none][element-type utf8 utf8] +
+

Since 2.26

+
+
+
+

soup_message_headers_get_content_disposition ()

+
gboolean            soup_message_headers_get_content_disposition
+                                                        (SoupMessageHeaders *hdrs,
+                                                         char **disposition,
+                                                         GHashTable **params);
+

+Looks up the "Content-Disposition" header in hdrs, parses it, and +returns its value in *disposition and *params. params can be +NULL if you are only interested in the disposition-type. +

+

+In HTTP, the most common use of this header is to set a +disposition-type of "attachment", to suggest to the browser that a +response should be saved to disk rather than displayed in the +browser. If params contains a "filename" parameter, this is a +suggestion of a filename to use. (If the parameter value in the +header contains an absolute or relative path, libsoup will truncate +it down to just the final path component, so you do not need to +test this yourself.) +

+

+Content-Disposition is also used in "multipart/form-data", however +this is handled automatically by SoupMultipart and the associated +form methods. +

+
++ + + + + + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

disposition :

return location for the +disposition-type, or NULL. [out][transfer full] +

params :

return +location for the Content-Disposition parameters, or NULL. [out][transfer full][element-type utf8 utf8] +

Returns :

+TRUE if hdrs contains a "Content-Disposition" +header, FALSE if not (in which case *disposition and *params +will be unchanged).
+

Since 2.26

+
+
+
+

soup_message_headers_set_content_disposition ()

+
void                soup_message_headers_set_content_disposition
+                                                        (SoupMessageHeaders *hdrs,
+                                                         const char *disposition,
+                                                         GHashTable *params);
+

+Sets the "Content-Disposition" header in hdrs to disposition, +optionally with additional parameters specified in params. +

+

+See soup_message_headers_get_content_disposition() for a discussion +of how Content-Disposition is used in HTTP. +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

disposition :

the disposition-type

params :

additional +parameters, or NULL. [allow-none][element-type utf8 utf8] +
+

Since 2.26

+
+
+
+

SoupRange

+
typedef struct {
+	goffset start;
+	goffset end;
+} SoupRange;
+
+

+Represents a byte range as used in the Range header. +

+

+If end is non-negative, then start and end represent the bounds +of the range, counting from 0. (Eg, the first 500 bytes would be +represented as start = 0 and end = 499.) +

+

+If end is -1 and start is non-negative, then this represents a +range starting at start and ending with the last byte of the +requested resource body. (Eg, all but the first 500 bytes would be +start = 500, and end = -1.) +

+

+If end is -1 and start is negative, then it represents a "suffix +range", referring to the last -start bytes of the resource body. +(Eg, the last 500 bytes would be start = -500 and end = -1.) +

+
++ + + + + + + + + + +

goffset start;

the start of the range

goffset end;

the end of the range
+

Since 2.26

+
+
+
+

soup_message_headers_get_ranges ()

+
gboolean            soup_message_headers_get_ranges     (SoupMessageHeaders *hdrs,
+                                                         goffset total_length,
+                                                         SoupRange **ranges,
+                                                         int *length);
+

+Parses hdrs's Range header and returns an array of the requested +byte ranges. The returned array must be freed with +soup_message_headers_free_ranges(). +

+

+If total_length is non-0, its value will be used to adjust the +returned ranges to have explicit start and end values, and the +returned ranges will be sorted and non-overlapping. If +total_length is 0, then some ranges may have an end value of -1, +as described under SoupRange, and some of the ranges may be +redundant. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

total_length :

the total_length of the response body

ranges :

return location for an array of SoupRange. [out] +

length :

the length of the returned array

Returns :

+TRUE if hdrs contained a "Range" header containing +byte ranges which could be parsed, FALSE otherwise (in which case +range and length will not be set).
+

Since 2.26

+
+
+
+

soup_message_headers_set_ranges ()

+
void                soup_message_headers_set_ranges     (SoupMessageHeaders *hdrs,
+                                                         SoupRange *ranges,
+                                                         int length);
+

+Sets hdrs's Range header to request the indicated ranges. (If you +only want to request a single range, you can use +soup_message_headers_set_range().) +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

ranges :

an array of SoupRange +

length :

the length of range +
+

Since 2.26

+
+
+
+

soup_message_headers_set_range ()

+
void                soup_message_headers_set_range      (SoupMessageHeaders *hdrs,
+                                                         goffset start,
+                                                         goffset end);
+

+Sets hdrs's Range header to request the indicated range. +start and end are interpreted as in a SoupRange. +

+

+If you need to request multiple ranges, use +soup_message_headers_set_ranges(). +

+
++ + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

start :

the start of the range to request

end :

the end of the range to request
+

Since 2.26

+
+
+
+

soup_message_headers_free_ranges ()

+
void                soup_message_headers_free_ranges    (SoupMessageHeaders *hdrs,
+                                                         SoupRange *ranges);
+

+Frees the array of ranges returned from soup_message_headers_get_ranges(). +

+
++ + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

ranges :

an array of SoupRange +
+

Since 2.26

+
+
+
+

soup_message_headers_get_content_range ()

+
gboolean            soup_message_headers_get_content_range
+                                                        (SoupMessageHeaders *hdrs,
+                                                         goffset *start,
+                                                         goffset *end,
+                                                         goffset *total_length);
+

+Parses hdrs's Content-Range header and returns it in start, +end, and total_length. If the total length field in the header +was specified as "*", then total_length will be set to -1. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

start :

return value for the start of the range

end :

return value for the end of the range

total_length :

return value for the total length of the resource, +or NULL if you don't care.

Returns :

+TRUE if hdrs contained a "Content-Range" header +containing a byte range which could be parsed, FALSE otherwise.
+

Since 2.26

+
+
+
+

soup_message_headers_set_content_range ()

+
void                soup_message_headers_set_content_range
+                                                        (SoupMessageHeaders *hdrs,
+                                                         goffset start,
+                                                         goffset end,
+                                                         goffset total_length);
+

+Sets hdrs's Content-Range header according to the given values. +(Note that total_length is the total length of the entire resource +that this is a range of, not simply end - start + 1.) +

+
++ + + + + + + + + + + + + + + + + + +

hdrs :

a SoupMessageHeaders +

start :

the start of the range

end :

the end of the range

total_length :

the total length of the resource, or -1 if unknown
+

Since 2.26

+
+
+
+

See Also

+SoupMessage +
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupMultipart.html b/docs/reference/html/SoupMultipart.html new file mode 100644 index 0000000..a2fb819 --- /dev/null +++ b/docs/reference/html/SoupMultipart.html @@ -0,0 +1,386 @@ + + + + +SoupMultipart + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupMultipart

+

SoupMultipart — multipart HTTP message bodies

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+typedef             SoupMultipart;
+SoupMultipart *     soup_multipart_new                  (const char *mime_type);
+SoupMultipart *     soup_multipart_new_from_message     (SoupMessageHeaders *headers,
+                                                         SoupMessageBody *body);
+void                soup_multipart_free                 (SoupMultipart *multipart);
+
+int                 soup_multipart_get_length           (SoupMultipart *multipart);
+gboolean            soup_multipart_get_part             (SoupMultipart *multipart,
+                                                         int part,
+                                                         SoupMessageHeaders **headers,
+                                                         SoupBuffer **body);
+void                soup_multipart_append_part          (SoupMultipart *multipart,
+                                                         SoupMessageHeaders *headers,
+                                                         SoupBuffer *body);
+void                soup_multipart_append_form_string   (SoupMultipart *multipart,
+                                                         const char *control_name,
+                                                         const char *data);
+void                soup_multipart_append_form_file     (SoupMultipart *multipart,
+                                                         const char *control_name,
+                                                         const char *filename,
+                                                         const char *content_type,
+                                                         SoupBuffer *body);
+void                soup_multipart_to_message           (SoupMultipart *multipart,
+                                                         SoupMessageHeaders *dest_headers,
+                                                         SoupMessageBody *dest_body);
+
+
+
+

Object Hierarchy

+
+  GBoxed
+   +----SoupMultipart
+
+
+
+

Description

+
+
+

Details

+
+

SoupMultipart

+
typedef struct SoupMultipart SoupMultipart;
+
+

+Represents a multipart HTTP message body, parsed according to the +syntax of RFC 2046. Of particular interest to HTTP are +multipart/byte-ranges and +multipart/form-data. +

+

+Although the headers of a SoupMultipart body part will contain the +full headers from that body part, libsoup does not interpret them +according to MIME rules. For example, each body part is assumed to +have "binary" Content-Transfer-Encoding, even if its headers +explicitly state otherwise. In other words, don't try to use +SoupMultipart for handling real MIME multiparts. +

+

Since 2.26

+
+
+
+

soup_multipart_new ()

+
SoupMultipart *     soup_multipart_new                  (const char *mime_type);
+

+Creates a new empty SoupMultipart with a randomly-generated +boundary string. Note that mime_type must be the full MIME type, +including "multipart/". +

+
++ + + + + + + + + + +

mime_type :

the MIME type of the multipart to create.

Returns :

a new empty SoupMultipart of the given mime_type +
+

Since 2.26

+
+
+
+

soup_multipart_new_from_message ()

+
SoupMultipart *     soup_multipart_new_from_message     (SoupMessageHeaders *headers,
+                                                         SoupMessageBody *body);
+

+Parses headers and body to form a new SoupMultipart +

+
++ + + + + + + + + + + + + + +

headers :

the headers of the HTTP message to parse

body :

the body of the HTTP message to parse

Returns :

a new SoupMultipart (or NULL if the message couldn't +be parsed or wasn't multipart).
+

Since 2.26

+
+
+
+

soup_multipart_free ()

+
void                soup_multipart_free                 (SoupMultipart *multipart);
+

+Frees multipart +

+
++ + + + +

multipart :

a SoupMultipart +
+

Since 2.26

+
+
+
+

soup_multipart_get_length ()

+
int                 soup_multipart_get_length           (SoupMultipart *multipart);
+

+Gets the number of body parts in multipart +

+
++ + + + + + + + + + +

multipart :

a SoupMultipart +

Returns :

the number of body parts in multipart +
+

Since 2.26

+
+
+
+

soup_multipart_get_part ()

+
gboolean            soup_multipart_get_part             (SoupMultipart *multipart,
+                                                         int part,
+                                                         SoupMessageHeaders **headers,
+                                                         SoupBuffer **body);
+

+Gets the indicated body part from multipart. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

multipart :

a SoupMultipart +

part :

the part number to get (counting from 0)

headers :

return location for the MIME part +headers. [out][transfer none] +

body :

return location for the MIME part +body. [out][transfer none] +

Returns :

+TRUE on success, FALSE if part is out of range (in +which case headers and body won't be set)
+

Since 2.26

+
+
+
+

soup_multipart_append_part ()

+
void                soup_multipart_append_part          (SoupMultipart *multipart,
+                                                         SoupMessageHeaders *headers,
+                                                         SoupBuffer *body);
+

+Adds a new MIME part to multipart with the given headers and body. +(The multipart will make its own copies of headers and body, so +you should free your copies if you are not using them for anything +else.) +

+
++ + + + + + + + + + + + + + +

multipart :

a SoupMultipart +

headers :

the MIME part headers

body :

the MIME part body
+

Since 2.26

+
+
+
+

soup_multipart_append_form_string ()

+
void                soup_multipart_append_form_string   (SoupMultipart *multipart,
+                                                         const char *control_name,
+                                                         const char *data);
+

+Adds a new MIME part containing data to multipart, using +"Content-Disposition: form-data", as per the HTML forms +specification. See soup_form_request_new_from_multipart() for more +details. +

+
++ + + + + + + + + + + + + + +

multipart :

a multipart (presumably of type "multipart/form-data")

control_name :

the name of the control associated with data +

data :

the body data
+

Since 2.26

+
+
+
+

soup_multipart_append_form_file ()

+
void                soup_multipart_append_form_file     (SoupMultipart *multipart,
+                                                         const char *control_name,
+                                                         const char *filename,
+                                                         const char *content_type,
+                                                         SoupBuffer *body);
+

+Adds a new MIME part containing body to multipart, using +"Content-Disposition: form-data", as per the HTML forms +specification. See soup_form_request_new_from_multipart() for more +details. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

multipart :

a multipart (presumably of type "multipart/form-data")

control_name :

the name of the control associated with this file

filename :

the name of the file, or NULL if not known

content_type :

the MIME type of the file, or NULL if not known

body :

the file data
+

Since 2.26

+
+
+
+

soup_multipart_to_message ()

+
void                soup_multipart_to_message           (SoupMultipart *multipart,
+                                                         SoupMessageHeaders *dest_headers,
+                                                         SoupMessageBody *dest_body);
+

+Serializes multipart to dest_headers and dest_body. +

+
++ + + + + + + + + + + + + + +

multipart :

a SoupMultipart +

dest_headers :

the headers of the HTTP message to serialize multipart to

dest_body :

the body of the HTTP message to serialize multipart to
+

Since 2.26

+
+
+ +
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupProxyResolverDefault.html b/docs/reference/html/SoupProxyResolverDefault.html new file mode 100644 index 0000000..be862fa --- /dev/null +++ b/docs/reference/html/SoupProxyResolverDefault.html @@ -0,0 +1,101 @@ + + + + +SoupProxyResolverDefault + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupProxyResolverDefault

+

SoupProxyResolverDefault

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupProxyResolverDefault;
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupProxyResolverDefault
+
+
+
+

Implemented Interfaces

+

+SoupProxyResolverDefault implements + SoupSessionFeature and SoupProxyURIResolver.

+
+
+

Properties

+
+  "gproxy-resolver"          GProxyResolver*       : Write
+
+
+
+

Description

+

+

+
+
+

Details

+
+

SoupProxyResolverDefault

+
typedef struct _SoupProxyResolverDefault SoupProxyResolverDefault;
+

+A SoupProxyURIResolver implementation that uses the default gio +GProxyResolver to resolve proxies. +

+

Since 2.34

+
+
+
+

Property Details

+
+

The "gproxy-resolver" property

+
  "gproxy-resolver"          GProxyResolver*       : Write
+

The underlying GProxyResolver.

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupProxyURIResolver.html b/docs/reference/html/SoupProxyURIResolver.html new file mode 100644 index 0000000..5305b5c --- /dev/null +++ b/docs/reference/html/SoupProxyURIResolver.html @@ -0,0 +1,237 @@ + + + + +SoupProxyURIResolver + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupProxyURIResolver

+

SoupProxyURIResolver

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupProxyURIResolver;
+void                (*SoupProxyURIResolverCallback)     (SoupProxyURIResolver *resolver,
+                                                         guint status,
+                                                         SoupURI *proxy_uri,
+                                                         gpointer user_data);
+void                soup_proxy_uri_resolver_get_proxy_uri_async
+                                                        (SoupProxyURIResolver *proxy_uri_resolver,
+                                                         SoupURI *uri,
+                                                         GMainContext *async_context,
+                                                         GCancellable *cancellable,
+                                                         SoupProxyURIResolverCallback callback,
+                                                         gpointer user_data);
+guint               soup_proxy_uri_resolver_get_proxy_uri_sync
+                                                        (SoupProxyURIResolver *proxy_uri_resolver,
+                                                         SoupURI *uri,
+                                                         GCancellable *cancellable,
+                                                         SoupURI **proxy_uri);
+
+
+
+

Object Hierarchy

+
+  GInterface
+   +----SoupProxyURIResolver
+
+
+
+

Prerequisites

+

+SoupProxyURIResolver requires + GObject.

+
+
+

Known Implementations

+

+SoupProxyURIResolver is implemented by + SoupProxyResolverDefault.

+
+
+

Description

+

+

+
+
+

Details

+
+

SoupProxyURIResolver

+
typedef struct _SoupProxyURIResolver SoupProxyURIResolver;
+

+

+
+
+
+

SoupProxyURIResolverCallback ()

+
void                (*SoupProxyURIResolverCallback)     (SoupProxyURIResolver *resolver,
+                                                         guint status,
+                                                         SoupURI *proxy_uri,
+                                                         gpointer user_data);
+

+Callback for soup_proxy_uri_resolver_get_proxy_uri_async() +

+
++ + + + + + + + + + + + + + + + + + +

resolver :

the SoupProxyURIResolver +

status :

a SoupKnownStatusCode +

proxy_uri :

the resolved proxy URI, or NULL +

user_data :

data passed to soup_proxy_uri_resolver_get_proxy_uri_async() +
+
+
+
+

soup_proxy_uri_resolver_get_proxy_uri_async ()

+
void                soup_proxy_uri_resolver_get_proxy_uri_async
+                                                        (SoupProxyURIResolver *proxy_uri_resolver,
+                                                         SoupURI *uri,
+                                                         GMainContext *async_context,
+                                                         GCancellable *cancellable,
+                                                         SoupProxyURIResolverCallback callback,
+                                                         gpointer user_data);
+

+Asynchronously determines a proxy URI to use for uri and calls +callback. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

proxy_uri_resolver :

the SoupProxyURIResolver +

uri :

the SoupURI you want a proxy for

async_context :

the GMainContext to invoke callback in. [allow-none] +

cancellable :

a GCancellable, or NULL +

callback :

callback to invoke with the proxy address. [scope async] +

user_data :

data for callback +
+

Since 2.26.3

+
+
+
+

soup_proxy_uri_resolver_get_proxy_uri_sync ()

+
guint               soup_proxy_uri_resolver_get_proxy_uri_sync
+                                                        (SoupProxyURIResolver *proxy_uri_resolver,
+                                                         SoupURI *uri,
+                                                         GCancellable *cancellable,
+                                                         SoupURI **proxy_uri);
+

+Synchronously determines a proxy URI to use for uri. If uri +should be sent via proxy, *proxy_uri will be set to the URI of the +proxy, else it will be set to NULL. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

proxy_uri_resolver :

the SoupProxyURIResolver +

uri :

the SoupURI you want a proxy for

cancellable :

a GCancellable, or NULL +

proxy_uri :

on return, will contain the proxy URI. [out] +

Returns :

+SOUP_STATUS_OK if successful, or a transport-level +error.
+

Since 2.26.3

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupRequest.html b/docs/reference/html/SoupRequest.html new file mode 100644 index 0000000..9b4f9e9 --- /dev/null +++ b/docs/reference/html/SoupRequest.html @@ -0,0 +1,359 @@ + + + + +SoupRequest + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupRequest

+

SoupRequest — Protocol-independent streaming request interface

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+struct              SoupRequest;
+GInputStream *      soup_request_send                   (SoupRequest *request,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+void                soup_request_send_async             (SoupRequest *request,
+                                                         GCancellable *cancellable,
+                                                         GAsyncReadyCallback callback,
+                                                         gpointer user_data);
+GInputStream *      soup_request_send_finish            (SoupRequest *request,
+                                                         GAsyncResult *result,
+                                                         GError **error);
+
+goffset             soup_request_get_content_length     (SoupRequest *request);
+const char *        soup_request_get_content_type       (SoupRequest *request);
+SoupSession *       soup_request_get_session            (SoupRequest *request);
+SoupURI *           soup_request_get_uri                (SoupRequest *request);
+
+#define             SOUP_REQUEST_SESSION
+#define             SOUP_REQUEST_URI
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupRequest
+         +----SoupRequestData
+         +----SoupRequestFile
+         +----SoupRequestHTTP
+
+
+
+

Implemented Interfaces

+

+SoupRequest implements + GInitable.

+
+
+

Properties

+
+  "session"                  SoupSession*          : Read / Write / Construct Only
+  "uri"                      SoupURI*              : Read / Write / Construct Only
+
+
+
+

Description

+

+A SoupRequest is created by SoupRequester, and represents a +request to retrieve a particular URI. +

+
+
+

Details

+
+

struct SoupRequest

+
struct SoupRequest;
+

+A request to retrieve a particular URI. +

+

Since 2.34

+
+
+
+

soup_request_send ()

+
GInputStream *      soup_request_send                   (SoupRequest *request,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+

+Synchronously requests the URI pointed to by request, and returns +a GInputStream that can be used to read its contents. +

+
++ + + + + + + + + + + + + + + + + + +

request :

a SoupRequest +

cancellable :

a GCancellable or NULL +

error :

return location for a GError, or NULL +

Returns :

a GInputStream that can be used to +read from the URI pointed to by request. [transfer full] +
+

Since 2.34

+
+
+
+

soup_request_send_async ()

+
void                soup_request_send_async             (SoupRequest *request,
+                                                         GCancellable *cancellable,
+                                                         GAsyncReadyCallback callback,
+                                                         gpointer user_data);
+

+Begins an asynchronous request for the URI pointed to by +request. +

+
++ + + + + + + + + + + + + + + + + + +

request :

a SoupRequest +

cancellable :

a GCancellable or NULL +

callback :

a GAsyncReadyCallback +

user_data :

user data passed to callback +
+

Since 2.34

+
+
+
+

soup_request_send_finish ()

+
GInputStream *      soup_request_send_finish            (SoupRequest *request,
+                                                         GAsyncResult *result,
+                                                         GError **error);
+

+Gets the result of a soup_request_send_async(). +

+
++ + + + + + + + + + + + + + + + + + +

request :

a SoupRequest +

result :

the GAsyncResult +

error :

return location for a GError, or NULL +

Returns :

a GInputStream that can be used to +read from the URI pointed to by request. [transfer full] +
+

Since 2.34

+
+
+
+

soup_request_get_content_length ()

+
goffset             soup_request_get_content_length     (SoupRequest *request);
+

+Gets the length of the data represented by request. +

+
++ + + + + + + + + + +

request :

a SoupRequest +

Returns :

the length of the data represented by request, +or -1 if not known.
+

Since 2.34

+
+
+
+

soup_request_get_content_type ()

+
const char *        soup_request_get_content_type       (SoupRequest *request);
+

+Gets the type of the data represented by request. As in the +HTTP Content-Type header, this may include parameters after +the MIME type. +

+
++ + + + + + + + + + +

request :

a SoupRequest +

Returns :

the type of the data represented by request, +or NULL if not known.
+

Since 2.34

+
+
+
+

soup_request_get_session ()

+
SoupSession *       soup_request_get_session            (SoupRequest *request);
+

+Gets request's SoupSession +

+
++ + + + + + + + + + +

request :

a SoupRequest +

Returns :

+request's SoupSession. [transfer none] +
+

Since 2.34

+
+
+
+

soup_request_get_uri ()

+
SoupURI *           soup_request_get_uri                (SoupRequest *request);
+

+Gets request's URI +

+
++ + + + + + + + + + +

request :

a SoupRequest +

Returns :

+request's URI. [transfer none] +
+

Since 2.34

+
+
+
+

SOUP_REQUEST_SESSION

+
#define SOUP_REQUEST_SESSION "session"
+
+

+

+
+
+
+

SOUP_REQUEST_URI

+
#define SOUP_REQUEST_URI     "uri"
+
+

+

+
+
+
+

Property Details

+
+

The "session" property

+
  "session"                  SoupSession*          : Read / Write / Construct Only
+

The request's session.

+
+
+
+

The "uri" property

+
  "uri"                      SoupURI*              : Read / Write / Construct Only
+

The request URI.

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupRequestData.html b/docs/reference/html/SoupRequestData.html new file mode 100644 index 0000000..98cddcd --- /dev/null +++ b/docs/reference/html/SoupRequestData.html @@ -0,0 +1,83 @@ + + + + +SoupRequestData + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupRequestData

+

SoupRequestData

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupRequestData;
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupRequest
+         +----SoupRequestData
+
+
+
+

Implemented Interfaces

+

+SoupRequestData implements + GInitable.

+
+
+

Description

+

+

+
+
+

Details

+
+

SoupRequestData

+
typedef struct _SoupRequestData SoupRequestData;
+

+

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupRequestFile.html b/docs/reference/html/SoupRequestFile.html new file mode 100644 index 0000000..bf5e607 --- /dev/null +++ b/docs/reference/html/SoupRequestFile.html @@ -0,0 +1,108 @@ + + + + +SoupRequestFile + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupRequestFile

+

SoupRequestFile

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupRequestFile;
+GFile *             soup_request_file_get_file          (SoupRequestFile *file);
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupRequest
+         +----SoupRequestFile
+
+
+
+

Implemented Interfaces

+

+SoupRequestFile implements + GInitable.

+
+
+

Description

+

+

+
+
+

Details

+
+

SoupRequestFile

+
typedef struct _SoupRequestFile SoupRequestFile;
+

+

+
+
+
+

soup_request_file_get_file ()

+
GFile *             soup_request_file_get_file          (SoupRequestFile *file);
+

+Gets a GFile corresponding to file's URI +

+
++ + + + + + + + + + +

file :

a SoupRequestFile +

Returns :

a GFile corresponding to file. [transfer full] +
+

Since 2.34

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupRequestHTTP.html b/docs/reference/html/SoupRequestHTTP.html new file mode 100644 index 0000000..a650ea2 --- /dev/null +++ b/docs/reference/html/SoupRequestHTTP.html @@ -0,0 +1,107 @@ + + + + +SoupRequestHTTP + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupRequestHTTP

+

SoupRequestHTTP

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupRequestHTTP;
+SoupMessage *       soup_request_http_get_message       (SoupRequestHTTP *http);
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupRequest
+         +----SoupRequestHTTP
+
+
+
+

Implemented Interfaces

+

+SoupRequestHTTP implements + GInitable.

+
+
+

Description

+

+

+
+
+

Details

+
+

SoupRequestHTTP

+
typedef struct _SoupRequestHTTP SoupRequestHTTP;
+

+

+
+
+
+

soup_request_http_get_message ()

+
SoupMessage *       soup_request_http_get_message       (SoupRequestHTTP *http);
+

+Gets a new reference to the SoupMessage associated to this SoupRequest +

+
++ + + + + + + + + + +

http :

a SoupRequestHTTP object

Returns :

a new reference to the SoupMessage +
+

Since 2.34

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupRequester.html b/docs/reference/html/SoupRequester.html new file mode 100644 index 0000000..6222ad9 --- /dev/null +++ b/docs/reference/html/SoupRequester.html @@ -0,0 +1,222 @@ + + + + +SoupRequester + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupRequester

+

SoupRequester

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+struct              SoupRequester;
+SoupRequester *     soup_requester_new                  (void);
+SoupRequest *       soup_requester_request              (SoupRequester *requester,
+                                                         const char *uri_string,
+                                                         GError **error);
+SoupRequest *       soup_requester_request_uri          (SoupRequester *requester,
+                                                         SoupURI *uri,
+                                                         GError **error);
+
+enum                SoupRequesterError;
+#define             SOUP_REQUESTER_ERROR
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupRequester
+
+
+
+

Implemented Interfaces

+

+SoupRequester implements + SoupSessionFeature.

+
+
+

Description

+

+

+
+
+

Details

+
+

struct SoupRequester

+
struct SoupRequester;
+

+

+
+
+
+

soup_requester_new ()

+
SoupRequester *     soup_requester_new                  (void);
+

+Creates a new SoupRequester object, which can be added to +a SoupSession with soup_session_add_feature(). +

+
++ + + + +

Returns :

the new SoupRequester +
+

Since 2.34

+
+
+
+

soup_requester_request ()

+
SoupRequest *       soup_requester_request              (SoupRequester *requester,
+                                                         const char *uri_string,
+                                                         GError **error);
+

+Creates a SoupRequest for retrieving uri_string. +

+
++ + + + + + + + + + + + + + + + + + +

requester :

a SoupRequester +

uri_string :

a URI, in string form

error :

return location for a GError, or NULL +

Returns :

a new SoupRequest, or +NULL on error. [transfer full] +
+

Since 2.34

+
+
+
+

soup_requester_request_uri ()

+
SoupRequest *       soup_requester_request_uri          (SoupRequester *requester,
+                                                         SoupURI *uri,
+                                                         GError **error);
+

+Creates a SoupRequest for retrieving uri. +

+
++ + + + + + + + + + + + + + + + + + +

requester :

a SoupRequester +

uri :

a SoupURI representing the URI to retrieve

error :

return location for a GError, or NULL +

Returns :

a new SoupRequest, or +NULL on error. [transfer full] +
+

Since 2.34

+
+
+
+

enum SoupRequesterError

+
typedef enum {
+	SOUP_REQUESTER_ERROR_BAD_URI,
+	SOUP_REQUESTER_ERROR_UNSUPPORTED_URI_SCHEME
+} SoupRequesterError;
+
+

+A SoupRequester error. +

+
++ + + + + + + + + + +

SOUP_REQUESTER_ERROR_BAD_URI

the URI could not be parsed +

SOUP_REQUESTER_ERROR_UNSUPPORTED_URI_SCHEME

the URI scheme is not + supported by this SoupRequester +
+

Since 2.34

+
+
+
+

SOUP_REQUESTER_ERROR

+
#define SOUP_REQUESTER_ERROR soup_requester_error_quark ()
+
+

+A GError domain for SoupRequester errors. Used with +SoupRequesterError. +

+

Since 2.34

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupServer.html b/docs/reference/html/SoupServer.html new file mode 100644 index 0000000..c3f9fc4 --- /dev/null +++ b/docs/reference/html/SoupServer.html @@ -0,0 +1,1188 @@ + + + + +SoupServer + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupServer

+

SoupServer — HTTP server

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupServer;
+SoupServer *        soup_server_new                     (const char *optname1,
+                                                         ...);
+gboolean            soup_server_is_https                (SoupServer *server);
+guint               soup_server_get_port                (SoupServer *server);
+SoupSocket *        soup_server_get_listener            (SoupServer *server);
+void                soup_server_run                     (SoupServer *server);
+void                soup_server_run_async               (SoupServer *server);
+void                soup_server_quit                    (SoupServer *server);
+void                soup_server_disconnect              (SoupServer *server);
+GMainContext *      soup_server_get_async_context       (SoupServer *server);
+
+void                (*SoupServerCallback)               (SoupServer *server,
+                                                         SoupMessage *msg,
+                                                         const char *path,
+                                                         GHashTable *query,
+                                                         SoupClientContext *client,
+                                                         gpointer user_data);
+void                soup_server_add_handler             (SoupServer *server,
+                                                         const char *path,
+                                                         SoupServerCallback callback,
+                                                         gpointer user_data,
+                                                         GDestroyNotify destroy);
+void                soup_server_remove_handler          (SoupServer *server,
+                                                         const char *path);
+
+typedef             SoupClientContext;
+SoupSocket *        soup_client_context_get_socket      (SoupClientContext *client);
+SoupAddress *       soup_client_context_get_address     (SoupClientContext *client);
+const char *        soup_client_context_get_host        (SoupClientContext *client);
+SoupAuthDomain *    soup_client_context_get_auth_domain (SoupClientContext *client);
+const char *        soup_client_context_get_auth_user   (SoupClientContext *client);
+
+void                soup_server_add_auth_domain         (SoupServer *server,
+                                                         SoupAuthDomain *auth_domain);
+void                soup_server_remove_auth_domain      (SoupServer *server,
+                                                         SoupAuthDomain *auth_domain);
+
+void                soup_server_pause_message           (SoupServer *server,
+                                                         SoupMessage *msg);
+void                soup_server_unpause_message         (SoupServer *server,
+                                                         SoupMessage *msg);
+
+#define             SOUP_SERVER_PORT
+#define             SOUP_SERVER_INTERFACE
+#define             SOUP_SERVER_SSL_CERT_FILE
+#define             SOUP_SERVER_SSL_KEY_FILE
+#define             SOUP_SERVER_ASYNC_CONTEXT
+#define             SOUP_SERVER_RAW_PATHS
+#define             SOUP_SERVER_SERVER_HEADER
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupServer
+
+
+  GBoxed
+   +----SoupClientContext
+
+
+
+

Properties

+
+  "async-context"            gpointer              : Read / Write / Construct Only
+  "interface"                SoupAddress*          : Read / Write / Construct Only
+  "port"                     guint                 : Read / Write / Construct Only
+  "raw-paths"                gboolean              : Read / Write / Construct Only
+  "server-header"            gchar*                : Read / Write / Construct
+  "ssl-cert-file"            gchar*                : Read / Write / Construct Only
+  "ssl-key-file"             gchar*                : Read / Write / Construct Only
+  "tls-certificate"          GTlsCertificate*      : Read / Write / Construct Only
+
+
+ +
+

Description

+

+SoupServer implements a simple HTTP server. +

+

+To begin, create a server using soup_server_new(). Add at least one +handler by calling soup_server_add_handler(); the handler will be +called to process any requests underneath the path passed to +soup_server_add_handler(). (If you want all requests to go to the +same handler, just pass "/" (or NULL) for the path.) Any request +that does not match any handler will automatically be returned to +the client with a 404 (Not Found) status. +

+

+If you want to handle the special "*" URI (eg, "OPTIONS *"), you +must explicitly register a handler for "*"; the default handler +will not be used for that case. +

+

+To add authentication to some or all paths, create an appropriate +SoupAuthDomain (qv), and add it to the server via +soup_server_add_auth_domain(). (As with handlers, you must +explicitly add "*" to an auth domain if you want it to be covered.) +

+

+Additional processing options are available via SoupServer's +signals; connect to "request-started" to be notified +every time a new request is being processed. (This gives you a +chance to connect to the SoupMessage "got-" signals in case you +want to do processing before the body has been fully read.) +

+

+Once the server is set up, start it processing connections by +calling soup_server_run_async() or soup_server_run(). SoupServer +runs via the glib main loop; if you need to have a server that runs +in another thread (or merely isn't bound to the default main loop), +create a GMainContext for it to use, and set that via the +SOUP_SERVER_ASYNC_CONTEXT property. +

+
+
+

Details

+
+

SoupServer

+
typedef struct _SoupServer SoupServer;
+

+

+
+
+
+

soup_server_new ()

+
SoupServer *        soup_server_new                     (const char *optname1,
+                                                         ...);
+

+Creates a new SoupServer. +

+
++ + + + + + + + + + + + + + +

optname1 :

name of first property to set

... :

value of optname1, followed by additional property/value pairs

Returns :

a new SoupServer +
+
+
+
+

soup_server_is_https ()

+
gboolean            soup_server_is_https                (SoupServer *server);
+

+Checks whether server is running plain http or https. +

+

+In order for a server to run https, you must set the +SOUP_SERVER_SSL_CERT_FILE and SOUP_SERVER_SSL_KEY_FILE properties +to provide it with an SSL certificate to use. +

+
++ + + + + + + + + + +

server :

a SoupServer +

Returns :

+TRUE if server is serving https.
+
+
+
+

soup_server_get_port ()

+
guint               soup_server_get_port                (SoupServer *server);
+

+Gets the TCP port that server is listening on. This is most useful +when you did not request a specific port (or explicitly requested +SOUP_ADDRESS_ANY_PORT). +

+
++ + + + + + + + + + +

server :

a SoupServer +

Returns :

the port server is listening on.
+
+
+
+

soup_server_get_listener ()

+
SoupSocket *        soup_server_get_listener            (SoupServer *server);
+

+Gets server's listening socket. You should treat this as +read-only; writing to it or modifying it may cause server to +malfunction. +

+
++ + + + + + + + + + +

server :

a SoupServer +

Returns :

the listening socket. [transfer none] +
+
+
+
+

soup_server_run ()

+
void                soup_server_run                     (SoupServer *server);
+

+Starts server, causing it to listen for and process incoming +connections. Unlike soup_server_run_async(), this creates a +GMainLoop and runs it, and it will not return until someone calls +soup_server_quit() to stop the server. +

+
++ + + + +

server :

a SoupServer +
+
+
+
+

soup_server_run_async ()

+
void                soup_server_run_async               (SoupServer *server);
+

+Starts server, causing it to listen for and process incoming +connections. +

+

+The server actually runs in server's GMainContext. It will not +actually perform any processing unless the appropriate main loop is +running. In the simple case where you did not set the server's +SOUP_SERVER_ASYNC_CONTEXT property, this means the server will run +whenever the glib main loop is running. +

+
++ + + + +

server :

a SoupServer +
+
+
+
+

soup_server_quit ()

+
void                soup_server_quit                    (SoupServer *server);
+

+Stops processing for server. Call this to clean up after +soup_server_run_async(), or to terminate a call to soup_server_run(). +

+

+server is still in a working state after this call; you can start +and stop a server as many times as you want. +

+
++ + + + +

server :

a SoupServer +
+
+
+
+

soup_server_disconnect ()

+
void                soup_server_disconnect              (SoupServer *server);
+

+Stops processing for server and closes its socket. This implies +the effects of soup_server_quit(), but additionally closes the +listening socket. Note that messages currently in progress will +continue to be handled, if the main loop associated with the +server is resumed or kept running. +

+

+After calling this function, server is no longer functional, so it +has nearly the same effect as destroying server entirely. The +function is thus useful mainly for language bindings without +explicit control over object lifetime. +

+
++ + + + +

server :

a SoupServer +
+
+
+
+

soup_server_get_async_context ()

+
GMainContext *      soup_server_get_async_context       (SoupServer *server);
+

+Gets server's async_context. This does not add a ref to the +context, so you will need to ref it yourself if you want it to +outlive its server. +

+
++ + + + + + + + + + +

server :

a SoupServer +

Returns :

+server's GMainContext, which may be NULL. [transfer none] +
+
+
+
+

SoupServerCallback ()

+
void                (*SoupServerCallback)               (SoupServer *server,
+                                                         SoupMessage *msg,
+                                                         const char *path,
+                                                         GHashTable *query,
+                                                         SoupClientContext *client,
+                                                         gpointer user_data);
+

+A callback used to handle requests to a SoupServer. The callback +will be invoked after receiving the request body; msg's +"method", "request_headers", and +"request_body" fields will be filled in. +

+

+path and query contain the likewise-named components of the +Request-URI, subject to certain assumptions. By default, +SoupServer decodes all percent-encoding in the URI path, such that +"/foo%2Fbar" is treated the same as "/foo/bar". If your +server is serving resources in some non-POSIX-filesystem namespace, +you may want to distinguish those as two distinct paths. In that +case, you can set the SOUP_SERVER_RAW_PATHS property when creating +the SoupServer, and it will leave those characters undecoded. (You +may want to call soup_uri_normalize() to decode any percent-encoded +characters that you aren't handling specially.) +

+

+query contains the query component of the Request-URI parsed +according to the rules for HTML form handling. Although this is the +only commonly-used query string format in HTTP, there is nothing +that actually requires that HTTP URIs use that format; if your +server needs to use some other format, you can just ignore query, +and call soup_message_get_uri() and parse the URI's query field +yourself. +

+

+After determining what to do with the request, the callback must at +a minimum call soup_message_set_status() (or +soup_message_set_status_full()) on msg to set the response status +code. Additionally, it may set response headers and/or fill in the +response body. +

+

+If the callback cannot fully fill in the response before returning +(eg, if it needs to wait for information from a database, or +another network server), it should call soup_server_pause_message() +to tell SoupServer to not send the response right away. When the +response is ready, call soup_server_unpause_message() to cause it +to be sent. +

+

+To send the response body a bit at a time using "chunked" encoding, +first call soup_message_headers_set_encoding() to set +SOUP_ENCODING_CHUNKED on the "response_headers". Then call +soup_message_body_append() (or soup_message_body_append_buffer()) +to append each chunk as it becomes ready, and +soup_server_unpause_message() to make sure it's running. (The +server will automatically pause the message if it is using chunked +encoding but no more chunks are available.) When you are done, call +soup_message_body_complete() to indicate that no more chunks are +coming. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

server :

the SoupServer +

msg :

the message being processed

path :

the path component of msg's Request-URI

query :

the parsed query +component of msg's Request-URI. [element-type utf8 utf8][allow-none] +

client :

additional contextual information about the client

user_data :

the data passed to soup_server_add_handler +
+
+
+
+

soup_server_add_handler ()

+
void                soup_server_add_handler             (SoupServer *server,
+                                                         const char *path,
+                                                         SoupServerCallback callback,
+                                                         gpointer user_data,
+                                                         GDestroyNotify destroy);
+

+Adds a handler to server for requests under path. See the +documentation for SoupServerCallback for information about +how callbacks should behave. +

+

+If path is NULL or "/", then this will be the default handler for +all requests that don't have a more specific handler. Note though +that if you want to handle requests to the special "*" URI, you +must explicitly register a handler for "*"; the default handler +will not be used for that case. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

server :

a SoupServer +

path :

the toplevel path for the handler. [allow-none] +

callback :

callback to invoke for requests under path +

user_data :

data for callback +

destroy :

destroy notifier to free user_data +
+
+
+
+

soup_server_remove_handler ()

+
void                soup_server_remove_handler          (SoupServer *server,
+                                                         const char *path);
+

+Removes the handler registered at path. +

+
++ + + + + + + + + + +

server :

a SoupServer +

path :

the toplevel path for the handler
+
+
+
+

SoupClientContext

+
typedef struct SoupClientContext SoupClientContext;
+
+

+A SoupClientContext provides additional information about the +client making a particular request. In particular, you can use +soup_client_context_get_auth_domain() and +soup_client_context_get_auth_user() to determine if HTTP +authentication was used successfully. +

+

+soup_client_context_get_address() and/or +soup_client_context_get_host() can be used to get information for +logging or debugging purposes. soup_client_context_get_socket() may +also be of use in some situations (eg, tracking when multiple +requests are made on the same connection). +

+
+
+
+

soup_client_context_get_socket ()

+
SoupSocket *        soup_client_context_get_socket      (SoupClientContext *client);
+

+Retrieves the SoupSocket that client is associated with. +

+

+If you are using this method to observe when multiple requests are +made on the same persistent HTTP connection (eg, as the ntlm-test +test program does), you will need to pay attention to socket +destruction as well (either by using weak references, or by +connecting to the "disconnected" signal), so that you do +not get fooled when the allocator reuses the memory address of a +previously-destroyed socket to represent a new socket. +

+
++ + + + + + + + + + +

client :

a SoupClientContext +

Returns :

the SoupSocket that client is +associated with. [transfer none] +
+
+
+
+

soup_client_context_get_address ()

+
SoupAddress *       soup_client_context_get_address     (SoupClientContext *client);
+

+Retrieves the SoupAddress associated with the remote end +of a connection. +

+
++ + + + + + + + + + +

client :

a SoupClientContext +

Returns :

the SoupAddress associated with the +remote end of a connection. [transfer none] +
+
+
+
+

soup_client_context_get_host ()

+
const char *        soup_client_context_get_host        (SoupClientContext *client);
+

+Retrieves the IP address associated with the remote end of a +connection. (If you want the actual hostname, you'll have to call +soup_client_context_get_address() and then call the appropriate +SoupAddress method to resolve it.) +

+
++ + + + + + + + + + +

client :

a SoupClientContext +

Returns :

the IP address associated with the remote end of a +connection.
+
+
+
+

soup_client_context_get_auth_domain ()

+
SoupAuthDomain *    soup_client_context_get_auth_domain (SoupClientContext *client);
+

+Checks whether the request associated with client has been +authenticated, and if so returns the SoupAuthDomain that +authenticated it. +

+
++ + + + + + + + + + +

client :

a SoupClientContext +

Returns :

a SoupAuthDomain, or +NULL if the request was not authenticated. [transfer none][allow-none] +
+
+
+
+

soup_client_context_get_auth_user ()

+
const char *        soup_client_context_get_auth_user   (SoupClientContext *client);
+

+Checks whether the request associated with client has been +authenticated, and if so returns the username that the client +authenticated as. +

+
++ + + + + + + + + + +

client :

a SoupClientContext +

Returns :

the authenticated-as user, or NULL if the request +was not authenticated.
+
+
+
+

soup_server_add_auth_domain ()

+
void                soup_server_add_auth_domain         (SoupServer *server,
+                                                         SoupAuthDomain *auth_domain);
+

+Adds an authentication domain to server. Each auth domain will +have the chance to require authentication for each request that +comes in; normally auth domains will require authentication for +requests on certain paths that they have been set up to watch, or +that meet other criteria set by the caller. If an auth domain +determines that a request requires authentication (and the request +doesn't contain authentication), server will automatically reject +the request with an appropriate status (401 Unauthorized or 407 +Proxy Authentication Required). If the request used the +"100-continue" Expectation, server will reject it before the +request body is sent. +

+
++ + + + + + + + + + +

server :

a SoupServer +

auth_domain :

a SoupAuthDomain +
+
+
+
+

soup_server_remove_auth_domain ()

+
void                soup_server_remove_auth_domain      (SoupServer *server,
+                                                         SoupAuthDomain *auth_domain);
+

+Removes auth_domain from server. +

+
++ + + + + + + + + + +

server :

a SoupServer +

auth_domain :

a SoupAuthDomain +
+
+
+
+

soup_server_pause_message ()

+
void                soup_server_pause_message           (SoupServer *server,
+                                                         SoupMessage *msg);
+

+Pauses I/O on msg. This can be used when you need to return from +the server handler without having the full response ready yet. Use +soup_server_unpause_message() to resume I/O. +

+
++ + + + + + + + + + +

server :

a SoupServer +

msg :

a SoupMessage associated with server.
+
+
+
+

soup_server_unpause_message ()

+
void                soup_server_unpause_message         (SoupServer *server,
+                                                         SoupMessage *msg);
+

+Resumes I/O on msg. Use this to resume after calling +soup_server_pause_message(), or after adding a new chunk to a +chunked response. +

+

+I/O won't actually resume until you return to the main loop. +

+
++ + + + + + + + + + +

server :

a SoupServer +

msg :

a SoupMessage associated with server.
+
+
+
+

SOUP_SERVER_PORT

+
#define SOUP_SERVER_PORT            "port"
+
+

+Alias for the "port" property. (The port the +server listens on.) +

+
+
+
+

SOUP_SERVER_INTERFACE

+
#define SOUP_SERVER_INTERFACE       "interface"
+
+

+Alias for the "interface" property. (The address +of the network interface the server listens on.) +

+
+
+
+

SOUP_SERVER_SSL_CERT_FILE

+
#define SOUP_SERVER_SSL_CERT_FILE   "ssl-cert-file"
+
+

+Alias for the "ssl-cert-file" property, qv. +

+
+
+
+

SOUP_SERVER_SSL_KEY_FILE

+
#define SOUP_SERVER_SSL_KEY_FILE    "ssl-key-file"
+
+

+Alias for the "ssl-key-file" property, qv. +

+
+
+
+

SOUP_SERVER_ASYNC_CONTEXT

+
#define SOUP_SERVER_ASYNC_CONTEXT   "async-context"
+
+

+Alias for the "async-context" property. (The +server's GMainContext.) +

+
+
+
+

SOUP_SERVER_RAW_PATHS

+
#define SOUP_SERVER_RAW_PATHS       "raw-paths"
+
+

+Alias for the "raw-paths" property. (If TRUE, +percent-encoding in the Request-URI path will not be +automatically decoded.) +

+
+
+
+

SOUP_SERVER_SERVER_HEADER

+
#define SOUP_SERVER_SERVER_HEADER   "server-header"
+
+

+Alias for the "server-header" property, qv. +

+
+
+
+

Property Details

+
+

The "async-context" property

+
  "async-context"            gpointer              : Read / Write / Construct Only
+

The GMainContext to dispatch async I/O in.

+
+
+
+

The "interface" property

+
  "interface"                SoupAddress*          : Read / Write / Construct Only
+

Address of interface to listen on.

+
+
+
+

The "port" property

+
  "port"                     guint                 : Read / Write / Construct Only
+

Port to listen on.

+

Allowed values: <= 65536

+

Default value: 0

+
+
+
+

The "raw-paths" property

+
  "raw-paths"                gboolean              : Read / Write / Construct Only
+

If %TRUE, percent-encoding in the Request-URI path will not be automatically decoded.

+

Default value: FALSE

+
+
+
+

The "server-header" property

+
  "server-header"            gchar*                : Read / Write / Construct
+

+If non-NULL, the value to use for the "Server" header on +SoupMessages processed by this server. +

+

+The Server header is the server equivalent of the +User-Agent header, and provides information about the +server and its components. It contains a list of one or +more product tokens, separated by whitespace, with the most +significant product token coming first. The tokens must be +brief, ASCII, and mostly alphanumeric (although "-", "_", +and "." are also allowed), and may optionally include a "/" +followed by a version string. You may also put comments, +enclosed in parentheses, between or after the tokens. +

+

+Some HTTP server implementations intentionally do not use +version numbers in their Server header, so that +installations running older versions of the server don't +end up advertising their vulnerability to specific security +holes. +

+

+As with "user_agent", if you set a +"server_header" property that has trailing whitespace, +SoupServer will append its own product token (eg, +"libsoup/2.3.2") to the end of the +header for you. +

+

Default value: NULL

+
+
+
+

The "ssl-cert-file" property

+
  "ssl-cert-file"            gchar*                : Read / Write / Construct Only
+

+Path to a file containing a PEM-encoded certificate. If +this and "ssl-key-file" are both set, then the +server will speak https rather than plain http. +

+

+Alternatively, you can use "tls-certificate" +to provide an arbitrary GTlsCertificate. +

+

Default value: NULL

+
+
+
+

The "ssl-key-file" property

+
  "ssl-key-file"             gchar*                : Read / Write / Construct Only
+

+Path to a file containing a PEM-encoded private key. If +this and "ssl-cert-file" are both set, then the +server will speak https rather than plain http. Note that +you are allowed to set them to the same value, if you have +a single file containing both the certificate and the key. +

+

+Alternatively, you can use "tls-certificate" +to provide an arbitrary GTlsCertificate. +

+

Default value: NULL

+
+
+
+

The "tls-certificate" property

+
  "tls-certificate"          GTlsCertificate*      : Read / Write / Construct Only
+

+A GTlsCertificate that has a "private-key" +set. If this is set, then the server will speak https +rather than plain http. +

+

+Alternatively, you can use "ssl-cert-file" and +"ssl-key-file" properties, to have SoupServer +read in a certificate from a file. +

+
+
+
+

Signal Details

+
+

The "request-aborted" signal

+
void                user_function                      (SoupServer        *server,
+                                                        SoupMessage       *message,
+                                                        SoupClientContext *client,
+                                                        gpointer           user_data)      : Run First
+

+Emitted when processing has failed for a message; this +could mean either that it could not be read (if +"request_read" has not been emitted for it yet), +or that the response could not be written back (if +"request_read" has been emitted but +"request_finished" has not been). +

+

+message is in an undefined state when this signal is +emitted; the signal exists primarily to allow the server to +free any state that it may have allocated in +"request_started". +

+
++ + + + + + + + + + + + + + + + + + +

server :

the server

message :

the message

client :

the client context

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "request-finished" signal

+
void                user_function                      (SoupServer        *server,
+                                                        SoupMessage       *message,
+                                                        SoupClientContext *client,
+                                                        gpointer           user_data)      : Run First
+

+Emitted when the server has finished writing a response to +a request. +

+
++ + + + + + + + + + + + + + + + + + +

server :

the server

message :

the message

client :

the client context

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "request-read" signal

+
void                user_function                      (SoupServer        *server,
+                                                        SoupMessage       *message,
+                                                        SoupClientContext *client,
+                                                        gpointer           user_data)      : Run First
+

+Emitted when the server has successfully read a request. +message will have all of its request-side information +filled in, and if the message was authenticated, client +will have information about that. This signal is emitted +before any handlers are called for the message, and if it +sets the message's status_code, then normal handler +processing will be skipped. +

+
++ + + + + + + + + + + + + + + + + + +

server :

the server

message :

the message

client :

the client context

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "request-started" signal

+
void                user_function                      (SoupServer        *server,
+                                                        SoupMessage       *message,
+                                                        SoupClientContext *client,
+                                                        gpointer           user_data)      : Run First
+

+Emitted when the server has started reading a new request. +message will be completely blank; not even the +Request-Line will have been read yet. About the only thing +you can usefully do with it is connect to its signals. +

+

+If the request is read successfully, this will eventually +be followed by a "request_read" signal. If a +response is then sent, the request processing will end with +a "request_finished" signal. If a network error +occurs, the processing will instead end with +"request_aborted". +

+
++ + + + + + + + + + + + + + + + + + +

server :

the server

message :

the new message

client :

the client context

user_data :

user data set when the signal handler was connected.
+
+
+
+

See Also

+SoupAuthDomain +
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupSession.html b/docs/reference/html/SoupSession.html new file mode 100644 index 0000000..d27958d --- /dev/null +++ b/docs/reference/html/SoupSession.html @@ -0,0 +1,1509 @@ + + + + +SoupSession + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupSession

+

SoupSession — Soup session state object

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupSession;
+
+void                (*SoupSessionCallback)              (SoupSession *session,
+                                                         SoupMessage *msg,
+                                                         gpointer user_data);
+void                soup_session_queue_message          (SoupSession *session,
+                                                         SoupMessage *msg,
+                                                         SoupSessionCallback callback,
+                                                         gpointer user_data);
+void                soup_session_requeue_message        (SoupSession *session,
+                                                         SoupMessage *msg);
+guint               soup_session_send_message           (SoupSession *session,
+                                                         SoupMessage *msg);
+void                soup_session_cancel_message         (SoupSession *session,
+                                                         SoupMessage *msg,
+                                                         guint status_code);
+void                soup_session_prepare_for_uri        (SoupSession *session,
+                                                         SoupURI *uri);
+void                soup_session_abort                  (SoupSession *session);
+
+gboolean            soup_session_would_redirect         (SoupSession *session,
+                                                         SoupMessage *msg);
+gboolean            soup_session_redirect_message       (SoupSession *session,
+                                                         SoupMessage *msg);
+
+void                soup_session_pause_message          (SoupSession *session,
+                                                         SoupMessage *msg);
+void                soup_session_unpause_message        (SoupSession *session,
+                                                         SoupMessage *msg);
+
+GMainContext *      soup_session_get_async_context      (SoupSession *session);
+
+void                soup_session_add_feature            (SoupSession *session,
+                                                         SoupSessionFeature *feature);
+void                soup_session_add_feature_by_type    (SoupSession *session,
+                                                         GType feature_type);
+void                soup_session_remove_feature         (SoupSession *session,
+                                                         SoupSessionFeature *feature);
+void                soup_session_remove_feature_by_type (SoupSession *session,
+                                                         GType feature_type);
+GSList *            soup_session_get_features           (SoupSession *session,
+                                                         GType feature_type);
+SoupSessionFeature * soup_session_get_feature           (SoupSession *session,
+                                                         GType feature_type);
+SoupSessionFeature * soup_session_get_feature_for_message
+                                                        (SoupSession *session,
+                                                         GType feature_type,
+                                                         SoupMessage *msg);
+
+#define             SOUP_SESSION_PROXY_URI
+#define             SOUP_SESSION_MAX_CONNS
+#define             SOUP_SESSION_MAX_CONNS_PER_HOST
+#define             SOUP_SESSION_USE_NTLM
+#define             SOUP_SESSION_SSL_CA_FILE
+#define             SOUP_SESSION_ASYNC_CONTEXT
+#define             SOUP_SESSION_TIMEOUT
+#define             SOUP_SESSION_IDLE_TIMEOUT
+#define             SOUP_SESSION_USER_AGENT
+#define             SOUP_SESSION_ADD_FEATURE
+#define             SOUP_SESSION_ADD_FEATURE_BY_TYPE
+#define             SOUP_SESSION_REMOVE_FEATURE_BY_TYPE
+#define             SOUP_SESSION_ACCEPT_LANGUAGE
+#define             SOUP_SESSION_ACCEPT_LANGUAGE_AUTO
+#define             SOUP_SESSION_SSL_STRICT
+#define             SOUP_SESSION_HTTP_ALIASES
+#define             SOUP_SESSION_HTTPS_ALIASES
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupSession
+         +----SoupSessionAsync
+         +----SoupSessionSync
+
+
+
+

Properties

+
+  "accept-language"          gchar*                : Read / Write
+  "accept-language-auto"     gboolean              : Read / Write
+  "add-feature"              SoupSessionFeature*   : Read / Write
+  "add-feature-by-type"      GType*                : Read / Write
+  "async-context"            gpointer              : Read / Write / Construct Only
+  "http-aliases"             GStrv                 : Read / Write
+  "https-aliases"            GStrv                 : Read / Write
+  "idle-timeout"             guint                 : Read / Write
+  "max-conns"                gint                  : Read / Write
+  "max-conns-per-host"       gint                  : Read / Write
+  "proxy-uri"                SoupURI*              : Read / Write
+  "remove-feature-by-type"   GType*                : Read / Write
+  "ssl-ca-file"              gchar*                : Read / Write
+  "ssl-strict"               gboolean              : Read / Write
+  "ssl-use-system-ca-file"   gboolean              : Read / Write
+  "timeout"                  guint                 : Read / Write
+  "tls-database"             GTlsDatabase*         : Read / Write
+  "use-ntlm"                 gboolean              : Read / Write
+  "use-thread-context"       gboolean              : Read / Write
+  "user-agent"               gchar*                : Read / Write
+
+
+ +
+

Description

+

+SoupSession is the object that controls client-side HTTP. A +SoupSession encapsulates all of the state that libsoup is keeping +on behalf of your program; cached HTTP connections, authentication +information, etc. +

+

+Most applications will only need a single SoupSession; the primary +reason you might need multiple sessions is if you need to have +multiple independent authentication contexts. (Eg, you are +connecting to a server and authenticating as two different users at +different times; the easiest way to ensure that each SoupMessage +is sent with the authentication information you intended is to use +one session for the first user, and a second session for the other +user.) +

+

+SoupSession itself is an abstract class, with two subclasses. If +you are using the glib main loop, you will generally want to use +SoupSessionAsync, which uses non-blocking I/O and callbacks. On +the other hand, if your application is threaded and you want to do +synchronous I/O in a separate thread from the UI, use +SoupSessionSync. +

+
+
+

Details

+
+

SoupSession

+
typedef struct _SoupSession SoupSession;
+

+

+
+
+
+

SoupSessionCallback ()

+
void                (*SoupSessionCallback)              (SoupSession *session,
+                                                         SoupMessage *msg,
+                                                         gpointer user_data);
+

+Prototype for the callback passed to soup_session_queue_message(), +qv. +

+
++ + + + + + + + + + + + + + +

session :

the session

msg :

the message that has finished

user_data :

the data passed to soup_session_queue_message
+
+
+
+

soup_session_queue_message ()

+
void                soup_session_queue_message          (SoupSession *session,
+                                                         SoupMessage *msg,
+                                                         SoupSessionCallback callback,
+                                                         gpointer user_data);
+

+Queues the message msg for sending. All messages are processed +while the glib main loop runs. If msg has been processed before, +any resources related to the time it was last sent are freed. +

+

+Upon message completion, the callback specified in callback will +be invoked (in the thread associated with session's async +context). If after returning from this callback the message has not +been requeued, msg will be unreffed. +

+
++ + + + + + + + + + + + + + + + + + +

session :

a SoupSession +

msg :

the message to queue. [transfer full] +

callback :

a SoupSessionCallback which will +be called after the message completes or when an unrecoverable error occurs. [allow-none][scope async] +

user_data :

a pointer passed to callback. [allow-none] +
+
+
+
+

soup_session_requeue_message ()

+
void                soup_session_requeue_message        (SoupSession *session,
+                                                         SoupMessage *msg);
+

+This causes msg to be placed back on the queue to be attempted +again. +

+
++ + + + + + + + + + +

session :

a SoupSession +

msg :

the message to requeue
+
+
+
+

soup_session_send_message ()

+
guint               soup_session_send_message           (SoupSession *session,
+                                                         SoupMessage *msg);
+

+Synchronously send msg. This call will not return until the +transfer is finished successfully or there is an unrecoverable +error. +

+

+msg is not freed upon return. +

+
++ + + + + + + + + + + + + + +

session :

a SoupSession +

msg :

the message to send

Returns :

the HTTP status code of the response
+
+
+
+

soup_session_cancel_message ()

+
void                soup_session_cancel_message         (SoupSession *session,
+                                                         SoupMessage *msg,
+                                                         guint status_code);
+

+Causes session to immediately finish processing msg (regardless +of its current state) with a final status_code of status_code. You +may call this at any time after handing msg off to session; if +session has started sending the request but has not yet received +the complete response, then it will close the request's connection. +Note that with non-idempotent requests (eg, +POST, PUT, +DELETE) it is possible that you might cancel the +request after the server acts on it, but before it returns a +response, leaving the remote resource in an unknown state. +

+

+If the message is cancelled while its response body is being read, +then the response body in msg will be left partially-filled-in. +The response headers, on the other hand, will always be either +empty or complete. +

+

+For messages queued with soup_session_queue_message() (and +cancelled from the same thread), the callback will be invoked +before soup_session_cancel_message() returns. +

+
++ + + + + + + + + + + + + + +

session :

a SoupSession +

msg :

the message to cancel

status_code :

status code to set on msg (generally +SOUP_STATUS_CANCELLED)
+
+
+
+

soup_session_prepare_for_uri ()

+
void                soup_session_prepare_for_uri        (SoupSession *session,
+                                                         SoupURI *uri);
+
+

Warning

+

soup_session_prepare_for_uri has been deprecated since version 2.38 and should not be used in newly-written code. use soup_session_prefetch_dns() instead

+
+

+Tells session that uri may be requested shortly, and so the +session can try to prepare (resolving the domain name, obtaining +proxy address, etc.) in order to work more quickly once the URI is +actually requested. +

+

+This method acts asynchronously, in session's +"async_context". If you are using SoupSessionSync and do +not have a main loop running, then you can't use this method. +

+
++ + + + + + + + + + +

session :

a SoupSession +

uri :

a SoupURI which may be required
+

Since 2.30

+
+
+
+

soup_session_abort ()

+
void                soup_session_abort                  (SoupSession *session);
+

+Cancels all pending requests in session. +

+
++ + + + +

session :

the session
+
+
+
+

soup_session_would_redirect ()

+
gboolean            soup_session_would_redirect         (SoupSession *session,
+                                                         SoupMessage *msg);
+

+Checks if msg contains a response that would cause session to +redirect it to a new URL (ignoring msg's SOUP_MESSAGE_NO_REDIRECT +flag, and the number of times it has already been redirected). +

+
++ + + + + + + + + + + + + + +

session :

a SoupSession +

msg :

a SoupMessage that has response headers

Returns :

whether msg would be redirected
+

Since 2.38

+
+
+
+

soup_session_redirect_message ()

+
gboolean            soup_session_redirect_message       (SoupSession *session,
+                                                         SoupMessage *msg);
+

+Updates msg's URI according to its status code and "Location" +header, and requeues it on session. Use this when you have set +SOUP_MESSAGE_NO_REDIRECT on a message, but have decided to allow a +particular redirection to occur, or if you want to allow a +redirection that SoupSession will not perform automatically (eg, +redirecting a non-safe method such as DELETE). +

+

+If msg's status code indicates that it should be retried as a GET +request, then msg will be modified accordingly. +

+

+If msg has already been redirected too many times, this will +cause it to fail with SOUP_STATUS_TOO_MANY_REDIRECTS. +

+
++ + + + + + + + + + + + + + +

session :

the session

msg :

a SoupMessage that has received a 3xx response

Returns :

+TRUE if a redirection was applied, FALSE if not +(eg, because there was no Location header, or it could not be +parsed).
+

Since 2.38

+
+
+
+

soup_session_pause_message ()

+
void                soup_session_pause_message          (SoupSession *session,
+                                                         SoupMessage *msg);
+

+Pauses HTTP I/O on msg. Call soup_session_unpause_message() to +resume I/O. +

+
++ + + + + + + + + + +

session :

a SoupSession +

msg :

a SoupMessage currently running on session +
+
+
+
+

soup_session_unpause_message ()

+
void                soup_session_unpause_message        (SoupSession *session,
+                                                         SoupMessage *msg);
+

+Resumes HTTP I/O on msg. Use this to resume after calling +soup_session_pause_message(). +

+

+If msg is being sent via blocking I/O, this will resume reading or +writing immediately. If msg is using non-blocking I/O, then +reading or writing won't resume until you return to the main loop. +

+
++ + + + + + + + + + +

session :

a SoupSession +

msg :

a SoupMessage currently running on session +
+
+
+
+

soup_session_get_async_context ()

+
GMainContext *      soup_session_get_async_context      (SoupSession *session);
+

+Gets session's async_context. This does not add a ref to the +context, so you will need to ref it yourself if you want it to +outlive its session. +

+

+If "use-thread-context" is true, this will return the +current thread-default main context. +

+
++ + + + + + + + + + +

session :

a SoupSession +

Returns :

+session's GMainContext, which may +be NULL. [transfer none] +
+
+
+
+

soup_session_add_feature ()

+
void                soup_session_add_feature            (SoupSession *session,
+                                                         SoupSessionFeature *feature);
+

+Adds feature's functionality to session. You can also add a +feature to the session at construct time by using the +SOUP_SESSION_ADD_FEATURE property. +

+
++ + + + + + + + + + +

session :

a SoupSession +

feature :

an object that implements SoupSessionFeature +
+

Since 2.24

+
+
+
+

soup_session_add_feature_by_type ()

+
void                soup_session_add_feature_by_type    (SoupSession *session,
+                                                         GType feature_type);
+

+If feature_type is the type of a class that implements +SoupSessionFeature, this creates a new feature of that type and +adds it to session as with soup_session_add_feature(). You can use +this when you don't need to customize the new feature in any way. +

+

+If feature_type is not a SoupSessionFeature type, this gives +each existing feature on session the chance to accept feature_type +as a "subfeature". This can be used to add new SoupAuth types, +for instance. +

+

+You can also add a feature to the session at construct time by +using the SOUP_SESSION_ADD_FEATURE_BY_TYPE property. +

+
++ + + + + + + + + + +

session :

a SoupSession +

feature_type :

a GType +
+

Since 2.24

+
+
+
+

soup_session_remove_feature ()

+
void                soup_session_remove_feature         (SoupSession *session,
+                                                         SoupSessionFeature *feature);
+

+Removes feature's functionality from session. +

+
++ + + + + + + + + + +

session :

a SoupSession +

feature :

a feature that has previously been added to session +
+

Since 2.24

+
+
+
+

soup_session_remove_feature_by_type ()

+
void                soup_session_remove_feature_by_type (SoupSession *session,
+                                                         GType feature_type);
+

+Removes all features of type feature_type (or any subclass of +feature_type) from session. You can also remove standard features +from the session at construct time by using the +SOUP_SESSION_REMOVE_FEATURE_BY_TYPE property. +

+
++ + + + + + + + + + +

session :

a SoupSession +

feature_type :

a GType +
+

Since 2.24

+
+
+
+

soup_session_get_features ()

+
GSList *            soup_session_get_features           (SoupSession *session,
+                                                         GType feature_type);
+

+Generates a list of session's features of type feature_type. (If +you want to see all features, you can pass SOUP_TYPE_SESSION_FEATURE +for feature_type.) +

+
++ + + + + + + + + + + + + + +

session :

a SoupSession +

feature_type :

the GType of the class of features to get

Returns :

a list of features. You must free the list, but not its contents. [transfer container][element-type Soup.SessionFeature] +
+

Since 2.26

+
+
+
+

soup_session_get_feature ()

+
SoupSessionFeature * soup_session_get_feature           (SoupSession *session,
+                                                         GType feature_type);
+

+Gets the first feature in session of type feature_type. For +features where there may be more than one feature of a given type, +use soup_session_get_features(). +

+
++ + + + + + + + + + + + + + +

session :

a SoupSession +

feature_type :

the GType of the feature to get

Returns :

a SoupSessionFeature, or NULL. The +feature is owned by session. [transfer none] +
+

Since 2.26

+
+
+
+

soup_session_get_feature_for_message ()

+
SoupSessionFeature * soup_session_get_feature_for_message
+                                                        (SoupSession *session,
+                                                         GType feature_type,
+                                                         SoupMessage *msg);
+

+Gets the first feature in session of type feature_type, provided +that it is not disabled for msg. As with +soup_session_get_feature(), this should only be used for features +where feature_type is only expected to match a single feature. In +particular, if there are two matching features, and the first is +disabled on msg, and the second is not, then this will return +NULL, not the second feature. +

+
++ + + + + + + + + + + + + + + + + + +

session :

a SoupSession +

feature_type :

the GType of the feature to get

msg :

a SoupMessage +

Returns :

a SoupSessionFeature, or NULL. The +feature is owned by session. [transfer none] +
+

Since 2.28

+
+
+
+

SOUP_SESSION_PROXY_URI

+
#define SOUP_SESSION_PROXY_URI              "proxy-uri"
+
+

+Alias for the "proxy-uri" property. (The HTTP +proxy to use for this session.) +

+
+
+
+

SOUP_SESSION_MAX_CONNS

+
#define SOUP_SESSION_MAX_CONNS              "max-conns"
+
+

+Alias for the "max-conns" property. (The maximum +number of connections that the session can open at once.) +

+
+
+
+

SOUP_SESSION_MAX_CONNS_PER_HOST

+
#define SOUP_SESSION_MAX_CONNS_PER_HOST     "max-conns-per-host"
+
+

+Alias for the "max-conns-per-host" property. +(The maximum number of connections that the session can +open at once to a given host.) +

+
+
+
+

SOUP_SESSION_USE_NTLM

+
#define SOUP_SESSION_USE_NTLM               "use-ntlm"
+
+

+Alias for the "use-ntlm" property. (Whether or +not to use NTLM authentication.) +

+
+
+
+

SOUP_SESSION_SSL_CA_FILE

+
#define SOUP_SESSION_SSL_CA_FILE            "ssl-ca-file"
+
+
+

Warning

+

SOUP_SESSION_SSL_CA_FILE is deprecated and should not be used in newly-written code. use "ssl-use-system-ca-file" or +"tls-database" instead

+
+

+Alias for the "ssl-ca-file" property. (File +containing SSL CA certificates.) +

+
+
+
+

SOUP_SESSION_ASYNC_CONTEXT

+
#define SOUP_SESSION_ASYNC_CONTEXT          "async-context"
+
+

+Alias for the "async-context" property. (The +session's GMainContext.) +

+
+
+
+

SOUP_SESSION_TIMEOUT

+
#define SOUP_SESSION_TIMEOUT                "timeout"
+
+

+Alias for the "timeout" property. (The timeout +in seconds for blocking socket I/O operations.) +

+
+
+
+

SOUP_SESSION_IDLE_TIMEOUT

+
#define SOUP_SESSION_IDLE_TIMEOUT           "idle-timeout"
+
+

+Alias for the "idle-timeout" property. (The idle +connection lifetime.) +

+

Since 2.4.1

+
+
+
+

SOUP_SESSION_USER_AGENT

+
#define SOUP_SESSION_USER_AGENT             "user-agent"
+
+

+Alias for the "user-agent" property, qv. +

+
+
+
+

SOUP_SESSION_ADD_FEATURE

+
#define SOUP_SESSION_ADD_FEATURE            "add-feature"
+
+

+Alias for the "add-feature" property. (Shortcut +for calling soup_session_add_feature().) +

+

Since 2.24

+
+
+
+

SOUP_SESSION_ADD_FEATURE_BY_TYPE

+
#define SOUP_SESSION_ADD_FEATURE_BY_TYPE    "add-feature-by-type"
+
+

+Alias for the "add-feature-by-type" property. +(Shortcut for calling soup_session_add_feature_by_type().) +

+

Since 2.24

+
+
+
+

SOUP_SESSION_REMOVE_FEATURE_BY_TYPE

+
#define SOUP_SESSION_REMOVE_FEATURE_BY_TYPE "remove-feature-by-type"
+
+

+Alias for the "remove-feature-by-type" +property. (Shortcut for calling +soup_session_remove_feature_by_type().) +

+

Since 2.24

+
+
+
+

SOUP_SESSION_ACCEPT_LANGUAGE

+
#define SOUP_SESSION_ACCEPT_LANGUAGE        "accept-language"
+
+

+Alias for the "accept-language" property, qv. +

+

Since 2.30

+
+
+
+

SOUP_SESSION_ACCEPT_LANGUAGE_AUTO

+
#define SOUP_SESSION_ACCEPT_LANGUAGE_AUTO   "accept-language-auto"
+
+

+Alias for the "accept-language-auto" property, qv. +

+

Since 2.30

+
+
+
+

SOUP_SESSION_SSL_STRICT

+
#define SOUP_SESSION_SSL_STRICT             "ssl-strict"
+
+

+Alias for the "ssl-strict" property, qv. +

+

Since 2.30

+
+
+
+

SOUP_SESSION_HTTP_ALIASES

+
#define SOUP_SESSION_HTTP_ALIASES       "http-aliases"
+
+

+Alias for the "http-aliases" property. (URI +schemes that will be considered aliases for "http".) +

+

Since 2.38

+
+
+
+

SOUP_SESSION_HTTPS_ALIASES

+
#define SOUP_SESSION_HTTPS_ALIASES      "https-aliases"
+
+

+Alias for the "https-aliases" property. (URI +schemes that will be considered aliases for "https".) +

+

Since 2.38

+
+
+
+

Property Details

+
+

The "accept-language" property

+
  "accept-language"          gchar*                : Read / Write
+

+If non-NULL, the value to use for the "Accept-Language" header +on SoupMessages sent from this session. +

+

+Setting this will disable +"accept-language-auto". +

+

Default value: NULL

+

Since 2.30

+
+
+
+

The "accept-language-auto" property

+
  "accept-language-auto"     gboolean              : Read / Write
+

+If TRUE, SoupSession will automatically set the string +for the "Accept-Language" header on every SoupMessage +sent, based on the return value of g_get_language_names(). +

+

+Setting this will override any previous value of +"accept-language". +

+

Default value: FALSE

+

Since 2.30

+
+
+
+

The "add-feature" property

+
  "add-feature"              SoupSessionFeature*   : Read / Write
+

+Add a feature object to the session. (Shortcut for calling +soup_session_add_feature().) +

+

Since 2.24

+
+
+
+

The "add-feature-by-type" property

+
  "add-feature-by-type"      GType*                : Read / Write
+

+Add a feature object of the given type to the session. +(Shortcut for calling soup_session_add_feature_by_type().) +

+

Allowed values: SoupSessionFeature

+

Since 2.24

+
+
+
+

The "async-context" property

+
  "async-context"            gpointer              : Read / Write / Construct Only
+

The GMainContext to dispatch async I/O in.

+
+
+
+

The "http-aliases" property

+
  "http-aliases"             GStrv                 : Read / Write
+

+A NULL-terminated array of URI schemes that should be +considered to be aliases for "http". Eg, if this included +"dav", then a URI of +dav://example.com/path would be treated +identically to http://example.com/path. +If the value is NULL, then only "http" is recognized as +meaning "http". +

+

+For backward-compatibility reasons, the default value for +this property is an array containing the single element +"*", a special value which means that +any scheme except "https" is considered to be an alias for +"http". +

+

+See also "https-aliases". +

+

Since 2.38

+
+
+
+

The "https-aliases" property

+
  "https-aliases"            GStrv                 : Read / Write
+

+A comma-delimited list of URI schemes that should be +considered to be aliases for "https". See +"http-aliases" for more information. +

+

+The default value is NULL, meaning that no URI schemes +are considered aliases for "https". +

+

Since 2.38

+
+
+
+

The "idle-timeout" property

+
  "idle-timeout"             guint                 : Read / Write
+

+Connection lifetime when idle +

+

Default value: 0

+

Since 2.4.1

+
+
+
+

The "max-conns" property

+
  "max-conns"                gint                  : Read / Write
+

The maximum number of connections that the session can open at once.

+

Allowed values: >= 1

+

Default value: 10

+
+
+
+

The "max-conns-per-host" property

+
  "max-conns-per-host"       gint                  : Read / Write
+

The maximum number of connections that the session can open at once to a given host.

+

Allowed values: >= 1

+

Default value: 2

+
+
+
+

The "proxy-uri" property

+
  "proxy-uri"                SoupURI*              : Read / Write
+

The HTTP Proxy to use for this session.

+
+
+
+

The "remove-feature-by-type" property

+
  "remove-feature-by-type"   GType*                : Read / Write
+

+Remove feature objects from the session. (Shortcut for +calling soup_session_remove_feature_by_type().) +

+

Allowed values: SoupSessionFeature

+

Since 2.24

+
+
+
+

The "ssl-ca-file" property

+
  "ssl-ca-file"              gchar*                : Read / Write
+

File containing SSL CA certificates.

+

Default value: NULL

+
+
+
+

The "ssl-strict" property

+
  "ssl-strict"               gboolean              : Read / Write
+

+Normally, if "ssl-ca-file" (or +"tlsdb" or "ssl-use-system-ca-file") +is set, then libsoup will reject any certificate that is +invalid (ie, expired) or that is not signed by one of the +given CA certificates, and the SoupMessage will fail with +the status SOUP_STATUS_SSL_FAILED. +

+

+If you set "ssl-strict" to FALSE, then all +certificates will be accepted, and you will need to call +soup_message_get_https_status() to distinguish valid from +invalid certificates. (This can be used, eg, if you want to +accept invalid certificates after giving some sort of +warning.) +

+

+If the session has no CA file or TLS database, then all +certificates are always accepted, and this property has no +effect. +

+

Default value: TRUE

+

Since 2.30

+
+
+
+

The "ssl-use-system-ca-file" property

+
  "ssl-use-system-ca-file"   gboolean              : Read / Write
+

Use the system certificate database.

+

Default value: TRUE

+
+
+
+

The "timeout" property

+
  "timeout"                  guint                 : Read / Write
+

Value in seconds to timeout a blocking I/O.

+

Default value: 0

+
+
+
+

The "tls-database" property

+
  "tls-database"             GTlsDatabase*         : Read / Write
+

TLS database to use.

+
+
+
+

The "use-ntlm" property

+
  "use-ntlm"                 gboolean              : Read / Write
+
+

Warning

+

SoupSession:use-ntlm is deprecated and should not be used in newly-written code. use soup_session_add_feature_by_type() with +SOUP_TYPE_AUTH_NTLM.

+
+

+Whether or not to use NTLM authentication. +

+

Default value: FALSE

+
+
+
+

The "use-thread-context" property

+
  "use-thread-context"       gboolean              : Read / Write
+

+If set, asynchronous operations in this session will run in +whatever the thread-default GMainContext is at the time +they are started, rather than always occurring in a context +fixed at the session's construction time. "Bookkeeping" +tasks (like expiring idle connections) will happen in the +context that was thread-default at the time the session was +created. +

+

Default value: FALSE

+

Since 2.38

+
+
+
+

The "user-agent" property

+
  "user-agent"               gchar*                : Read / Write
+

+If non-NULL, the value to use for the "User-Agent" header +on SoupMessages sent from this session. +

+

+RFC 2616 says: "The User-Agent request-header field +contains information about the user agent originating the +request. This is for statistical purposes, the tracing of +protocol violations, and automated recognition of user +agents for the sake of tailoring responses to avoid +particular user agent limitations. User agents SHOULD +include this field with requests." +

+

+The User-Agent header contains a list of one or more +product tokens, separated by whitespace, with the most +significant product token coming first. The tokens must be +brief, ASCII, and mostly alphanumeric (although "-", "_", +and "." are also allowed), and may optionally include a "/" +followed by a version string. You may also put comments, +enclosed in parentheses, between or after the tokens. +

+

+If you set a "user_agent" property that has trailing +whitespace, SoupSession will append its own product token +(eg, "libsoup/2.3.2") to the end of the +header for you. +

+

Default value: NULL

+
+
+
+

Signal Details

+
+

The "authenticate" signal

+
void                user_function                      (SoupSession *session,
+                                                        SoupMessage *msg,
+                                                        SoupAuth    *auth,
+                                                        gboolean     retrying,
+                                                        gpointer     user_data)      : Run First
+

+Emitted when the session requires authentication. If +credentials are available call soup_auth_authenticate() on +auth. If these credentials fail, the signal will be +emitted again, with retrying set to TRUE, which will +continue until you return without calling +soup_auth_authenticate() on auth. +

+

+Note that this may be emitted before msg's body has been +fully read. +

+

+If you call soup_session_pause_message() on msg before +returning, then you can authenticate auth asynchronously +(as long as you g_object_ref() it to make sure it doesn't +get destroyed), and then unpause msg when you are ready +for it to continue. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

session :

the session

msg :

the SoupMessage being sent

auth :

the SoupAuth to authenticate

retrying :

+TRUE if this is the second (or later) attempt

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "connection-created" signal

+
void                user_function                      (SoupSession *session,
+                                                        GObject     *connection,
+                                                        gpointer     user_data)       : Run First
+

+Emitted when a new connection is created. This is an +internal signal intended only to be used for debugging +purposes, and may go away in the future. +

+
++ + + + + + + + + + + + + + +

session :

the SoupSession +

connection :

the connection

user_data :

user data set when the signal handler was connected.
+

Since 2.30

+
+
+
+

The "request-queued" signal

+
void                user_function                      (SoupSession *session,
+                                                        SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted when a request is queued on session. (Note that +"queued" doesn't just mean soup_session_queue_message(); +soup_session_send_message() implicitly queues the message +as well.) +

+

+When sending a request, first "request_queued" +is emitted, indicating that the session has become aware of +the request. +

+

+Once a connection is available to send the request on, the +session emits "request_started". Then, various +SoupMessage signals are emitted as the message is +processed. If the message is requeued, it will emit +"restarted", which will then be followed by +another "request_started" and another set of +SoupMessage signals when the message is re-sent. +

+

+Eventually, the message will emit "finished". +Normally, this signals the completion of message +processing. However, it is possible that the application +will requeue the message from the "finished" handler (or +equivalently, from the soup_session_queue_message() +callback). In that case, the process will loop back to +"request_started". +

+

+Eventually, a message will reach "finished" and not be +requeued. At that point, the session will emit +"request_unqueued" to indicate that it is done +with the message. +

+

+To sum up: "request_queued" and +"request_unqueued" are guaranteed to be emitted +exactly once, but "request_started" and +"finished" (and all of the other SoupMessage +signals) may be invoked multiple times for a given message. +

+
++ + + + + + + + + + + + + + +

session :

the session

msg :

the request that was queued

user_data :

user data set when the signal handler was connected.
+

Since 2.4.1

+
+
+
+

The "request-started" signal

+
void                user_function                      (SoupSession *session,
+                                                        SoupMessage *msg,
+                                                        SoupSocket  *socket,
+                                                        gpointer     user_data)      : Run First
+

+Emitted just before a request is sent. See +"request_queued" for a detailed description of +the message lifecycle within a session. +

+
++ + + + + + + + + + + + + + + + + + +

session :

the session

msg :

the request being sent

socket :

the socket the request is being sent on

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "request-unqueued" signal

+
void                user_function                      (SoupSession *session,
+                                                        SoupMessage *msg,
+                                                        gpointer     user_data)      : Run First
+

+Emitted when a request is removed from session's queue, +indicating that session is done with it. See +"request_queued" for a detailed description of the +message lifecycle within a session. +

+
++ + + + + + + + + + + + + + +

session :

the session

msg :

the request that was unqueued

user_data :

user data set when the signal handler was connected.
+

Since 2.4.1

+
+
+
+

The "tunneling" signal

+
void                user_function                      (SoupSession *session,
+                                                        GObject     *connection,
+                                                        gpointer     user_data)       : Run First
+

+Emitted when an SSL tunnel is being created on a proxy +connection. This is an internal signal intended only to be +used for debugging purposes, and may go away in the future. +

+
++ + + + + + + + + + + + + + +

session :

the SoupSession +

connection :

the connection

user_data :

user data set when the signal handler was connected.
+

Since 2.30

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupSessionAsync.html b/docs/reference/html/SoupSessionAsync.html new file mode 100644 index 0000000..35e7c9d --- /dev/null +++ b/docs/reference/html/SoupSessionAsync.html @@ -0,0 +1,122 @@ + + + + +SoupSessionAsync + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupSessionAsync

+

SoupSessionAsync — Soup session for asynchronous (main-loop-based) I/O.

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupSessionAsync;
+SoupSession *       soup_session_async_new              (void);
+SoupSession *       soup_session_async_new_with_options (const char *optname1,
+                                                         ...);
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupSession
+         +----SoupSessionAsync
+
+
+
+

Description

+

+SoupSessionAsync is an implementation of SoupSession that uses +non-blocking I/O via the glib main loop. It is intended for use in +single-threaded programs. +

+
+
+

Details

+
+

SoupSessionAsync

+
typedef struct _SoupSessionAsync SoupSessionAsync;
+

+

+
+
+
+

soup_session_async_new ()

+
SoupSession *       soup_session_async_new              (void);
+

+Creates an asynchronous SoupSession with the default options. +

+
++ + + + +

Returns :

the new session.
+
+
+
+

soup_session_async_new_with_options ()

+
SoupSession *       soup_session_async_new_with_options (const char *optname1,
+                                                         ...);
+

+Creates an asynchronous SoupSession with the specified options. +

+
++ + + + + + + + + + + + + + +

optname1 :

name of first property to set

... :

value of optname1, followed by additional property/value pairs

Returns :

the new session.
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupSessionFeature.html b/docs/reference/html/SoupSessionFeature.html new file mode 100644 index 0000000..fdf8192 --- /dev/null +++ b/docs/reference/html/SoupSessionFeature.html @@ -0,0 +1,179 @@ + + + + +SoupSessionFeature + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupSessionFeature

+

SoupSessionFeature — Interface for miscellaneous session features

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupSessionFeature;
+                    SoupSessionFeatureInterface;
+
+
+
+

Object Hierarchy

+
+  GInterface
+   +----SoupSessionFeature
+
+
+
+

Prerequisites

+

+SoupSessionFeature requires + GObject.

+
+ +
+

Description

+

+SoupSessionFeature is the interface used by classes that extend +the functionality of a SoupSession. Some features like HTTP +authentication handling are implemented internally via +SoupSessionFeatures. Other features can be added to the session +by the application. (Eg, SoupLogger, SoupCookieJar.) +

+

+See soup_session_add_feature(), etc, to add a feature to a session. +

+
+
+

Details

+
+

SoupSessionFeature

+
typedef struct _SoupSessionFeature SoupSessionFeature;
+

+An object that implements some sort of optional feature for +SoupSession. +

+

Since 2.24

+
+
+
+

SoupSessionFeatureInterface

+
typedef struct {
+	GTypeInterface parent;
+
+	/* methods */
+	void     (*attach)           (SoupSessionFeature *feature,
+				      SoupSession        *session);
+	void     (*detach)           (SoupSessionFeature *feature,
+				      SoupSession        *session);
+
+	void     (*request_queued)   (SoupSessionFeature *feature,
+				      SoupSession        *session,
+				      SoupMessage        *msg);
+	void     (*request_started)  (SoupSessionFeature *feature,
+				      SoupSession        *session,
+				      SoupMessage        *msg,
+				      SoupSocket         *socket);
+	void     (*request_unqueued) (SoupSessionFeature *feature,
+				      SoupSession        *session,
+				      SoupMessage        *msg);
+
+	gboolean (*add_feature)      (SoupSessionFeature *feature,
+				      GType               type);
+	gboolean (*remove_feature)   (SoupSessionFeature *feature,
+				      GType               type);
+	gboolean (*has_feature)      (SoupSessionFeature *feature,
+				      GType               type);
+} SoupSessionFeatureInterface;
+
+

+The interface implemented by SoupSessionFeatures. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

GTypeInterface parent;

The parent interface.

attach ()

Perform setup when a feature is added to a session

detach ()

Perform cleanup when a feature is removed from a session

request_queued ()

Proxies the session's "request_queued" signal

request_started ()

Proxies the session's "request_started" signal

request_unqueued ()

Proxies the session's "request_unqueued" signal

add_feature ()

adds a sub-feature to the main feature

remove_feature ()

removes a sub-feature from the main feature

has_feature ()

tests if the feature includes a sub-feature
+

Since 2.24

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupSessionSync.html b/docs/reference/html/SoupSessionSync.html new file mode 100644 index 0000000..229f1b8 --- /dev/null +++ b/docs/reference/html/SoupSessionSync.html @@ -0,0 +1,140 @@ + + + + +SoupSessionSync + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupSessionSync

+

SoupSessionSync — Soup session for blocking I/O in multithreaded +programs.

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupSessionSync;
+SoupSession *       soup_session_sync_new               (void);
+SoupSession *       soup_session_sync_new_with_options  (const char *optname1,
+                                                         ...);
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupSession
+         +----SoupSessionSync
+
+
+
+

Description

+

+SoupSessionSync is an implementation of SoupSession that uses +synchronous I/O, intended for use in multi-threaded programs. +

+

+You can use SoupSessionSync from multiple threads concurrently. +Eg, you can send a SoupMessage in one thread, and then while +waiting for the response, send another SoupMessage from another +thread. You can also send a message from one thread and then call +soup_session_cancel_message() on it from any other thread (although +you need to be careful to avoid race conditions, where the message +finishes and is then unreffed by the sending thread just before you +cancel it). +

+

+However, the majority of other types and methods in libsoup are not +MT-safe. In particular, you cannot modify or +examine a SoupMessage while it is being transmitted by +SoupSessionSync in another thread. Once a message has been handed +off to SoupSessionSync, it can only be manipulated from its signal +handler callbacks, until I/O is complete. +

+
+
+

Details

+
+

SoupSessionSync

+
typedef struct _SoupSessionSync SoupSessionSync;
+

+

+
+
+
+

soup_session_sync_new ()

+
SoupSession *       soup_session_sync_new               (void);
+

+Creates a synchronous SoupSession with the default options. +

+
++ + + + +

Returns :

the new session.
+
+
+
+

soup_session_sync_new_with_options ()

+
SoupSession *       soup_session_sync_new_with_options  (const char *optname1,
+                                                         ...);
+

+Creates a synchronous SoupSession with the specified options. +

+
++ + + + + + + + + + + + + + +

optname1 :

name of first property to set

... :

value of optname1, followed by additional property/value pairs

Returns :

the new session.
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupSocket.html b/docs/reference/html/SoupSocket.html new file mode 100644 index 0000000..06ff34e --- /dev/null +++ b/docs/reference/html/SoupSocket.html @@ -0,0 +1,1119 @@ + + + + +SoupSocket + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupSocket

+

SoupSocket — A network socket

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupSocket;
+SoupSocket *        soup_socket_new                     (const char *optname1,
+                                                         ...);
+
+void                (*SoupSocketCallback)               (SoupSocket *sock,
+                                                         guint status,
+                                                         gpointer user_data);
+void                soup_socket_connect_async           (SoupSocket *sock,
+                                                         GCancellable *cancellable,
+                                                         SoupSocketCallback callback,
+                                                         gpointer user_data);
+guint               soup_socket_connect_sync            (SoupSocket *sock,
+                                                         GCancellable *cancellable);
+
+gboolean            soup_socket_listen                  (SoupSocket *sock);
+
+gboolean            soup_socket_start_ssl               (SoupSocket *sock,
+                                                         GCancellable *cancellable);
+gboolean            soup_socket_start_proxy_ssl         (SoupSocket *sock,
+                                                         const char *ssl_host,
+                                                         GCancellable *cancellable);
+gboolean            soup_socket_is_ssl                  (SoupSocket *sock);
+
+void                soup_socket_disconnect              (SoupSocket *sock);
+gboolean            soup_socket_is_connected            (SoupSocket *sock);
+
+SoupAddress *       soup_socket_get_local_address       (SoupSocket *sock);
+SoupAddress *       soup_socket_get_remote_address      (SoupSocket *sock);
+
+enum                SoupSocketIOStatus;
+SoupSocketIOStatus  soup_socket_read                    (SoupSocket *sock,
+                                                         gpointer buffer,
+                                                         gsize len,
+                                                         gsize *nread,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+SoupSocketIOStatus  soup_socket_read_until              (SoupSocket *sock,
+                                                         gpointer buffer,
+                                                         gsize len,
+                                                         gconstpointer boundary,
+                                                         gsize boundary_len,
+                                                         gsize *nread,
+                                                         gboolean *got_boundary,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+SoupSocketIOStatus  soup_socket_write                   (SoupSocket *sock,
+                                                         gconstpointer buffer,
+                                                         gsize len,
+                                                         gsize *nwrote,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+
+
+#define             SOUP_SOCKET_LOCAL_ADDRESS
+#define             SOUP_SOCKET_REMOTE_ADDRESS
+#define             SOUP_SOCKET_FLAG_NONBLOCKING
+#define             SOUP_SOCKET_IS_SERVER
+#define             SOUP_SOCKET_SSL_CREDENTIALS
+#define             SOUP_SOCKET_ASYNC_CONTEXT
+#define             SOUP_SOCKET_TIMEOUT
+#define             SOUP_SOCKET_SSL_FALLBACK
+#define             SOUP_SOCKET_SSL_STRICT
+#define             SOUP_SOCKET_TLS_CERTIFICATE
+#define             SOUP_SOCKET_TLS_ERRORS
+#define             SOUP_SOCKET_TRUSTED_CERTIFICATE
+
+
+
+

Object Hierarchy

+
+  GObject
+   +----SoupSocket
+
+
+
+

Properties

+
+  "async-context"            gpointer              : Read / Write / Construct Only
+  "clean-dispose"            gboolean              : Write / Construct Only
+  "is-server"                gboolean              : Read
+  "local-address"            SoupAddress*          : Read / Write / Construct Only
+  "non-blocking"             gboolean              : Read / Write
+  "remote-address"           SoupAddress*          : Read / Write / Construct Only
+  "ssl-creds"                gpointer              : Read / Write
+  "ssl-fallback"             gboolean              : Read / Write / Construct Only
+  "ssl-strict"               gboolean              : Read / Write / Construct Only
+  "timeout"                  guint                 : Read / Write
+  "tls-certificate"          GTlsCertificate*      : Read
+  "tls-errors"               GTlsCertificateFlags  : Read
+  "trusted-certificate"      gboolean              : Read
+  "use-thread-context"       gboolean              : Read / Write / Construct Only
+
+
+ +
+

Description

+

+SoupSocket is libsoup's TCP socket type. While it is primarily +intended for internal use, SoupSockets are exposed in the +API in various places, and some of their methods (eg, +soup_socket_get_remote_address()) may be useful to applications. +

+
+
+

Details

+
+

SoupSocket

+
typedef struct _SoupSocket SoupSocket;
+

+

+
+
+
+

soup_socket_new ()

+
SoupSocket *        soup_socket_new                     (const char *optname1,
+                                                         ...);
+

+Creates a new (disconnected) socket +

+
++ + + + + + + + + + + + + + +

optname1 :

name of first property to set (or NULL)

... :

value of optname1, followed by additional property/value pairs

Returns :

the new socket
+
+
+
+

SoupSocketCallback ()

+
void                (*SoupSocketCallback)               (SoupSocket *sock,
+                                                         guint status,
+                                                         gpointer user_data);
+

+The callback function passed to soup_socket_connect_async(). +

+
++ + + + + + + + + + + + + + +

sock :

the SoupSocket +

status :

an HTTP status code indicating success or failure

user_data :

the data passed to soup_socket_connect_async() +
+
+
+
+

soup_socket_connect_async ()

+
void                soup_socket_connect_async           (SoupSocket *sock,
+                                                         GCancellable *cancellable,
+                                                         SoupSocketCallback callback,
+                                                         gpointer user_data);
+

+Begins asynchronously connecting to sock's remote address. The +socket will call callback when it succeeds or fails (but not +before returning from this function). +

+

+If cancellable is non-NULL, it can be used to cancel the +connection. callback will still be invoked in this case, with a +status of SOUP_STATUS_CANCELLED. +

+
++ + + + + + + + + + + + + + + + + + +

sock :

a client SoupSocket (which must not already be connected)

cancellable :

a GCancellable, or NULL +

callback :

callback to call after connecting. [scope async] +

user_data :

data to pass to callback +
+
+
+
+

soup_socket_connect_sync ()

+
guint               soup_socket_connect_sync            (SoupSocket *sock,
+                                                         GCancellable *cancellable);
+

+Attempt to synchronously connect sock to its remote address. +

+

+If cancellable is non-NULL, it can be used to cancel the +connection, in which case soup_socket_connect_sync() will return +SOUP_STATUS_CANCELLED. +

+
++ + + + + + + + + + + + + + +

sock :

a client SoupSocket (which must not already be connected)

cancellable :

a GCancellable, or NULL +

Returns :

a success or failure code.
+
+
+
+

soup_socket_listen ()

+
gboolean            soup_socket_listen                  (SoupSocket *sock);
+

+Makes sock start listening on its local address. When connections +come in, sock will emit "new_connection". +

+
++ + + + + + + + + + +

sock :

a server SoupSocket (which must not already be connected or +listening)

Returns :

whether or not sock is now listening.
+
+
+
+

soup_socket_start_ssl ()

+
gboolean            soup_socket_start_ssl               (SoupSocket *sock,
+                                                         GCancellable *cancellable);
+

+Starts using SSL on socket. +

+
++ + + + + + + + + + + + + + +

sock :

the socket

cancellable :

a GCancellable +

Returns :

success or failure
+
+
+
+

soup_socket_start_proxy_ssl ()

+
gboolean            soup_socket_start_proxy_ssl         (SoupSocket *sock,
+                                                         const char *ssl_host,
+                                                         GCancellable *cancellable);
+

+Starts using SSL on socket, expecting to find a host named +ssl_host. +

+
++ + + + + + + + + + + + + + + + + + +

sock :

the socket

ssl_host :

hostname of the SSL server

cancellable :

a GCancellable +

Returns :

success or failure
+
+
+
+

soup_socket_is_ssl ()

+
gboolean            soup_socket_is_ssl                  (SoupSocket *sock);
+

+Tests if sock is doing (or has attempted to do) SSL. +

+
++ + + + + + + + + + +

sock :

a SoupSocket +

Returns :

+TRUE if sock has SSL credentials set
+
+
+
+

soup_socket_disconnect ()

+
void                soup_socket_disconnect              (SoupSocket *sock);
+

+Disconnects sock. Any further read or write attempts on it will +fail. +

+
++ + + + +

sock :

a SoupSocket +
+
+
+
+

soup_socket_is_connected ()

+
gboolean            soup_socket_is_connected            (SoupSocket *sock);
+

+Tests if sock is connected to another host +

+
++ + + + + + + + + + +

sock :

a SoupSocket +

Returns :

+TRUE or FALSE.
+
+
+
+

soup_socket_get_local_address ()

+
SoupAddress *       soup_socket_get_local_address       (SoupSocket *sock);
+

+Returns the SoupAddress corresponding to the local end of sock. +

+
++ + + + + + + + + + +

sock :

a SoupSocket +

Returns :

the SoupAddress. [transfer none] +
+
+
+
+

soup_socket_get_remote_address ()

+
SoupAddress *       soup_socket_get_remote_address      (SoupSocket *sock);
+

+Returns the SoupAddress corresponding to the remote end of sock. +

+
++ + + + + + + + + + +

sock :

a SoupSocket +

Returns :

the SoupAddress. [transfer none] +
+
+
+
+

enum SoupSocketIOStatus

+
typedef enum {
+	SOUP_SOCKET_OK,
+	SOUP_SOCKET_WOULD_BLOCK,
+	SOUP_SOCKET_EOF,
+	SOUP_SOCKET_ERROR
+} SoupSocketIOStatus;
+
+

+Return value from the SoupSocket IO methods. +

+
++ + + + + + + + + + + + + + + + + + +

SOUP_SOCKET_OK

Success +

SOUP_SOCKET_WOULD_BLOCK

Cannot read/write any more at this time +

SOUP_SOCKET_EOF

End of file +

SOUP_SOCKET_ERROR

Other error +
+
+
+
+

soup_socket_read ()

+
SoupSocketIOStatus  soup_socket_read                    (SoupSocket *sock,
+                                                         gpointer buffer,
+                                                         gsize len,
+                                                         gsize *nread,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+

+Attempts to read up to len bytes from sock into buffer. If some +data is successfully read, soup_socket_read() will return +SOUP_SOCKET_OK, and *nread will contain the number of bytes +actually read (which may be less than len). +

+

+If sock is non-blocking, and no data is available, the return +value will be SOUP_SOCKET_WOULD_BLOCK. In this case, the caller +can connect to the "readable" signal to know when there +is more data to read. (NB: You MUST read all available data off the +socket first. "readable" is only emitted after +soup_socket_read() returns SOUP_SOCKET_WOULD_BLOCK, and it is only +emitted once. See the documentation for "non-blocking".) +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

sock :

the socket

buffer :

buffer to read into

len :

size of buffer in bytes

nread :

on return, the number of bytes read into buffer. [out] +

cancellable :

a GCancellable, or NULL +

error :

error pointer

Returns :

a SoupSocketIOStatus, as described above (or +SOUP_SOCKET_EOF if the socket is no longer connected, or +SOUP_SOCKET_ERROR on any other error, in which case error will +also be set).
+
+
+
+

soup_socket_read_until ()

+
SoupSocketIOStatus  soup_socket_read_until              (SoupSocket *sock,
+                                                         gpointer buffer,
+                                                         gsize len,
+                                                         gconstpointer boundary,
+                                                         gsize boundary_len,
+                                                         gsize *nread,
+                                                         gboolean *got_boundary,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+

+Like soup_socket_read(), but reads no further than the first +occurrence of boundary. (If the boundary is found, it will be +included in the returned data, and *got_boundary will be set to +TRUE.) Any data after the boundary will returned in future reads. +

+

+soup_socket_read_until() will almost always return fewer than len +bytes: if the boundary is found, then it will only return the bytes +up until the end of the boundary, and if the boundary is not found, +then it will leave the last (boundary_len - 1) +bytes in its internal buffer, in case they form the start of the +boundary string. Thus, len normally needs to be at least 1 byte +longer than boundary_len if you want to make any progress at all. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

sock :

the socket

buffer :

buffer to read into

len :

size of buffer in bytes

boundary :

boundary to read until

boundary_len :

length of boundary in bytes

nread :

on return, the number of bytes read into buffer. [out] +

got_boundary :

on return, whether or not the data in buffer +ends with the boundary string

cancellable :

a GCancellable, or NULL +

error :

error pointer

Returns :

as for soup_socket_read() +
+
+
+
+

soup_socket_write ()

+
SoupSocketIOStatus  soup_socket_write                   (SoupSocket *sock,
+                                                         gconstpointer buffer,
+                                                         gsize len,
+                                                         gsize *nwrote,
+                                                         GCancellable *cancellable,
+                                                         GError **error);
+

+Attempts to write len bytes from buffer to sock. If some data is +successfully written, the return status will be SOUP_SOCKET_OK, +and *nwrote will contain the number of bytes actually written +(which may be less than len). +

+

+If sock is non-blocking, and no data could be written right away, +the return value will be SOUP_SOCKET_WOULD_BLOCK. In this case, +the caller can connect to the "writable" signal to know +when more data can be written. (NB: "writable" is only +emitted after soup_socket_write() returns SOUP_SOCKET_WOULD_BLOCK, +and it is only emitted once. See the documentation for +"non-blocking".) +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

sock :

the socket

buffer :

data to write

len :

size of buffer, in bytes

nwrote :

on return, number of bytes written. [out] +

cancellable :

a GCancellable, or NULL +

error :

error pointer

Returns :

a SoupSocketIOStatus, as described above (or +SOUP_SOCKET_EOF or SOUP_SOCKET_ERROR. error will be set if the +return value is SOUP_SOCKET_ERROR.)
+
+
+
+

SOUP_SOCKET_LOCAL_ADDRESS

+
#define SOUP_SOCKET_LOCAL_ADDRESS       "local-address"
+
+

+Alias for the "local-address" property. (Address +of local end of socket.) +

+
+
+
+

SOUP_SOCKET_REMOTE_ADDRESS

+
#define SOUP_SOCKET_REMOTE_ADDRESS      "remote-address"
+
+

+Alias for the "remote-address" property. (Address +of remote end of socket.) +

+
+
+
+

SOUP_SOCKET_FLAG_NONBLOCKING

+
#define SOUP_SOCKET_FLAG_NONBLOCKING    "non-blocking"
+
+

+Alias for the "non-blocking" property. (Whether +or not the socket uses non-blocking I/O.) +

+
+
+
+

SOUP_SOCKET_IS_SERVER

+
#define SOUP_SOCKET_IS_SERVER           "is-server"
+
+

+Alias for the "is-server" property. (Whether or +not the socket is a server socket.) +

+
+
+
+

SOUP_SOCKET_SSL_CREDENTIALS

+
#define SOUP_SOCKET_SSL_CREDENTIALS     "ssl-creds"
+
+

+Alias for the "ssl-creds" property. +(SSL credential information.) +

+
+
+
+

SOUP_SOCKET_ASYNC_CONTEXT

+
#define SOUP_SOCKET_ASYNC_CONTEXT       "async-context"
+
+

+Alias for the "async-context" property. (The +socket's GMainContext.) +

+
+
+
+

SOUP_SOCKET_TIMEOUT

+
#define SOUP_SOCKET_TIMEOUT             "timeout"
+
+

+Alias for the "timeout" property. (The timeout +in seconds for blocking socket I/O operations.) +

+
+
+
+

SOUP_SOCKET_SSL_FALLBACK

+
#define SOUP_SOCKET_SSL_FALLBACK        "ssl-fallback"
+
+

+Alias for the "ssl-fallback" property. +

+
+
+
+

SOUP_SOCKET_SSL_STRICT

+
#define SOUP_SOCKET_SSL_STRICT          "ssl-strict"
+
+

+Alias for the "ssl-strict" property. +

+
+
+
+

SOUP_SOCKET_TLS_CERTIFICATE

+
#define SOUP_SOCKET_TLS_CERTIFICATE     "tls-certificate"
+
+

+Alias for the "tls-certificate" +property. Note that this property's value is only useful +if the socket is for a TLS connection, and only reliable +after some data has been transferred to or from it. +

+

Since 2.34

+
+
+
+

SOUP_SOCKET_TLS_ERRORS

+
#define SOUP_SOCKET_TLS_ERRORS          "tls-errors"
+
+

+Alias for the "tls-errors" +property. Note that this property's value is only useful +if the socket is for a TLS connection, and only reliable +after some data has been transferred to or from it. +

+

Since 2.34

+
+
+
+

SOUP_SOCKET_TRUSTED_CERTIFICATE

+
#define SOUP_SOCKET_TRUSTED_CERTIFICATE "trusted-certificate"
+
+

+Alias for the "trusted-certificate" +property. +

+
+
+
+

Property Details

+
+

The "async-context" property

+
  "async-context"            gpointer              : Read / Write / Construct Only
+

The GMainContext to dispatch this socket's async I/O in.

+
+
+
+

The "clean-dispose" property

+
  "clean-dispose"            gboolean              : Write / Construct Only
+

Warn on unclean dispose.

+

Default value: FALSE

+
+
+
+

The "is-server" property

+
  "is-server"                gboolean              : Read
+

Whether or not the socket is a server socket.

+

Default value: FALSE

+
+
+
+

The "local-address" property

+
  "local-address"            SoupAddress*          : Read / Write / Construct Only
+

Address of local end of socket.

+
+
+
+

The "non-blocking" property

+
  "non-blocking"             gboolean              : Read / Write
+

+Whether or not the socket uses non-blocking I/O. +

+

+SoupSocket's I/O methods are designed around the idea of +using a single codepath for both synchronous and +asynchronous I/O. If you want to read off a SoupSocket, +the "correct" way to do it is to call soup_socket_read() or +soup_socket_read_until() repeatedly until you have read +everything you want. If it returns SOUP_SOCKET_WOULD_BLOCK +at any point, stop reading and wait for it to emit the +"readable" signal. Then go back to the +reading-as-much-as-you-can loop. Likewise, for writing to a +SoupSocket, you should call soup_socket_write() either +until you have written everything, or it returns +SOUP_SOCKET_WOULD_BLOCK (in which case you wait for +"writable" and then go back into the loop). +

+

+Code written this way will work correctly with both +blocking and non-blocking sockets; blocking sockets will +simply never return SOUP_SOCKET_WOULD_BLOCK, and so the +code that handles that case just won't get used for them. +

+

Default value: TRUE

+
+
+
+

The "remote-address" property

+
  "remote-address"           SoupAddress*          : Read / Write / Construct Only
+

Address of remote end of socket.

+
+
+
+

The "ssl-creds" property

+
  "ssl-creds"                gpointer              : Read / Write
+

SSL credential information, passed from the session to the SSL implementation.

+
+
+
+

The "ssl-fallback" property

+
  "ssl-fallback"             gboolean              : Read / Write / Construct Only
+

Use SSLv3 instead of TLS (client-side only).

+

Default value: FALSE

+
+
+
+

The "ssl-strict" property

+
  "ssl-strict"               gboolean              : Read / Write / Construct Only
+

Whether certificate errors should be considered a connection error.

+

Default value: TRUE

+
+
+
+

The "timeout" property

+
  "timeout"                  guint                 : Read / Write
+

Value in seconds to timeout a blocking I/O.

+

Default value: 0

+
+
+
+

The "tls-certificate" property

+
  "tls-certificate"          GTlsCertificate*      : Read
+

The peer's TLS certificate.

+
+
+
+

The "tls-errors" property

+
  "tls-errors"               GTlsCertificateFlags  : Read
+

Errors with the peer's TLS certificate.

+
+
+
+

The "trusted-certificate" property

+
  "trusted-certificate"      gboolean              : Read
+

Whether the server certificate is trusted, if this is an SSL socket.

+

Default value: FALSE

+
+
+
+

The "use-thread-context" property

+
  "use-thread-context"       gboolean              : Read / Write / Construct Only
+

+Use g_main_context_get_thread_default(). +

+

Default value: FALSE

+

Since 2.36.1

+
+
+
+

Signal Details

+
+

The "disconnected" signal

+
void                user_function                      (SoupSocket *sock,
+                                                        gpointer    user_data)      : Run Last
+

+Emitted when the socket is disconnected, for whatever +reason. +

+
++ + + + + + + + + + +

sock :

the socket

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "event" signal

+
void                user_function                      (SoupSocket        *sock,
+                                                        GSocketClientEvent event,
+                                                        GIOStream         *connection,
+                                                        gpointer           user_data)       : Run Last
+

+Emitted when a network-related event occurs. See +"event" for more details. +

+
++ + + + + + + + + + + + + + + + + + +

sock :

the socket

event :

the event that occurred

connection :

the current connection state

user_data :

user data set when the signal handler was connected.
+

Since 2.38

+
+
+
+

The "new-connection" signal

+
void                user_function                      (SoupSocket *sock,
+                                                        SoupSocket *new,
+                                                        gpointer    user_data)      : Run First
+

+Emitted when a listening socket (set up with +soup_socket_listen()) receives a new connection. +

+

+You must ref the new if you want to keep it; otherwise it +will be destroyed after the signal is emitted. +

+
++ + + + + + + + + + + + + + +

sock :

the socket

new :

the new socket

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "readable" signal

+
void                user_function                      (SoupSocket *sock,
+                                                        gpointer    user_data)      : Run Last
+

+Emitted when an async socket is readable. See +soup_socket_read(), soup_socket_read_until() and +"non-blocking". +

+
++ + + + + + + + + + +

sock :

the socket

user_data :

user data set when the signal handler was connected.
+
+
+
+

The "writable" signal

+
void                user_function                      (SoupSocket *sock,
+                                                        gpointer    user_data)      : Run Last
+

+Emitted when an async socket is writable. See +soup_socket_write() and "non-blocking". +

+
++ + + + + + + + + + +

sock :

the socket

user_data :

user data set when the signal handler was connected.
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/SoupURI.html b/docs/reference/html/SoupURI.html new file mode 100644 index 0000000..57bb73a --- /dev/null +++ b/docs/reference/html/SoupURI.html @@ -0,0 +1,1119 @@ + + + + +SoupURI + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

SoupURI

+

SoupURI — URIs

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupURI;
+SoupURI *           soup_uri_new_with_base              (SoupURI *base,
+                                                         const char *uri_string);
+SoupURI *           soup_uri_new                        (const char *uri_string);
+char *              soup_uri_to_string                  (SoupURI *uri,
+                                                         gboolean just_path_and_query);
+
+SoupURI *           soup_uri_copy                       (SoupURI *uri);
+SoupURI *           soup_uri_copy_host                  (SoupURI *uri);
+gboolean            soup_uri_equal                      (SoupURI *uri1,
+                                                         SoupURI *uri2);
+gboolean            soup_uri_host_equal                 (gconstpointer v1,
+                                                         gconstpointer v2);
+guint               soup_uri_host_hash                  (gconstpointer key);
+void                soup_uri_free                       (SoupURI *uri);
+
+char *              soup_uri_encode                     (const char *part,
+                                                         const char *escape_extra);
+char *              soup_uri_decode                     (const char *part);
+char *              soup_uri_normalize                  (const char *part,
+                                                         const char *unescape_extra);
+
+#define             SOUP_URI_SCHEME_HTTP
+#define             SOUP_URI_SCHEME_HTTPS
+#define             SOUP_URI_SCHEME_DATA
+#define             SOUP_URI_SCHEME_FILE
+#define             SOUP_URI_SCHEME_FTP
+gboolean            soup_uri_uses_default_port          (SoupURI *uri);
+#define             SOUP_URI_VALID_FOR_HTTP             (uri)
+
+void                soup_uri_set_scheme                 (SoupURI *uri,
+                                                         const char *scheme);
+const char *        soup_uri_get_scheme                 (SoupURI *uri);
+void                soup_uri_set_user                   (SoupURI *uri,
+                                                         const char *user);
+const char *        soup_uri_get_user                   (SoupURI *uri);
+void                soup_uri_set_password               (SoupURI *uri,
+                                                         const char *password);
+const char *        soup_uri_get_password               (SoupURI *uri);
+void                soup_uri_set_host                   (SoupURI *uri,
+                                                         const char *host);
+const char *        soup_uri_get_host                   (SoupURI *uri);
+void                soup_uri_set_port                   (SoupURI *uri,
+                                                         guint port);
+guint               soup_uri_get_port                   (SoupURI *uri);
+void                soup_uri_set_path                   (SoupURI *uri,
+                                                         const char *path);
+const char *        soup_uri_get_path                   (SoupURI *uri);
+void                soup_uri_set_query                  (SoupURI *uri,
+                                                         const char *query);
+void                soup_uri_set_query_from_form        (SoupURI *uri,
+                                                         GHashTable *form);
+void                soup_uri_set_query_from_fields      (SoupURI *uri,
+                                                         const char *first_field,
+                                                         ...);
+const char *        soup_uri_get_query                  (SoupURI *uri);
+void                soup_uri_set_fragment               (SoupURI *uri,
+                                                         const char *fragment);
+const char *        soup_uri_get_fragment               (SoupURI *uri);
+
+
+
+

Object Hierarchy

+
+  GBoxed
+   +----SoupURI
+
+
+
+

Description

+

+A SoupURI represents a (parsed) URI. +

+

+Many applications will not need to use SoupURI directly at all; on +the client side, soup_message_new() takes a stringified URI, and on +the server side, the path and query components are provided for you +in the server callback. +

+
+
+

Details

+
+

SoupURI

+
typedef struct {
+	const char *scheme;
+
+	char       *user;
+	char       *password;
+
+	char       *host;
+	guint       port;
+
+	char       *path;
+	char       *query;
+
+	char       *fragment;
+} SoupURI;
+
+

+A SoupURI represents a (parsed) URI. SoupURI supports RFC 3986 +(URI Generic Syntax), and can parse any valid URI. However, libsoup +only uses "http" and "https" URIs internally; You can use +SOUP_URI_VALID_FOR_HTTP() to test if a SoupURI is a valid HTTP +URI. +

+

+scheme will always be set in any URI. It is an interned string and +is always all lowercase. (If you parse a URI with a non-lowercase +scheme, it will be converted to lowercase.) The macros +SOUP_URI_SCHEME_HTTP and SOUP_URI_SCHEME_HTTPS provide the +interned values for "http" and "https" and can be compared against +URI scheme values. +

+

+user and password are parsed as defined in the older URI specs +(ie, separated by a colon; RFC 3986 only talks about a single +"userinfo" field). Note that password is not included in the +output of soup_uri_to_string(). libsoup does not normally use these +fields; authentication is handled via SoupSession signals. +

+

+host contains the hostname, and port the port specified in the +URI. If the URI doesn't contain a hostname, host will be NULL, +and if it doesn't specify a port, port may be 0. However, for +"http" and "https" URIs, host is guaranteed to be non-NULL +(trying to parse an http URI with no host will return NULL), and +port will always be non-0 (because libsoup knows the default value +to use when it is not specified in the URI). +

+

+path is always non-NULL. For http/https URIs, path will never be +an empty string either; if the input URI has no path, the parsed +SoupURI will have a path of "/". +

+

+query and fragment are optional for all URI types. +soup_form_decode() may be useful for parsing query. +

+

+Note that path, query, and fragment may contain +%-encoded characters. soup_uri_new() calls +soup_uri_normalize() on them, but not soup_uri_decode(). This is +necessary to ensure that soup_uri_to_string() will generate a URI +that has exactly the same meaning as the original. (In theory, +SoupURI should leave user, password, and host partially-encoded +as well, but this would be more annoying than useful.) +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

const char *scheme;

the URI scheme (eg, "http")

char *user;

a username, or NULL +

char *password;

a password, or NULL +

char *host;

the hostname or IP address

guint port;

the port number on host +

char *path;

the path on host +

char *query;

a query for path, or NULL +

char *fragment;

a fragment identifier within path, or NULL +
+
+
+
+

soup_uri_new_with_base ()

+
SoupURI *           soup_uri_new_with_base              (SoupURI *base,
+                                                         const char *uri_string);
+

+Parses uri_string relative to base. +

+
++ + + + + + + + + + + + + + +

base :

a base URI

uri_string :

the URI

Returns :

a parsed SoupURI.
+
+
+
+

soup_uri_new ()

+
SoupURI *           soup_uri_new                        (const char *uri_string);
+

+Parses an absolute URI. +

+

+You can also pass NULL for uri_string if you want to get back an +"empty" SoupURI that you can fill in by hand. (You will need to +call at least soup_uri_set_scheme() and soup_uri_set_path(), since +those fields are required.) +

+
++ + + + + + + + + + +

uri_string :

a URI. [allow-none] +

Returns :

a SoupURI, or NULL if the given string was found to be +invalid.
+
+
+
+

soup_uri_to_string ()

+
char *              soup_uri_to_string                  (SoupURI *uri,
+                                                         gboolean just_path_and_query);
+

+Returns a string representing uri. +

+

+If just_path_and_query is TRUE, this concatenates the path and query +together. That is, it constructs the string that would be needed in +the Request-Line of an HTTP request for uri. +

+
++ + + + + + + + + + + + + + +

uri :

a SoupURI +

just_path_and_query :

if TRUE, output just the path and query portions

Returns :

a string representing uri, which the caller must free.
+
+
+
+

soup_uri_copy ()

+
SoupURI *           soup_uri_copy                       (SoupURI *uri);
+

+Copies uri +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

a copy of uri, which must be freed with soup_uri_free() +
+
+
+
+

soup_uri_copy_host ()

+
SoupURI *           soup_uri_copy_host                  (SoupURI *uri);
+

+Makes a copy of uri, considering only the protocol, host, and port +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

the new SoupURI +
+

Since 2.26.3

+
+
+
+

soup_uri_equal ()

+
gboolean            soup_uri_equal                      (SoupURI *uri1,
+                                                         SoupURI *uri2);
+

+Tests whether or not uri1 and uri2 are equal in all parts +

+
++ + + + + + + + + + + + + + +

uri1 :

a SoupURI +

uri2 :

another SoupURI +

Returns :

+TRUE or FALSE +
+
+
+
+

soup_uri_host_equal ()

+
gboolean            soup_uri_host_equal                 (gconstpointer v1,
+                                                         gconstpointer v2);
+

+Compares v1 and v2, considering only the scheme, host, and port. +

+
++ + + + + + + + + + + + + + +

v1 :

a SoupURI with a non-NULL host member. [type Soup.URI] +

v2 :

a SoupURI with a non-NULL host member. [type Soup.URI] +

Returns :

whether or not the URIs are equal in scheme, host, +and port.
+

Since 2.26.3

+
+
+
+

soup_uri_host_hash ()

+
guint               soup_uri_host_hash                  (gconstpointer key);
+

+Hashes key, considering only the scheme, host, and port. +

+
++ + + + + + + + + + +

key :

a SoupURI with a non-NULL host member. [type Soup.URI] +

Returns :

a hash
+

Since 2.26.3

+
+
+
+

soup_uri_free ()

+
void                soup_uri_free                       (SoupURI *uri);
+

+Frees uri. +

+
++ + + + +

uri :

a SoupURI +
+
+
+
+

soup_uri_encode ()

+
char *              soup_uri_encode                     (const char *part,
+                                                         const char *escape_extra);
+

+This %-encodes the given URI part and returns the escaped +version in allocated memory, which the caller must free when it is +done. +

+
++ + + + + + + + + + + + + + +

part :

a URI part

escape_extra :

additional reserved characters to +escape (or NULL). [allow-none] +

Returns :

the encoded URI part
+
+
+
+

soup_uri_decode ()

+
char *              soup_uri_decode                     (const char *part);
+

+Fully %-decodes part. +

+

+In the past, this would return NULL if part contained invalid +percent-encoding, but now it just ignores the problem (as +soup_uri_new() already did). +

+
++ + + + + + + + + + +

part :

a URI part

Returns :

the decoded URI part.
+
+
+
+

soup_uri_normalize ()

+
char *              soup_uri_normalize                  (const char *part,
+                                                         const char *unescape_extra);
+

+%-decodes any "unreserved" characters (or characters in +unescape_extra) in part. +

+

+"Unreserved" characters are those that are not allowed to be used +for punctuation according to the URI spec. For example, letters are +unreserved, so soup_uri_normalize() will turn +http://example.com/foo/b%61r into +http://example.com/foo/bar, which is guaranteed +to mean the same thing. However, "/" is "reserved", so +http://example.com/foo%2Fbar would not +be changed, because it might mean something different to the +server. +

+

+In the past, this would return NULL if part contained invalid +percent-encoding, but now it just ignores the problem (as +soup_uri_new() already did). +

+
++ + + + + + + + + + + + + + +

part :

a URI part

unescape_extra :

reserved characters to unescape (or NULL)

Returns :

the normalized URI part
+
+
+
+

SOUP_URI_SCHEME_HTTP

+
#define SOUP_URI_SCHEME_HTTP  _SOUP_ATOMIC_INTERN_STRING (_SOUP_URI_SCHEME_HTTP, "http")
+
+

+"http" as an interned string. This can be compared directly against +the value of a SoupURI's scheme +

+
+
+
+

SOUP_URI_SCHEME_HTTPS

+
#define SOUP_URI_SCHEME_HTTPS _SOUP_ATOMIC_INTERN_STRING (_SOUP_URI_SCHEME_HTTPS, "https")
+
+

+"https" as an interned string. This can be compared directly +against the value of a SoupURI's scheme +

+
+
+
+

SOUP_URI_SCHEME_DATA

+
#define SOUP_URI_SCHEME_DATA  _SOUP_ATOMIC_INTERN_STRING (_SOUP_URI_SCHEME_DATA, "data")
+
+

+

+
+
+
+

SOUP_URI_SCHEME_FILE

+
#define SOUP_URI_SCHEME_FILE  _SOUP_ATOMIC_INTERN_STRING (_SOUP_URI_SCHEME_FILE, "file")
+
+

+

+
+
+
+

SOUP_URI_SCHEME_FTP

+
#define SOUP_URI_SCHEME_FTP   _SOUP_ATOMIC_INTERN_STRING (_SOUP_URI_SCHEME_FTP, "ftp")
+
+

+

+
+
+
+

soup_uri_uses_default_port ()

+
gboolean            soup_uri_uses_default_port          (SoupURI *uri);
+

+Tests if uri uses the default port for its scheme. (Eg, 80 for +http.) (This only works for http, https and ftp; libsoup does not know +the default ports of other protocols.) +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+TRUE or FALSE +
+
+
+
+

SOUP_URI_VALID_FOR_HTTP()

+
#define   SOUP_URI_VALID_FOR_HTTP(uri) ((uri) && ((uri)->scheme == SOUP_URI_SCHEME_HTTP || (uri)->scheme == SOUP_URI_SCHEME_HTTPS) && (uri)->host && (uri)->path)
+
+

+Tests if uri is a valid SoupURI for HTTP communication; that is, if +it can be used to construct a SoupMessage. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+TRUE if uri is a valid "http" or "https" URI.
+

Since 2.24

+
+
+
+

soup_uri_set_scheme ()

+
void                soup_uri_set_scheme                 (SoupURI *uri,
+                                                         const char *scheme);
+

+Sets uri's scheme to scheme. This will also set uri's port to +the default port for scheme, if known. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

scheme :

the URI scheme
+
+
+
+

soup_uri_get_scheme ()

+
const char *        soup_uri_get_scheme                 (SoupURI *uri);
+

+Gets uri's scheme. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's scheme.
+

Since 2.32

+
+
+
+

soup_uri_set_user ()

+
void                soup_uri_set_user                   (SoupURI *uri,
+                                                         const char *user);
+

+Sets uri's user to user. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

user :

the username, or NULL. [allow-none] +
+
+
+
+

soup_uri_get_user ()

+
const char *        soup_uri_get_user                   (SoupURI *uri);
+

+Gets uri's user. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's user.
+

Since 2.32

+
+
+
+

soup_uri_set_password ()

+
void                soup_uri_set_password               (SoupURI *uri,
+                                                         const char *password);
+

+Sets uri's password to password. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

password :

the password, or NULL. [allow-none] +
+
+
+
+

soup_uri_get_password ()

+
const char *        soup_uri_get_password               (SoupURI *uri);
+

+Gets uri's password. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's password.
+

Since 2.32

+
+
+
+

soup_uri_set_host ()

+
void                soup_uri_set_host                   (SoupURI *uri,
+                                                         const char *host);
+

+Sets uri's host to host. +

+

+If host is an IPv6 IP address, it should not include the brackets +required by the URI syntax; they will be added automatically when +converting uri to a string. +

+

+http and https URIs should not have a NULL host. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

host :

the hostname or IP address, or NULL. [allow-none] +
+
+
+
+

soup_uri_get_host ()

+
const char *        soup_uri_get_host                   (SoupURI *uri);
+

+Gets uri's host. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's host.
+

Since 2.32

+
+
+
+

soup_uri_set_port ()

+
void                soup_uri_set_port                   (SoupURI *uri,
+                                                         guint port);
+

+Sets uri's port to port. If port is 0, uri will not have an +explicitly-specified port. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

port :

the port, or 0
+
+
+
+

soup_uri_get_port ()

+
guint               soup_uri_get_port                   (SoupURI *uri);
+

+Gets uri's port. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's port.
+

Since 2.32

+
+
+
+

soup_uri_set_path ()

+
void                soup_uri_set_path                   (SoupURI *uri,
+                                                         const char *path);
+

+Sets uri's path to path. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

path :

the non-NULL path
+
+
+
+

soup_uri_get_path ()

+
const char *        soup_uri_get_path                   (SoupURI *uri);
+

+Gets uri's path. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's path.
+

Since 2.32

+
+
+
+

soup_uri_set_query ()

+
void                soup_uri_set_query                  (SoupURI *uri,
+                                                         const char *query);
+

+Sets uri's query to query. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

query :

the query. [allow-none] +
+
+
+
+

soup_uri_set_query_from_form ()

+
void                soup_uri_set_query_from_form        (SoupURI *uri,
+                                                         GHashTable *form);
+

+Sets uri's query to the result of encoding form according to the +HTML form rules. See soup_form_encode_hash() for more information. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

form :

a GHashTable containing HTML form +information. [element-type utf8 utf8] +
+
+
+
+

soup_uri_set_query_from_fields ()

+
void                soup_uri_set_query_from_fields      (SoupURI *uri,
+                                                         const char *first_field,
+                                                         ...);
+

+Sets uri's query to the result of encoding the given form fields +and values according to the * HTML form rules. See +soup_form_encode() for more information. +

+
++ + + + + + + + + + + + + + +

uri :

a SoupURI +

first_field :

name of the first form field to encode into query

... :

value of first_field, followed by additional field names +and values, terminated by NULL.
+
+
+
+

soup_uri_get_query ()

+
const char *        soup_uri_get_query                  (SoupURI *uri);
+

+Gets uri's query. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's query.
+

Since 2.32

+
+
+
+

soup_uri_set_fragment ()

+
void                soup_uri_set_fragment               (SoupURI *uri,
+                                                         const char *fragment);
+

+Sets uri's fragment to fragment. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

fragment :

the fragment. [allow-none] +
+
+
+
+

soup_uri_get_fragment ()

+
const char *        soup_uri_get_fragment               (SoupURI *uri);
+

+Gets uri's fragment. +

+
++ + + + + + + + + + +

uri :

a SoupURI +

Returns :

+uri's fragment.
+

Since 2.32

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/annotation-glossary.html b/docs/reference/html/annotation-glossary.html new file mode 100644 index 0000000..2e1aa75 --- /dev/null +++ b/docs/reference/html/annotation-glossary.html @@ -0,0 +1,95 @@ + + + + +Annotation Glossary + + + + + + + + + + + + + + + + + + +
+

+Annotation Glossary

+

O

+
+out
+

Parameter for returning results. Default is transfer full.

+

S

+
+scope async
+

The callback is valid until first called.

+

A

+
+allow-none
+

NULL is ok, both for passing and for returning.

+

I

+
+inout
+

Parameter for input and for returning results. Default is transfer full.

+

T

+
+transfer none
+

Don't free data after the code is done.

+

A

+
+array
+

Parameter points to an array of items.

+

E

+
+element-type
+

Generics and defining elements of containers and arrays.

+

T

+
+transfer container
+

Free data container after the code is done.

+

S

+
+scope call
+

The callback is valid only during the call to the method.

+

T

+
+transfer full
+

Free data after the code is done.

+
+type
+

Override the parsed C type with given type

+
+ + + \ No newline at end of file diff --git a/docs/reference/html/ch01.html b/docs/reference/html/ch01.html new file mode 100644 index 0000000..8864ee8 --- /dev/null +++ b/docs/reference/html/ch01.html @@ -0,0 +1,41 @@ + + + + +Tutorial + + + + + + + + + + + + + + + + +
+

+Tutorial

+
+
+Compiling with libsoup — Notes on compiling +
+
+Soup Client Basics — Client-side tutorial +
+
+Soup Server Basics — Server-side tutorial +
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/ch02.html b/docs/reference/html/ch02.html new file mode 100644 index 0000000..7d67692 --- /dev/null +++ b/docs/reference/html/ch02.html @@ -0,0 +1,84 @@ + + + + +Core API + + + + + + + + + + + + + + + + +
+

+Core API

+
+
+SoupAuth — HTTP client-side authentication support +
+
+SoupAuthDomain — Server-side authentication +
+
+SoupAuthDomainBasic — Server-side "Basic" authentication +
+
+SoupAuthDomainDigest — Server-side "Digest" authentication +
+
+SoupCookie — HTTP Cookies +
+
+SoupMessage — An HTTP request and response. +
+
+SoupMessageHeaders — HTTP message headers +
+
+SoupMessageBody — HTTP message body +
+
+soup-method — HTTP method definitions +
+
+SoupMultipart — multipart HTTP message bodies +
+
+SoupServer — HTTP server +
+
+SoupSession — Soup session state object +
+
+SoupSessionAsync — Soup session for asynchronous (main-loop-based) I/O. +
+
+SoupSessionSync — Soup session for blocking I/O in multithreaded +programs. +
+
+soup-status — HTTP (and libsoup) status codes +
+
+SoupURI — URIs +
+
+Soup Miscellaneous Utilities — Miscellaneous functions +
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/ch03.html b/docs/reference/html/ch03.html new file mode 100644 index 0000000..d313878 --- /dev/null +++ b/docs/reference/html/ch03.html @@ -0,0 +1,56 @@ + + + + +Additional Features + + + + + + + + + + + + + + + + +
+

+Additional Features

+
+
+SoupSessionFeature — Interface for miscellaneous session features +
+
+SoupContentDecoder — Content-Encoding handler +
+
+SoupContentSniffer — Content sniffing for SoupSession +
+
+SoupCookieJar — Automatic cookie handling for SoupSession +
+
+SoupCookieJarText — Text-file-based ("cookies.txt") Cookie Jar +
+
+SoupLogger — Debug logging support +
+
+SoupProxyURIResolver +
+
+SoupProxyResolverDefault +
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/ch04.html b/docs/reference/html/ch04.html new file mode 100644 index 0000000..9916385 --- /dev/null +++ b/docs/reference/html/ch04.html @@ -0,0 +1,39 @@ + + + + +Web Services APIs + + + + + + + + + + + + + + + + +
+

+Web Services APIs

+
+
+XMLRPC Support — XML-RPC support +
+
+GValue Support — GValue utilities +
+
+<xi:include></xi:include> +
+ + + \ No newline at end of file diff --git a/docs/reference/html/ch05.html b/docs/reference/html/ch05.html new file mode 100644 index 0000000..33c9172 --- /dev/null +++ b/docs/reference/html/ch05.html @@ -0,0 +1,38 @@ + + + + +GNOME integration + + + + + + + + + + + + + + + + +
+

+GNOME integration

+
+
+soup-gnome-features +
+
+SoupCookieJarSqlite — SQLite-based Cookie Jar +
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/ch06.html b/docs/reference/html/ch06.html new file mode 100644 index 0000000..56b447e --- /dev/null +++ b/docs/reference/html/ch06.html @@ -0,0 +1,38 @@ + + + + +Low-level Networking API + + + + + + + + + + + + + + + + +
+

+Low-level Networking API

+
+
+SoupAddress — DNS support +
+
+SoupSocket — A network socket +
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/ch07.html b/docs/reference/html/ch07.html new file mode 100644 index 0000000..20e2cb3 --- /dev/null +++ b/docs/reference/html/ch07.html @@ -0,0 +1,50 @@ + + + + +Experimental streaming API + + + + + + + + + + + + + + + + +
+

+Experimental streaming API

+
+
+SoupRequester +
+
+SoupRequest — Protocol-independent streaming request interface +
+
+SoupRequestHTTP +
+
+SoupRequestFile +
+
+SoupRequestData +
+
+SoupCache +
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/home.png b/docs/reference/html/home.png new file mode 100644 index 0000000000000000000000000000000000000000..17003611d9df2b066afc682cbde962f3a575002d GIT binary patch literal 654 zcmV;90&)F`P)~yY zO1cF+0vxb!W?!x?K+*#62Jq)nA4q`)5S6sgX4ao{=)(Mgq+YMr)7sjak|a^9)zS!j zlk{-n29mabXYF=7SYBQx&vO8xC}MYams+hxqtO7sImhPaCf@rq;I^3!#u*2aUP)55 zT2&N90xmEJ0s&fGT~(T<3d2xYmK9C>IP*x-M@ib*+0pFm>>uW37N2Wzaq-fCnIZE9 zpb8}0+uN+KuQM2oZVHfP8U6kQdo3?>Wo2dT)WeM9So8DqhLi#T0 z-i(>mfjhvbsYV`;4sgfJ-p>G-SqJ!fjR6BQYs1h*y9xaN0l{VB;o%`08yiy@)$8@~ z2PD1gcDuiy;j1tR0v#V8OH%W)25-YKyx(j#IXO9*YWf0mb8}QG6@b@;cHxh9{t7+@ o!Yd`f8L$sLH?yBt^q3C6015TtIu@BS5dZ)H07*qoM6N<$f*igdr~m)} literal 0 HcmV?d00001 diff --git a/docs/reference/html/index.html b/docs/reference/html/index.html new file mode 100644 index 0000000..16fa7bc --- /dev/null +++ b/docs/reference/html/index.html @@ -0,0 +1,169 @@ + + + + +libsoup Reference Manual + + + + + + + +
+
+
+
+
+
+
Tutorial
+
+
+Compiling with libsoup — Notes on compiling +
+
+Soup Client Basics — Client-side tutorial +
+
+Soup Server Basics — Server-side tutorial +
+
+
Core API
+
+
+SoupAuth — HTTP client-side authentication support +
+
+SoupAuthDomain — Server-side authentication +
+
+SoupAuthDomainBasic — Server-side "Basic" authentication +
+
+SoupAuthDomainDigest — Server-side "Digest" authentication +
+
+SoupCookie — HTTP Cookies +
+
+SoupMessage — An HTTP request and response. +
+
+SoupMessageHeaders — HTTP message headers +
+
+SoupMessageBody — HTTP message body +
+
+soup-method — HTTP method definitions +
+
+SoupMultipart — multipart HTTP message bodies +
+
+SoupServer — HTTP server +
+
+SoupSession — Soup session state object +
+
+SoupSessionAsync — Soup session for asynchronous (main-loop-based) I/O. +
+
+SoupSessionSync — Soup session for blocking I/O in multithreaded +programs. +
+
+soup-status — HTTP (and libsoup) status codes +
+
+SoupURI — URIs +
+
+Soup Miscellaneous Utilities — Miscellaneous functions +
+
+
Additional Features
+
+
+SoupSessionFeature — Interface for miscellaneous session features +
+
+SoupContentDecoder — Content-Encoding handler +
+
+SoupContentSniffer — Content sniffing for SoupSession +
+
+SoupCookieJar — Automatic cookie handling for SoupSession +
+
+SoupCookieJarText — Text-file-based ("cookies.txt") Cookie Jar +
+
+SoupLogger — Debug logging support +
+
+SoupProxyURIResolver +
+
+SoupProxyResolverDefault +
+
+
Web Services APIs
+
+
+XMLRPC Support — XML-RPC support +
+
+GValue Support — GValue utilities +
+
+
GNOME integration
+
+
+soup-gnome-features +
+
+SoupCookieJarSqlite — SQLite-based Cookie Jar +
+
+
Low-level Networking API
+
+
+SoupAddress — DNS support +
+
+SoupSocket — A network socket +
+
+
Experimental streaming API
+
+
+SoupRequester +
+
+SoupRequest — Protocol-independent streaming request interface +
+
+SoupRequestHTTP +
+
+SoupRequestFile +
+
+SoupRequestData +
+
+SoupCache +
+
+
Index
+
Annotation Glossary
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/index.sgml b/docs/reference/html/index.sgml new file mode 100644 index 0000000..675dcb9 --- /dev/null +++ b/docs/reference/html/index.sgml @@ -0,0 +1,1032 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/html/ix01.html b/docs/reference/html/ix01.html new file mode 100644 index 0000000..64a4878 --- /dev/null +++ b/docs/reference/html/ix01.html @@ -0,0 +1,1325 @@ + + + + +Index + + + + + + + + + + + + + + + + +
+

+Index

+
+

S

+
+
SoupAddress, SoupAddress +
+
SoupAddress:family, The "family" property +
+
SoupAddress:name, The "name" property +
+
SoupAddress:physical, The "physical" property +
+
SoupAddress:port, The "port" property +
+
SoupAddress:sockaddr, The "sockaddr" property +
+
SoupAddressCallback, SoupAddressCallback () +
+
SoupAddressFamily, enum SoupAddressFamily +
+
SoupAuth, SoupAuth +
+
SoupAuth::save-password, The "save-password" signal +
+
SoupAuth:host, The "host" property +
+
SoupAuth:is-authenticated, The "is-authenticated" property +
+
SoupAuth:is-for-proxy, The "is-for-proxy" property +
+
SoupAuth:realm, The "realm" property +
+
SoupAuth:scheme-name, The "scheme-name" property +
+
SoupAuthDomain, SoupAuthDomain +
+
SoupAuthDomain:add-path, The "add-path" property +
+
SoupAuthDomain:filter, The "filter" property +
+
SoupAuthDomain:filter-data, The "filter-data" property +
+
SoupAuthDomain:generic-auth-callback, The "generic-auth-callback" property +
+
SoupAuthDomain:generic-auth-data, The "generic-auth-data" property +
+
SoupAuthDomain:proxy, The "proxy" property +
+
SoupAuthDomain:realm, The "realm" property +
+
SoupAuthDomain:remove-path, The "remove-path" property +
+
SoupAuthDomainBasic, SoupAuthDomainBasic +
+
SoupAuthDomainBasic:auth-callback, The "auth-callback" property +
+
SoupAuthDomainBasic:auth-data, The "auth-data" property +
+
SoupAuthDomainBasicAuthCallback, SoupAuthDomainBasicAuthCallback () +
+
SoupAuthDomainDigest, SoupAuthDomainDigest +
+
SoupAuthDomainDigest:auth-callback, The "auth-callback" property +
+
SoupAuthDomainDigest:auth-data, The "auth-data" property +
+
SoupAuthDomainDigestAuthCallback, SoupAuthDomainDigestAuthCallback () +
+
SoupAuthDomainFilter, SoupAuthDomainFilter () +
+
SoupAuthDomainGenericAuthCallback, SoupAuthDomainGenericAuthCallback () +
+
SoupBuffer, SoupBuffer +
+
SoupCache, struct SoupCache +
+
SoupCache:cache-dir, The "cache-dir" property +
+
SoupCache:cache-type, The "cache-type" property +
+
SoupCacheType, enum SoupCacheType +
+
SoupChunkAllocator, SoupChunkAllocator () +
+
SoupClientContext, SoupServer +
+
SoupContentDecoder, SoupContentDecoder +
+
SoupContentSniffer, SoupContentSniffer +
+
SoupCookie, SoupCookie +
+
SoupCookieJar, SoupCookieJar +
+
SoupCookieJar::changed, The "changed" signal +
+
SoupCookieJar:accept-policy, The "accept-policy" property +
+
SoupCookieJar:read-only, The "read-only" property +
+
SoupCookieJarAcceptPolicy, enum SoupCookieJarAcceptPolicy +
+
SoupCookieJarSqlite, SoupCookieJarSqlite +
+
SoupCookieJarSqlite:filename, The "filename" property +
+
SoupCookieJarText, SoupCookieJarText +
+
SoupCookieJarText:filename, The "filename" property +
+
SoupDate, SoupDate +
+
SoupDateFormat, enum SoupDateFormat +
+
SoupEncoding, enum SoupEncoding +
+
SoupExpectation, enum SoupExpectation +
+
SoupHTTPVersion, enum SoupHTTPVersion +
+
SoupKnownStatusCode, enum SoupKnownStatusCode +
+
SoupLogger, SoupLogger +
+
SoupLoggerFilter, SoupLoggerFilter () +
+
SoupLoggerLogLevel, enum SoupLoggerLogLevel +
+
SoupLoggerPrinter, SoupLoggerPrinter () +
+
SoupMemoryUse, enum SoupMemoryUse +
+
SoupMessage, SoupMessage +
+
SoupMessage::content-sniffed, The "content-sniffed" signal +
+
SoupMessage::finished, The "finished" signal +
+
SoupMessage::got-body, The "got-body" signal +
+
SoupMessage::got-chunk, The "got-chunk" signal +
+
SoupMessage::got-headers, The "got-headers" signal +
+
SoupMessage::got-informational, The "got-informational" signal +
+
SoupMessage::network-event, The "network-event" signal +
+
SoupMessage::restarted, The "restarted" signal +
+
SoupMessage::wrote-body, The "wrote-body" signal +
+
SoupMessage::wrote-body-data, The "wrote-body-data" signal +
+
SoupMessage::wrote-chunk, The "wrote-chunk" signal +
+
SoupMessage::wrote-headers, The "wrote-headers" signal +
+
SoupMessage::wrote-informational, The "wrote-informational" signal +
+
SoupMessage:first-party, The "first-party" property +
+
SoupMessage:flags, The "flags" property +
+
SoupMessage:http-version, The "http-version" property +
+
SoupMessage:method, The "method" property +
+
SoupMessage:reason-phrase, The "reason-phrase" property +
+
SoupMessage:request-body, The "request-body" property +
+
SoupMessage:request-headers, The "request-headers" property +
+
SoupMessage:response-body, The "response-body" property +
+
SoupMessage:response-headers, The "response-headers" property +
+
SoupMessage:server-side, The "server-side" property +
+
SoupMessage:status-code, The "status-code" property +
+
SoupMessage:tls-certificate, The "tls-certificate" property +
+
SoupMessage:tls-errors, The "tls-errors" property +
+
SoupMessage:uri, The "uri" property +
+
SoupMessageBody, SoupMessageBody +
+
SoupMessageFlags, enum SoupMessageFlags +
+
SoupMessageHeaders, SoupMessageHeaders +
+
SoupMessageHeadersForeachFunc, SoupMessageHeadersForeachFunc () +
+
SoupMessageHeadersIter, SoupMessageHeadersIter +
+
SoupMessageHeadersType, enum SoupMessageHeadersType +
+
SoupMultipart, SoupMultipart +
+
SoupProxyResolverDefault, SoupProxyResolverDefault +
+
SoupProxyResolverDefault:gproxy-resolver, The "gproxy-resolver" property +
+
SoupProxyURIResolver, SoupProxyURIResolver +
+
SoupProxyURIResolverCallback, SoupProxyURIResolverCallback () +
+
SoupRange, SoupRange +
+
SoupRequest, struct SoupRequest +
+
SoupRequest:session, The "session" property +
+
SoupRequest:uri, The "uri" property +
+
SoupRequestData, SoupRequestData +
+
SoupRequester, struct SoupRequester +
+
SoupRequesterError, enum SoupRequesterError +
+
SoupRequestFile, SoupRequestFile +
+
SoupRequestHTTP, SoupRequestHTTP +
+
SoupServer, SoupServer +
+
SoupServer::request-aborted, The "request-aborted" signal +
+
SoupServer::request-finished, The "request-finished" signal +
+
SoupServer::request-read, The "request-read" signal +
+
SoupServer::request-started, The "request-started" signal +
+
SoupServer:async-context, The "async-context" property +
+
SoupServer:interface, The "interface" property +
+
SoupServer:port, The "port" property +
+
SoupServer:raw-paths, The "raw-paths" property +
+
SoupServer:server-header, The "server-header" property +
+
SoupServer:ssl-cert-file, The "ssl-cert-file" property +
+
SoupServer:ssl-key-file, The "ssl-key-file" property +
+
SoupServer:tls-certificate, The "tls-certificate" property +
+
SoupServerCallback, SoupServerCallback () +
+
SoupSession, SoupSession +
+
SoupSession::authenticate, The "authenticate" signal +
+
SoupSession::connection-created, The "connection-created" signal +
+
SoupSession::request-queued, The "request-queued" signal +
+
SoupSession::request-started, The "request-started" signal +
+
SoupSession::request-unqueued, The "request-unqueued" signal +
+
SoupSession::tunneling, The "tunneling" signal +
+
SoupSession:accept-language, The "accept-language" property +
+
SoupSession:accept-language-auto, The "accept-language-auto" property +
+
SoupSession:add-feature, The "add-feature" property +
+
SoupSession:add-feature-by-type, The "add-feature-by-type" property +
+
SoupSession:async-context, The "async-context" property +
+
SoupSession:http-aliases, The "http-aliases" property +
+
SoupSession:https-aliases, The "https-aliases" property +
+
SoupSession:idle-timeout, The "idle-timeout" property +
+
SoupSession:max-conns, The "max-conns" property +
+
SoupSession:max-conns-per-host, The "max-conns-per-host" property +
+
SoupSession:proxy-uri, The "proxy-uri" property +
+
SoupSession:remove-feature-by-type, The "remove-feature-by-type" property +
+
SoupSession:ssl-ca-file, The "ssl-ca-file" property +
+
SoupSession:ssl-strict, The "ssl-strict" property +
+
SoupSession:ssl-use-system-ca-file, The "ssl-use-system-ca-file" property +
+
SoupSession:timeout, The "timeout" property +
+
SoupSession:tls-database, The "tls-database" property +
+
SoupSession:use-ntlm, The "use-ntlm" property +
+
SoupSession:use-thread-context, The "use-thread-context" property +
+
SoupSession:user-agent, The "user-agent" property +
+
SoupSessionAsync, SoupSessionAsync +
+
SoupSessionCallback, SoupSessionCallback () +
+
SoupSessionFeature, SoupSessionFeature +
+
SoupSessionFeatureInterface, SoupSessionFeatureInterface +
+
SoupSessionSync, SoupSessionSync +
+
SoupSocket, SoupSocket +
+
SoupSocket::disconnected, The "disconnected" signal +
+
SoupSocket::event, The "event" signal +
+
SoupSocket::new-connection, The "new-connection" signal +
+
SoupSocket::readable, The "readable" signal +
+
SoupSocket::writable, The "writable" signal +
+
SoupSocket:async-context, The "async-context" property +
+
SoupSocket:clean-dispose, The "clean-dispose" property +
+
SoupSocket:is-server, The "is-server" property +
+
SoupSocket:local-address, The "local-address" property +
+
SoupSocket:non-blocking, The "non-blocking" property +
+
SoupSocket:remote-address, The "remote-address" property +
+
SoupSocket:ssl-creds, The "ssl-creds" property +
+
SoupSocket:ssl-fallback, The "ssl-fallback" property +
+
SoupSocket:ssl-strict, The "ssl-strict" property +
+
SoupSocket:timeout, The "timeout" property +
+
SoupSocket:tls-certificate, The "tls-certificate" property +
+
SoupSocket:tls-errors, The "tls-errors" property +
+
SoupSocket:trusted-certificate, The "trusted-certificate" property +
+
SoupSocket:use-thread-context, The "use-thread-context" property +
+
SoupSocketCallback, SoupSocketCallback () +
+
SoupSocketIOStatus, enum SoupSocketIOStatus +
+
SoupURI, SoupURI +
+
SoupXMLRPCFault, enum SoupXMLRPCFault +
+
SOUP_ADDRESS_ANY_PORT, SOUP_ADDRESS_ANY_PORT +
+
soup_address_equal_by_ip, soup_address_equal_by_ip () +
+
soup_address_equal_by_name, soup_address_equal_by_name () +
+
SOUP_ADDRESS_FAMILY, SOUP_ADDRESS_FAMILY +
+
soup_address_get_gsockaddr, soup_address_get_gsockaddr () +
+
soup_address_get_name, soup_address_get_name () +
+
soup_address_get_physical, soup_address_get_physical () +
+
soup_address_get_port, soup_address_get_port () +
+
soup_address_get_sockaddr, soup_address_get_sockaddr () +
+
soup_address_hash_by_ip, soup_address_hash_by_ip () +
+
soup_address_hash_by_name, soup_address_hash_by_name () +
+
soup_address_is_resolved, soup_address_is_resolved () +
+
SOUP_ADDRESS_NAME, SOUP_ADDRESS_NAME +
+
soup_address_new, soup_address_new () +
+
soup_address_new_any, soup_address_new_any () +
+
soup_address_new_from_sockaddr, soup_address_new_from_sockaddr () +
+
SOUP_ADDRESS_PHYSICAL, SOUP_ADDRESS_PHYSICAL +
+
SOUP_ADDRESS_PORT, SOUP_ADDRESS_PORT +
+
soup_address_resolve_async, soup_address_resolve_async () +
+
soup_address_resolve_sync, soup_address_resolve_sync () +
+
SOUP_ADDRESS_SOCKADDR, SOUP_ADDRESS_SOCKADDR +
+
soup_add_completion, soup_add_completion () +
+
soup_add_idle, soup_add_idle () +
+
soup_add_io_watch, soup_add_io_watch () +
+
soup_add_timeout, soup_add_timeout () +
+
soup_auth_authenticate, soup_auth_authenticate () +
+
soup_auth_domain_accepts, soup_auth_domain_accepts () +
+
soup_auth_domain_add_path, soup_auth_domain_add_path () +
+
SOUP_AUTH_DOMAIN_ADD_PATH, SOUP_AUTH_DOMAIN_ADD_PATH +
+
SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK, SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK +
+
SOUP_AUTH_DOMAIN_BASIC_AUTH_DATA, SOUP_AUTH_DOMAIN_BASIC_AUTH_DATA +
+
soup_auth_domain_basic_new, soup_auth_domain_basic_new () +
+
soup_auth_domain_basic_set_auth_callback, soup_auth_domain_basic_set_auth_callback () +
+
soup_auth_domain_challenge, soup_auth_domain_challenge () +
+
soup_auth_domain_check_password, soup_auth_domain_check_password () +
+
soup_auth_domain_covers, soup_auth_domain_covers () +
+
SOUP_AUTH_DOMAIN_DIGEST_AUTH_CALLBACK, SOUP_AUTH_DOMAIN_DIGEST_AUTH_CALLBACK +
+
SOUP_AUTH_DOMAIN_DIGEST_AUTH_DATA, SOUP_AUTH_DOMAIN_DIGEST_AUTH_DATA +
+
soup_auth_domain_digest_encode_password, soup_auth_domain_digest_encode_password () +
+
soup_auth_domain_digest_new, soup_auth_domain_digest_new () +
+
soup_auth_domain_digest_set_auth_callback, soup_auth_domain_digest_set_auth_callback () +
+
SOUP_AUTH_DOMAIN_FILTER, SOUP_AUTH_DOMAIN_FILTER +
+
SOUP_AUTH_DOMAIN_FILTER_DATA, SOUP_AUTH_DOMAIN_FILTER_DATA +
+
SOUP_AUTH_DOMAIN_GENERIC_AUTH_CALLBACK, SOUP_AUTH_DOMAIN_GENERIC_AUTH_CALLBACK +
+
SOUP_AUTH_DOMAIN_GENERIC_AUTH_DATA, SOUP_AUTH_DOMAIN_GENERIC_AUTH_DATA +
+
soup_auth_domain_get_realm, soup_auth_domain_get_realm () +
+
SOUP_AUTH_DOMAIN_PROXY, SOUP_AUTH_DOMAIN_PROXY +
+
SOUP_AUTH_DOMAIN_REALM, SOUP_AUTH_DOMAIN_REALM +
+
soup_auth_domain_remove_path, soup_auth_domain_remove_path () +
+
SOUP_AUTH_DOMAIN_REMOVE_PATH, SOUP_AUTH_DOMAIN_REMOVE_PATH +
+
soup_auth_domain_set_filter, soup_auth_domain_set_filter () +
+
soup_auth_domain_set_generic_auth_callback, soup_auth_domain_set_generic_auth_callback () +
+
soup_auth_free_protection_space, soup_auth_free_protection_space () +
+
soup_auth_get_authorization, soup_auth_get_authorization () +
+
soup_auth_get_host, soup_auth_get_host () +
+
soup_auth_get_info, soup_auth_get_info () +
+
soup_auth_get_protection_space, soup_auth_get_protection_space () +
+
soup_auth_get_realm, soup_auth_get_realm () +
+
soup_auth_get_scheme_name, soup_auth_get_scheme_name () +
+
SOUP_AUTH_HOST, SOUP_AUTH_HOST +
+
soup_auth_is_authenticated, soup_auth_is_authenticated () +
+
SOUP_AUTH_IS_AUTHENTICATED, SOUP_AUTH_IS_AUTHENTICATED +
+
soup_auth_is_for_proxy, soup_auth_is_for_proxy () +
+
SOUP_AUTH_IS_FOR_PROXY, SOUP_AUTH_IS_FOR_PROXY +
+
soup_auth_new, soup_auth_new () +
+
SOUP_AUTH_REALM, SOUP_AUTH_REALM +
+
SOUP_AUTH_SCHEME_NAME, SOUP_AUTH_SCHEME_NAME +
+
soup_auth_update, soup_auth_update () +
+
soup_buffer_copy, soup_buffer_copy () +
+
soup_buffer_free, soup_buffer_free () +
+
soup_buffer_get_data, soup_buffer_get_data () +
+
soup_buffer_get_owner, soup_buffer_get_owner () +
+
soup_buffer_new, soup_buffer_new () +
+
soup_buffer_new_subbuffer, soup_buffer_new_subbuffer () +
+
soup_buffer_new_take, soup_buffer_new_take () +
+
soup_buffer_new_with_owner, soup_buffer_new_with_owner () +
+
soup_cache_clear, soup_cache_clear () +
+
soup_cache_dump, soup_cache_dump () +
+
soup_cache_flush, soup_cache_flush () +
+
soup_cache_get_max_size, soup_cache_get_max_size () +
+
soup_cache_load, soup_cache_load () +
+
soup_cache_new, soup_cache_new () +
+
soup_cache_set_max_size, soup_cache_set_max_size () +
+
soup_client_context_get_address, soup_client_context_get_address () +
+
soup_client_context_get_auth_domain, soup_client_context_get_auth_domain () +
+
soup_client_context_get_auth_user, soup_client_context_get_auth_user () +
+
soup_client_context_get_host, soup_client_context_get_host () +
+
soup_client_context_get_socket, soup_client_context_get_socket () +
+
soup_content_sniffer_new, soup_content_sniffer_new () +
+
soup_content_sniffer_sniff, soup_content_sniffer_sniff () +
+
soup_cookies_free, soup_cookies_free () +
+
soup_cookies_from_request, soup_cookies_from_request () +
+
soup_cookies_from_response, soup_cookies_from_response () +
+
soup_cookies_to_cookie_header, soup_cookies_to_cookie_header () +
+
soup_cookies_to_request, soup_cookies_to_request () +
+
soup_cookies_to_response, soup_cookies_to_response () +
+
soup_cookie_applies_to_uri, soup_cookie_applies_to_uri () +
+
soup_cookie_copy, soup_cookie_copy () +
+
soup_cookie_domain_matches, soup_cookie_domain_matches () +
+
soup_cookie_free, soup_cookie_free () +
+
soup_cookie_get_domain, soup_cookie_get_domain () +
+
soup_cookie_get_expires, soup_cookie_get_expires () +
+
soup_cookie_get_http_only, soup_cookie_get_http_only () +
+
soup_cookie_get_name, soup_cookie_get_name () +
+
soup_cookie_get_path, soup_cookie_get_path () +
+
soup_cookie_get_secure, soup_cookie_get_secure () +
+
soup_cookie_get_value, soup_cookie_get_value () +
+
SOUP_COOKIE_JAR_ACCEPT_POLICY, SOUP_COOKIE_JAR_ACCEPT_POLICY +
+
soup_cookie_jar_add_cookie, soup_cookie_jar_add_cookie () +
+
soup_cookie_jar_all_cookies, soup_cookie_jar_all_cookies () +
+
soup_cookie_jar_delete_cookie, soup_cookie_jar_delete_cookie () +
+
soup_cookie_jar_get_accept_policy, soup_cookie_jar_get_accept_policy () +
+
soup_cookie_jar_get_cookies, soup_cookie_jar_get_cookies () +
+
soup_cookie_jar_new, soup_cookie_jar_new () +
+
SOUP_COOKIE_JAR_READ_ONLY, SOUP_COOKIE_JAR_READ_ONLY +
+
soup_cookie_jar_set_accept_policy, soup_cookie_jar_set_accept_policy () +
+
soup_cookie_jar_set_cookie, soup_cookie_jar_set_cookie () +
+
soup_cookie_jar_set_cookie_with_first_party, soup_cookie_jar_set_cookie_with_first_party () +
+
SOUP_COOKIE_JAR_SQLITE_FILENAME, SOUP_COOKIE_JAR_SQLITE_FILENAME +
+
soup_cookie_jar_sqlite_new, soup_cookie_jar_sqlite_new () +
+
SOUP_COOKIE_JAR_TEXT_FILENAME, SOUP_COOKIE_JAR_TEXT_FILENAME +
+
soup_cookie_jar_text_new, soup_cookie_jar_text_new () +
+
SOUP_COOKIE_MAX_AGE_ONE_DAY, SOUP_COOKIE_MAX_AGE_ONE_DAY +
+
SOUP_COOKIE_MAX_AGE_ONE_HOUR, SOUP_COOKIE_MAX_AGE_ONE_HOUR +
+
SOUP_COOKIE_MAX_AGE_ONE_WEEK, SOUP_COOKIE_MAX_AGE_ONE_WEEK +
+
SOUP_COOKIE_MAX_AGE_ONE_YEAR, SOUP_COOKIE_MAX_AGE_ONE_YEAR +
+
soup_cookie_new, soup_cookie_new () +
+
soup_cookie_parse, soup_cookie_parse () +
+
soup_cookie_set_domain, soup_cookie_set_domain () +
+
soup_cookie_set_expires, soup_cookie_set_expires () +
+
soup_cookie_set_http_only, soup_cookie_set_http_only () +
+
soup_cookie_set_max_age, soup_cookie_set_max_age () +
+
soup_cookie_set_name, soup_cookie_set_name () +
+
soup_cookie_set_path, soup_cookie_set_path () +
+
soup_cookie_set_secure, soup_cookie_set_secure () +
+
soup_cookie_set_value, soup_cookie_set_value () +
+
soup_cookie_to_cookie_header, soup_cookie_to_cookie_header () +
+
soup_cookie_to_set_cookie_header, soup_cookie_to_set_cookie_header () +
+
soup_date_free, soup_date_free () +
+
soup_date_get_day, soup_date_get_day () +
+
soup_date_get_hour, soup_date_get_hour () +
+
soup_date_get_minute, soup_date_get_minute () +
+
soup_date_get_month, soup_date_get_month () +
+
soup_date_get_offset, soup_date_get_offset () +
+
soup_date_get_second, soup_date_get_second () +
+
soup_date_get_utc, soup_date_get_utc () +
+
soup_date_get_year, soup_date_get_year () +
+
soup_date_is_past, soup_date_is_past () +
+
soup_date_new, soup_date_new () +
+
soup_date_new_from_now, soup_date_new_from_now () +
+
soup_date_new_from_string, soup_date_new_from_string () +
+
soup_date_new_from_time_t, soup_date_new_from_time_t () +
+
soup_date_to_string, soup_date_to_string () +
+
soup_date_to_timeval, soup_date_to_timeval () +
+
soup_date_to_time_t, soup_date_to_time_t () +
+
soup_headers_parse, soup_headers_parse () +
+
soup_headers_parse_request, soup_headers_parse_request () +
+
soup_headers_parse_response, soup_headers_parse_response () +
+
soup_headers_parse_status_line, soup_headers_parse_status_line () +
+
soup_header_contains, soup_header_contains () +
+
soup_header_free_list, soup_header_free_list () +
+
soup_header_free_param_list, soup_header_free_param_list () +
+
soup_header_g_string_append_param, soup_header_g_string_append_param () +
+
soup_header_g_string_append_param_quoted, soup_header_g_string_append_param_quoted () +
+
soup_header_parse_list, soup_header_parse_list () +
+
soup_header_parse_param_list, soup_header_parse_param_list () +
+
soup_header_parse_quality_list, soup_header_parse_quality_list () +
+
soup_header_parse_semi_param_list, soup_header_parse_semi_param_list () +
+
SOUP_HTTP_ERROR, SOUP_HTTP_ERROR +
+
soup_logger_attach, soup_logger_attach () +
+
soup_logger_detach, soup_logger_detach () +
+
soup_logger_new, soup_logger_new () +
+
soup_logger_set_printer, soup_logger_set_printer () +
+
soup_logger_set_request_filter, soup_logger_set_request_filter () +
+
soup_logger_set_response_filter, soup_logger_set_response_filter () +
+
soup_message_add_header_handler, soup_message_add_header_handler () +
+
soup_message_add_status_code_handler, soup_message_add_status_code_handler () +
+
soup_message_body_append, soup_message_body_append () +
+
soup_message_body_append_buffer, soup_message_body_append_buffer () +
+
soup_message_body_append_take, soup_message_body_append_take () +
+
soup_message_body_complete, soup_message_body_complete () +
+
soup_message_body_flatten, soup_message_body_flatten () +
+
soup_message_body_free, soup_message_body_free () +
+
soup_message_body_get_accumulate, soup_message_body_get_accumulate () +
+
soup_message_body_get_chunk, soup_message_body_get_chunk () +
+
soup_message_body_got_chunk, soup_message_body_got_chunk () +
+
soup_message_body_new, soup_message_body_new () +
+
soup_message_body_set_accumulate, soup_message_body_set_accumulate () +
+
soup_message_body_truncate, soup_message_body_truncate () +
+
soup_message_body_wrote_chunk, soup_message_body_wrote_chunk () +
+
soup_message_disable_feature, soup_message_disable_feature () +
+
SOUP_MESSAGE_FIRST_PARTY, SOUP_MESSAGE_FIRST_PARTY +
+
SOUP_MESSAGE_FLAGS, SOUP_MESSAGE_FLAGS +
+
soup_message_get_address, soup_message_get_address () +
+
soup_message_get_first_party, soup_message_get_first_party () +
+
soup_message_get_flags, soup_message_get_flags () +
+
soup_message_get_https_status, soup_message_get_https_status () +
+
soup_message_get_http_version, soup_message_get_http_version () +
+
soup_message_get_uri, soup_message_get_uri () +
+
soup_message_headers_append, soup_message_headers_append () +
+
soup_message_headers_clean_connection_headers, soup_message_headers_clean_connection_headers () +
+
soup_message_headers_clear, soup_message_headers_clear () +
+
soup_message_headers_foreach, soup_message_headers_foreach () +
+
soup_message_headers_free, soup_message_headers_free () +
+
soup_message_headers_free_ranges, soup_message_headers_free_ranges () +
+
soup_message_headers_get, soup_message_headers_get () +
+
soup_message_headers_get_content_disposition, soup_message_headers_get_content_disposition () +
+
soup_message_headers_get_content_length, soup_message_headers_get_content_length () +
+
soup_message_headers_get_content_range, soup_message_headers_get_content_range () +
+
soup_message_headers_get_content_type, soup_message_headers_get_content_type () +
+
soup_message_headers_get_encoding, soup_message_headers_get_encoding () +
+
soup_message_headers_get_expectations, soup_message_headers_get_expectations () +
+
soup_message_headers_get_list, soup_message_headers_get_list () +
+
soup_message_headers_get_one, soup_message_headers_get_one () +
+
soup_message_headers_get_ranges, soup_message_headers_get_ranges () +
+
soup_message_headers_iter_init, soup_message_headers_iter_init () +
+
soup_message_headers_iter_next, soup_message_headers_iter_next () +
+
soup_message_headers_new, soup_message_headers_new () +
+
soup_message_headers_remove, soup_message_headers_remove () +
+
soup_message_headers_replace, soup_message_headers_replace () +
+
soup_message_headers_set_content_disposition, soup_message_headers_set_content_disposition () +
+
soup_message_headers_set_content_length, soup_message_headers_set_content_length () +
+
soup_message_headers_set_content_range, soup_message_headers_set_content_range () +
+
soup_message_headers_set_content_type, soup_message_headers_set_content_type () +
+
soup_message_headers_set_encoding, soup_message_headers_set_encoding () +
+
soup_message_headers_set_expectations, soup_message_headers_set_expectations () +
+
soup_message_headers_set_range, soup_message_headers_set_range () +
+
soup_message_headers_set_ranges, soup_message_headers_set_ranges () +
+
SOUP_MESSAGE_HTTP_VERSION, SOUP_MESSAGE_HTTP_VERSION +
+
soup_message_is_keepalive, soup_message_is_keepalive () +
+
SOUP_MESSAGE_METHOD, SOUP_MESSAGE_METHOD +
+
soup_message_new, soup_message_new () +
+
soup_message_new_from_uri, soup_message_new_from_uri () +
+
SOUP_MESSAGE_REASON_PHRASE, SOUP_MESSAGE_REASON_PHRASE +
+
SOUP_MESSAGE_REQUEST_BODY, SOUP_MESSAGE_REQUEST_BODY +
+
SOUP_MESSAGE_REQUEST_HEADERS, SOUP_MESSAGE_REQUEST_HEADERS +
+
SOUP_MESSAGE_RESPONSE_BODY, SOUP_MESSAGE_RESPONSE_BODY +
+
SOUP_MESSAGE_RESPONSE_HEADERS, SOUP_MESSAGE_RESPONSE_HEADERS +
+
SOUP_MESSAGE_SERVER_SIDE, SOUP_MESSAGE_SERVER_SIDE +
+
soup_message_set_chunk_allocator, soup_message_set_chunk_allocator () +
+
soup_message_set_first_party, soup_message_set_first_party () +
+
soup_message_set_flags, soup_message_set_flags () +
+
soup_message_set_http_version, soup_message_set_http_version () +
+
soup_message_set_redirect, soup_message_set_redirect () +
+
soup_message_set_request, soup_message_set_request () +
+
soup_message_set_response, soup_message_set_response () +
+
soup_message_set_status, soup_message_set_status () +
+
soup_message_set_status_full, soup_message_set_status_full () +
+
soup_message_set_uri, soup_message_set_uri () +
+
SOUP_MESSAGE_STATUS_CODE, SOUP_MESSAGE_STATUS_CODE +
+
SOUP_MESSAGE_TLS_CERTIFICATE, SOUP_MESSAGE_TLS_CERTIFICATE +
+
SOUP_MESSAGE_TLS_ERRORS, SOUP_MESSAGE_TLS_ERRORS +
+
SOUP_MESSAGE_URI, SOUP_MESSAGE_URI +
+
SOUP_METHOD_CONNECT, SOUP_METHOD_CONNECT +
+
SOUP_METHOD_COPY, SOUP_METHOD_COPY +
+
SOUP_METHOD_DELETE, SOUP_METHOD_DELETE +
+
SOUP_METHOD_GET, SOUP_METHOD_GET +
+
SOUP_METHOD_HEAD, SOUP_METHOD_HEAD +
+
SOUP_METHOD_LOCK, SOUP_METHOD_LOCK +
+
SOUP_METHOD_MKCOL, SOUP_METHOD_MKCOL +
+
SOUP_METHOD_MOVE, SOUP_METHOD_MOVE +
+
SOUP_METHOD_OPTIONS, SOUP_METHOD_OPTIONS +
+
SOUP_METHOD_POST, SOUP_METHOD_POST +
+
SOUP_METHOD_PROPFIND, SOUP_METHOD_PROPFIND +
+
SOUP_METHOD_PROPPATCH, SOUP_METHOD_PROPPATCH +
+
SOUP_METHOD_PUT, SOUP_METHOD_PUT +
+
SOUP_METHOD_TRACE, SOUP_METHOD_TRACE +
+
SOUP_METHOD_UNLOCK, SOUP_METHOD_UNLOCK +
+
soup_multipart_append_form_file, soup_multipart_append_form_file () +
+
soup_multipart_append_form_string, soup_multipart_append_form_string () +
+
soup_multipart_append_part, soup_multipart_append_part () +
+
soup_multipart_free, soup_multipart_free () +
+
soup_multipart_get_length, soup_multipart_get_length () +
+
soup_multipart_get_part, soup_multipart_get_part () +
+
soup_multipart_new, soup_multipart_new () +
+
soup_multipart_new_from_message, soup_multipart_new_from_message () +
+
soup_multipart_to_message, soup_multipart_to_message () +
+
soup_proxy_uri_resolver_get_proxy_uri_async, soup_proxy_uri_resolver_get_proxy_uri_async () +
+
soup_proxy_uri_resolver_get_proxy_uri_sync, soup_proxy_uri_resolver_get_proxy_uri_sync () +
+
SOUP_REQUESTER_ERROR, SOUP_REQUESTER_ERROR +
+
soup_requester_new, soup_requester_new () +
+
soup_requester_request, soup_requester_request () +
+
soup_requester_request_uri, soup_requester_request_uri () +
+
soup_request_file_get_file, soup_request_file_get_file () +
+
soup_request_get_content_length, soup_request_get_content_length () +
+
soup_request_get_content_type, soup_request_get_content_type () +
+
soup_request_get_session, soup_request_get_session () +
+
soup_request_get_uri, soup_request_get_uri () +
+
soup_request_http_get_message, soup_request_http_get_message () +
+
soup_request_send, soup_request_send () +
+
soup_request_send_async, soup_request_send_async () +
+
soup_request_send_finish, soup_request_send_finish () +
+
SOUP_REQUEST_SESSION, SOUP_REQUEST_SESSION +
+
SOUP_REQUEST_URI, SOUP_REQUEST_URI +
+
soup_server_add_auth_domain, soup_server_add_auth_domain () +
+
soup_server_add_handler, soup_server_add_handler () +
+
SOUP_SERVER_ASYNC_CONTEXT, SOUP_SERVER_ASYNC_CONTEXT +
+
soup_server_disconnect, soup_server_disconnect () +
+
soup_server_get_async_context, soup_server_get_async_context () +
+
soup_server_get_listener, soup_server_get_listener () +
+
soup_server_get_port, soup_server_get_port () +
+
SOUP_SERVER_INTERFACE, SOUP_SERVER_INTERFACE +
+
soup_server_is_https, soup_server_is_https () +
+
soup_server_new, soup_server_new () +
+
soup_server_pause_message, soup_server_pause_message () +
+
SOUP_SERVER_PORT, SOUP_SERVER_PORT +
+
soup_server_quit, soup_server_quit () +
+
SOUP_SERVER_RAW_PATHS, SOUP_SERVER_RAW_PATHS +
+
soup_server_remove_auth_domain, soup_server_remove_auth_domain () +
+
soup_server_remove_handler, soup_server_remove_handler () +
+
soup_server_run, soup_server_run () +
+
soup_server_run_async, soup_server_run_async () +
+
SOUP_SERVER_SERVER_HEADER, SOUP_SERVER_SERVER_HEADER +
+
SOUP_SERVER_SSL_CERT_FILE, SOUP_SERVER_SSL_CERT_FILE +
+
SOUP_SERVER_SSL_KEY_FILE, SOUP_SERVER_SSL_KEY_FILE +
+
soup_server_unpause_message, soup_server_unpause_message () +
+
soup_session_abort, soup_session_abort () +
+
SOUP_SESSION_ACCEPT_LANGUAGE, SOUP_SESSION_ACCEPT_LANGUAGE +
+
SOUP_SESSION_ACCEPT_LANGUAGE_AUTO, SOUP_SESSION_ACCEPT_LANGUAGE_AUTO +
+
soup_session_add_feature, soup_session_add_feature () +
+
SOUP_SESSION_ADD_FEATURE, SOUP_SESSION_ADD_FEATURE +
+
soup_session_add_feature_by_type, soup_session_add_feature_by_type () +
+
SOUP_SESSION_ADD_FEATURE_BY_TYPE, SOUP_SESSION_ADD_FEATURE_BY_TYPE +
+
SOUP_SESSION_ASYNC_CONTEXT, SOUP_SESSION_ASYNC_CONTEXT +
+
soup_session_async_new, soup_session_async_new () +
+
soup_session_async_new_with_options, soup_session_async_new_with_options () +
+
soup_session_cancel_message, soup_session_cancel_message () +
+
soup_session_get_async_context, soup_session_get_async_context () +
+
soup_session_get_feature, soup_session_get_feature () +
+
soup_session_get_features, soup_session_get_features () +
+
soup_session_get_feature_for_message, soup_session_get_feature_for_message () +
+
SOUP_SESSION_HTTPS_ALIASES, SOUP_SESSION_HTTPS_ALIASES +
+
SOUP_SESSION_HTTP_ALIASES, SOUP_SESSION_HTTP_ALIASES +
+
SOUP_SESSION_IDLE_TIMEOUT, SOUP_SESSION_IDLE_TIMEOUT +
+
SOUP_SESSION_MAX_CONNS, SOUP_SESSION_MAX_CONNS +
+
SOUP_SESSION_MAX_CONNS_PER_HOST, SOUP_SESSION_MAX_CONNS_PER_HOST +
+
soup_session_pause_message, soup_session_pause_message () +
+
soup_session_prepare_for_uri, soup_session_prepare_for_uri () +
+
SOUP_SESSION_PROXY_URI, SOUP_SESSION_PROXY_URI +
+
soup_session_queue_message, soup_session_queue_message () +
+
soup_session_redirect_message, soup_session_redirect_message () +
+
soup_session_remove_feature, soup_session_remove_feature () +
+
soup_session_remove_feature_by_type, soup_session_remove_feature_by_type () +
+
SOUP_SESSION_REMOVE_FEATURE_BY_TYPE, SOUP_SESSION_REMOVE_FEATURE_BY_TYPE +
+
soup_session_requeue_message, soup_session_requeue_message () +
+
soup_session_send_message, soup_session_send_message () +
+
SOUP_SESSION_SSL_CA_FILE, SOUP_SESSION_SSL_CA_FILE +
+
SOUP_SESSION_SSL_STRICT, SOUP_SESSION_SSL_STRICT +
+
soup_session_sync_new, soup_session_sync_new () +
+
soup_session_sync_new_with_options, soup_session_sync_new_with_options () +
+
SOUP_SESSION_TIMEOUT, SOUP_SESSION_TIMEOUT +
+
soup_session_unpause_message, soup_session_unpause_message () +
+
SOUP_SESSION_USER_AGENT, SOUP_SESSION_USER_AGENT +
+
SOUP_SESSION_USE_NTLM, SOUP_SESSION_USE_NTLM +
+
soup_session_would_redirect, soup_session_would_redirect () +
+
SOUP_SOCKET_ASYNC_CONTEXT, SOUP_SOCKET_ASYNC_CONTEXT +
+
soup_socket_connect_async, soup_socket_connect_async () +
+
soup_socket_connect_sync, soup_socket_connect_sync () +
+
soup_socket_disconnect, soup_socket_disconnect () +
+
SOUP_SOCKET_FLAG_NONBLOCKING, SOUP_SOCKET_FLAG_NONBLOCKING +
+
soup_socket_get_local_address, soup_socket_get_local_address () +
+
soup_socket_get_remote_address, soup_socket_get_remote_address () +
+
soup_socket_is_connected, soup_socket_is_connected () +
+
SOUP_SOCKET_IS_SERVER, SOUP_SOCKET_IS_SERVER +
+
soup_socket_is_ssl, soup_socket_is_ssl () +
+
soup_socket_listen, soup_socket_listen () +
+
SOUP_SOCKET_LOCAL_ADDRESS, SOUP_SOCKET_LOCAL_ADDRESS +
+
soup_socket_new, soup_socket_new () +
+
soup_socket_read, soup_socket_read () +
+
soup_socket_read_until, soup_socket_read_until () +
+
SOUP_SOCKET_REMOTE_ADDRESS, SOUP_SOCKET_REMOTE_ADDRESS +
+
SOUP_SOCKET_SSL_CREDENTIALS, SOUP_SOCKET_SSL_CREDENTIALS +
+
SOUP_SOCKET_SSL_FALLBACK, SOUP_SOCKET_SSL_FALLBACK +
+
SOUP_SOCKET_SSL_STRICT, SOUP_SOCKET_SSL_STRICT +
+
soup_socket_start_proxy_ssl, soup_socket_start_proxy_ssl () +
+
soup_socket_start_ssl, soup_socket_start_ssl () +
+
SOUP_SOCKET_TIMEOUT, SOUP_SOCKET_TIMEOUT +
+
SOUP_SOCKET_TLS_CERTIFICATE, SOUP_SOCKET_TLS_CERTIFICATE +
+
SOUP_SOCKET_TLS_ERRORS, SOUP_SOCKET_TLS_ERRORS +
+
SOUP_SOCKET_TRUSTED_CERTIFICATE, SOUP_SOCKET_TRUSTED_CERTIFICATE +
+
soup_socket_write, soup_socket_write () +
+
soup_ssl_supported, soup_ssl_supported +
+
soup_status_get_phrase, soup_status_get_phrase () +
+
SOUP_STATUS_IS_CLIENT_ERROR, SOUP_STATUS_IS_CLIENT_ERROR() +
+
SOUP_STATUS_IS_INFORMATIONAL, SOUP_STATUS_IS_INFORMATIONAL() +
+
SOUP_STATUS_IS_REDIRECTION, SOUP_STATUS_IS_REDIRECTION() +
+
SOUP_STATUS_IS_SERVER_ERROR, SOUP_STATUS_IS_SERVER_ERROR() +
+
SOUP_STATUS_IS_SUCCESSFUL, SOUP_STATUS_IS_SUCCESSFUL() +
+
SOUP_STATUS_IS_TRANSPORT_ERROR, SOUP_STATUS_IS_TRANSPORT_ERROR() +
+
soup_status_proxify, soup_status_proxify () +
+
soup_str_case_equal, soup_str_case_equal () +
+
soup_str_case_hash, soup_str_case_hash () +
+
SOUP_TYPE_AUTH_BASIC, SOUP_TYPE_AUTH_BASIC +
+
SOUP_TYPE_AUTH_DIGEST, SOUP_TYPE_AUTH_DIGEST +
+
SOUP_TYPE_AUTH_NTLM, SOUP_TYPE_AUTH_NTLM +
+
SOUP_TYPE_BYTE_ARRAY, SOUP_TYPE_BYTE_ARRAY +
+
SOUP_TYPE_GNOME_FEATURES_2_26, SOUP_TYPE_GNOME_FEATURES_2_26 +
+
SOUP_TYPE_PROXY_RESOLVER_GNOME, SOUP_TYPE_PROXY_RESOLVER_GNOME +
+
soup_uri_copy, soup_uri_copy () +
+
soup_uri_copy_host, soup_uri_copy_host () +
+
soup_uri_decode, soup_uri_decode () +
+
soup_uri_encode, soup_uri_encode () +
+
soup_uri_equal, soup_uri_equal () +
+
soup_uri_free, soup_uri_free () +
+
soup_uri_get_fragment, soup_uri_get_fragment () +
+
soup_uri_get_host, soup_uri_get_host () +
+
soup_uri_get_password, soup_uri_get_password () +
+
soup_uri_get_path, soup_uri_get_path () +
+
soup_uri_get_port, soup_uri_get_port () +
+
soup_uri_get_query, soup_uri_get_query () +
+
soup_uri_get_scheme, soup_uri_get_scheme () +
+
soup_uri_get_user, soup_uri_get_user () +
+
soup_uri_host_equal, soup_uri_host_equal () +
+
soup_uri_host_hash, soup_uri_host_hash () +
+
soup_uri_new, soup_uri_new () +
+
soup_uri_new_with_base, soup_uri_new_with_base () +
+
soup_uri_normalize, soup_uri_normalize () +
+
SOUP_URI_SCHEME_DATA, SOUP_URI_SCHEME_DATA +
+
SOUP_URI_SCHEME_FILE, SOUP_URI_SCHEME_FILE +
+
SOUP_URI_SCHEME_FTP, SOUP_URI_SCHEME_FTP +
+
SOUP_URI_SCHEME_HTTP, SOUP_URI_SCHEME_HTTP +
+
SOUP_URI_SCHEME_HTTPS, SOUP_URI_SCHEME_HTTPS +
+
soup_uri_set_fragment, soup_uri_set_fragment () +
+
soup_uri_set_host, soup_uri_set_host () +
+
soup_uri_set_password, soup_uri_set_password () +
+
soup_uri_set_path, soup_uri_set_path () +
+
soup_uri_set_port, soup_uri_set_port () +
+
soup_uri_set_query, soup_uri_set_query () +
+
soup_uri_set_query_from_fields, soup_uri_set_query_from_fields () +
+
soup_uri_set_query_from_form, soup_uri_set_query_from_form () +
+
soup_uri_set_scheme, soup_uri_set_scheme () +
+
soup_uri_set_user, soup_uri_set_user () +
+
soup_uri_to_string, soup_uri_to_string () +
+
soup_uri_uses_default_port, soup_uri_uses_default_port () +
+
SOUP_URI_VALID_FOR_HTTP, SOUP_URI_VALID_FOR_HTTP() +
+
soup_value_array_append, soup_value_array_append () +
+
soup_value_array_append_vals, soup_value_array_append_vals () +
+
soup_value_array_from_args, soup_value_array_from_args () +
+
soup_value_array_get_nth, soup_value_array_get_nth () +
+
soup_value_array_insert, soup_value_array_insert () +
+
soup_value_array_new, soup_value_array_new () +
+
soup_value_array_new_with_vals, soup_value_array_new_with_vals () +
+
soup_value_array_to_args, soup_value_array_to_args () +
+
SOUP_VALUE_GETV, SOUP_VALUE_GETV() +
+
soup_value_hash_insert, soup_value_hash_insert () +
+
soup_value_hash_insert_vals, soup_value_hash_insert_vals () +
+
soup_value_hash_insert_value, soup_value_hash_insert_value () +
+
soup_value_hash_lookup, soup_value_hash_lookup () +
+
soup_value_hash_lookup_vals, soup_value_hash_lookup_vals () +
+
soup_value_hash_new, soup_value_hash_new () +
+
soup_value_hash_new_with_vals, soup_value_hash_new_with_vals () +
+
SOUP_VALUE_SETV, SOUP_VALUE_SETV() +
+
soup_xmlrpc_build_fault, soup_xmlrpc_build_fault () +
+
soup_xmlrpc_build_method_call, soup_xmlrpc_build_method_call () +
+
soup_xmlrpc_build_method_response, soup_xmlrpc_build_method_response () +
+
soup_xmlrpc_extract_method_call, soup_xmlrpc_extract_method_call () +
+
soup_xmlrpc_extract_method_response, soup_xmlrpc_extract_method_response () +
+
SOUP_XMLRPC_FAULT, SOUP_XMLRPC_FAULT +
+
soup_xmlrpc_parse_method_call, soup_xmlrpc_parse_method_call () +
+
soup_xmlrpc_parse_method_response, soup_xmlrpc_parse_method_response () +
+
soup_xmlrpc_request_new, soup_xmlrpc_request_new () +
+
soup_xmlrpc_set_fault, soup_xmlrpc_set_fault () +
+
soup_xmlrpc_set_response, soup_xmlrpc_set_response () +
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/left.png b/docs/reference/html/left.png new file mode 100644 index 0000000000000000000000000000000000000000..2d05b3d5b4aeec9384bbfe404bfc4ed0897051c4 GIT binary patch literal 459 zcmV;+0W|)JP)40xL?wO*>WZ(J#ML5j2<9jD6A%Q&kC}jOeEc;X{s;`zcnxLeZR6?6h#^ihmNF6NpGdilO$m<82oD9WQ|6nVv1`? z>KufRi{?QPXg;4;wroQu4?mN1Ydd@|kaQ|ZyWLK!)yi7Wb%=0{}lD)tfliHAUyWRQ+fD_;aV6j->y6!O_8bENg + + + +GValue Support + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

GValue Support

+

GValue Support — GValue utilities

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+GHashTable *        soup_value_hash_new                 (void);
+GHashTable *        soup_value_hash_new_with_vals       (const char *first_key,
+                                                         ...);
+void                soup_value_hash_insert_value        (GHashTable *hash,
+                                                         const char *key,
+                                                         GValue *value);
+void                soup_value_hash_insert              (GHashTable *hash,
+                                                         const char *key,
+                                                         GType type,
+                                                         ...);
+void                soup_value_hash_insert_vals         (GHashTable *hash,
+                                                         const char *first_key,
+                                                         ...);
+gboolean            soup_value_hash_lookup              (GHashTable *hash,
+                                                         const char *key,
+                                                         GType type,
+                                                         ...);
+gboolean            soup_value_hash_lookup_vals         (GHashTable *hash,
+                                                         const char *first_key,
+                                                         ...);
+
+GValueArray *       soup_value_array_from_args          (va_list args);
+gboolean            soup_value_array_to_args            (GValueArray *array,
+                                                         va_list args);
+GValueArray *       soup_value_array_new                (void);
+GValueArray *       soup_value_array_new_with_vals      (GType first_type,
+                                                         ...);
+void                soup_value_array_insert             (GValueArray *array,
+                                                         guint index_,
+                                                         GType type,
+                                                         ...);
+void                soup_value_array_append             (GValueArray *array,
+                                                         GType type,
+                                                         ...);
+void                soup_value_array_append_vals        (GValueArray *array,
+                                                         GType first_type,
+                                                         ...);
+gboolean            soup_value_array_get_nth            (GValueArray *array,
+                                                         guint index_,
+                                                         GType type,
+                                                         ...);
+
+#define             SOUP_VALUE_SETV                     (val,
+                                                         type,
+                                                         args)
+#define             SOUP_VALUE_GETV                     (val,
+                                                         type,
+                                                         args)
+
+#define             SOUP_TYPE_BYTE_ARRAY
+
+
+
+

Description

+

+These methods are useful for manipulating GValues, and in +particular, arrays and hash tables of GValues, in a +slightly nicer way than the standard GValue API. +

+

+They are written for use with soup-xmlrpc, but they also work with +types not used by XML-RPC. +

+
+
+

Details

+
+

soup_value_hash_new ()

+
GHashTable *        soup_value_hash_new                 (void);
+

+Creates a GHashTable whose keys are strings and whose values +are GValue. +

+
++ + + + +

Returns :

a new +empty GHashTable. [element-type utf8 GValue][transfer full] +
+
+
+
+

soup_value_hash_new_with_vals ()

+
GHashTable *        soup_value_hash_new_with_vals       (const char *first_key,
+                                                         ...);
+

+Creates a GHashTable whose keys are strings and whose values +are GValue, and initializes it with the provided data. As +with soup_value_hash_insert(), the keys and values are copied +rather than being inserted directly. +

+
++ + + + + + + + + + + + + + +

first_key :

the key for the first value

... :

the type of first_key, followed by the value, followed +by additional key/type/value triplets, terminated by NULL +

Returns :

a new +GHashTable, initialized with the given values. [element-type utf8 GValue][transfer full] +
+
+
+
+

soup_value_hash_insert_value ()

+
void                soup_value_hash_insert_value        (GHashTable *hash,
+                                                         const char *key,
+                                                         GValue *value);
+

+Inserts value into hash. (Unlike with g_hash_table_insert(), both +the key and the value are copied). +

+
++ + + + + + + + + + + + + + +

hash :

a value hash. [element-type utf8 GValue] +

key :

the key

value :

a value
+
+
+
+

soup_value_hash_insert ()

+
void                soup_value_hash_insert              (GHashTable *hash,
+                                                         const char *key,
+                                                         GType type,
+                                                         ...);
+

+Inserts the provided value of type type into hash. (Unlike with +g_hash_table_insert(), both the key and the value are copied). +

+
++ + + + + + + + + + + + + + + + + + +

hash :

a value hash. [element-type utf8 GValue] +

key :

the key

type :

a GType +

... :

a value of type type +
+
+
+
+

soup_value_hash_insert_vals ()

+
void                soup_value_hash_insert_vals         (GHashTable *hash,
+                                                         const char *first_key,
+                                                         ...);
+

+Inserts the given data into hash. As with +soup_value_hash_insert(), the keys and values are copied rather +than being inserted directly. +

+
++ + + + + + + + + + + + + + +

hash :

a value hash. [element-type utf8 GValue] +

first_key :

the key for the first value

... :

the type of first_key, followed by the value, followed +by additional key/type/value triplets, terminated by NULL +
+
+
+
+

soup_value_hash_lookup ()

+
gboolean            soup_value_hash_lookup              (GHashTable *hash,
+                                                         const char *key,
+                                                         GType type,
+                                                         ...);
+

+Looks up key in hash and stores its value into the provided +location. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

hash :

a value hash. [element-type utf8 GValue] +

key :

the key to look up

type :

a GType +

... :

a value of type pointer-to-type +

Returns :

+TRUE if hash contained a value with key key and +type type, FALSE if not.
+
+
+
+

soup_value_hash_lookup_vals ()

+
gboolean            soup_value_hash_lookup_vals         (GHashTable *hash,
+                                                         const char *first_key,
+                                                         ...);
+

+Looks up a number of keys in hash and returns their values. +

+
++ + + + + + + + + + + + + + + + + + +

hash :

a value hash. [element-type utf8 GValue] +

first_key :

the first key to look up

... :

the type of first_key, a pointer to that type, and +then additional key/type/pointer triplets, terminated +by NULL.

Returns :

+TRUE if all of the keys were found, FALSE +if any were missing; note that you will generally need to +initialize each destination variable to a reasonable default +value, since there is no way to tell which keys were found +and which were not.
+
+
+
+

soup_value_array_from_args ()

+
GValueArray *       soup_value_array_from_args          (va_list args);
+

+Creates a GValueArray from the provided arguments, which must +consist of pairs of a GType and a value of that type, terminated +by G_TYPE_INVALID. (The array will contain copies of the provided +data rather than pointing to the passed-in data directly.) +

+
++ + + + + + + + + + +

args :

arguments to create a GValueArray from

Returns :

a new GValueArray, or NULL if an error occurred.
+
+
+
+

soup_value_array_to_args ()

+
gboolean            soup_value_array_to_args            (GValueArray *array,
+                                                         va_list args);
+

+Extracts a GValueArray into the provided arguments, which must +consist of pairs of a GType and a value of pointer-to-that-type, +terminated by G_TYPE_INVALID. The returned values will point to the +same memory as the values in the array. +

+
++ + + + + + + + + + + + + + +

array :

a GValueArray +

args :

arguments to extract array into

Returns :

success or failure
+
+
+
+

soup_value_array_new ()

+
GValueArray *       soup_value_array_new                (void);
+

+Creates a new GValueArray. (This is just a wrapper around +g_value_array_new(), for naming consistency purposes.) +

+
++ + + + +

Returns :

a new GValueArray +
+
+
+
+

soup_value_array_new_with_vals ()

+
GValueArray *       soup_value_array_new_with_vals      (GType first_type,
+                                                         ...);
+

+Creates a new GValueArray and copies the provided values +into it. +

+
++ + + + + + + + + + + + + + +

first_type :

the type of the first value to add

... :

the first value to add, followed by other type/value +pairs, terminated by G_TYPE_INVALID +

Returns :

a new GValueArray +
+
+
+
+

soup_value_array_insert ()

+
void                soup_value_array_insert             (GValueArray *array,
+                                                         guint index_,
+                                                         GType type,
+                                                         ...);
+

+Inserts the provided value of type type into array as with +g_value_array_insert(). (The provided data is copied rather than +being inserted directly.) +

+
++ + + + + + + + + + + + + + + + + + +

array :

a GValueArray +

index_ :

the index to insert at

type :

a GType +

... :

a value of type type +
+
+
+
+

soup_value_array_append ()

+
void                soup_value_array_append             (GValueArray *array,
+                                                         GType type,
+                                                         ...);
+

+Appends the provided value of type type to array as with +g_value_array_append(). (The provided data is copied rather than +being inserted directly.) +

+
++ + + + + + + + + + + + + + +

array :

a GValueArray +

type :

a GType +

... :

a value of type type +
+
+
+
+

soup_value_array_append_vals ()

+
void                soup_value_array_append_vals        (GValueArray *array,
+                                                         GType first_type,
+                                                         ...);
+

+Appends the provided values into array as with +g_value_array_append(). (The provided data is copied rather than +being inserted directly.) +

+
++ + + + + + + + + + + + + + +

array :

a GValueArray +

first_type :

the type of the first value to add

... :

the first value to add, followed by other type/value +pairs, terminated by G_TYPE_INVALID +
+
+
+
+

soup_value_array_get_nth ()

+
gboolean            soup_value_array_get_nth            (GValueArray *array,
+                                                         guint index_,
+                                                         GType type,
+                                                         ...);
+

+Gets the index_ element of array and stores its value into the +provided location. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

array :

a GValueArray +

index_ :

the index to look up

type :

a GType +

... :

a value of type pointer-to-type +

Returns :

+TRUE if array contained a value with index index_ +and type type, FALSE if not.
+
+
+
+

SOUP_VALUE_SETV()

+
#define             SOUP_VALUE_SETV(val, type, args)
+

+Copies an argument of type type from args into val. val will +point directly to the value in args rather than copying it, so you +must g_value_copy() it if you want it to remain valid. +

+
++ + + + + + + + + + + + + + +

val :

a GValue +

type :

a GType +

args :

+va_list pointing to a value of type type +
+
+
+
+

SOUP_VALUE_GETV()

+
#define             SOUP_VALUE_GETV(val, type, args)
+

+Extracts a value of type type from val into args. The return +value will point to the same data as val rather than being a copy +of it. +

+
++ + + + + + + + + + + + + + +

val :

a GValue +

type :

a GType +

args :

+va_list pointing to a value of type pointer-to-type +
+
+
+
+

SOUP_TYPE_BYTE_ARRAY

+
#define SOUP_TYPE_BYTE_ARRAY (soup_byte_array_get_type ())
+
+

+glib does not define a GType for GByteArray, so libsoup +defines this one itself. +

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-2.4-Soup-Miscellaneous-Utilities.html b/docs/reference/html/libsoup-2.4-Soup-Miscellaneous-Utilities.html new file mode 100644 index 0000000..756c5fc --- /dev/null +++ b/docs/reference/html/libsoup-2.4-Soup-Miscellaneous-Utilities.html @@ -0,0 +1,1422 @@ + + + + +Soup Miscellaneous Utilities + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

Soup Miscellaneous Utilities

+

Soup Miscellaneous Utilities — Miscellaneous functions

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+                    SoupDate;
+enum                SoupDateFormat;
+SoupDate *          soup_date_new                       (int year,
+                                                         int month,
+                                                         int day,
+                                                         int hour,
+                                                         int minute,
+                                                         int second);
+SoupDate *          soup_date_new_from_string           (const char *date_string);
+SoupDate *          soup_date_new_from_time_t           (time_t when);
+SoupDate *          soup_date_new_from_now              (int offset_seconds);
+char *              soup_date_to_string                 (SoupDate *date,
+                                                         SoupDateFormat format);
+time_t              soup_date_to_time_t                 (SoupDate *date);
+void                soup_date_to_timeval                (SoupDate *date,
+                                                         GTimeVal *time);
+gboolean            soup_date_is_past                   (SoupDate *date);
+int                 soup_date_get_day                   (SoupDate *date);
+int                 soup_date_get_hour                  (SoupDate *date);
+int                 soup_date_get_minute                (SoupDate *date);
+int                 soup_date_get_month                 (SoupDate *date);
+int                 soup_date_get_offset                (SoupDate *date);
+int                 soup_date_get_second                (SoupDate *date);
+int                 soup_date_get_utc                   (SoupDate *date);
+int                 soup_date_get_year                  (SoupDate *date);
+void                soup_date_free                      (SoupDate *date);
+
+guint               soup_headers_parse_request          (const char *str,
+                                                         int len,
+                                                         SoupMessageHeaders *req_headers,
+                                                         char **req_method,
+                                                         char **req_path,
+                                                         SoupHTTPVersion *ver);
+gboolean            soup_headers_parse_response         (const char *str,
+                                                         int len,
+                                                         SoupMessageHeaders *headers,
+                                                         SoupHTTPVersion *ver,
+                                                         guint *status_code,
+                                                         char **reason_phrase);
+gboolean            soup_headers_parse_status_line      (const char *status_line,
+                                                         SoupHTTPVersion *ver,
+                                                         guint *status_code,
+                                                         char **reason_phrase);
+gboolean            soup_headers_parse                  (const char *str,
+                                                         int len,
+                                                         SoupMessageHeaders *dest);
+
+GSList *            soup_header_parse_list              (const char *header);
+GSList *            soup_header_parse_quality_list      (const char *header,
+                                                         GSList **unacceptable);
+void                soup_header_free_list               (GSList *list);
+gboolean            soup_header_contains                (const char *header,
+                                                         const char *token);
+GHashTable *        soup_header_parse_param_list        (const char *header);
+GHashTable *        soup_header_parse_semi_param_list   (const char *header);
+void                soup_header_free_param_list         (GHashTable *param_list);
+void                soup_header_g_string_append_param   (GString *string,
+                                                         const char *name,
+                                                         const char *value);
+void                soup_header_g_string_append_param_quoted
+                                                        (GString *string,
+                                                         const char *name,
+                                                         const char *value);
+
+gboolean            soup_str_case_equal                 (gconstpointer v1,
+                                                         gconstpointer v2);
+guint               soup_str_case_hash                  (gconstpointer key);
+
+GSource *           soup_add_completion                 (GMainContext *async_context,
+                                                         GSourceFunc function,
+                                                         gpointer data);
+GSource *           soup_add_idle                       (GMainContext *async_context,
+                                                         GSourceFunc function,
+                                                         gpointer data);
+GSource *           soup_add_io_watch                   (GMainContext *async_context,
+                                                         GIOChannel *chan,
+                                                         GIOCondition condition,
+                                                         GIOFunc function,
+                                                         gpointer data);
+GSource *           soup_add_timeout                    (GMainContext *async_context,
+                                                         guint interval,
+                                                         GSourceFunc function,
+                                                         gpointer data);
+
+extern const gboolean soup_ssl_supported;
+
+
+
+

Object Hierarchy

+
+  GBoxed
+   +----SoupDate
+
+
+
+

Description

+
+
+

Details

+
+

SoupDate

+
typedef struct {
+	int      year;
+	int      month;
+	int      day;
+
+	int      hour;
+	int      minute;
+	int      second;
+
+	gboolean utc;
+	int      offset;
+} SoupDate;
+
+

+A date and time. The date is assumed to be in the (proleptic) +Gregorian calendar. The time is in UTC if utc is TRUE. Otherwise, +the time is a local time, and offset gives the offset from UTC in +minutes (such that adding offset to the time would give the +correct UTC time). If utc is FALSE and offset is 0, then the +SoupDate represents a "floating" time with no associated timezone +information. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

int year;

the year, 1 to 9999

int month;

the month, 1 to 12

int day;

day of the month, 1 to 31

int hour;

hour of the day, 0 to 23

int minute;

minute, 0 to 59

int second;

second, 0 to 59 (or up to 61 in the case of leap seconds)

gboolean utc;

+TRUE if the date is in UTC

int offset;

offset from UTC
+
+
+
+

enum SoupDateFormat

+
typedef enum {
+	SOUP_DATE_HTTP = 1,
+	SOUP_DATE_COOKIE,
+	SOUP_DATE_RFC2822,
+	SOUP_DATE_ISO8601_COMPACT,
+	SOUP_DATE_ISO8601_FULL,
+	SOUP_DATE_ISO8601 = SOUP_DATE_ISO8601_FULL,
+	SOUP_DATE_ISO8601_XMLRPC
+} SoupDateFormat;
+
+

+Date formats that soup_date_to_string() can use. +

+

+SOUP_DATE_HTTP and SOUP_DATE_COOKIE always coerce the time to +UTC. SOUP_DATE_ISO8601_XMLRPC uses the time as given, ignoring the +offset completely. SOUP_DATE_RFC2822 and the other ISO 8601 +variants use the local time, appending the offset information if +available. +

+

+This enum may be extended with more values in future releases. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

SOUP_DATE_HTTP

RFC 1123 format, used by the HTTP "Date" header. Eg +"Sun, 06 Nov 1994 08:49:37 GMT" +

SOUP_DATE_COOKIE

The format for the "Expires" timestamp in the +Netscape cookie specification. Eg, "Sun, 06-Nov-1994 08:49:37 GMT". +

SOUP_DATE_RFC2822

RFC 2822 format, eg "Sun, 6 Nov 1994 09:49:37 -0100" +

SOUP_DATE_ISO8601_COMPACT

ISO 8601 date/time with no optional +punctuation. Eg, "19941106T094937-0100". +

SOUP_DATE_ISO8601_FULL

ISO 8601 date/time with all optional +punctuation. Eg, "1994-11-06T09:49:37-01:00". +

SOUP_DATE_ISO8601

An alias for SOUP_DATE_ISO8601_FULL. +

SOUP_DATE_ISO8601_XMLRPC

ISO 8601 date/time as used by XML-RPC. +Eg, "19941106T09:49:37". +
+
+
+
+

soup_date_new ()

+
SoupDate *          soup_date_new                       (int year,
+                                                         int month,
+                                                         int day,
+                                                         int hour,
+                                                         int minute,
+                                                         int second);
+

+Creates a SoupDate representing the indicated time, UTC. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

year :

the year (1-9999)

month :

the month (1-12)

day :

the day of the month (1-31, as appropriate for month)

hour :

the hour (0-23)

minute :

the minute (0-59)

second :

the second (0-59, or up to 61 for leap seconds)

Returns :

a new SoupDate +
+
+
+
+

soup_date_new_from_string ()

+
SoupDate *          soup_date_new_from_string           (const char *date_string);
+

+Parses date_string and tries to extract a date from it. This +recognizes all of the "HTTP-date" formats from RFC 2616, all ISO +8601 formats containing both a time and a date, RFC 2822 dates, +and reasonable approximations thereof. (Eg, it is lenient about +whitespace, leading "0"s, etc.) +

+
++ + + + + + + + + + +

date_string :

the date in some plausible format

Returns :

a new SoupDate, or NULL if date_string could not +be parsed.
+
+
+
+

soup_date_new_from_time_t ()

+
SoupDate *          soup_date_new_from_time_t           (time_t when);
+

+Creates a SoupDate corresponding to when +

+
++ + + + + + + + + + +

when :

a time_t +

Returns :

a new SoupDate +
+
+
+
+

soup_date_new_from_now ()

+
SoupDate *          soup_date_new_from_now              (int offset_seconds);
+

+Creates a SoupDate representing a time offset_seconds after the +current time (or before it, if offset_seconds is negative). If +offset_seconds is 0, returns the current time. +

+

+If offset_seconds would indicate a time not expressible as a +time_t, the return value will be clamped into range. +

+
++ + + + + + + + + + +

offset_seconds :

offset from current time

Returns :

a new SoupDate +
+
+
+
+

soup_date_to_string ()

+
char *              soup_date_to_string                 (SoupDate *date,
+                                                         SoupDateFormat format);
+

+Converts date to a string in the format described by format. +

+
++ + + + + + + + + + + + + + +

date :

a SoupDate +

format :

the format to generate the date in

Returns :

+date as a string
+
+
+
+

soup_date_to_time_t ()

+
time_t              soup_date_to_time_t                 (SoupDate *date);
+

+Converts date to a time_t. +

+

+If date is not representable as a time_t, it will be clamped into +range. (In particular, some HTTP cookies have expiration dates +after "Y2.038k" (2038-01-19T03:14:07Z).) +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date as a time_t +
+
+
+
+

soup_date_to_timeval ()

+
void                soup_date_to_timeval                (SoupDate *date,
+                                                         GTimeVal *time);
+

+Converts date to a GTimeVal. +

+
++ + + + + + + + + + +

date :

a SoupDate +

time :

a GTimeVal structure in which to store the converted time. [out] +
+

Since 2.24

+
+
+
+

soup_date_is_past ()

+
gboolean            soup_date_is_past                   (SoupDate *date);
+

+Determines if date is in the past. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+TRUE if date is in the past
+

Since 2.24

+
+
+
+

soup_date_get_day ()

+
int                 soup_date_get_day                   (SoupDate *date);
+

+Gets date's day. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date's day
+

Since 2.32

+
+
+
+

soup_date_get_hour ()

+
int                 soup_date_get_hour                  (SoupDate *date);
+

+Gets date's hour. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date's hour
+

Since 2.32

+
+
+
+

soup_date_get_minute ()

+
int                 soup_date_get_minute                (SoupDate *date);
+

+Gets date's minute. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date's minute
+

Since 2.32

+
+
+
+

soup_date_get_month ()

+
int                 soup_date_get_month                 (SoupDate *date);
+

+Gets date's month. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date's month
+

Since 2.32

+
+
+
+

soup_date_get_offset ()

+
int                 soup_date_get_offset                (SoupDate *date);
+

+Gets date's offset from UTC. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date's offset from UTC. If soup_date_get_utc() +returns FALSE but soup_date_get_offset() returns 0, that means the +date is a "floating" time with no associated offset information.
+

Since 2.32

+
+
+
+

soup_date_get_second ()

+
int                 soup_date_get_second                (SoupDate *date);
+

+Gets date's second. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date's second
+

Since 2.32

+
+
+
+

soup_date_get_utc ()

+
int                 soup_date_get_utc                   (SoupDate *date);
+

+Gets date's UTC flag +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+TRUE if date is UTC.
+

Since 2.32

+
+
+
+

soup_date_get_year ()

+
int                 soup_date_get_year                  (SoupDate *date);
+

+Gets date's year. +

+
++ + + + + + + + + + +

date :

a SoupDate +

Returns :

+date's year
+

Since 2.32

+
+
+
+

soup_date_free ()

+
void                soup_date_free                      (SoupDate *date);
+

+Frees date. +

+
++ + + + +

date :

a SoupDate +
+
+
+
+

soup_headers_parse_request ()

+
guint               soup_headers_parse_request          (const char *str,
+                                                         int len,
+                                                         SoupMessageHeaders *req_headers,
+                                                         char **req_method,
+                                                         char **req_path,
+                                                         SoupHTTPVersion *ver);
+

+Parses the headers of an HTTP request in str and stores the +results in req_method, req_path, ver, and req_headers. +

+

+Beware that req_headers may be modified even on failure. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

str :

the headers (up to, but not including, the trailing blank line)

len :

length of str +

req_headers :

+SoupMessageHeaders to store the header values in

req_method :

if non-NULL, will be filled in with the +request method. [out][allow-none] +

req_path :

if non-NULL, will be filled in with the +request path. [out][allow-none] +

ver :

if non-NULL, will be filled in with the HTTP +version. [out][allow-none] +

Returns :

+SOUP_STATUS_OK if the headers could be parsed, or an +HTTP error to be returned to the client if they could not be.
+
+
+
+

soup_headers_parse_response ()

+
gboolean            soup_headers_parse_response         (const char *str,
+                                                         int len,
+                                                         SoupMessageHeaders *headers,
+                                                         SoupHTTPVersion *ver,
+                                                         guint *status_code,
+                                                         char **reason_phrase);
+

+Parses the headers of an HTTP response in str and stores the +results in ver, status_code, reason_phrase, and headers. +

+

+Beware that headers may be modified even on failure. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

str :

the headers (up to, but not including, the trailing blank line)

len :

length of str +

headers :

+SoupMessageHeaders to store the header values in

ver :

if non-NULL, will be filled in with the HTTP +version. [out][allow-none] +

status_code :

if non-NULL, will be filled in with +the status code. [out][allow-none] +

reason_phrase :

if non-NULL, will be filled in with +the reason phrase. [out][allow-none] +

Returns :

success or failure.
+
+
+
+

soup_headers_parse_status_line ()

+
gboolean            soup_headers_parse_status_line      (const char *status_line,
+                                                         SoupHTTPVersion *ver,
+                                                         guint *status_code,
+                                                         char **reason_phrase);
+

+Parses the HTTP Status-Line string in status_line into ver, +status_code, and reason_phrase. status_line must be terminated by +either "\0" or "\r\n". +

+
++ + + + + + + + + + + + + + + + + + + + + + +

status_line :

an HTTP Status-Line

ver :

if non-NULL, will be filled in with the HTTP +version. [out][allow-none] +

status_code :

if non-NULL, will be filled in with +the status code. [out][allow-none] +

reason_phrase :

if non-NULL, will be filled in with +the reason phrase. [out][allow-none] +

Returns :

+TRUE if status_line was parsed successfully.
+
+
+
+

soup_headers_parse ()

+
gboolean            soup_headers_parse                  (const char *str,
+                                                         int len,
+                                                         SoupMessageHeaders *dest);
+

+Parses the headers of an HTTP request or response in str and +stores the results in dest. Beware that dest may be modified even +on failure. +

+

+This is a low-level method; normally you would use +soup_headers_parse_request() or soup_headers_parse_response(). +

+
++ + + + + + + + + + + + + + + + + + +

str :

the header string (including the Request-Line or Status-Line, +but not the trailing blank line)

len :

length of str +

dest :

+SoupMessageHeaders to store the header values in

Returns :

success or failure
+

Since 2.26

+
+
+
+

soup_header_parse_list ()

+
GSList *            soup_header_parse_list              (const char *header);
+

+Parses a header whose content is described by RFC2616 as +"something", where "something" does not itself contain commas, +except as part of quoted-strings. +

+
++ + + + + + + + + + +

header :

a header value

Returns :

a GSList of +list elements, as allocated strings. [transfer full][element-type utf8] +
+
+
+
+

soup_header_parse_quality_list ()

+
GSList *            soup_header_parse_quality_list      (const char *header,
+                                                         GSList **unacceptable);
+

+Parses a header whose content is a list of items with optional +"qvalue"s (eg, Accept, Accept-Charset, Accept-Encoding, +Accept-Language, TE). +

+

+If unacceptable is not NULL, then on return, it will contain the +items with qvalue 0. Either way, those items will be removed from +the main list. +

+
++ + + + + + + + + + + + + + +

header :

a header value

unacceptable :

on +return, will contain a list of unacceptable values. [out][allow-none][transfer full][element-type utf8] +

Returns :

a GSList of +acceptable values (as allocated strings), highest-qvalue first. [transfer full][element-type utf8] +
+
+
+
+

soup_header_free_list ()

+
void                soup_header_free_list               (GSList *list);
+

+Frees list. +

+
++ + + + +

list :

a GSList returned from soup_header_parse_list() or +soup_header_parse_quality_list() +
+
+
+
+

soup_header_contains ()

+
gboolean            soup_header_contains                (const char *header,
+                                                         const char *token);
+

+Parses header to see if it contains the token token (matched +case-insensitively). Note that this can't be used with lists +that have qvalues. +

+
++ + + + + + + + + + + + + + +

header :

An HTTP header suitable for parsing with +soup_header_parse_list() +

token :

a token

Returns :

whether or not header contains token +
+
+
+
+

soup_header_parse_param_list ()

+
GHashTable *        soup_header_parse_param_list        (const char *header);
+

+Parses a header which is a comma-delimited list of something like: +token [ "=" ( token | quoted-string ) ]. +

+

+Tokens that don't have an associated value will still be added to +the resulting hash table, but with a NULL value. +

+

+This also handles RFC5987 encoding (which in HTTP is mostly used +for giving UTF8-encoded filenames in the Content-Disposition +header). +

+
++ + + + + + + + + + +

header :

a header value

Returns :

a +GHashTable of list elements, which can be freed with +soup_header_free_param_list(). [element-type utf8 utf8][transfer full] +
+
+
+
+

soup_header_parse_semi_param_list ()

+
GHashTable *        soup_header_parse_semi_param_list   (const char *header);
+

+Parses a header which is a semicolon-delimited list of something +like: token [ "=" ( token | quoted-string ) ]. +

+

+Tokens that don't have an associated value will still be added to +the resulting hash table, but with a NULL value. +

+

+This also handles RFC5987 encoding (which in HTTP is mostly used +for giving UTF8-encoded filenames in the Content-Disposition +header). +

+
++ + + + + + + + + + +

header :

a header value

Returns :

a +GHashTable of list elements, which can be freed with +soup_header_free_param_list(). [element-type utf8 utf8][transfer full] +
+

Since 2.24

+
+
+
+

soup_header_free_param_list ()

+
void                soup_header_free_param_list         (GHashTable *param_list);
+

+Frees param_list. +

+
++ + + + +

param_list :

a GHashTable returned from soup_header_parse_param_list() +or soup_header_parse_semi_param_list(). [element-type utf8 utf8] +
+
+
+
+

soup_header_g_string_append_param ()

+
void                soup_header_g_string_append_param   (GString *string,
+                                                         const char *name,
+                                                         const char *value);
+

+Appends something like name=value to string, +taking care to quote value if needed, and if so, to escape any +quotes or backslashes in value. +

+

+Alternatively, if value is a non-ASCII UTF-8 string, it will be +appended using RFC5987 syntax. Although in theory this is supposed +to work anywhere in HTTP that uses this style of parameter, in +reality, it can only be used portably with the Content-Disposition +"filename" parameter. +

+

+If value is NULL, this will just append name to string. +

+
++ + + + + + + + + + + + + + +

string :

a GString being used to construct an HTTP header value

name :

a parameter name

value :

a parameter value, or NULL +
+

Since 2.26

+
+
+
+

soup_header_g_string_append_param_quoted ()

+
void                soup_header_g_string_append_param_quoted
+                                                        (GString *string,
+                                                         const char *name,
+                                                         const char *value);
+

+Appends something like name="value" to +string, taking care to escape any quotes or backslashes in value. +

+

+If value is (non-ASCII) UTF-8, this will instead use RFC 5987 +encoding, just like soup_header_g_string_append_param(). +

+
++ + + + + + + + + + + + + + +

string :

a GString being used to construct an HTTP header value

name :

a parameter name

value :

a parameter value
+

Since 2.30

+
+
+
+

soup_str_case_equal ()

+
gboolean            soup_str_case_equal                 (gconstpointer v1,
+                                                         gconstpointer v2);
+

+Compares v1 and v2 in a case-insensitive manner +

+
++ + + + + + + + + + + + + + +

v1 :

an ASCII string

v2 :

another ASCII string

Returns :

+TRUE if they are equal (modulo case)
+
+
+
+

soup_str_case_hash ()

+
guint               soup_str_case_hash                  (gconstpointer key);
+

+Hashes key in a case-insensitive manner. +

+
++ + + + + + + + + + +

key :

ASCII string to hash

Returns :

the hash code.
+
+
+
+

soup_add_completion ()

+
GSource *           soup_add_completion                 (GMainContext *async_context,
+                                                         GSourceFunc function,
+                                                         gpointer data);
+

+Adds function to be executed from inside async_context with the +default priority. Use this when you want to complete an action in +async_context's main loop, as soon as possible. +

+
++ + + + + + + + + + + + + + + + + + +

async_context :

the GMainContext to dispatch the I/O +watch in, or NULL for the default context. [allow-none] +

function :

the callback to invoke

data :

user data to pass to function +

Returns :

a GSource, which can be removed from async_context +with g_source_destroy().
+

Since 2.24

+
+
+
+

soup_add_idle ()

+
GSource *           soup_add_idle                       (GMainContext *async_context,
+                                                         GSourceFunc function,
+                                                         gpointer data);
+

+Adds an idle event as with g_idle_add(), but using the given +async_context. +

+

+If you want function to run "right away", use +soup_add_completion(), since that sets a higher priority on the +GSource than soup_add_idle() does. +

+
++ + + + + + + + + + + + + + + + + + +

async_context :

the GMainContext to dispatch the I/O +watch in, or NULL for the default context. [allow-none] +

function :

the callback to invoke at idle time

data :

user data to pass to function +

Returns :

a GSource, which can be removed from async_context +with g_source_destroy().
+
+
+
+

soup_add_io_watch ()

+
GSource *           soup_add_io_watch                   (GMainContext *async_context,
+                                                         GIOChannel *chan,
+                                                         GIOCondition condition,
+                                                         GIOFunc function,
+                                                         gpointer data);
+

+Adds an I/O watch as with g_io_add_watch(), but using the given +async_context. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

async_context :

the GMainContext to dispatch the I/O +watch in, or NULL for the default context. [allow-none] +

chan :

the GIOChannel to watch

condition :

the condition to watch for

function :

the callback to invoke when condition occurs

data :

user data to pass to function +

Returns :

a GSource, which can be removed from async_context +with g_source_destroy().
+
+
+
+

soup_add_timeout ()

+
GSource *           soup_add_timeout                    (GMainContext *async_context,
+                                                         guint interval,
+                                                         GSourceFunc function,
+                                                         gpointer data);
+

+Adds a timeout as with g_timeout_add(), but using the given +async_context. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

async_context :

the GMainContext to dispatch the I/O +watch in, or NULL for the default context. [allow-none] +

interval :

the timeout interval, in milliseconds

function :

the callback to invoke at timeout time

data :

user data to pass to function +

Returns :

a GSource, which can be removed from async_context +with g_source_destroy().
+
+
+
+

soup_ssl_supported

+
extern const gboolean soup_ssl_supported;
+
+

+

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-2.4-XMLRPC-Support.html b/docs/reference/html/libsoup-2.4-XMLRPC-Support.html new file mode 100644 index 0000000..c2f0601 --- /dev/null +++ b/docs/reference/html/libsoup-2.4-XMLRPC-Support.html @@ -0,0 +1,600 @@ + + + + +XMLRPC Support + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

XMLRPC Support

+

XMLRPC Support — XML-RPC support

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+char *              soup_xmlrpc_build_method_call       (const char *method_name,
+                                                         GValue *params,
+                                                         int n_params);
+SoupMessage *       soup_xmlrpc_request_new             (const char *uri,
+                                                         const char *method_name,
+                                                         ...);
+gboolean            soup_xmlrpc_parse_method_response   (const char *method_response,
+                                                         int length,
+                                                         GValue *value,
+                                                         GError **error);
+gboolean            soup_xmlrpc_extract_method_response (const char *method_response,
+                                                         int length,
+                                                         GError **error,
+                                                         GType type,
+                                                         ...);
+
+gboolean            soup_xmlrpc_parse_method_call       (const char *method_call,
+                                                         int length,
+                                                         char **method_name,
+                                                         GValueArray **params);
+gboolean            soup_xmlrpc_extract_method_call     (const char *method_call,
+                                                         int length,
+                                                         char **method_name,
+                                                         ...);
+char *              soup_xmlrpc_build_method_response   (GValue *value);
+char *              soup_xmlrpc_build_fault             (int fault_code,
+                                                         const char *fault_format,
+                                                         ...);
+void                soup_xmlrpc_set_response            (SoupMessage *msg,
+                                                         GType type,
+                                                         ...);
+void                soup_xmlrpc_set_fault               (SoupMessage *msg,
+                                                         int fault_code,
+                                                         const char *fault_format,
+                                                         ...);
+
+#define             SOUP_XMLRPC_FAULT
+enum                SoupXMLRPCFault;
+
+
+
+

Description

+
+
+

Details

+
+

soup_xmlrpc_build_method_call ()

+
char *              soup_xmlrpc_build_method_call       (const char *method_name,
+                                                         GValue *params,
+                                                         int n_params);
+

+This creates an XML-RPC methodCall and returns it as a string. +This is the low-level method that soup_xmlrpc_request_new() is +built on. +

+

+params is an array of GValue representing the parameters to +method. (It is *not* a GValueArray, although if you have a +GValueArray, you can just pass its values and +n_values fields.) +

+

+The correspondence between glib types and XML-RPC types is: +

+

+ int: int (G_TYPE_INT) + boolean: gboolean (G_TYPE_BOOLEAN) + string: char* (G_TYPE_STRING) + double: double (G_TYPE_DOUBLE) + datetime.iso8601: SoupDate (SOUP_TYPE_DATE) + base64: GByteArray (SOUP_TYPE_BYTE_ARRAY) + struct: GHashTable (G_TYPE_HASH_TABLE) + array: GValueArray (G_TYPE_VALUE_ARRAY) +

+

+For structs, use a GHashTable that maps strings to GValue; +soup_value_hash_new() and related methods can help with this. +

+
++ + + + + + + + + + + + + + + + + + +

method_name :

the name of the XML-RPC method

params :

arguments to method. [array length=n_params] +

n_params :

length of params +

Returns :

the text of the methodCall, or NULL on error
+
+
+
+

soup_xmlrpc_request_new ()

+
SoupMessage *       soup_xmlrpc_request_new             (const char *uri,
+                                                         const char *method_name,
+                                                         ...);
+

+Creates an XML-RPC methodCall and returns a SoupMessage, ready +to send, for that method call. +

+

+The parameters are passed as type/value pairs; ie, first a GType, +and then a value of the appropriate type, finally terminated by +G_TYPE_INVALID. +

+
++ + + + + + + + + + + + + + + + + + +

uri :

URI of the XML-RPC service

method_name :

the name of the XML-RPC method to invoke at uri +

... :

parameters for method +

Returns :

a SoupMessage encoding the +indicated XML-RPC request. [transfer full] +
+
+
+
+

soup_xmlrpc_parse_method_response ()

+
gboolean            soup_xmlrpc_parse_method_response   (const char *method_response,
+                                                         int length,
+                                                         GValue *value,
+                                                         GError **error);
+

+Parses method_response and returns the return value in value. If +method_response is a fault, value will be unchanged, and error +will be set to an error of type SOUP_XMLRPC_FAULT, with the error +code containing the fault code, and the error message containing +the fault string. (If method_response cannot be parsed at all, +soup_xmlrpc_parse_method_response() will return FALSE, but error +will be unset.) +

+
++ + + + + + + + + + + + + + + + + + + + + + +

method_response :

the XML-RPC methodResponse string

length :

the length of method_response, or -1 if it is NUL-terminated

value :

on return, the return value from method_call. [out] +

error :

error return value

Returns :

+TRUE if a return value was parsed, FALSE if the +response could not be parsed, or contained a fault.
+
+
+
+

soup_xmlrpc_extract_method_response ()

+
gboolean            soup_xmlrpc_extract_method_response (const char *method_response,
+                                                         int length,
+                                                         GError **error,
+                                                         GType type,
+                                                         ...);
+

+Parses method_response and extracts the return value into +a variable of the correct type. +

+

+If method_response is a fault, the return value will be unset, +and error will be set to an error of type SOUP_XMLRPC_FAULT, with +the error code containing the fault code, and the error message +containing the fault string. (If method_response cannot be parsed +at all, soup_xmlrpc_extract_method_response() will return FALSE, +but error will be unset.) +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

method_response :

the XML-RPC methodResponse string

length :

the length of method_response, or -1 if it is NUL-terminated

error :

error return value

type :

the expected type of the return value

... :

location for return value

Returns :

+TRUE if a return value was parsed, FALSE if the +response was of the wrong type, or contained a fault.
+
+
+
+

soup_xmlrpc_parse_method_call ()

+
gboolean            soup_xmlrpc_parse_method_call       (const char *method_call,
+                                                         int length,
+                                                         char **method_name,
+                                                         GValueArray **params);
+

+Parses method_call to get the name and parameters, and returns the +parameter values in a GValueArray; see also +soup_xmlrpc_extract_method_call(), which is more convenient if you +know in advance what the types of the parameters will be. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

method_call :

the XML-RPC methodCall string

length :

the length of method_call, or -1 if it is NUL-terminated

method_name :

on return, the methodName from method_call. [out] +

params :

on return, the parameters from method_call. [out] +

Returns :

success or failure.
+
+
+
+

soup_xmlrpc_extract_method_call ()

+
gboolean            soup_xmlrpc_extract_method_call     (const char *method_call,
+                                                         int length,
+                                                         char **method_name,
+                                                         ...);
+

+Parses method_call to get the name and parameters, and puts +the parameters into variables of the appropriate types. +

+

+The parameters are handled similarly to +soup_xmlrpc_build_method_call, with pairs of types and values, +terminated by G_TYPE_INVALID, except that values are pointers to +variables of the indicated type, rather than values of the type. +

+

+See also soup_xmlrpc_parse_method_call(), which can be used if +you don't know the types of the parameters. +

+
++ + + + + + + + + + + + + + + + + + + + + + +

method_call :

the XML-RPC methodCall string

length :

the length of method_call, or -1 if it is NUL-terminated

method_name :

on return, the methodName from method_call. [out] +

... :

return types and locations for parameters

Returns :

success or failure.
+
+
+
+

soup_xmlrpc_build_method_response ()

+
char *              soup_xmlrpc_build_method_response   (GValue *value);
+

+This creates a (successful) XML-RPC methodResponse and returns it +as a string. To create a fault response, use +soup_xmlrpc_build_fault(). +

+

+The glib type to XML-RPC type mapping is as with +soup_xmlrpc_build_method_call(), qv. +

+
++ + + + + + + + + + +

value :

the return value

Returns :

the text of the methodResponse, or NULL on error
+
+
+
+

soup_xmlrpc_build_fault ()

+
char *              soup_xmlrpc_build_fault             (int fault_code,
+                                                         const char *fault_format,
+                                                         ...);
+

+This creates an XML-RPC fault response and returns it as a string. +(To create a successful response, use +soup_xmlrpc_build_method_response().) +

+
++ + + + + + + + + + + + + + + + + + +

fault_code :

the fault code

fault_format :

a printf()-style format string

... :

the parameters to fault_format +

Returns :

the text of the fault
+
+
+
+

soup_xmlrpc_set_response ()

+
void                soup_xmlrpc_set_response            (SoupMessage *msg,
+                                                         GType type,
+                                                         ...);
+

+Sets the status code and response body of msg to indicate a +successful XML-RPC call, with a return value given by type and the +following varargs argument, of the type indicated by type. +

+
++ + + + + + + + + + + + + + +

msg :

an XML-RPC request

type :

the type of the response value

... :

the response value
+
+
+
+

soup_xmlrpc_set_fault ()

+
void                soup_xmlrpc_set_fault               (SoupMessage *msg,
+                                                         int fault_code,
+                                                         const char *fault_format,
+                                                         ...);
+

+Sets the status code and response body of msg to indicate an +unsuccessful XML-RPC call, with the error described by fault_code +and fault_format. +

+
++ + + + + + + + + + + + + + + + + + +

msg :

an XML-RPC request

fault_code :

the fault code

fault_format :

a printf()-style format string

... :

the parameters to fault_format +
+
+
+
+

SOUP_XMLRPC_FAULT

+
#define SOUP_XMLRPC_FAULT soup_xmlrpc_fault_quark()
+
+

+A GError domain representing an XML-RPC fault code. Used with +SoupXMLRPCFault (although servers may also return fault codes not +in that enumeration). +

+
+
+
+

enum SoupXMLRPCFault

+
typedef enum {
+	SOUP_XMLRPC_FAULT_PARSE_ERROR_NOT_WELL_FORMED = -32700,
+	SOUP_XMLRPC_FAULT_PARSE_ERROR_UNSUPPORTED_ENCODING = -32701,
+	SOUP_XMLRPC_FAULT_PARSE_ERROR_INVALID_CHARACTER_FOR_ENCODING = -32702,
+	SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_XML_RPC = -32600,
+	SOUP_XMLRPC_FAULT_SERVER_ERROR_REQUESTED_METHOD_NOT_FOUND = -32601,
+	SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_METHOD_PARAMETERS = -32602,
+	SOUP_XMLRPC_FAULT_SERVER_ERROR_INTERNAL_XML_RPC_ERROR = -32603,
+	SOUP_XMLRPC_FAULT_APPLICATION_ERROR = -32500,
+	SOUP_XMLRPC_FAULT_SYSTEM_ERROR = -32400,
+	SOUP_XMLRPC_FAULT_TRANSPORT_ERROR = -32300
+} SoupXMLRPCFault;
+
+

+Pre-defined XML-RPC fault codes from http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php. +These are an extension, not part of the XML-RPC spec; you can't +assume servers will use them. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

SOUP_XMLRPC_FAULT_PARSE_ERROR_NOT_WELL_FORMED

request was not + well-formed +

SOUP_XMLRPC_FAULT_PARSE_ERROR_UNSUPPORTED_ENCODING

request was in + an unsupported encoding +

SOUP_XMLRPC_FAULT_PARSE_ERROR_INVALID_CHARACTER_FOR_ENCODING

request contained an invalid character +

SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_XML_RPC

request was not + valid XML-RPC +

SOUP_XMLRPC_FAULT_SERVER_ERROR_REQUESTED_METHOD_NOT_FOUND

method + not found +

SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_METHOD_PARAMETERS

invalid + parameters +

SOUP_XMLRPC_FAULT_SERVER_ERROR_INTERNAL_XML_RPC_ERROR

internal + error +

SOUP_XMLRPC_FAULT_APPLICATION_ERROR

start of reserved range for + application error codes +

SOUP_XMLRPC_FAULT_SYSTEM_ERROR

start of reserved range for + system error codes +

SOUP_XMLRPC_FAULT_TRANSPORT_ERROR

start of reserved range for + transport error codes +
+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-2.4-soup-gnome-features.html b/docs/reference/html/libsoup-2.4-soup-gnome-features.html new file mode 100644 index 0000000..957efe7 --- /dev/null +++ b/docs/reference/html/libsoup-2.4-soup-gnome-features.html @@ -0,0 +1,94 @@ + + + + +soup-gnome-features + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

soup-gnome-features

+

soup-gnome-features

+
+
+

Synopsis

+
+#include <libsoup/soup-gnome.h>
+
+#define             SOUP_TYPE_GNOME_FEATURES_2_26
+#define             SOUP_TYPE_PROXY_RESOLVER_GNOME
+
+
+
+

Description

+

+

+
+
+

Details

+
+

SOUP_TYPE_GNOME_FEATURES_2_26

+
#define SOUP_TYPE_GNOME_FEATURES_2_26 (soup_gnome_features_2_26_get_type ())
+
+

+This returns the GType of a SoupSessionFeature that automatically +adds all of the GNOME features defined for libsoup 2.26 (which is +just SOUP_TYPE_PROXY_RESOLVER_GNOME). +

+

+You can add this to a session using +soup_session_add_feature_by_type() or by using the +SOUP_SESSION_ADD_FEATURE_BY_TYPE construct-time property. +

+

Since 2.26

+
+
+
+

SOUP_TYPE_PROXY_RESOLVER_GNOME

+
#define SOUP_TYPE_PROXY_RESOLVER_GNOME (soup_proxy_resolver_gnome_get_type ())
+
+

+This returns the GType of a SoupProxyURIResolver that can be used to +resolve HTTP proxies for GNOME applications. You can add this to +a session using soup_session_add_feature_by_type() or by using the +SOUP_SESSION_ADD_FEATURE_BY_TYPE construct-time property. +

+

+This feature is included in SOUP_TYPE_GNOME_FEATURES_2_26, so if +you are using that feature, you do not need to include this feature +separately. +

+

Since 2.26

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-2.4-soup-method.html b/docs/reference/html/libsoup-2.4-soup-method.html new file mode 100644 index 0000000..f49ae4b --- /dev/null +++ b/docs/reference/html/libsoup-2.4-soup-method.html @@ -0,0 +1,240 @@ + + + + +soup-method + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

soup-method

+

soup-method — HTTP method definitions

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+#define             SOUP_METHOD_OPTIONS
+#define             SOUP_METHOD_GET
+#define             SOUP_METHOD_HEAD
+#define             SOUP_METHOD_PUT
+#define             SOUP_METHOD_POST
+#define             SOUP_METHOD_DELETE
+#define             SOUP_METHOD_TRACE
+#define             SOUP_METHOD_CONNECT
+
+#define             SOUP_METHOD_PROPFIND
+#define             SOUP_METHOD_PROPPATCH
+#define             SOUP_METHOD_MKCOL
+#define             SOUP_METHOD_COPY
+#define             SOUP_METHOD_MOVE
+#define             SOUP_METHOD_LOCK
+#define             SOUP_METHOD_UNLOCK
+
+
+
+

Description

+

+soup-method.h contains a number of defines for standard HTTP and +WebDAV headers. You do not need to use these defines; you can pass +arbitrary strings to soup_message_new() if you prefer. +

+

+The thing that these defines are useful for is +performing quick comparisons against SoupMessage's method field; +because that field always contains an interned string, and these +macros return interned strings, you can compare method directly +against these macros rather than needing to use strcmp(). This is +most useful in SoupServer handlers. Eg: +

+

+

+
+ + + + + + + +
1
+2
+3
+4
if (msg->method != SOUP_METHOD_GET && msg->method != SOUP_METHOD_HEAD) {
+    soup_message_set_status (msg, SOUP_STATUS_NOT_IMPLEMENTED);
+    return;
+}
+
+ +

+

+
+
+

Details

+
+

SOUP_METHOD_OPTIONS

+
#define SOUP_METHOD_OPTIONS   _SOUP_INTERN_METHOD (OPTIONS)
+
+

+"OPTIONS" as an interned string. +

+
+
+
+

SOUP_METHOD_GET

+
#define SOUP_METHOD_GET       _SOUP_INTERN_METHOD (GET)
+
+

+"GET" as an interned string. +

+
+
+
+

SOUP_METHOD_HEAD

+
#define SOUP_METHOD_HEAD      _SOUP_INTERN_METHOD (HEAD)
+
+

+"HEAD" as an interned string. +

+
+
+
+

SOUP_METHOD_PUT

+
#define SOUP_METHOD_PUT       _SOUP_INTERN_METHOD (PUT)
+
+

+"PUT" as an interned string. +

+
+
+
+

SOUP_METHOD_POST

+
#define SOUP_METHOD_POST      _SOUP_INTERN_METHOD (POST)
+
+

+"POST" as an interned string. +

+
+
+
+

SOUP_METHOD_DELETE

+
#define SOUP_METHOD_DELETE    _SOUP_INTERN_METHOD (DELETE)
+
+

+"DELETE" as an interned string. +

+
+
+
+

SOUP_METHOD_TRACE

+
#define SOUP_METHOD_TRACE     _SOUP_INTERN_METHOD (TRACE)
+
+

+"TRACE" as an interned string. +

+
+
+
+

SOUP_METHOD_CONNECT

+
#define SOUP_METHOD_CONNECT   _SOUP_INTERN_METHOD (CONNECT)
+
+

+"CONNECT" as an interned string. +

+
+
+
+

SOUP_METHOD_PROPFIND

+
#define SOUP_METHOD_PROPFIND  _SOUP_INTERN_METHOD (PROPFIND)
+
+

+"PROPFIND" as an interned string. +

+
+
+
+

SOUP_METHOD_PROPPATCH

+
#define SOUP_METHOD_PROPPATCH _SOUP_INTERN_METHOD (PROPPATCH)
+
+

+"PROPPATCH" as an interned string. +

+
+
+
+

SOUP_METHOD_MKCOL

+
#define SOUP_METHOD_MKCOL     _SOUP_INTERN_METHOD (MKCOL)
+
+

+"MKCOL" as an interned string. +

+
+
+
+

SOUP_METHOD_COPY

+
#define SOUP_METHOD_COPY      _SOUP_INTERN_METHOD (COPY)
+
+

+"COPY" as an interned string. +

+
+
+
+

SOUP_METHOD_MOVE

+
#define SOUP_METHOD_MOVE      _SOUP_INTERN_METHOD (MOVE)
+
+

+"MOVE" as an interned string. +

+
+
+
+

SOUP_METHOD_LOCK

+
#define SOUP_METHOD_LOCK      _SOUP_INTERN_METHOD (LOCK)
+
+

+"LOCK" as an interned string. +

+
+
+
+

SOUP_METHOD_UNLOCK

+
#define SOUP_METHOD_UNLOCK    _SOUP_INTERN_METHOD (UNLOCK)
+
+

+"UNLOCK" as an interned string. +

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-2.4-soup-status.html b/docs/reference/html/libsoup-2.4-soup-status.html new file mode 100644 index 0000000..6badfef --- /dev/null +++ b/docs/reference/html/libsoup-2.4-soup-status.html @@ -0,0 +1,698 @@ + + + + +soup-status + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+

soup-status

+

soup-status — HTTP (and libsoup) status codes

+
+
+

Synopsis

+
+#include <libsoup/soup.h>
+
+#define             SOUP_STATUS_IS_TRANSPORT_ERROR      (status)
+#define             SOUP_STATUS_IS_INFORMATIONAL        (status)
+#define             SOUP_STATUS_IS_SUCCESSFUL           (status)
+#define             SOUP_STATUS_IS_REDIRECTION          (status)
+#define             SOUP_STATUS_IS_CLIENT_ERROR         (status)
+#define             SOUP_STATUS_IS_SERVER_ERROR         (status)
+enum                SoupKnownStatusCode;
+const char *        soup_status_get_phrase              (guint status_code);
+guint               soup_status_proxify                 (guint status_code);
+
+#define             SOUP_HTTP_ERROR
+
+
+
+

Description

+
+
+

Details

+
+

SOUP_STATUS_IS_TRANSPORT_ERROR()

+
#define SOUP_STATUS_IS_TRANSPORT_ERROR(status) ((status) >  0   && (status) < 100)
+
+

+Tests if status is a libsoup transport error. +

+
++ + + + + + + + + + +

status :

a status code

Returns :

+TRUE or FALSE +
+
+
+
+

SOUP_STATUS_IS_INFORMATIONAL()

+
#define SOUP_STATUS_IS_INFORMATIONAL(status)   ((status) >= 100 && (status) < 200)
+
+

+Tests if status is an Informational (1xx) response. +

+
++ + + + + + + + + + +

status :

an HTTP status code

Returns :

+TRUE or FALSE +
+
+
+
+

SOUP_STATUS_IS_SUCCESSFUL()

+
#define SOUP_STATUS_IS_SUCCESSFUL(status)      ((status) >= 200 && (status) < 300)
+
+

+Tests if status is a Successful (2xx) response. +

+
++ + + + + + + + + + +

status :

an HTTP status code

Returns :

+TRUE or FALSE +
+
+
+
+

SOUP_STATUS_IS_REDIRECTION()

+
#define SOUP_STATUS_IS_REDIRECTION(status)     ((status) >= 300 && (status) < 400)
+
+

+Tests if status is a Redirection (3xx) response. +

+
++ + + + + + + + + + +

status :

an HTTP status code

Returns :

+TRUE or FALSE +
+
+
+
+

SOUP_STATUS_IS_CLIENT_ERROR()

+
#define SOUP_STATUS_IS_CLIENT_ERROR(status)    ((status) >= 400 && (status) < 500)
+
+

+Tests if status is a Client Error (4xx) response. +

+
++ + + + + + + + + + +

status :

an HTTP status code

Returns :

+TRUE or FALSE +
+
+
+
+

SOUP_STATUS_IS_SERVER_ERROR()

+
#define SOUP_STATUS_IS_SERVER_ERROR(status)    ((status) >= 500 && (status) < 600)
+
+

+Tests if status is a Server Error (5xx) response. +

+
++ + + + + + + + + + +

status :

an HTTP status code

Returns :

+TRUE or FALSE +
+
+
+
+

enum SoupKnownStatusCode

+
typedef enum {
+	SOUP_STATUS_NONE,
+
+	/* Transport Errors */
+	SOUP_STATUS_CANCELLED                       = 1,
+	SOUP_STATUS_CANT_RESOLVE,
+	SOUP_STATUS_CANT_RESOLVE_PROXY,
+	SOUP_STATUS_CANT_CONNECT,
+	SOUP_STATUS_CANT_CONNECT_PROXY,
+	SOUP_STATUS_SSL_FAILED,
+	SOUP_STATUS_IO_ERROR,
+	SOUP_STATUS_MALFORMED,
+	SOUP_STATUS_TRY_AGAIN,
+	SOUP_STATUS_TOO_MANY_REDIRECTS,
+	SOUP_STATUS_TLS_FAILED,
+
+	/* HTTP Status Codes */
+	SOUP_STATUS_CONTINUE                        = 100,
+	SOUP_STATUS_SWITCHING_PROTOCOLS             = 101,
+	SOUP_STATUS_PROCESSING                      = 102, /* WebDAV */
+
+	SOUP_STATUS_OK                              = 200,
+	SOUP_STATUS_CREATED                         = 201,
+	SOUP_STATUS_ACCEPTED                        = 202,
+	SOUP_STATUS_NON_AUTHORITATIVE               = 203,
+	SOUP_STATUS_NO_CONTENT                      = 204,
+	SOUP_STATUS_RESET_CONTENT                   = 205,
+	SOUP_STATUS_PARTIAL_CONTENT                 = 206,
+	SOUP_STATUS_MULTI_STATUS                    = 207, /* WebDAV */
+
+	SOUP_STATUS_MULTIPLE_CHOICES                = 300,
+	SOUP_STATUS_MOVED_PERMANENTLY               = 301,
+	SOUP_STATUS_FOUND                           = 302,
+	SOUP_STATUS_MOVED_TEMPORARILY               = 302, /* RFC 2068 */
+	SOUP_STATUS_SEE_OTHER                       = 303,
+	SOUP_STATUS_NOT_MODIFIED                    = 304,
+	SOUP_STATUS_USE_PROXY                       = 305,
+	SOUP_STATUS_NOT_APPEARING_IN_THIS_PROTOCOL  = 306, /* (reserved) */
+	SOUP_STATUS_TEMPORARY_REDIRECT              = 307,
+
+	SOUP_STATUS_BAD_REQUEST                     = 400,
+	SOUP_STATUS_UNAUTHORIZED                    = 401,
+	SOUP_STATUS_PAYMENT_REQUIRED                = 402, /* (reserved) */
+	SOUP_STATUS_FORBIDDEN                       = 403,
+	SOUP_STATUS_NOT_FOUND                       = 404,
+	SOUP_STATUS_METHOD_NOT_ALLOWED              = 405,
+	SOUP_STATUS_NOT_ACCEPTABLE                  = 406,
+	SOUP_STATUS_PROXY_AUTHENTICATION_REQUIRED   = 407,
+	SOUP_STATUS_PROXY_UNAUTHORIZED              = SOUP_STATUS_PROXY_AUTHENTICATION_REQUIRED,
+	SOUP_STATUS_REQUEST_TIMEOUT                 = 408,
+	SOUP_STATUS_CONFLICT                        = 409,
+	SOUP_STATUS_GONE                            = 410,
+	SOUP_STATUS_LENGTH_REQUIRED                 = 411,
+	SOUP_STATUS_PRECONDITION_FAILED             = 412,
+	SOUP_STATUS_REQUEST_ENTITY_TOO_LARGE        = 413,
+	SOUP_STATUS_REQUEST_URI_TOO_LONG            = 414,
+	SOUP_STATUS_UNSUPPORTED_MEDIA_TYPE          = 415,
+	SOUP_STATUS_REQUESTED_RANGE_NOT_SATISFIABLE = 416,
+	SOUP_STATUS_INVALID_RANGE                   = SOUP_STATUS_REQUESTED_RANGE_NOT_SATISFIABLE,
+	SOUP_STATUS_EXPECTATION_FAILED              = 417,
+	SOUP_STATUS_UNPROCESSABLE_ENTITY            = 422, /* WebDAV */
+	SOUP_STATUS_LOCKED                          = 423, /* WebDAV */
+	SOUP_STATUS_FAILED_DEPENDENCY               = 424, /* WebDAV */
+
+	SOUP_STATUS_INTERNAL_SERVER_ERROR           = 500,
+	SOUP_STATUS_NOT_IMPLEMENTED                 = 501,
+	SOUP_STATUS_BAD_GATEWAY                     = 502,
+	SOUP_STATUS_SERVICE_UNAVAILABLE             = 503,
+	SOUP_STATUS_GATEWAY_TIMEOUT                 = 504,
+	SOUP_STATUS_HTTP_VERSION_NOT_SUPPORTED      = 505,
+	SOUP_STATUS_INSUFFICIENT_STORAGE            = 507, /* WebDAV search */
+	SOUP_STATUS_NOT_EXTENDED                    = 510  /* RFC 2774 */
+} SoupKnownStatusCode;
+
+

+These represent the known HTTP status code values, plus various +network and internal errors. +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

SOUP_STATUS_NONE

No status available. (Eg, the message has not +been sent yet) +

SOUP_STATUS_CANCELLED

Message was cancelled locally +

SOUP_STATUS_CANT_RESOLVE

Unable to resolve destination host name +

SOUP_STATUS_CANT_RESOLVE_PROXY

Unable to resolve proxy host name +

SOUP_STATUS_CANT_CONNECT

Unable to connect to remote host +

SOUP_STATUS_CANT_CONNECT_PROXY

Unable to connect to proxy +

SOUP_STATUS_SSL_FAILED

SSL/TLS negotiation failed +

SOUP_STATUS_IO_ERROR

A network error occurred, or the other end +closed the connection unexpectedly +

SOUP_STATUS_MALFORMED

Malformed data (usually a programmer error) +

SOUP_STATUS_TRY_AGAIN

Used internally +

SOUP_STATUS_TOO_MANY_REDIRECTS

There were too many redirections +

SOUP_STATUS_TLS_FAILED

Used internally +

SOUP_STATUS_CONTINUE

100 Continue (HTTP) +

SOUP_STATUS_SWITCHING_PROTOCOLS

101 Switching Protocols (HTTP) +

SOUP_STATUS_PROCESSING

102 Processing (WebDAV) +

SOUP_STATUS_OK

200 Success (HTTP). Also used by many lower-level +soup routines to indicate success. +

SOUP_STATUS_CREATED

201 Created (HTTP) +

SOUP_STATUS_ACCEPTED

202 Accepted (HTTP) +

SOUP_STATUS_NON_AUTHORITATIVE

203 Non-Authoritative Information +(HTTP) +

SOUP_STATUS_NO_CONTENT

204 No Content (HTTP) +

SOUP_STATUS_RESET_CONTENT

205 Reset Content (HTTP) +

SOUP_STATUS_PARTIAL_CONTENT

206 Partial Content (HTTP) +

SOUP_STATUS_MULTI_STATUS

207 Multi-Status (WebDAV) +

SOUP_STATUS_MULTIPLE_CHOICES

300 Multiple Choices (HTTP) +

SOUP_STATUS_MOVED_PERMANENTLY

301 Moved Permanently (HTTP) +

SOUP_STATUS_FOUND

302 Found (HTTP) +

SOUP_STATUS_MOVED_TEMPORARILY

302 Moved Temporarily (old name, +RFC 2068) +

SOUP_STATUS_SEE_OTHER

303 See Other (HTTP) +

SOUP_STATUS_NOT_MODIFIED

304 Not Modified (HTTP) +

SOUP_STATUS_USE_PROXY

305 Use Proxy (HTTP) +

SOUP_STATUS_NOT_APPEARING_IN_THIS_PROTOCOL

306 [Unused] (HTTP) +

SOUP_STATUS_TEMPORARY_REDIRECT

307 Temporary Redirect (HTTP) +

SOUP_STATUS_BAD_REQUEST

400 Bad Request (HTTP) +

SOUP_STATUS_UNAUTHORIZED

401 Unauthorized (HTTP) +

SOUP_STATUS_PAYMENT_REQUIRED

402 Payment Required (HTTP) +

SOUP_STATUS_FORBIDDEN

403 Forbidden (HTTP) +

SOUP_STATUS_NOT_FOUND

404 Not Found (HTTP) +

SOUP_STATUS_METHOD_NOT_ALLOWED

405 Method Not Allowed (HTTP) +

SOUP_STATUS_NOT_ACCEPTABLE

406 Not Acceptable (HTTP) +

SOUP_STATUS_PROXY_AUTHENTICATION_REQUIRED

407 Proxy Authentication +Required (HTTP) +

SOUP_STATUS_PROXY_UNAUTHORIZED

shorter alias for +SOUP_STATUS_PROXY_AUTHENTICATION_REQUIRED +

SOUP_STATUS_REQUEST_TIMEOUT

408 Request Timeout (HTTP) +

SOUP_STATUS_CONFLICT

409 Conflict (HTTP) +

SOUP_STATUS_GONE

410 Gone (HTTP) +

SOUP_STATUS_LENGTH_REQUIRED

411 Length Required (HTTP) +

SOUP_STATUS_PRECONDITION_FAILED

412 Precondition Failed (HTTP) +

SOUP_STATUS_REQUEST_ENTITY_TOO_LARGE

413 Request Entity Too Large +(HTTP) +

SOUP_STATUS_REQUEST_URI_TOO_LONG

414 Request-URI Too Long (HTTP) +

SOUP_STATUS_UNSUPPORTED_MEDIA_TYPE

415 Unsupported Media Type +(HTTP) +

SOUP_STATUS_REQUESTED_RANGE_NOT_SATISFIABLE

416 Requested Range +Not Satisfiable (HTTP) +

SOUP_STATUS_INVALID_RANGE

shorter alias for +SOUP_STATUS_REQUESTED_RANGE_NOT_SATISFIABLE +

SOUP_STATUS_EXPECTATION_FAILED

417 Expectation Failed (HTTP) +

SOUP_STATUS_UNPROCESSABLE_ENTITY

422 Unprocessable Entity +(WebDAV) +

SOUP_STATUS_LOCKED

423 Locked (WebDAV) +

SOUP_STATUS_FAILED_DEPENDENCY

424 Failed Dependency (WebDAV) +

SOUP_STATUS_INTERNAL_SERVER_ERROR

500 Internal Server Error +(HTTP) +

SOUP_STATUS_NOT_IMPLEMENTED

501 Not Implemented (HTTP) +

SOUP_STATUS_BAD_GATEWAY

502 Bad Gateway (HTTP) +

SOUP_STATUS_SERVICE_UNAVAILABLE

503 Service Unavailable (HTTP) +

SOUP_STATUS_GATEWAY_TIMEOUT

504 Gateway Timeout (HTTP) +

SOUP_STATUS_HTTP_VERSION_NOT_SUPPORTED

505 HTTP Version Not +Supported (HTTP) +

SOUP_STATUS_INSUFFICIENT_STORAGE

507 Insufficient Storage +(WebDAV) +

SOUP_STATUS_NOT_EXTENDED

510 Not Extended (RFC 2774) +
+
+
+
+

soup_status_get_phrase ()

+
const char *        soup_status_get_phrase              (guint status_code);
+

+Looks up the stock HTTP description of status_code. This is used +by soup_message_set_status() to get the correct text to go with a +given status code. +

+

+There is no reason for you to ever use this +function. If you wanted the textual description for the +"status_code" of a given SoupMessage, you should just +look at the message's "reason_phrase". However, you +should only do that for use in debugging messages; HTTP reason +phrases are not localized, and are not generally very descriptive +anyway, and so they should never be presented to the user directly. +Instead, you should create your own error messages based on the +status code, and on what you were trying to do. +

+
++ + + + + + + + + + +

status_code :

an HTTP status code

Returns :

the (terse, English) description of status_code +
+
+
+
+

soup_status_proxify ()

+
guint               soup_status_proxify                 (guint status_code);
+

+Turns SOUP_STATUS_CANT_RESOLVE into +SOUP_STATUS_CANT_RESOLVE_PROXY and SOUP_STATUS_CANT_CONNECT into +SOUP_STATUS_CANT_CONNECT_PROXY. Other status codes are passed +through unchanged. +

+
++ + + + + + + + + + +

status_code :

a status code

Returns :

the "proxified" equivalent of status_code.
+

Since 2.26

+
+
+
+

SOUP_HTTP_ERROR

+
#define SOUP_HTTP_ERROR soup_http_error_quark()
+
+

+A GError domain representing an HTTP status. Use a +SoupKnownStatusCode for the code +value. +

+
+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-2.4.devhelp2 b/docs/reference/html/libsoup-2.4.devhelp2 new file mode 100644 index 0000000..5e21a98 --- /dev/null +++ b/docs/reference/html/libsoup-2.4.devhelp2 @@ -0,0 +1,844 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/html/libsoup-build-howto.html 
b/docs/reference/html/libsoup-build-howto.html new file mode 100644 index 0000000..d54574b --- /dev/null +++ b/docs/reference/html/libsoup-build-howto.html @@ -0,0 +1,161 @@ + + + + +Compiling with libsoup + + + + + + + + + + + + + + + + +
+
+
+ + +
+

Compiling with libsoup

+

Compiling with libsoup — Notes on compiling

+
+
+

Using pkg-config

+

+Like other GNOME libraries, libsoup uses +pkg-config to provide compiler options. The +package name is "libsoup-2.4". So in your +configure script, you might specify something like: +

+
+ + + + + + + +
1
+2
+3
PKG_CHECK_MODULES(LIBSOUP, [libsoup-2.4 >= 2.26])
+AC_SUBST(LIBSOUP_CFLAGS)
+AC_SUBST(LIBSOUP_LIBS)
+
+ +

+The "2.4" in the package name is the "API version" +(indicating "the version of the libsoup API +that first appeared in version 2.4") and is essentially just part of +the package name. +

+

+If you are using any of the GNOME-specific features of +libsoup (such as automatic proxy +configuration), you must require +"libsoup-gnome-2.4" instead: +

+
+ + + + + + + +
1
+2
+3
PKG_CHECK_MODULES(LIBSOUP, [libsoup-gnome-2.4 >= 2.26])
+AC_SUBST(LIBSOUP_CFLAGS)
+AC_SUBST(LIBSOUP_LIBS)
+
+ +

+You can also make libsoup-gnome an optional +dependency: +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
+7
+8
PKG_CHECK_MODULES(LIBSOUP_GNOME,
+          [libsoup-gnome-2.4 >= 2.26],
+          [LIBSOUP_CFLAGS="$LIBSOUP_GNOME_CFLAGS"
+           LIBSOUP_LIBS="$LIBSOUP_GNOME_LIBS"
+           AC_DEFINE(HAVE_LIBSOUP_GNOME, 1, [Have libsoup-gnome])],
+          [PKG_CHECK_MODULES(LIBSOUP, [libsoup-2.4 >= 2.26])])
+AC_SUBST(LIBSOUP_CFLAGS)
+AC_SUBST(LIBSOUP_LIBS)
+
+ +

+This will allow the application to be built with either plain +libsoup or with +libsoup-gnome, and it will define the C +preprocessor symbol HAVE_LIBSOUP_GNOME if +libsoup-gnome features are available. +

+
+
+
+

Headers

+

+Code using libsoup should do: +

+
+ + + + + + + +
1
#include <libsoup/soup.h>
+
+ +

+or, for libsoup-gnome: +

+
+ + + + + + + +
1
#include <libsoup/soup-gnome.h>
+
+ +

+Including individual headers besides the two main header files is not +recommended. You may include both soup.h and +soup-gnome.h (though this is not required; the +latter automatically includes the former). +

+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-client-howto.html b/docs/reference/html/libsoup-client-howto.html new file mode 100644 index 0000000..68578db --- /dev/null +++ b/docs/reference/html/libsoup-client-howto.html @@ -0,0 +1,550 @@ + + + + +Soup Client Basics + + + + + + + + + + + + + + + + +
+
+
+ + +
+

Soup Client Basics

+

Soup Client Basics — Client-side tutorial

+
+
+

Creating a SoupSession +

+

+The first step in using the client API is to create a SoupSession. The session object +encapsulates all of the state that libsoup +is keeping on behalf of your program; cached HTTP connections, +authentication information, etc. +

+

+There are two subclasses of SoupSession that you can use, with +slightly different behavior: +

+
    +
  • + SoupSessionAsync, + which uses callbacks and the glib main loop to provide + asynchronous I/O. +

  • +
  • + SoupSessionSync, + which uses blocking I/O rather than callbacks, making it more + suitable for threaded applications. +

  • +
+

+If you want to do a mix of mainloop-based and blocking I/O, you will +need to create two different session objects. +

+

+When you create the session (with soup_session_async_new_with_options +or soup_session_sync_new_with_options), +you can specify various additional options: +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

SOUP_SESSION_MAX_CONNS

+ Allows you to set the maximum total number of connections + the session will have open at one time. (Once it reaches + this limit, it will either close idle connections, or + wait for existing connections to free up before starting + new requests.) +

SOUP_SESSION_MAX_CONNS_PER_HOST

+ Allows you to set the maximum total number of connections + the session will have open to a single + host at one time. +

SOUP_SESSION_USE_NTLM

+ If TRUE, then Microsoft NTLM + authentication will be used if available (and will be + preferred to HTTP Basic or Digest authentication). + If FALSE, NTLM authentication won't be + used, even if it's the only authentication type available. + (NTLM works differently from the standard HTTP + authentication types, so it needs to be handled + specially.) +

SOUP_SESSION_SSL_CA_FILE

+ Points to a file containing certificates for recognized + SSL Certificate Authorities. If this is set, then HTTPS + connections will be checked against these authorities, and + rejected if they can't be verified. (Otherwise all SSL + certificates will be accepted automatically.) +

SOUP_SESSION_ASYNC_CONTEXT

+ A GMainContext + which the session will use for asynchronous operations. + This can be set if you want to use a + SoupSessionAsync in a thread other than the + main thread. +

SOUP_SESSION_ADD_FEATURE and SOUP_SESSION_ADD_FEATURE_BY_TYPE

+ These allow you to specify SoupSessionFeatures + (discussed below) + to add at construct-time. +

+

+If you don't need to specify any options, you can just use soup_session_async_new or +soup_session_sync_new, +which take no arguments. +

+
+
+
+

Session features

+

+Additional session functionality is provided as SoupSessionFeatures, +which can be added to a session, via the SOUP_SESSION_ADD_FEATURE +and SOUP_SESSION_ADD_FEATURE_BY_TYPE +options at session-construction-time, or afterward via the soup_session_add_feature +and soup_session_add_feature_by_type +functions. Some of the features available in +libsoup are: +

+
++ + + + + + + + + + +

SoupLogger

+ A debugging aid, which logs all of libsoup's HTTP traffic + to stdout (or another place you specify). +

SoupCookieJar and SoupCookieJarText

+ Support for HTTP cookies. SoupCookieJar + provides non-persistent cookie storage, while + SoupCookieJarText uses a text file to keep + track of cookies between sessions. +

+

+And in libsoup-gnome: +

+
++ + + + + + + + + + +

SoupProxyResolverGNOME

+ A feature that automatically determines the correct HTTP + proxy to use for requests. +

SoupCookieJarSqlite

+ Support for HTTP cookies stored in an + SQLite database. +

+

+Use the "add_feature_by_type" property/function to add features that +don't require any configuration (such as SoupProxyResolverGNOME), +and the "add_feature" property/function to add features that must be +constructed first (such as SoupLogger). For example, an +application might do something like the following: +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
session = soup_session_async_new_with_options (
+#ifdef HAVE_LIBSOUP_GNOME
+    SOUP_SESSION_ADD_FEATURE_BY_TYPE, SOUP_TYPE_PROXY_RESOLVER_GNOME,
+#endif
+    NULL);
+if (debug_level) {
+    SoupLogger *logger;
+
+    logger = soup_logger_new (debug_level, -1);
+    soup_session_add_feature (session, SOUP_SESSION_FEATURE (logger));
+    g_object_unref (logger);
+}
+
+ +
+
+
+

Creating and Sending SoupMessages

+

+Once you have a session, you do HTTP traffic using SoupMessage. In the simplest +case, you only need to create the message and it's ready to send: +

+
+ + + + + + + +
1
+2
+3
SoupMessage *msg;
+
+msg = soup_message_new ("GET", "http://example.com/");
+
+ +

+In more complicated cases, you can use various SoupMessage, SoupMessageHeaders, and SoupMessageBody methods to set the +request headers and body of the message: +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
SoupMessage *msg;
+
+msg = soup_message_new ("POST", "http://example.com/form.cgi");
+soup_message_set_request (msg, "application/x-www-form-urlencoded",
+              SOUP_MEMORY_COPY, formdata, strlen (formdata));
+soup_message_headers_append (msg->request_headers, "Referer", referring_url);
+
+ +

+(Although this is a bad example, because +libsoup actually has convenience methods +for dealing with HTML +forms, as well as XML-RPC.) +

+

+You can also use soup_message_set_flags +to change some default behaviors. For example, by default, +SoupSession automatically handles responses from the +server that redirect to another URL. If you would like to handle these +yourself, you can set the SOUP_MESSAGE_NO_REDIRECT +flag. +

+
+

Sending a Message Synchronously

+

+To send a message and wait for the response, use soup_session_send_message: +

+
+ + + + + + + +
1
+2
+3
guint status;
+
+status = soup_session_send_message (session, msg);
+
+ +

+(If you use soup_session_send_message with a +SoupSessionAsync, +it will run the main loop itself until the message is complete.) +

+

+The return value from soup_session_send_message +is a libsoup status code, +indicating either a transport error that prevented the message from +being sent, or the HTTP status that was returned by the server in +response to the message. (The status is also available as +msg->status_code.) +

+
+
+

Sending a Message Asynchronously

+

+To send a message asynchronously, use soup_session_queue_message: +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
+7
+8
+9
+10
...
+soup_session_queue_message (session, msg, my_callback, my_callback_data);
+...
+}
+
+static void
+my_callback (SoupSession *session, SoupMessage *msg, gpointer user_data)
+{
+/* Handle the response here */
+}
+
+ +

+The message will be added to the session's queue, and eventually (when +control is returned back to the main loop), it will be sent and the +response be will be read. When the message is complete, +callback will be invoked, along with the data you +passed to soup_session_queue_message. +

+

+soup_session_queue_message +steals a reference to the message object, and unrefs it after the last +callback is invoked on it. So in the usual case, messages sent +asynchronously will be automatically freed for you without you needing +to do anything. (Of course, this wouldn't work when using the synchronous +API, since you will usually need continue working with the message +after calling soup_session_send_message, +so in that case, you must unref it explicitly when you are done with +it.) +

+

+(If you use soup_session_queue_message +with a SoupSessionSync, the +message will be sent in another thread, with the callback eventually +being invoked in the session's SOUP_SESSION_ASYNC_CONTEXT.) +

+
+
+
+
+

Processing the Response

+

+Once you have received the response from the server, synchronously or +asynchronously, you can look at the response fields in the +SoupMessage to decide what to do next. The +status_code and +reason_phrase fields contain the numeric +status and textual status response from the server. +response_headers contains the response +headers, which you can investigate using soup_message_headers_get and +soup_message_headers_foreach. +The response body (if any) is in the +response_body field. +

+

+SoupMessageHeaders +automatically parses several important headers in +response_headers for you and provides +specialized accessors for them. Eg, soup_message_headers_get_content_type. +There are several generic methods such as soup_header_parse_param_list +(for parsing an attribute-list-type header) and soup_header_contains +(for quickly testing if a list-type header contains a particular +token). These handle the various syntactical oddities of parsing HTTP +headers much better than functions like +g_strsplit or strstr. +

+
+
+
+

Intermediate/Automatic Processing

+

+You can also connect to various SoupMessage signals to do +processing at intermediate stages of HTTP I/O. Eg, the got-chunk +signal is emitted as each piece of the response body is read (allowing +you to provide progress information when receiving a large response, +for example). SoupMessage also provides two convenience +methods, soup_message_add_header_handler, +and soup_message_add_status_code_handler, +which allow you to set up a signal handler that will only be invoked +for messages with certain response headers or status codes. +SoupSession uses this internally to handle authentication +and redirection. +

+

+When using the synchronous API, the callbacks and signal handlers will +be invoked during the call to soup_session_send_message. +

+

+To automatically set up handlers on all messages sent via a session, +you can connect to the session's request_started +signal, and add handlers to each message from there. +

+
+
+
+

Handling Authentication

+

+SoupSession handles most of the details of HTTP +authentication for you. If it receives a 401 ("Unauthorized") or 407 +("Proxy Authentication Required") response, the session will emit the +authenticate signal, +providing you with a SoupAuth object indicating the +authentication type ("Basic", "Digest", or "NTLM") and the realm name +provided by the server. If you have a username and password available +(or can generate one), call soup_auth_authenticate +to give the information to libsoup. The session will automatically +requeue the message and try it again with that authentication +information. (If you don't call +soup_auth_authenticate, the session will just +return the message to the application with its 401 or 407 status.) +

+

+If the server doesn't accept the username and password provided, the +session will emit authenticate again, with the +retrying parameter set to TRUE. This lets the +application know that the information it provided earlier was +incorrect, and gives it a chance to try again. If this +username/password pair also doesn't work, the session will continue to +emit authenticate again and again until the +provided username/password successfully authenticates, or until the +signal handler fails to call soup_auth_authenticate, +at which point libsoup will allow the +message to fail (with status 401 or 407). +

+

+If you need to handle authentication asynchronously (eg, to pop up a +password dialog without recursively entering the main loop), you can +do that as well. Just call soup_session_pause_message +on the message before returning from the signal handler, and +g_object_ref the SoupAuth. Then, +later on, after calling soup_auth_authenticate +(or deciding not to), call soup_session_unpause_message +to resume the paused message. +

+
+
+
+

Multi-threaded usage

+

+The only explicitly thread-safe operations in +libsoup are SoupSessionSync's +implementations of the SoupSession methods. So +after creating a SoupSessionSync, you can call soup_session_send_message +and soup_session_cancel_message +on it from any thread. But, eg, while the session is processing a +message, you should not call any SoupMessage methods on it +from any thread other than the one in which it is being sent. (That +is, you should not call any SoupMessage methods on it +except from a message or session callback or signal handler.) +

+

+All other objects (including SoupSessionAsync) +should only be used from a single thread, with objects that are also +only used from that thread. (And in particular, if you set a +non-default GMainContext on a session, +socket, etc, then you can only use that object from the thread in +which that GMainContext is running.) +

+
+
+
+

Sample Programs

+

+A few sample programs are available in the +libsoup sources: +

+
    +
  • + get is a simple command-line + HTTP GET utility using the asynchronous API. +

  • +
  • + getbug is a trivial + demonstration of the XMLRPC interface. + (xmlrpc-test provides + a slightly more complicated example.) +

  • +
  • + auth-test shows how to use + authentication handlers and status-code handlers, although in + a fairly unusual way. +

  • +
  • + simple-proxy uses both the + client and server APIs to create a simple (and not very + RFC-compliant) proxy server. It shows how to use the SOUP_MESSAGE_OVERWRITE_CHUNKS + flag when reading a message to save memory by processing each + chunk of the message as it is read, rather than accumulating + them all into a single buffer to process all at the end. +

  • +
+

+More complicated examples are available in GNOME CVS. The libsoup +pages on the GNOME wiki include a list of applications using +libsoup. +

+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/libsoup-server-howto.html b/docs/reference/html/libsoup-server-howto.html new file mode 100644 index 0000000..4f8d405 --- /dev/null +++ b/docs/reference/html/libsoup-server-howto.html @@ -0,0 +1,441 @@ + + + + +Soup Server Basics + + + + + + + + + + + + + + + + +
+
+
+ + +
+

Soup Server Basics

+

Soup Server Basics — Server-side tutorial

+
+
+

Creating a SoupServer

+

+As with the client API, there is a single object that will encapsulate +most of your interactions with libsoup. In this case, SoupServer. +

+

+You create the server with soup_server_new, +and as with the SoupSession constructor, you can specify +various additional options: +

+
++ + + + + + + + + + + + + + + + + + + + + + + + + + +

SOUP_SERVER_PORT

+ The TCP port to listen on. If 0 (or + left unspecified), some unused port will be selected for + you. (You can find out what port by calling soup_server_get_port.) +

SOUP_SERVER_INTERFACE

+ A SoupAddress, + specifying the IP address of the network interface to run + the server on. If NULL (or left + unspecified), the server will listen on all interfaces. +

SOUP_SERVER_SSL_CERT_FILE

+ Points to a file containing an SSL certificate to use. If + this is set, then the server will speak HTTPS; otherwise + it will speak HTTP. +

SOUP_SERVER_SSL_KEY_FILE

+ Points to a file containing the private key for the + SOUP_SERVER_SSL_CERT_FILE. (It may + point to the same file.) +

SOUP_SERVER_ASYNC_CONTEXT

+ A GMainContext which + the server will use for asynchronous operations. This can + be set if you want to use a SoupServer in a thread + other than the main thread. +

SOUP_SERVER_RAW_PATHS

+ Set this to TRUE if you don't want + libsoup to decode %-encoding + in the Request-URI. (Eg, because you need to treat + "/foo/bar" and + "/foo%2Fbar" as different paths.) +

+
+
+
+

Adding Handlers

+

+By default, SoupServer +returns "404 Not Found" in response to all requests (except ones that +it can't parse, which get "400 Bad Request"). To override this +behavior, call soup_server_add_handler +to set a callback to handle certain URI paths. +

+
+ + + + + + + +
1
+2
soup_server_add_handler (server, "/foo", server_callback,
+             data, destroy_notify);
+
+ +

+The "/foo" indicates the base path for this +handler. When a request comes in, if there is a handler registered for +exactly the path in the request's Request-URI, then +that handler will be called. Otherwise +libsoup will strip path components one by +one until it finds a matching handler. So for example, a request of +the form +"GET /foo/bar/baz.html?a=1&b=2 HTTP/1.1" +would look for handlers for "/foo/bar/baz.html", +"/foo/bar", and "/foo". If a +handler has been registered with a NULL base path, +then it is used as the default handler for any request that doesn't +match any other handler. +

+
+
+
+

Responding to Requests

+

+A handler callback looks something like this: +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
+7
+8
+9
+10
static void
+server_callback (SoupServer        *server,
+         SoupMessage       *msg, 
+         const char        *path,
+         GHashTable        *query,
+         SoupClientContext *client,
+         gpointer           user_data)
+{
+    ...
+}
+
+ +

+msg is the request that has been received and +user_data is the data that was passed to soup_server_add_handler. +path is the path (from msg's +URI), and query contains the result of parsing the +URI query field. (It is NULL if there was no +query.) client is a SoupClientContext, +which contains additional information about the client (including its +IP address, and whether or not it used HTTP authentication). +

+

+By default, libsoup assumes that you have +completely finished processing the message when you return from the +callback, and that it can therefore begin sending the response. If you +are not ready to send a response immediately (eg, you have to contact +another server, or wait for data from a database), you must call soup_server_pause_message +on the message before returning from the callback. This will delay +sending a response until you call soup_server_unpause_message. +(You must also connect to the finished signal on the message +in this case, so that you can break off processing if the client +unexpectedly disconnects before you start sending the data.) +

+

+To set the response status, call soup_message_set_status +or soup_message_set_status_full. +If the response requires a body, you must decide whether to use +Content-Length encoding (the default), or +chunked encoding. +

+
+

Responding with Content-Length +Encoding

+

+This is the simpler way to set a response body, if you have all of the +data available at once. +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
static void
+server_callback (SoupServer        *server,
+         SoupMessage       *msg, 
+         const char        *path,
+         GHashTable        *query,
+         SoupClientContext *client,
+         gpointer           user_data)
+{
+    MyServerData *server_data = user_data;
+    const char *mime_type;
+    GByteArray *body;
+
+    if (msg->method != SOUP_METHOD_GET) {
+        soup_message_set_status (msg, SOUP_STATUS_NOT_IMPLEMENTED);
+        return;
+    }
+
+    /* This is somewhat silly. Presumably your server will do
+     * something more interesting.
+     */
+    body = g_hash_table_lookup (server_data->bodies, path);
+    mime_type = g_hash_table_lookup (server_data->mime_types, path);
+    if (!body || !mime_type) {
+        soup_message_set_status (msg, SOUP_STATUS_NOT_FOUND);
+        return;
+    }
+
+    soup_message_set_status (msg, SOUP_STATUS_OK);
+    soup_message_set_response (msg, mime_type, SOUP_MEMORY_COPY,
+                   body->data, body->len);
+}
+
+ +
+
+

Responding with chunked Encoding

+

+If you want to supply the response body in chunks as it becomes +available, use chunked encoding instead. In this +case, first call soup_message_headers_set_encoding (msg->response_headers, SOUP_ENCODING_CHUNKED) +to tell libsoup that you'll be using +chunked encoding. Then call soup_message_body_append +(or soup_message_body_append_buffer) +on msg->response_body with each chunk of the +response body as it becomes available, and call soup_message_body_complete +when the response is complete. After each of these calls, you must +also call soup_server_unpause_message +to cause the chunk to be sent. (You do not normally need to call soup_server_pause_message, +because I/O is automatically paused when doing a +chunked transfer if no chunks are available.) +

+

+When using chunked encoding, you must also connect to the finished signal on the message, +so that you will be notified if the client disconnects between two +chunks; SoupServer will unref the message if that +happens, so you must stop adding new chunks to the response at that +point. (An alternate possibility is to write each new chunk only when +the wrote_chunk signal +is emitted indicating that the previous one was written successfully.) +

+

+The simple-proxy +example in the tests/ directory gives an example of +using chunked encoding. +

+
+
+
+
+

Handling Authentication

+

+To have SoupServer +handle HTTP authentication for you, create a SoupAuthDomainBasic +or SoupAuthDomainDigest, +and pass it to soup_server_add_auth_domain: +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
SoupAuthDomain *domain;
+
+domain = soup_auth_domain_basic_new (
+    SOUP_AUTH_DOMAIN_REALM, "My Realm",
+    SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK, auth_callback,
+    SOUP_AUTH_DOMAIN_BASIC_AUTH_DATA, auth_data,
+    SOUP_AUTH_DOMAIN_ADD_PATH, "/foo",
+    SOUP_AUTH_DOMAIN_ADD_PATH, "/bar/private",
+    NULL);
+soup_server_add_auth_domain (server, domain);
+g_object_unref (domain);
+
+ +

+Then, every request under one of the auth domain's paths will be +passed to the auth_callback first before being +passed to the server_callback: +

+
+ + + + + + + +
1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+17
static gboolean
+auth_callback (SoupAuthDomain *domain, SoupMessage *msg,
+           const char *username, const char *password,
+           gpointer user_data)
+{
+    MyServerData *server_data = user_data;
+    MyUserData *user;
+
+    user = my_server_data_lookup_user (server_data, username);
+    if (!user)
+        return FALSE;
+
+    /* FIXME: Don't do this. Keeping a cleartext password database
+     * is bad.
+     */
+    return strcmp (password, user->password) == 0;
+}
+
+ +

+The SoupAuthDomainBasicAuthCallback +is given the username and password from the +Authorization header and must determine, in some +server-specific manner, whether or not to accept them. (In this +example we compare the password against a cleartext password database, +but it would be better to store the password somehow encoded, as in +the UNIX password database. Alternatively, you may need to delegate +the password check to PAM or some other service.) +

+

+If you are using Digest authentication, note that SoupAuthDomainDigestAuthCallback +works completely differently (since the server doesn't receive the +cleartext password from the client in that case, so there's no way to +compare it directly). See the documentation for SoupAuthDomainDigest +for more details. +

+

+You can have multiple SoupAuthDomains attached to a +SoupServer, either in separate parts of the path +hierarchy, or overlapping. (Eg, you might want to accept either Basic +or Digest authentication for a given path.) When more than one auth +domain covers a given path, the request will be accepted if the user +authenticates successfully against any of the +domains. +

+

+If you want to require authentication for some requests under a +certain path, but not all of them (eg, you want to authenticate +PUTs, but not GETs), use a +SoupAuthDomainFilter. +

+
+
+ + + \ No newline at end of file diff --git a/docs/reference/html/right.png b/docs/reference/html/right.png new file mode 100644 index 0000000000000000000000000000000000000000..92832e3a4566e59d6e4092010e08d28f3be3a68d GIT binary patch literal 472 zcmV;}0Vn>6P)Cd4HCN^TYHBC0dz3r5|}*T3c5!K}0^NPTey!^rYo;W&eW{b1SE%dR-1ljcju- zJITo5P_e{cPDWDszO|97o#m$fni3V4d%~7^?0HU4-k!+X`e~w55Q}HA=c?CM9`EK` z^o5GF_RsnG`ey+9wOf8O4bzg>7W*;jU~M?g`OZAA$mNp|Lz<$s+~N9!2`ir8RcClo$(Q~19INM~9}j;&*|enC yGd}kJak0wj?aUKd8;%}`i}SSew>!A-2iw}^5}Rh(M>+vRkipZ{&t;ucLK6U4uc96R literal 0 HcmV?d00001 diff --git a/docs/reference/libsoup-2.4-docs.sgml b/docs/reference/libsoup-2.4-docs.sgml index c52208f..ec6fa0d 100644 --- a/docs/reference/libsoup-2.4-docs.sgml +++ b/docs/reference/libsoup-2.4-docs.sgml @@ -11,7 +11,6 @@ - @@ -38,11 +37,13 @@ Additional Features + + @@ -64,6 +65,16 @@ + + Experimental streaming API + + + + + + + + Index diff --git a/docs/reference/libsoup-2.4-sections.txt b/docs/reference/libsoup-2.4-sections.txt index 3980677..460e0c4 100644 --- a/docs/reference/libsoup-2.4-sections.txt +++ b/docs/reference/libsoup-2.4-sections.txt @@ -18,7 +18,12 @@ soup_message_get_address soup_message_set_status soup_message_set_status_full +soup_message_set_redirect soup_message_is_keepalive +soup_message_get_https_status + +soup_message_set_first_party +soup_message_get_first_party soup_message_add_header_handler soup_message_add_status_code_handler @@ -38,6 +43,13 @@ SOUP_MESSAGE_FLAGS SOUP_MESSAGE_STATUS_CODE SOUP_MESSAGE_REASON_PHRASE SOUP_MESSAGE_SERVER_SIDE +SOUP_MESSAGE_FIRST_PARTY +SOUP_MESSAGE_REQUEST_BODY +SOUP_MESSAGE_REQUEST_HEADERS +SOUP_MESSAGE_RESPONSE_BODY +SOUP_MESSAGE_RESPONSE_HEADERS +SOUP_MESSAGE_TLS_CERTIFICATE +SOUP_MESSAGE_TLS_ERRORS SOUP_MESSAGE SOUP_IS_MESSAGE @@ -94,6 +106,7 @@ soup_message_headers_append soup_message_headers_replace soup_message_headers_remove soup_message_headers_clear +soup_message_headers_clean_connection_headers soup_message_headers_get_one 
soup_message_headers_get_list soup_message_headers_get @@ -141,7 +154,9 @@ SoupMemoryUse soup_buffer_new soup_buffer_new_subbuffer soup_buffer_new_with_owner +soup_buffer_new_take soup_buffer_get_owner +soup_buffer_get_data soup_buffer_copy soup_buffer_free @@ -154,6 +169,7 @@ soup_message_body_get_accumulate soup_message_body_append soup_message_body_append_buffer +soup_message_body_append_take soup_message_body_truncate soup_message_body_complete soup_message_body_flatten @@ -196,6 +212,7 @@ soup_server_get_listener soup_server_run soup_server_run_async soup_server_quit +soup_server_disconnect soup_server_get_async_context SoupServerCallback @@ -337,6 +354,7 @@ soup_address_is_resolved soup_address_get_name soup_address_get_sockaddr +soup_address_get_gsockaddr soup_address_get_physical soup_address_get_port @@ -359,8 +377,6 @@ SOUP_ADDRESS_CLASS SOUP_IS_ADDRESS_CLASS SOUP_ADDRESS_GET_CLASS SoupAddressClass - -AF_INET6
@@ -373,8 +389,13 @@ soup_session_queue_message soup_session_requeue_message soup_session_send_message soup_session_cancel_message +soup_session_prefetch_dns +soup_session_prepare_for_uri soup_session_abort +soup_session_would_redirect +soup_session_redirect_message + soup_session_pause_message soup_session_unpause_message @@ -400,6 +421,11 @@ SOUP_SESSION_USER_AGENT SOUP_SESSION_ADD_FEATURE SOUP_SESSION_ADD_FEATURE_BY_TYPE SOUP_SESSION_REMOVE_FEATURE_BY_TYPE +SOUP_SESSION_ACCEPT_LANGUAGE +SOUP_SESSION_ACCEPT_LANGUAGE_AUTO +SOUP_SESSION_SSL_STRICT +SOUP_SESSION_HTTP_ALIASES +SOUP_SESSION_HTTPS_ALIASES SOUP_IS_SESSION SOUP_IS_SESSION_CLASS @@ -409,6 +435,11 @@ SOUP_SESSION_GET_CLASS SOUP_TYPE_SESSION SoupSessionClass soup_session_get_type + +SoupConnection +SoupConnectionState +SoupMessageQueue +SoupMessageQueueItem
@@ -461,6 +492,9 @@ SOUP_TYPE_SESSION_FEATURE soup_session_feature_attach soup_session_feature_detach +soup_session_feature_add_feature +soup_session_feature_has_feature +soup_session_feature_remove_feature
@@ -470,6 +504,10 @@ SoupAuth soup_auth_new soup_auth_update +SOUP_TYPE_AUTH_BASIC +SOUP_TYPE_AUTH_DIGEST +SOUP_TYPE_AUTH_NTLM + soup_auth_is_for_proxy soup_auth_get_scheme_name soup_auth_get_host @@ -497,6 +535,14 @@ SOUP_AUTH_CLASS SOUP_IS_AUTH_CLASS SOUP_AUTH_GET_CLASS SoupAuthClass +soup_auth_basic_get_type +soup_auth_digest_get_type +soup_auth_ntlm_get_type + +soup_auth_get_saved_password +soup_auth_get_saved_users +soup_auth_has_saved_password +soup_auth_save_password
@@ -536,6 +582,11 @@ SOUP_SOCKET_IS_SERVER SOUP_SOCKET_SSL_CREDENTIALS SOUP_SOCKET_ASYNC_CONTEXT SOUP_SOCKET_TIMEOUT +SOUP_SOCKET_SSL_FALLBACK +SOUP_SOCKET_SSL_STRICT +SOUP_SOCKET_TLS_CERTIFICATE +SOUP_SOCKET_TLS_ERRORS +SOUP_SOCKET_TRUSTED_CERTIFICATE SOUP_SOCKET SOUP_IS_SOCKET @@ -547,6 +598,9 @@ SOUP_SOCKET_GET_CLASS SoupSocketClass soup_ssl_error_quark +soup_socket_get_fd +soup_socket_handshake_async +soup_socket_handshake_sync
@@ -558,7 +612,10 @@ soup_uri_new soup_uri_to_string soup_uri_copy +soup_uri_copy_host soup_uri_equal +soup_uri_host_equal +soup_uri_host_hash soup_uri_free soup_uri_encode @@ -567,22 +624,35 @@ soup_uri_normalize SOUP_URI_SCHEME_HTTP SOUP_URI_SCHEME_HTTPS +SOUP_URI_SCHEME_DATA +SOUP_URI_SCHEME_FILE +SOUP_URI_SCHEME_FTP soup_uri_uses_default_port SOUP_URI_VALID_FOR_HTTP soup_uri_set_scheme +soup_uri_get_scheme soup_uri_set_user +soup_uri_get_user soup_uri_set_password +soup_uri_get_password soup_uri_set_host +soup_uri_get_host soup_uri_set_port +soup_uri_get_port soup_uri_set_path +soup_uri_get_path soup_uri_set_query soup_uri_set_query_from_form soup_uri_set_query_from_fields +soup_uri_get_query soup_uri_set_fragment +soup_uri_get_fragment SOUP_TYPE_URI soup_uri_get_type + +uri_decoded_copy
@@ -598,6 +668,14 @@ soup_date_to_string soup_date_to_time_t soup_date_to_timeval soup_date_is_past +soup_date_get_day +soup_date_get_hour +soup_date_get_minute +soup_date_get_month +soup_date_get_offset +soup_date_get_second +soup_date_get_utc +soup_date_get_year soup_date_free soup_headers_parse_request @@ -613,6 +691,7 @@ soup_header_parse_param_list soup_header_parse_semi_param_list soup_header_free_param_list soup_header_g_string_append_param +soup_header_g_string_append_param_quoted soup_str_case_equal soup_str_case_hash @@ -627,13 +706,20 @@ soup_ssl_supported soup_date_copy SOUP_TYPE_DATE soup_date_get_type -soup_form_decode_urlencoded -soup_form_encode_urlencoded -soup_form_encode_urlencoded_list +soup_char_is_token +soup_char_is_uri_gen_delims +soup_char_is_uri_percent_encoded +soup_char_is_uri_sub_delims +soup_char_is_uri_unreserved +SOUP_CHAR_HTTP_CTL +SOUP_CHAR_HTTP_SEPARATOR +SOUP_CHAR_URI_GEN_DELIMS +SOUP_CHAR_URI_PERCENT_ENCODED +SOUP_CHAR_URI_SUB_DELIMS
-soup-forms +soup-form HTML Form Support SOUP_FORM_MIME_TYPE_MULTIPART @@ -648,6 +734,10 @@ soup_form_request_new soup_form_request_new_from_datalist soup_form_request_new_from_hash soup_form_request_new_from_multipart + +soup_form_decode_urlencoded +soup_form_encode_urlencoded +soup_form_encode_urlencoded_list
@@ -666,12 +756,12 @@ soup_xmlrpc_build_fault soup_xmlrpc_set_response soup_xmlrpc_set_fault -SOUP_XMLRPC_ERROR -SoupXMLRPCError SOUP_XMLRPC_FAULT SoupXMLRPCFault soup_xmlrpc_error_quark +SOUP_XMLRPC_ERROR +SoupXMLRPCError soup_xmlrpc_fault_quark
@@ -736,23 +826,30 @@ SoupCookie soup_cookie_new soup_cookie_parse soup_cookie_copy -soup_cookie_equal soup_cookie_free soup_cookie_set_name +soup_cookie_get_name soup_cookie_set_value +soup_cookie_get_value soup_cookie_set_domain +soup_cookie_get_domain soup_cookie_set_path +soup_cookie_get_path soup_cookie_set_max_age SOUP_COOKIE_MAX_AGE_ONE_HOUR SOUP_COOKIE_MAX_AGE_ONE_DAY SOUP_COOKIE_MAX_AGE_ONE_WEEK SOUP_COOKIE_MAX_AGE_ONE_YEAR soup_cookie_set_expires +soup_cookie_get_expires soup_cookie_set_secure +soup_cookie_get_secure soup_cookie_set_http_only +soup_cookie_get_http_only soup_cookie_applies_to_uri +soup_cookie_domain_matches soup_cookie_to_cookie_header soup_cookie_to_set_cookie_header @@ -766,6 +863,8 @@ soup_cookies_free SOUP_TYPE_COOKIE soup_cookie_get_type + +soup_cookie_equal
@@ -773,15 +872,20 @@ soup_cookie_get_type SoupCookieJar SoupCookieJar soup_cookie_jar_new -soup_cookie_jar_save soup_cookie_jar_get_cookies soup_cookie_jar_set_cookie +soup_cookie_jar_set_cookie_with_first_party soup_cookie_jar_add_cookie soup_cookie_jar_delete_cookie soup_cookie_jar_all_cookies +SoupCookieJarAcceptPolicy +soup_cookie_jar_get_accept_policy +soup_cookie_jar_set_accept_policy + SOUP_COOKIE_JAR_READ_ONLY +SOUP_COOKIE_JAR_ACCEPT_POLICY SoupCookieJarClass SOUP_COOKIE_JAR @@ -791,6 +895,7 @@ SOUP_IS_COOKIE_JAR SOUP_IS_COOKIE_JAR_CLASS SOUP_TYPE_COOKIE_JAR soup_cookie_jar_get_type +soup_cookie_jar_save
@@ -875,6 +980,8 @@ SOUP_TYPE_PROXY_RESOLVER_GNOME soup_proxy_resolver_gnome_get_type soup_gnome_features_2_26_get_type +SOUP_TYPE_PASSWORD_MANAGER_GNOME +soup_password_manager_gnome_get_type
@@ -894,3 +1001,167 @@ SoupContentSnifferClass SoupContentSnifferPrivate soup_content_sniffer_get_type
+ +
+soup-cache +SoupCache +SoupCache +SoupCacheType +soup_cache_new +soup_cache_flush +soup_cache_clear +soup_cache_dump +soup_cache_load +soup_cache_get_max_size +soup_cache_set_max_size + +SOUP_TYPE_CACHE +SOUP_IS_CACHE +SOUP_IS_CACHE_CLASS +SOUP_CACHE +SOUP_CACHE_CLASS +SOUP_CACHE_GET_CLASS +SoupCacheClass +SoupCachePrivate + +soup_cache_generate_conditional_request +soup_cache_get_cacheability +soup_cache_get_type +soup_cache_has_response +soup_cache_send_response +SoupCacheResponse +SoupCacheability +
+ +
+soup-content-decoder +SoupContentDecoder +SoupContentDecoder + +SOUP_TYPE_CONTENT_DECODER +SOUP_IS_CONTENT_DECODER +SOUP_IS_CONTENT_DECODER_CLASS +SOUP_CONTENT_DECODER +SOUP_CONTENT_DECODER_CLASS +SOUP_CONTENT_DECODER_GET_CLASS +SoupContentDecoderClass +SoupContentDecoderPrivate +soup_content_decoder_get_type +
+ +
+soup-proxy-resolver-default +SoupProxyResolverDefault +SoupProxyResolverDefault + +soup_proxy_resolver_default_get_type +SoupProxyResolverDefaultClass +SOUP_TYPE_PROXY_RESOLVER_DEFAULT +SOUP_PROXY_RESOLVER_DEFAULT +SOUP_PROXY_RESOLVER_DEFAULT_CLASS +SOUP_PROXY_RESOLVER_DEFAULT_GET_CLASS +SOUP_IS_PROXY_RESOLVER_DEFAULT +SOUP_IS_PROXY_RESOLVER_DEFAULT_CLASS +
+ +
+soup-requester +SoupRequester +SoupRequester +soup_requester_new +soup_requester_request +soup_requester_request_uri + +SoupRequesterError +SOUP_REQUESTER_ERROR + +soup_requester_get_type +SoupRequesterClass +SoupRequesterPrivate +SOUP_TYPE_REQUESTER +SOUP_REQUESTER +SOUP_REQUESTER_CLASS +SOUP_REQUESTER_GET_CLASS +SOUP_IS_REQUESTER +SOUP_IS_REQUESTER_CLASS + +soup_requester_error_quark +
+ +
+soup-request +SoupRequest +SoupRequest +soup_request_send +soup_request_send_async +soup_request_send_finish + +soup_request_get_content_length +soup_request_get_content_type +soup_request_get_session +soup_request_get_uri + +SOUP_REQUEST_SESSION +SOUP_REQUEST_URI + +SOUP_IS_REQUEST +SOUP_IS_REQUEST_CLASS +SOUP_REQUEST +SOUP_REQUEST_CLASS +SOUP_REQUEST_GET_CLASS +SOUP_TYPE_REQUEST +SoupRequestClass +SoupRequestPrivate +soup_request_get_type +
+ +
+soup-request-data +SoupRequestData +SoupRequestData + +SOUP_IS_REQUEST_DATA +SOUP_IS_REQUEST_DATA_CLASS +SOUP_REQUEST_DATA +SOUP_REQUEST_DATA_CLASS +SOUP_REQUEST_DATA_GET_CLASS +SOUP_TYPE_REQUEST_DATA +SoupRequestDataClass +SoupRequestDataPrivate +soup_request_data_get_type +
+ +
+soup-request-file +SoupRequestFile +SoupRequestFile +soup_request_file_get_file + +SOUP_IS_REQUEST_FILE +SOUP_IS_REQUEST_FILE_CLASS +SOUP_REQUEST_FILE +SOUP_REQUEST_FILE_CLASS +SOUP_REQUEST_FILE_GET_CLASS +SOUP_TYPE_REQUEST_FILE +SoupRequestFileClass +SoupRequestFilePrivate +soup_request_file_get_type +
+ +
+soup-request-http +SoupRequestHTTP +SoupRequestHTTP +soup_request_http_get_message + +SOUP_IS_REQUEST_HTTP +SOUP_IS_REQUEST_HTTP_CLASS +SOUP_REQUEST_HTTP +SOUP_REQUEST_HTTP_CLASS +SOUP_REQUEST_HTTP_GET_CLASS +SOUP_TYPE_REQUEST_HTTP +SoupRequestHTTPClass +SoupRequestHTTPPrivate +soup_request_http_get_type +
+ diff --git a/docs/reference/libsoup-2.4.types b/docs/reference/libsoup-2.4.types new file mode 100644 index 0000000..71035a1 --- /dev/null +++ b/docs/reference/libsoup-2.4.types @@ -0,0 +1,41 @@ +soup_address_get_type +soup_auth_basic_get_type +soup_auth_digest_get_type +soup_auth_domain_basic_get_type +soup_auth_domain_digest_get_type +soup_auth_domain_get_type +soup_auth_get_type +soup_auth_ntlm_get_type +soup_buffer_get_type +soup_byte_array_get_type +soup_cache_get_type +soup_client_context_get_type +soup_content_decoder_get_type +soup_content_sniffer_get_type +soup_cookie_get_type +soup_cookie_jar_get_type +soup_cookie_jar_sqlite_get_type +soup_cookie_jar_text_get_type +soup_date_get_type +soup_gnome_features_2_26_get_type +soup_logger_get_type +soup_message_body_get_type +soup_message_get_type +soup_message_headers_get_type +soup_multipart_get_type +soup_password_manager_gnome_get_type +soup_proxy_resolver_default_get_type +soup_proxy_resolver_gnome_get_type +soup_proxy_uri_resolver_get_type +soup_request_data_get_type +soup_request_file_get_type +soup_request_get_type +soup_request_http_get_type +soup_requester_get_type +soup_server_get_type +soup_session_async_get_type +soup_session_feature_get_type +soup_session_get_type +soup_session_sync_get_type +soup_socket_get_type +soup_uri_get_type diff --git a/docs/reference/porting-2.2-2.4.xml b/docs/reference/porting-2.2-2.4.xml deleted file mode 100644 index 37632bf..0000000 --- a/docs/reference/porting-2.2-2.4.xml +++ /dev/null @@ -1,878 +0,0 @@ - - - - -libsoup 2.2 to 2.4 porting notes -3 -LIBSOUP Library - - - -Porting notesNotes on porting from libsoup 2.2 to 2.4 - - - -Overview - - -After many API-compatible releases in the 2.2 series, -libsoup has now changed its API and bumped -its version number to 2.4. Changes were made for a variety of reasons: - - - - - To fix bugs and add features that couldn't be done ABI-compatibly. 
- - - - To make it easier to generate bindings for libsoup for - languages other than C. - - - - To clean up ugly/confusing old APIs - - - - To be more glib/gobject/gtk-like in general. - - - - - - -SoupMessage - - -SoupMessage has had a -number of API changes made, mostly to increase its -language-bindability. - - - -SoupMessageHeaders - - - SoupMessage's - request_headers and - response_headers fields are now an - opaque type (SoupMessageHeaders) - rather than being GHashTables. The method names have - changed slightly to reflect this: - - - - - soup_message_add_header - - → soup_message_headers_append - - - - soup_message_get_header - - → soup_message_headers_get - - - - soup_message_foreach_header - - → soup_message_headers_foreach - - - - soup_message_remove_header - - → soup_message_headers_remove - - - - soup_message_clear_headers - - → soup_message_headers_clear - - - - - - soup_message_get_header_list has no equivalent; - if multiple copies of a header are present, - soup_message_headers_get will return all of - them, concatenated together and separated by commas; RFC 2616 says - that the two forms (multiple headers, and a single header with - comma-separated values) are equivalent; this change to libsoup - ensures that applications will treat them as equivalent. - - - - In addition, certain important header fields now have - dedicated get/set methods: - - - - - soup_message_headers_get_encoding / soup_message_headers_set_encoding - - - - soup_message_headers_get_content_length / soup_message_headers_set_content_length - - - - soup_message_headers_get_expectations / soup_message_headers_set_expectations - - - - - (soup_message_headers_set_expectation(msg, SOUP_EXPECTATION_CONTINUE) - replaces the SOUP_MESSAGE_EXPECT_CONTINUE - message flag). 
- - - - - -SoupMessageBody - - - Similarly, the request_body and - response fields (renamed from - request and response) are - now a new type, SoupMessageBody, - implemented in terms of SoupBuffer, a refcounted - memory buffer type with clearer semantics than the old - SoupDataBuffer/SoupOwnership. - - - - - SOUP_BUFFER_STATIC - - → SOUP_MEMORY_STATIC - - - - SOUP_BUFFER_SYSTEM_OWNED - - → SOUP_MEMORY_TAKE - (meaning libsoup - should take ownership of the memory from your). - - - - SOUP_BUFFER_USER_OWNED - - → SOUP_MEMORY_COPY - (meaning libsoup - should make a copy of the memory, because you - can't make any guarantees about how long it will - last.) - - - - - - A fourth SoupMemoryUse value is also available: SOUP_MEMORY_TEMPORARY, - which helps to avoid extra copies in some cases. - SOUP_MEMORY_TEMPORARY means that the memory - will last at least as long as the object you are handing it to (a - SoupBuffer, SoupMessageBody, or - SoupMessage), and so doesn't need to be copied right - away, but that if anyone makes a copy of the buffer, - libsoup needs to make a new copy of the - memory for them at that point, since the original pointer may not - remain valid for the lifetime of the new copy. - - - - (In the future, there may be additional SoupBuffer - and SoupMessageBody methods to work directly with - mmapped memory, splicing to file descriptors, etc.) - - - - soup_message_set_request - and soup_message_set_response - still work roughly like they used to. - - - - Unlike the old request and - response fields, the new - request_body and - response_body fields are not guaranteed - to be filled in at all times. (In particular, the - response_body is not filled in until it - has been fully read, although you can use soup_message_body_get_chunk - to iterate through the chunks before that point if you need to.) - - - - When request_body and - response_body are - filled in, they are '\0'-terminated for your - processing convenience. 
(The terminating 0 byte is not included in - their length.) - - - - - -Chunked encoding - - - The prototype of the SoupMessage::got_chunk - signal has been changed; it now includes the chunk as a - SoupBuffer parameter (rather than storing the chunk - data in msg->response as in 2.2). SOUP_MESSAGE_OVERWRITE_CHUNKS - is now somewhat poorly named, but still has essentially the same - semantics: if you set it, each chunk will be discarded after it is - read, and msg->response_body will not be filled - in with the complete response at the end of message processing. - - - - The API for sending chunked responses from a - SoupServer is also slightly different now: - - - - - soup_server_message_set_encoding - - → soup_message_headers_set_encoding - - - - soup_message_add_chunk - - → soup_message_body_append - or soup_message_body_append_buffer - - - - soup_message_add_final_chunk - - → soup_message_body_complete - - - - - - Since the new chunk-sending APIs require you to explicitly pass - the - request_headers/request_body - fields, rather than just assuming you're talking about the - response body, in theory it is now possible to use chunked - encoding with the request as well. As of the 2.3.0 release this - has not yet been tested. - - - - - -Methods - - - SoupMessage's - method field is now an interned - string, and you can compare the method directly against - the defines such as SOUP_METHOD_GET - (eg, in a SoupServer request handler). - soup_method_get_id and the - SOUP_METHOD_ID_* macros are now gone. - - - - -Handlers - - - soup_message_add_header_handler - and soup_message_add_status_code_handler - are now just clever wrappers around - g_signal_connect. In particular, you now pass - a signal name to them rather than a SoupHandlerPhase, - and you remove them with the normal signal handler remove methods. 
- However, they still retain the special behavior that if the - message has been cancelled or requeued when the time comes for the - handler to run, then the handler will be skipped. (Use plain - g_signal_connect if you don't want that - behavior.) - - - - -I/O-related <type>SoupMessage</type> methods - - - soup_message_io_pause and - soup_message_io_unpause have been moved to - SoupSession and SoupServer, to better - reflect the fact that the session/server control the I/O, and - SoupMessage is merely acted-upon by them. - - - - - soup_message_io_pause - - → soup_session_pause_message / soup_server_pause_message - - - - soup_message_io_unpause - - → soup_session_unpause_message / soup_server_unpause_message - - - - - - msg->status (the I/O status) is now - gone as well, because (a) it's really an internal state of - SoupSession, and (b) it's too easy to confuse - with msg->status_code (the HTTP status) - anyway. Code that used to check if status was - SOUP_MESSAGE_STATUS_FINISHED needs to - be rewritten to track whether or not the finished - signal has been emitted. - - - - - -HTTP-Version - - - SoupHttpVersion is now SoupHTTPVersion - - - - - - -SoupSession - - -<function>soup_session_queue_message</function> callback - - - soup_session_queue_message's - callback parameter now includes the SoupSession as a - parameter, reflecting the fact that it is a - SoupSession callback, not a SoupMessage - callback. (It has also been renamed, from - SoupMessageCallbackFn to SoupSessionCallback.) - - - - -Authentication - - - SoupSession's authenticate and - reauthenticate signals have been merged into a - single authenticate - signal with a retrying parameter to indicate if - it's the second (or later) try. Also, the signal now includes a - SoupAuth directly, - and you authenticate by calling soup_auth_authenticate - on the auth (rather than passing back a username and password from - the signal handler). 
- - - - -<type>SoupLogger</type> - - -SoupLogger is a -new object that copies the behavior of -evolution-exchange's -E2K_DEBUG and its clones. That is, it causes a -SoupSession to start logging some or all of its HTTP -traffic to stdout, for debugging purposes. - - - - -<type>SoupMessageFilter</type> - - - SoupMessageFilter is gone; code that used to use it - can now connect to the SoupSession::request-started - signal to get a chance to act on each message as it is sent. - (This is how SoupLogger works.) - - - - -Internal types - - - The SoupConnection and SoupMessageQueue - types (which should always have been internal to - SoupSession) have been removed from the public API. - - - - - - -SoupURI - -SoupUri has been renamed SoupURI, and its behavior has -changed in a few ways: - - - - - It no longer fully-decodes %-encoded URI components. This - is necessary to ensure that complicated URIs (eg, URIs - that include other URIs as query parameters) can be - round-tripped correctly. This corresponds to the old - broken_encoding behavior, but - that flag no longer exists, since it is the default and - there's no way to turn it off. - - - - In theory, this is an ABI-breaking change, especially for - SoupServers. - However, it is unlikely to actually break anything. (And - in the SoupServer case, servers now - fully-decode the path component - themselves unless you set the SOUP_SERVER_RAW_PATHS - flag on the server, so the behavior should still be the - same. - - - - - It uses the RFC3986 parsing rules, including support for IPv6 literal - addresses. - - - - - The field formerly called - protocol is now - scheme, to match the spec, and - it's an interned string rather than a quark. The names of - the predefined values have changed to match: - - - - - SOUP_PROTOCOL_HTTP - - → SOUP_URI_SCHEME_HTTP - - - - SOUP_PROTOCOL_HTTPS - - → SOUP_URI_SCHEME_HTTPS - - - - - - - -soup_uri_decode -now returns a new string rather than modifying its input string in -place. 
The new method soup_uri_normalize, -which removes some, but not all, %-encoding, behaves similarly. - - - -Finally, SoupURI (as well as most other struct types in -libsoup) now uses the glib "slice" -allocator, so any code that uses g_new to create -SoupURIs is wrong. If you want to create a URI "by hand", -you can call soup_uri_new, -passing NULL, and you will get back an empty -SoupURI. There are also now methods that can be used to -set its fields (eg, soup_uri_set_scheme, -soup_uri_set_path, -etc) rather than mucking with the fields directly. - - - -Forms - - -Related to SoupURI, there are some new helper methods for -dealing with HTML forms. soup_form_decode_urlencoded -decodes a URI query component (or an -application/x-www-form-urlencoded request body) -into a GHashTable. soup_form_encode_urlencoded -reverses the process, allowing you to fill in a -uri->query with a properly-encoded form dataset. -(SoupURI also provides soup_uri_set_query_from_form -to help with this.) - - - - - - - -XML-RPC and SOAP - - -SOAP - -SOAP support has been removed; the existing methods covered only a -teeny tiny subset of SOAP, which was really only useful to a single -application. (The code that was formerly in libsoup has been moved to -that application.). If you were using this code, you can resurrect a -libsoup-2.4-compatible version of it from revision 1016 of libsoup -svn. - - - - -XML-RPC - -The XML-RPC code has been completely rewritten to make it simpler to -implement XML-RPC clients and servers. (Note: the server-side code has -not been heavily tested yet.) 
The new XML-RPC API makes use of -GValues, with the following type mappings: - - - - - int - - → int (G_TYPE_INT) - - - - boolean - - → gboolean (G_TYPE_BOOLEAN) - - - - string - - → char * (G_TYPE_STRING) - - - - double - - → double (G_TYPE_DOUBLE) - - - - dateTime.iso8601 - - → SoupDate (SOUP_TYPE_DATE) - - - - base64 - - → GByteArray (SOUP_TYPE_BYTE_ARRAY) - - - - struct - - → GHashTable (G_TYPE_HASH_TABLE) - - - - array - - → GValueArray (G_TYPE_VALUE_ARRAY) - - - - - -SoupDate is discussed below. -SOUP_TYPE_BYTE_ARRAY is just a new -GType value defined by libsoup -to represent GByteArrays, which glib does not define a -GType for. - - - -libsoup provides some additional GValue support -methods for working with -GValueArrays, and GHashTables of -GValues, for the XML-RPC struct and -array types. Eg, you can use soup_value_hash_new -to create a GHashTable to use with the XML-RPC methods, -and soup_value_hash_insert -to add values to it without needing to muck with GValues -directly. - - - -The getbug and xmlrpc-test -programs in the libsoup sources provide -examples of how to use the new API. (Beware that -xmlrpc-test's use of the API is a little -complicated because of the way it sends all calls through a single -do_xmlrpc method.) - - - - - - -SoupServer - - -SoupServer handlers - - - The prototypes for soup_server_add_handler, - and for the SoupServer - handlers themselves have changed: - - - -typedef void (*SoupServerCallback) (SoupServer *server, - SoupMessage *msg, - const char *path, - GHashTable *query, - SoupClientContext *client, - gpointer user_data); - -void soup_server_add_handler (SoupServer *server, - const char *path, - SoupServerCallback callback, - gpointer data, - GDestroyNotify destroy); - - - - soup_server_add_handler no longer takes a - SoupServerAuthContext (see the discussion of server - authentication below), and the order of the final two arguments - has been swapped. 
(Additionally, SoupServerCallbackFn - has been renamed to SoupServerCallback, and the old - unregister parameter of type - SoupServerUnregisterFn is now a standard - GDestroyNotify. The change to - GDestroyNotify and the swapping of the final two - arguments is to make the method conform to standard glib/gtk - practices.) - - - - In SoupServerCallback, several bits of data that used - to be part of the context argument are now - provided directly, and context specifically - only contains more specifically-client-related information (such - as the SoupSocket that the request arrived on, and - information about authentication). - - - - path is the fully %-decoded path component - of msg's URI, and - query is a hash table containing - msg's URI's - query component decoded with soup_form_decode_urlencoded. - These are provided for your convenience; if you need the raw - query, you can get it out of msg's URI - directly. If you need the raw path, you'll need to set the SOUP_SERVER_RAW_PATHS - property on the server, which actually changes the behavior of the - server with respect to how paths are matched; see the - documentation for details. - - - - -Server-side authentication - - - SoupServer authentication has been completely - rewritten, with SoupServerAuthContext being replaced - with SoupAuthDomain. Among - other improvements, you no longer need to have the cleartext - password available to check against. See the - SoupAuthDomain documentation, the server tutorial, and - tests/server-auth-test.c. - - - - -<literal>Expect: 100-continue</literal> and other early <type>SoupMessage</type> processing - - - SoupServer now handles - "Expect: 100-continue" correctly. In - particular, if the client passes that header, and your server - requires authentication, then authentication will be checked - before reading the request body. 
- - - - If you want to do additional pre-request-body handling, you can - connect to SoupServer's request_started - signal, and connect to the request's got_headers - signal from there. (See the description of - request_started for information about other - related SoupServer signals.) - - - - -Date header - - - SoupServer now automatically sets the - Date header on all responses, as required by - RFC 2616. - - - - -SoupServerMessage - - - SoupServerMessage is now merged into - SoupMessage. - soup_server_message_set_encoding is replaced - with soup_message_headers_set_encoding - as described in the section on SoupMessage above. - - - - -<function>soup_server_run</function> / <function>soup_server_quit</function> - - - soup_server_run - and soup_server_run_async - no longer g_object_ref the server, and - soup_server_quit - no longer unrefs it. - - - - - - -Miscellaneous - - -SoupDate - - - The new SoupDate type - replaces the old soup_date_* methods, and has - an improved (more liberal) date parser. - - - - -Header parsing - - - soup-headers.h now has a few additional methods - for parsing list-type headers. - - - - -SoupAddress, SoupSocket - - - SoupSocket has had various simplifications made to - reflect the fact that this is specifically libsoup's socket - implementation, not some random generic socket API. - - - - Various SoupAddress and SoupSocket - methods now take arguments of the new GCancellable type, from - libgio. When porting old code, you can just pass - NULL for these. (soup_address_resolve_async - also takes another new argument, a GMainContext that - you'll want to pass NULL for.) If you pass a - GCancellable, you can use it to cleanly cancel the - address resolution / socket operation. - - - - - -Base64 methods - - - The deprecated base64 methods are now gone; use glib's base64 - methods instead. 
- - - - - - diff --git a/docs/reference/server-howto.xml b/docs/reference/server-howto.xml index 0a9a53d..76c1918 100644 --- a/docs/reference/server-howto.xml +++ b/docs/reference/server-howto.xml @@ -30,7 +30,7 @@ various additional options: - SOUP_SERVER_PORT + SOUP_SERVER_PORT The TCP port to listen on. If 0 (or left unspecified), some unused port will be selected for @@ -39,7 +39,7 @@ various additional options: - SOUP_SERVER_INTERFACE + SOUP_SERVER_INTERFACE A SoupAddress, specifying the IP address of the network interface to run @@ -48,7 +48,7 @@ various additional options: - SOUP_SERVER_SSL_CERT_FILE + SOUP_SERVER_SSL_CERT_FILE Points to a file containing an SSL certificate to use. If this is set, then the server will speak HTTPS; otherwise @@ -56,7 +56,7 @@ various additional options: - SOUP_SERVER_SSL_KEY_FILE + SOUP_SERVER_SSL_KEY_FILE Points to a file containing the private key for the SOUP_SERVER_SSL_CERT_FILE. (It may @@ -64,7 +64,7 @@ various additional options: - SOUP_SERVER_ASYNC_CONTEXT + SOUP_SERVER_ASYNC_CONTEXT A GMainContext which the server will use for asynchronous operations. 
This can @@ -73,7 +73,7 @@ various additional options: - SOUP_SERVER_RAW_PATHS + SOUP_SERVER_RAW_PATHS Set this to TRUE if you don't want libsoup to decode %-encoding diff --git a/debian/dirs b/docs/reference/tmpl/libsoup-2.4-unused.sgml similarity index 100% rename from debian/dirs rename to docs/reference/tmpl/libsoup-2.4-unused.sgml diff --git a/docs/reference/tmpl/soup-address.sgml b/docs/reference/tmpl/soup-address.sgml new file mode 100644 index 0000000..b286d27 --- /dev/null +++ b/docs/reference/tmpl/soup-address.sgml @@ -0,0 +1,259 @@ + +SoupAddress + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@SOUP_ADDRESS_FAMILY_INVALID: +@SOUP_ADDRESS_FAMILY_IPV4: +@SOUP_ADDRESS_FAMILY_IPV6: + + + + + + + + + + + + + +@name: +@port: +@Returns: + + + + + + + +@sa: +@len: +@Returns: + + + + + + + +@family: +@port: +@Returns: + + + + + + + +@addr: +@status: +@user_data: + + + + + + + +@addr: +@async_context: +@cancellable: +@callback: +@user_data: + + + + + + + +@addr: +@cancellable: +@Returns: + + + + + + + +@addr: +@Returns: + + + + + + + +@addr: +@Returns: + + + + + + + +@addr: +@len: +@Returns: + + + + + + + +@addr: +@Returns: + + + + + + + +@addr: +@Returns: + + + + + + + +@addr: +@Returns: + + + + + + + +@addr1: +@addr2: +@Returns: + + + + + + + +@addr: +@Returns: + + + + + + + +@addr1: +@addr2: +@Returns: + + + + + + + +@addr: +@Returns: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-auth-domain-basic.sgml b/docs/reference/tmpl/soup-auth-domain-basic.sgml new file mode 100644 index 0000000..0b9a30c --- /dev/null +++ b/docs/reference/tmpl/soup-auth-domain-basic.sgml @@ -0,0 +1,86 @@ + +SoupAuthDomainBasic + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@optname1: +@...: +@Returns: + + + + + + + +@domain: +@msg: +@username: +@password: +@user_data: +@Returns: + + + + + + + +@domain: +@callback: 
+@user_data: +@dnotify: + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-auth-domain-digest.sgml b/docs/reference/tmpl/soup-auth-domain-digest.sgml new file mode 100644 index 0000000..942cf3c --- /dev/null +++ b/docs/reference/tmpl/soup-auth-domain-digest.sgml @@ -0,0 +1,96 @@ + +SoupAuthDomainDigest + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@optname1: +@...: +@Returns: + + + + + + + +@domain: +@msg: +@username: +@user_data: +@Returns: + + + + + + + +@domain: +@callback: +@user_data: +@dnotify: + + + + + + + +@username: +@realm: +@password: +@Returns: + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-auth-domain.sgml b/docs/reference/tmpl/soup-auth-domain.sgml new file mode 100644 index 0000000..ba058c0 --- /dev/null +++ b/docs/reference/tmpl/soup-auth-domain.sgml @@ -0,0 +1,237 @@ + +SoupAuthDomain + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@domain: +@path: + + + + + + + +@domain: +@path: + + + + + + + +@domain: +@msg: +@user_data: +@Returns: + + + + + + + +@domain: +@filter: +@filter_data: +@dnotify: + + + + + + + +@domain: +@Returns: + + + + + + + +@domain: +@msg: +@username: +@user_data: +@Returns: + + + + + + + +@domain: +@auth_callback: +@auth_data: +@dnotify: + + + + + + + +@domain: +@msg: +@username: +@password: +@Returns: + + + + + + + +@domain: +@msg: +@Returns: + + + + + + + +@domain: +@msg: +@Returns: + + + + + + + +@domain: +@msg: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-auth.sgml b/docs/reference/tmpl/soup-auth.sgml new file mode 100644 index 0000000..85e004c --- /dev/null +++ b/docs/reference/tmpl/soup-auth.sgml @@ -0,0 +1,233 @@ + +SoupAuth + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@soupauth: the object which received the signal. 
+@arg1: +@arg2: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@type: +@msg: +@auth_header: +@Returns: + + + + + + + +@auth: +@msg: +@auth_header: +@Returns: + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@auth: +@Returns: + + + + + + + +@auth: +@Returns: + + + + + + + +@auth: +@Returns: + + + + + + + +@auth: +@Returns: + + + + + + + +@auth: +@Returns: + + + + + + + +@auth: +@username: +@password: + + + + + + + +@auth: +@Returns: + + + + + + + +@auth: +@msg: +@Returns: + + + + + + + +@auth: +@source_uri: +@Returns: + + + + + + + +@auth: +@space: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-cache.sgml b/docs/reference/tmpl/soup-cache.sgml new file mode 100644 index 0000000..30d60d5 --- /dev/null +++ b/docs/reference/tmpl/soup-cache.sgml @@ -0,0 +1,106 @@ + +SoupCache + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@SOUP_CACHE_SINGLE_USER: +@SOUP_CACHE_SHARED: + + + + + + +@cache_dir: +@cache_type: +@Returns: + + + + + + + +@cache: + + + + + + + +@cache: + + + + + + + +@cache: + + + + + + + +@cache: + + + + + + + +@cache: +@Returns: + + + + + + + +@cache: +@max_size: + + diff --git a/docs/reference/tmpl/soup-content-decoder.sgml b/docs/reference/tmpl/soup-content-decoder.sgml new file mode 100644 index 0000000..23b9b45 --- /dev/null +++ b/docs/reference/tmpl/soup-content-decoder.sgml @@ -0,0 +1,28 @@ + +SoupContentDecoder + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-content-sniffer.sgml b/docs/reference/tmpl/soup-content-sniffer.sgml new file mode 100644 index 0000000..2cf941b --- /dev/null +++ b/docs/reference/tmpl/soup-content-sniffer.sgml @@ -0,0 +1,49 @@ + +SoupContentSniffer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@void: +@Returns: + + + + + + + +@sniffer: +@msg: +@buffer: +@params: +@Returns: + + diff --git a/docs/reference/tmpl/soup-cookie-jar-sqlite.sgml 
b/docs/reference/tmpl/soup-cookie-jar-sqlite.sgml new file mode 100644 index 0000000..f7ee9ce --- /dev/null +++ b/docs/reference/tmpl/soup-cookie-jar-sqlite.sgml @@ -0,0 +1,50 @@ + +SoupCookieJarSqlite + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@filename: +@read_only: +@Returns: + + + + + + + + + diff --git a/docs/reference/tmpl/soup-cookie-jar-text.sgml b/docs/reference/tmpl/soup-cookie-jar-text.sgml new file mode 100644 index 0000000..7fa93d1 --- /dev/null +++ b/docs/reference/tmpl/soup-cookie-jar-text.sgml @@ -0,0 +1,50 @@ + +SoupCookieJarText + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@filename: +@read_only: +@Returns: + + + + + + + + + diff --git a/docs/reference/tmpl/soup-cookie-jar.sgml b/docs/reference/tmpl/soup-cookie-jar.sgml new file mode 100644 index 0000000..e1a2dd2 --- /dev/null +++ b/docs/reference/tmpl/soup-cookie-jar.sgml @@ -0,0 +1,156 @@ + +SoupCookieJar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@soupcookiejar: the object which received the signal. 
+@arg1: +@arg2: + + + + + + + + + + + + + + + + +@void: +@Returns: + + + + + + + +@jar: +@uri: +@for_http: +@Returns: + + + + + + + +@jar: +@uri: +@cookie: + + + + + + + +@jar: +@uri: +@first_party: +@cookie: + + + + + + + +@jar: +@cookie: + + + + + + + +@jar: +@cookie: + + + + + + + +@jar: +@Returns: + + + + + + + +@SOUP_COOKIE_JAR_ACCEPT_ALWAYS: +@SOUP_COOKIE_JAR_ACCEPT_NEVER: +@SOUP_COOKIE_JAR_ACCEPT_NO_THIRD_PARTY: + + + + + + +@jar: +@Returns: + + + + + + + +@jar: +@policy: + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-cookie.sgml b/docs/reference/tmpl/soup-cookie.sgml new file mode 100644 index 0000000..5e850bc --- /dev/null +++ b/docs/reference/tmpl/soup-cookie.sgml @@ -0,0 +1,322 @@ + +SoupCookie + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@name: +@value: +@domain: +@path: +@max_age: +@Returns: + + + + + + + +@header: +@origin: +@Returns: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: + + + + + + + +@cookie: +@name: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@value: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@domain: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@path: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@max_age: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@cookie: +@expires: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@secure: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@http_only: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@uri: +@Returns: + + + + + + + +@cookie: +@host: +@Returns: + + + + + + + +@cookie: +@Returns: + + + + + + + +@cookie: +@Returns: + + + + + + + +@msg: +@Returns: + + + + + + + +@msg: +@Returns: + + + + + + + +@cookies: +@msg: + + + + + + + +@cookies: +@msg: + + + + + + + +@cookies: +@Returns: + + + + + + + +@cookies: + + diff --git a/docs/reference/tmpl/soup-form.sgml b/docs/reference/tmpl/soup-form.sgml new file mode 100644 index 
0000000..b80fa4b --- /dev/null +++ b/docs/reference/tmpl/soup-form.sgml @@ -0,0 +1,140 @@ + +HTML Form Support + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@encoded_form: +@Returns: + + + + + + + +@msg: +@file_control_name: +@filename: +@content_type: +@file: +@Returns: + + + + + + + +@first_field: +@...: +@Returns: + + + + + + + +@form_data_set: +@Returns: + + + + + + + +@form_data_set: +@Returns: + + + + + + + +@first_field: +@args: +@Returns: + + + + + + + +@method: +@uri: +@first_field: +@...: +@Returns: + + + + + + + +@method: +@uri: +@form_data_set: +@Returns: + + + + + + + +@method: +@uri: +@form_data_set: +@Returns: + + + + + + + +@uri: +@multipart: +@Returns: + + diff --git a/docs/reference/tmpl/soup-gnome-features.sgml b/docs/reference/tmpl/soup-gnome-features.sgml new file mode 100644 index 0000000..207bca1 --- /dev/null +++ b/docs/reference/tmpl/soup-gnome-features.sgml @@ -0,0 +1,36 @@ + +soup-gnome-features + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-logger.sgml b/docs/reference/tmpl/soup-logger.sgml new file mode 100644 index 0000000..5056e1f --- /dev/null +++ b/docs/reference/tmpl/soup-logger.sgml @@ -0,0 +1,122 @@ + +SoupLogger + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@SOUP_LOGGER_LOG_NONE: +@SOUP_LOGGER_LOG_MINIMAL: +@SOUP_LOGGER_LOG_HEADERS: +@SOUP_LOGGER_LOG_BODY: + + + + + + +@level: +@max_body_size: +@Returns: + + + + + + + +@logger: +@session: + + + + + + + +@logger: +@session: + + + + + + + +@logger: +@msg: +@user_data: +@Returns: + + + + + + + +@logger: +@request_filter: +@filter_data: +@destroy: + + + + + + + +@logger: +@response_filter: +@filter_data: +@destroy: + + + + + + + +@logger: +@level: +@direction: +@data: +@user_data: + + + + + + + +@logger: +@printer: +@printer_data: +@destroy: + + diff --git a/docs/reference/tmpl/soup-message-body.sgml b/docs/reference/tmpl/soup-message-body.sgml new file mode 100644 index 
0000000..5d7328e --- /dev/null +++ b/docs/reference/tmpl/soup-message-body.sgml @@ -0,0 +1,242 @@ + +SoupMessageBody + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@SOUP_MEMORY_STATIC: +@SOUP_MEMORY_TAKE: +@SOUP_MEMORY_COPY: +@SOUP_MEMORY_TEMPORARY: + + + + + + +@use: +@data: +@length: +@Returns: + + + + + + + +@parent: +@offset: +@length: +@Returns: + + + + + + + +@data: +@length: +@owner: +@owner_dnotify: +@Returns: + + + + + + + +@data: +@length: +@Returns: + + + + + + + +@buffer: +@Returns: + + + + + + + +@buffer: +@data: +@length: + + + + + + + +@buffer: +@Returns: + + + + + + + +@buffer: + + + + + + + + + + + + + +@void: +@Returns: + + + + + + + +@body: + + + + + + + +@body: +@accumulate: + + + + + + + +@body: +@Returns: + + + + + + + +@body: +@use: +@data: +@length: + + + + + + + +@body: +@buffer: + + + + + + + +@body: +@data: +@length: + + + + + + + +@body: + + + + + + + +@body: + + + + + + + +@body: +@Returns: + + + + + + + +@body: +@offset: +@Returns: + + + + + + + +@body: +@chunk: + + + + + + + +@body: +@chunk: + + diff --git a/docs/reference/tmpl/soup-message-headers.sgml b/docs/reference/tmpl/soup-message-headers.sgml new file mode 100644 index 0000000..b29eb9e --- /dev/null +++ b/docs/reference/tmpl/soup-message-headers.sgml @@ -0,0 +1,362 @@ + +SoupMessageHeaders + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@SOUP_MESSAGE_HEADERS_REQUEST: +@SOUP_MESSAGE_HEADERS_RESPONSE: +@SOUP_MESSAGE_HEADERS_MULTIPART: + + + + + + +@type: +@Returns: + + + + + + + +@hdrs: + + + + + + + +@hdrs: +@name: +@value: + + + + + + + +@hdrs: +@name: +@value: + + + + + + + +@hdrs: +@name: + + + + + + + +@hdrs: + + + + + + + +@hdrs: + + + + + + + +@hdrs: +@name: +@Returns: + + + + + + + +@hdrs: +@name: +@Returns: + + + + + + + +@hdrs: +@name: +@Returns: + + + + + + + +@name: +@value: +@user_data: + + + + + + + +@hdrs: +@func: +@user_data: + + + + + + + + + + + + + +@iter: +@hdrs: + + + + + + + +@iter: +@name: +@value: +@Returns: + + + + + + + 
+@SOUP_ENCODING_UNRECOGNIZED: +@SOUP_ENCODING_NONE: +@SOUP_ENCODING_CONTENT_LENGTH: +@SOUP_ENCODING_EOF: +@SOUP_ENCODING_CHUNKED: +@SOUP_ENCODING_BYTERANGES: + + + + + + +@hdrs: +@Returns: + + + + + + + +@hdrs: +@encoding: + + + + + + + +@hdrs: +@Returns: + + + + + + + +@hdrs: +@content_length: + + + + + + + +@SOUP_EXPECTATION_UNRECOGNIZED: +@SOUP_EXPECTATION_CONTINUE: + + + + + + +@hdrs: +@Returns: + + + + + + + +@hdrs: +@expectations: + + + + + + + +@hdrs: +@params: +@Returns: + + + + + + + +@hdrs: +@content_type: +@params: + + + + + + + +@hdrs: +@disposition: +@params: +@Returns: + + + + + + + +@hdrs: +@disposition: +@params: + + + + + + + +@start: +@end: + + + + + + +@hdrs: +@total_length: +@ranges: +@length: +@Returns: + + + + + + + +@hdrs: +@ranges: +@length: + + + + + + + +@hdrs: +@start: +@end: + + + + + + + +@hdrs: +@ranges: + + + + + + + +@hdrs: +@start: +@end: +@total_length: +@Returns: + + + + + + + +@hdrs: +@start: +@end: +@total_length: + + diff --git a/docs/reference/tmpl/soup-message.sgml b/docs/reference/tmpl/soup-message.sgml new file mode 100644 index 0000000..d99c9f5 --- /dev/null +++ b/docs/reference/tmpl/soup-message.sgml @@ -0,0 +1,551 @@ + +SoupMessage + + + + + + + + + + + + + + + + + + + + + + + + + +@method: +@status_code: +@reason_phrase: +@request_body: +@request_headers: +@response_body: +@response_headers: + + + + + + +@soupmessage: the object which received the signal. +@arg1: +@arg2: + + + + + + +@soupmessage: the object which received the signal. + + + + + + +@soupmessage: the object which received the signal. + + + + + + +@soupmessage: the object which received the signal. +@arg1: + + + + + + +@soupmessage: the object which received the signal. + + + + + + +@soupmessage: the object which received the signal. + + + + + + +@soupmessage: the object which received the signal. +@arg1: +@arg2: + + + + + + +@soupmessage: the object which received the signal. + + + + + + +@soupmessage: the object which received the signal. 
+ + + + + + +@soupmessage: the object which received the signal. +@arg1: + + + + + + +@soupmessage: the object which received the signal. + + + + + + +@soupmessage: the object which received the signal. + + + + + + +@soupmessage: the object which received the signal. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@method: +@uri_string: +@Returns: + + + + + + + +@method: +@uri: +@Returns: + + + + + + + +@msg: +@content_type: +@req_use: +@req_body: +@req_length: + + + + + + + +@msg: +@content_type: +@resp_use: +@resp_body: +@resp_length: + + + + + + + +@SOUP_HTTP_1_0: +@SOUP_HTTP_1_1: + + + + + + +@msg: +@version: + + + + + + + +@msg: +@Returns: + + + + + + + +@msg: +@Returns: + + + + + + + +@msg: +@uri: + + + + + + + +@msg: +@Returns: + + + + + + + +@msg: +@status_code: + + + + + + + +@msg: +@status_code: +@reason_phrase: + + + + + + + +@msg: +@status_code: +@redirect_uri: + + + + + + + +@msg: +@Returns: + + + + + + + +@msg: +@certificate: +@errors: +@Returns: + + + + + + + +@msg: +@first_party: + + + + + + + +@msg: +@Returns: + + + + + + + +@msg: +@signal: +@header: +@callback: +@user_data: +@Returns: + + + + + + + +@msg: +@signal: +@status_code: +@callback: +@user_data: +@Returns: + + + + + + + +@SOUP_MESSAGE_NO_REDIRECT: +@SOUP_MESSAGE_CAN_REBUILD: +@SOUP_MESSAGE_OVERWRITE_CHUNKS: +@SOUP_MESSAGE_CONTENT_DECODED: +@SOUP_MESSAGE_CERTIFICATE_TRUSTED: +@SOUP_MESSAGE_NEW_CONNECTION: + + + + + + +@msg: +@flags: + + + + + + + +@msg: +@Returns: + + + + + + + +@msg: +@max_len: +@user_data: +@Returns: + + + + + + + +@msg: +@allocator: +@user_data: +@destroy_notify: + + + + + + + +@msg: +@feature_type: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-method.sgml 
b/docs/reference/tmpl/soup-method.sgml new file mode 100644 index 0000000..d2996e1 --- /dev/null +++ b/docs/reference/tmpl/soup-method.sgml @@ -0,0 +1,127 @@ + +soup-method + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-misc.sgml b/docs/reference/tmpl/soup-misc.sgml new file mode 100644 index 0000000..bf6a259 --- /dev/null +++ b/docs/reference/tmpl/soup-misc.sgml @@ -0,0 +1,405 @@ + +Soup Miscellaneous Utilities + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@SOUP_DATE_HTTP: +@SOUP_DATE_COOKIE: +@SOUP_DATE_RFC2822: +@SOUP_DATE_ISO8601_COMPACT: +@SOUP_DATE_ISO8601_FULL: +@SOUP_DATE_ISO8601: +@SOUP_DATE_ISO8601_XMLRPC: + + + + + + +@year: +@month: +@day: +@hour: +@minute: +@second: +@Returns: + + + + + + + +@date_string: +@Returns: + + + + + + + +@when: +@Returns: + + + + + + + +@offset_seconds: +@Returns: + + + + + + + +@date: +@format: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@time: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: +@Returns: + + + + + + + +@date: + + + + + + + +@str: +@len: +@req_headers: +@req_method: +@req_path: +@ver: +@Returns: + + + + + + + +@str: +@len: +@headers: +@ver: +@status_code: +@reason_phrase: +@Returns: + + + + + + + +@status_line: +@ver: +@status_code: +@reason_phrase: +@Returns: + + + + + + + +@str: +@len: +@dest: +@Returns: + + + + + + + +@header: +@Returns: + + + + + + + +@header: +@unacceptable: +@Returns: + + + + + + + +@list: + + + + + + + +@header: +@token: +@Returns: + + + + + + + +@header: +@Returns: + + + + + + 
+ +@header: +@Returns: + + + + + + + +@param_list: + + + + + + + +@string: +@name: +@value: + + + + + + + +@string: +@name: +@value: + + + + + + + +@v1: +@v2: +@Returns: + + + + + + + +@key: +@Returns: + + + + + + + +@async_context: +@function: +@data: +@Returns: + + + + + + + +@async_context: +@function: +@data: +@Returns: + + + + + + + +@async_context: +@chan: +@condition: +@function: +@data: +@Returns: + + + + + + + +@async_context: +@interval: +@function: +@data: +@Returns: + + + + + + + + diff --git a/docs/reference/tmpl/soup-multipart.sgml b/docs/reference/tmpl/soup-multipart.sgml new file mode 100644 index 0000000..5b0145d --- /dev/null +++ b/docs/reference/tmpl/soup-multipart.sgml @@ -0,0 +1,118 @@ + +SoupMultipart + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@mime_type: +@Returns: + + + + + + + +@headers: +@body: +@Returns: + + + + + + + +@multipart: + + + + + + + +@multipart: +@Returns: + + + + + + + +@multipart: +@part: +@headers: +@body: +@Returns: + + + + + + + +@multipart: +@headers: +@body: + + + + + + + +@multipart: +@control_name: +@data: + + + + + + + +@multipart: +@control_name: +@filename: +@content_type: +@body: + + + + + + + +@multipart: +@dest_headers: +@dest_body: + + diff --git a/docs/reference/tmpl/soup-proxy-resolver-default.sgml b/docs/reference/tmpl/soup-proxy-resolver-default.sgml new file mode 100644 index 0000000..a36aaa2 --- /dev/null +++ b/docs/reference/tmpl/soup-proxy-resolver-default.sgml @@ -0,0 +1,33 @@ + +SoupProxyResolverDefault + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-proxy-uri-resolver.sgml b/docs/reference/tmpl/soup-proxy-uri-resolver.sgml new file mode 100644 index 0000000..f78d846 --- /dev/null +++ b/docs/reference/tmpl/soup-proxy-uri-resolver.sgml @@ -0,0 +1,64 @@ + +SoupProxyURIResolver + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@resolver: +@status: +@proxy_uri: +@user_data: + + + + + + + +@proxy_uri_resolver: +@uri: 
+@async_context: +@cancellable: +@callback: +@user_data: + + + + + + + +@proxy_uri_resolver: +@uri: +@cancellable: +@proxy_uri: +@Returns: + + diff --git a/docs/reference/tmpl/soup-request-data.sgml b/docs/reference/tmpl/soup-request-data.sgml new file mode 100644 index 0000000..31ab1e0 --- /dev/null +++ b/docs/reference/tmpl/soup-request-data.sgml @@ -0,0 +1,28 @@ + +SoupRequestData + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-request-file.sgml b/docs/reference/tmpl/soup-request-file.sgml new file mode 100644 index 0000000..229a99c --- /dev/null +++ b/docs/reference/tmpl/soup-request-file.sgml @@ -0,0 +1,37 @@ + +SoupRequestFile + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@file: +@Returns: + + diff --git a/docs/reference/tmpl/soup-request-http.sgml b/docs/reference/tmpl/soup-request-http.sgml new file mode 100644 index 0000000..0a0d1cf --- /dev/null +++ b/docs/reference/tmpl/soup-request-http.sgml @@ -0,0 +1,37 @@ + +SoupRequestHTTP + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@http: +@Returns: + + diff --git a/docs/reference/tmpl/soup-request.sgml b/docs/reference/tmpl/soup-request.sgml new file mode 100644 index 0000000..3003847 --- /dev/null +++ b/docs/reference/tmpl/soup-request.sgml @@ -0,0 +1,121 @@ + +SoupRequest + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@request: +@cancellable: +@error: +@Returns: + + + + + + + +@request: +@cancellable: +@callback: +@user_data: + + + + + + + +@request: +@result: +@error: +@Returns: + + + + + + + +@request: +@Returns: + + + + + + + +@request: +@Returns: + + + + + + + +@request: +@Returns: + + + + + + + +@request: +@Returns: + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-requester.sgml b/docs/reference/tmpl/soup-requester.sgml new file mode 100644 index 0000000..5dccae6 --- /dev/null +++ b/docs/reference/tmpl/soup-requester.sgml @@ -0,0 +1,74 @@ + +SoupRequester + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + +@void: +@Returns: + + + + + + + +@requester: +@uri_string: +@error: +@Returns: + + + + + + + +@requester: +@uri: +@error: +@Returns: + + + + + + + +@SOUP_REQUESTER_ERROR_BAD_URI: +@SOUP_REQUESTER_ERROR_UNSUPPORTED_URI_SCHEME: + + + + + + + + diff --git a/docs/reference/tmpl/soup-server.sgml b/docs/reference/tmpl/soup-server.sgml new file mode 100644 index 0000000..5e2db40 --- /dev/null +++ b/docs/reference/tmpl/soup-server.sgml @@ -0,0 +1,352 @@ + +SoupServer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@soupserver: the object which received the signal. +@arg1: +@arg2: + + + + + + +@soupserver: the object which received the signal. +@arg1: +@arg2: + + + + + + +@soupserver: the object which received the signal. +@arg1: +@arg2: + + + + + + +@soupserver: the object which received the signal. +@arg1: +@arg2: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@optname1: +@...: +@Returns: + + + + + + + +@server: +@Returns: + + + + + + + +@server: +@Returns: + + + + + + + +@server: +@Returns: + + + + + + + +@server: + + + + + + + +@server: + + + + + + + +@server: + + + + + + + +@server: + + + + + + + +@server: +@Returns: + + + + + + + +@server: +@msg: +@path: +@query: +@client: +@user_data: + + + + + + + +@server: +@path: +@callback: +@user_data: +@destroy: + + + + + + + +@server: +@path: + + + + + + + + + + + + + +@client: +@Returns: + + + + + + + +@client: +@Returns: + + + + + + + +@client: +@Returns: + + + + + + + +@client: +@Returns: + + + + + + + +@client: +@Returns: + + + + + + + +@server: +@auth_domain: + + + + + + + +@server: +@auth_domain: + + + + + + + +@server: +@msg: + + + + + + + +@server: +@msg: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-session-async.sgml b/docs/reference/tmpl/soup-session-async.sgml new file mode 100644 index 0000000..84ce2d8 --- /dev/null +++ 
b/docs/reference/tmpl/soup-session-async.sgml @@ -0,0 +1,47 @@ + +SoupSessionAsync + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@void: +@Returns: + + + + + + + +@optname1: +@...: +@Returns: + + diff --git a/docs/reference/tmpl/soup-session-feature.sgml b/docs/reference/tmpl/soup-session-feature.sgml new file mode 100644 index 0000000..7ba9a25 --- /dev/null +++ b/docs/reference/tmpl/soup-session-feature.sgml @@ -0,0 +1,43 @@ + +SoupSessionFeature + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@parent: +@attach: +@detach: +@request_queued: +@request_started: +@request_unqueued: +@add_feature: +@remove_feature: +@has_feature: + diff --git a/docs/reference/tmpl/soup-session-sync.sgml b/docs/reference/tmpl/soup-session-sync.sgml new file mode 100644 index 0000000..3f520a5 --- /dev/null +++ b/docs/reference/tmpl/soup-session-sync.sgml @@ -0,0 +1,47 @@ + +SoupSessionSync + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@void: +@Returns: + + + + + + + +@optname1: +@...: +@Returns: + + diff --git a/docs/reference/tmpl/soup-session.sgml b/docs/reference/tmpl/soup-session.sgml new file mode 100644 index 0000000..a5e2677 --- /dev/null +++ b/docs/reference/tmpl/soup-session.sgml @@ -0,0 +1,479 @@ + +SoupSession + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@soupsession: the object which received the signal. +@arg1: +@arg2: +@arg3: + + + + + + +@soupsession: the object which received the signal. +@arg1: + + + + + + +@soupsession: the object which received the signal. +@arg1: + + + + + + +@soupsession: the object which received the signal. +@arg1: +@arg2: + + + + + + +@soupsession: the object which received the signal. +@arg1: + + + + + + +@soupsession: the object which received the signal. 
+@arg1: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@session: +@msg: +@user_data: + + + + + + + +@session: +@msg: +@callback: +@user_data: + + + + + + + +@session: +@msg: + + + + + + + +@session: +@msg: +@Returns: + + + + + + + +@session: +@msg: +@status_code: + + + + + + + +@session: +@uri: + + + + + + + +@session: + + + + + + + +@session: +@msg: +@Returns: + + + + + + + +@session: +@msg: +@Returns: + + + + + + + +@session: +@msg: + + + + + + + +@session: +@msg: + + + + + + + +@session: +@Returns: + + + + + + + +@session: +@feature: + + + + + + + +@session: +@feature_type: + + + + + + + +@session: +@feature: + + + + + + + +@session: +@feature_type: + + + + + + + +@session: +@feature_type: +@Returns: + + + + + + + +@session: +@feature_type: +@Returns: + + + + + + + +@session: +@feature_type: +@msg: +@Returns: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-socket.sgml b/docs/reference/tmpl/soup-socket.sgml new file mode 100644 index 0000000..b3d7039 --- /dev/null +++ b/docs/reference/tmpl/soup-socket.sgml @@ -0,0 +1,390 @@ + +SoupSocket + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@soupsocket: the object which received the signal. + + + + + + +@soupsocket: the object which received the signal. +@arg1: +@arg2: + + + + + + +@soupsocket: the object which received the signal. +@arg1: + + + + + + +@soupsocket: the object which received the signal. + + + + + + +@soupsocket: the object which received the signal. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@optname1: +@...: +@Returns: + + + + + + + +@sock: +@status: +@user_data: + + + + + + + +@sock: +@cancellable: +@callback: +@user_data: + + + + + + + +@sock: +@cancellable: +@Returns: + + + + + + + +@sock: +@Returns: + + + + + + + +@sock: +@cancellable: +@Returns: + + + + + + + +@sock: +@ssl_host: +@cancellable: +@Returns: + + + + + + + +@sock: +@Returns: + + + + + + + +@sock: + + + + + + + +@sock: +@Returns: + + + + + + + +@sock: +@Returns: + + + + + + + +@sock: +@Returns: + + + + + + + +@SOUP_SOCKET_OK: +@SOUP_SOCKET_WOULD_BLOCK: +@SOUP_SOCKET_EOF: +@SOUP_SOCKET_ERROR: + + + + + + +@sock: +@buffer: +@len: +@nread: +@cancellable: +@error: +@Returns: + + + + + + + +@sock: +@buffer: +@len: +@boundary: +@boundary_len: +@nread: +@got_boundary: +@cancellable: +@error: +@Returns: + + + + + + + +@sock: +@buffer: +@len: +@nwrote: +@cancellable: +@error: +@Returns: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/reference/tmpl/soup-status.sgml b/docs/reference/tmpl/soup-status.sgml new file mode 100644 index 0000000..eb2e385 --- /dev/null +++ b/docs/reference/tmpl/soup-status.sgml @@ -0,0 +1,164 @@ + +soup-status + + + + + + + + + + + + + + + + + + + + + + + + + +@status: + + + + + + + +@status: + + + + + + + +@status: + + + + + + + +@status: + + + + + + + +@status: + + + + + + + +@status: + + + + + + + +@SOUP_STATUS_NONE: +@SOUP_STATUS_CANCELLED: +@SOUP_STATUS_CANT_RESOLVE: +@SOUP_STATUS_CANT_RESOLVE_PROXY: +@SOUP_STATUS_CANT_CONNECT: +@SOUP_STATUS_CANT_CONNECT_PROXY: +@SOUP_STATUS_SSL_FAILED: +@SOUP_STATUS_IO_ERROR: +@SOUP_STATUS_MALFORMED: +@SOUP_STATUS_TRY_AGAIN: +@SOUP_STATUS_TOO_MANY_REDIRECTS: +@SOUP_STATUS_TLS_FAILED: +@SOUP_STATUS_CONTINUE: +@SOUP_STATUS_SWITCHING_PROTOCOLS: 
+@SOUP_STATUS_PROCESSING: +@SOUP_STATUS_OK: +@SOUP_STATUS_CREATED: +@SOUP_STATUS_ACCEPTED: +@SOUP_STATUS_NON_AUTHORITATIVE: +@SOUP_STATUS_NO_CONTENT: +@SOUP_STATUS_RESET_CONTENT: +@SOUP_STATUS_PARTIAL_CONTENT: +@SOUP_STATUS_MULTI_STATUS: +@SOUP_STATUS_MULTIPLE_CHOICES: +@SOUP_STATUS_MOVED_PERMANENTLY: +@SOUP_STATUS_FOUND: +@SOUP_STATUS_MOVED_TEMPORARILY: +@SOUP_STATUS_SEE_OTHER: +@SOUP_STATUS_NOT_MODIFIED: +@SOUP_STATUS_USE_PROXY: +@SOUP_STATUS_NOT_APPEARING_IN_THIS_PROTOCOL: +@SOUP_STATUS_TEMPORARY_REDIRECT: +@SOUP_STATUS_BAD_REQUEST: +@SOUP_STATUS_UNAUTHORIZED: +@SOUP_STATUS_PAYMENT_REQUIRED: +@SOUP_STATUS_FORBIDDEN: +@SOUP_STATUS_NOT_FOUND: +@SOUP_STATUS_METHOD_NOT_ALLOWED: +@SOUP_STATUS_NOT_ACCEPTABLE: +@SOUP_STATUS_PROXY_AUTHENTICATION_REQUIRED: +@SOUP_STATUS_PROXY_UNAUTHORIZED: +@SOUP_STATUS_REQUEST_TIMEOUT: +@SOUP_STATUS_CONFLICT: +@SOUP_STATUS_GONE: +@SOUP_STATUS_LENGTH_REQUIRED: +@SOUP_STATUS_PRECONDITION_FAILED: +@SOUP_STATUS_REQUEST_ENTITY_TOO_LARGE: +@SOUP_STATUS_REQUEST_URI_TOO_LONG: +@SOUP_STATUS_UNSUPPORTED_MEDIA_TYPE: +@SOUP_STATUS_REQUESTED_RANGE_NOT_SATISFIABLE: +@SOUP_STATUS_INVALID_RANGE: +@SOUP_STATUS_EXPECTATION_FAILED: +@SOUP_STATUS_UNPROCESSABLE_ENTITY: +@SOUP_STATUS_LOCKED: +@SOUP_STATUS_FAILED_DEPENDENCY: +@SOUP_STATUS_INTERNAL_SERVER_ERROR: +@SOUP_STATUS_NOT_IMPLEMENTED: +@SOUP_STATUS_BAD_GATEWAY: +@SOUP_STATUS_SERVICE_UNAVAILABLE: +@SOUP_STATUS_GATEWAY_TIMEOUT: +@SOUP_STATUS_HTTP_VERSION_NOT_SUPPORTED: +@SOUP_STATUS_INSUFFICIENT_STORAGE: +@SOUP_STATUS_NOT_EXTENDED: + + + + + + +@status_code: +@Returns: + + + + + + + +@status_code: +@Returns: + + + + + + + + + diff --git a/docs/reference/tmpl/soup-uri.sgml b/docs/reference/tmpl/soup-uri.sgml new file mode 100644 index 0000000..1318868 --- /dev/null +++ b/docs/reference/tmpl/soup-uri.sgml @@ -0,0 +1,356 @@ + +SoupURI + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@base: +@uri_string: +@Returns: + + + + + + + +@uri_string: +@Returns: + + + + + + + +@uri: 
+@just_path_and_query: +@Returns: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri1: +@uri2: +@Returns: + + + + + + + +@v1: +@v2: +@Returns: + + + + + + + +@key: +@Returns: + + + + + + + +@uri: + + + + + + + +@part: +@escape_extra: +@Returns: + + + + + + + +@part: +@Returns: + + + + + + + +@part: +@unescape_extra: +@Returns: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: + + + + + + + +@uri: +@scheme: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@user: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@password: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@host: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@port: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@path: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@query: + + + + + + + +@uri: +@form: + + + + + + + +@uri: +@first_field: +@...: + + + + + + + +@uri: +@Returns: + + + + + + + +@uri: +@fragment: + + + + + + + +@uri: +@Returns: + + diff --git a/docs/reference/tmpl/soup-value-utils.sgml b/docs/reference/tmpl/soup-value-utils.sgml new file mode 100644 index 0000000..aa70d0e --- /dev/null +++ b/docs/reference/tmpl/soup-value-utils.sgml @@ -0,0 +1,203 @@ + +GValue Support + + + + + + + + + + + + + + + + + + + + + + + + + +@void: +@Returns: + + + + + + + +@first_key: +@...: +@Returns: + + + + + + + +@hash: +@key: +@value: + + + + + + + +@hash: +@key: +@type: +@...: + + + + + + + +@hash: +@first_key: +@...: + + + + + + + +@hash: +@key: +@type: +@...: +@Returns: + + + + + + + +@hash: +@first_key: +@...: +@Returns: + + + + + + + +@args: +@Returns: + + + + + + + +@array: +@args: +@Returns: + + + + + + + +@void: +@Returns: + + + + + + + +@first_type: +@...: +@Returns: + + + + + + + +@array: +@index_: +@type: +@...: + + + + + + + +@array: +@type: +@...: + + + + + + + +@array: +@first_type: +@...: + + + + + + + +@array: +@index_: +@type: +@...: +@Returns: + + + + + 
+ + +@val: +@type: +@args: + + + + + + + +@val: +@type: +@args: + + + + + + + + + diff --git a/docs/reference/tmpl/soup-xmlrpc.sgml b/docs/reference/tmpl/soup-xmlrpc.sgml new file mode 100644 index 0000000..4a5e7e9 --- /dev/null +++ b/docs/reference/tmpl/soup-xmlrpc.sgml @@ -0,0 +1,157 @@ + +XMLRPC Support + + + + + + + + + + + + + + + + + + + + + + + + + +@method_name: +@params: +@n_params: +@Returns: + + + + + + + +@uri: +@method_name: +@...: +@Returns: + + + + + + + +@method_response: +@length: +@value: +@error: +@Returns: + + + + + + + +@method_response: +@length: +@error: +@type: +@...: +@Returns: + + + + + + + +@method_call: +@length: +@method_name: +@params: +@Returns: + + + + + + + +@method_call: +@length: +@method_name: +@...: +@Returns: + + + + + + + +@value: +@Returns: + + + + + + + +@fault_code: +@fault_format: +@...: +@Returns: + + + + + + + +@msg: +@type: +@...: + + + + + + + +@msg: +@fault_code: +@fault_format: +@...: + + + + + + + + + + + + + + +@SOUP_XMLRPC_FAULT_PARSE_ERROR_NOT_WELL_FORMED: +@SOUP_XMLRPC_FAULT_PARSE_ERROR_UNSUPPORTED_ENCODING: +@SOUP_XMLRPC_FAULT_PARSE_ERROR_INVALID_CHARACTER_FOR_ENCODING: +@SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_XML_RPC: +@SOUP_XMLRPC_FAULT_SERVER_ERROR_REQUESTED_METHOD_NOT_FOUND: +@SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_METHOD_PARAMETERS: +@SOUP_XMLRPC_FAULT_SERVER_ERROR_INTERNAL_XML_RPC_ERROR: +@SOUP_XMLRPC_FAULT_APPLICATION_ERROR: +@SOUP_XMLRPC_FAULT_SYSTEM_ERROR: +@SOUP_XMLRPC_FAULT_TRANSPORT_ERROR: + diff --git a/docs/specs/README b/docs/specs/README deleted file mode 100644 index 0dee62d..0000000 --- a/docs/specs/README +++ /dev/null @@ -1,13 +0,0 @@ -rfc1945 - HTTP/1.0 -rfc2068 - HTTP/1.1 (mostly obsoleted original specification) -rfc2109 - HTTP State Management Mechanism -rfc2145 - Use and Interpretation of HTTP Version Numbers -rfc2324 - Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0) -rfc2388 - Returning Values from Forms: multipart/form-data -rfc2518 - HTTP Extensions for Distributed Authoring -- 
WEBDAV -rfc2616 - HTTP/1.1 (revised) [plus errata] -rfc2617 - HTTP Authentication: Basic and Digest Access Authentication [plus errata] -rfc2817 - Upgrading to TLS Within HTTP/1.1 -rfc2818 - HTTP Over TLS -rfc2965 - HTTP State Management Mechanism (allegedly obsoletes 2109) -rfc3986 - Uniform Resource Identifiers (URI): Generic Syntax diff --git a/docs/specs/rfc1945.txt b/docs/specs/rfc1945.txt deleted file mode 100644 index 37f3f23..0000000 --- a/docs/specs/rfc1945.txt +++ /dev/null @@ -1,3363 +0,0 @@ - - - - - - -Network Working Group T. Berners-Lee -Request for Comments: 1945 MIT/LCS -Category: Informational R. Fielding - UC Irvine - H. Frystyk - MIT/LCS - May 1996 - - - Hypertext Transfer Protocol -- HTTP/1.0 - -Status of This Memo - - This memo provides information for the Internet community. This memo - does not specify an Internet standard of any kind. Distribution of - this memo is unlimited. - -IESG Note: - - The IESG has concerns about this protocol, and expects this document - to be replaced relatively soon by a standards track document. - -Abstract - - The Hypertext Transfer Protocol (HTTP) is an application-level - protocol with the lightness and speed necessary for distributed, - collaborative, hypermedia information systems. It is a generic, - stateless, object-oriented protocol which can be used for many tasks, - such as name servers and distributed object management systems, - through extension of its request methods (commands). A feature of - HTTP is the typing of data representation, allowing systems to be - built independently of the data being transferred. - - HTTP has been in use by the World-Wide Web global information - initiative since 1990. This specification reflects common usage of - the protocol referred to as "HTTP/1.0". - -Table of Contents - - 1. Introduction .............................................. 4 - 1.1 Purpose .............................................. 4 - 1.2 Terminology .......................................... 
4 - 1.3 Overall Operation .................................... 6 - 1.4 HTTP and MIME ........................................ 8 - 2. Notational Conventions and Generic Grammar ................ 8 - 2.1 Augmented BNF ........................................ 8 - 2.2 Basic Rules .......................................... 10 - 3. Protocol Parameters ....................................... 12 - - - -Berners-Lee, et al Informational [Page 1] - -RFC 1945 HTTP/1.0 May 1996 - - - 3.1 HTTP Version ......................................... 12 - 3.2 Uniform Resource Identifiers ......................... 14 - 3.2.1 General Syntax ................................ 14 - 3.2.2 http URL ...................................... 15 - 3.3 Date/Time Formats .................................... 15 - 3.4 Character Sets ....................................... 17 - 3.5 Content Codings ...................................... 18 - 3.6 Media Types .......................................... 19 - 3.6.1 Canonicalization and Text Defaults ............ 19 - 3.6.2 Multipart Types ............................... 20 - 3.7 Product Tokens ....................................... 20 - 4. HTTP Message .............................................. 21 - 4.1 Message Types ........................................ 21 - 4.2 Message Headers ...................................... 22 - 4.3 General Header Fields ................................ 23 - 5. Request ................................................... 23 - 5.1 Request-Line ......................................... 23 - 5.1.1 Method ........................................ 24 - 5.1.2 Request-URI ................................... 24 - 5.2 Request Header Fields ................................ 25 - 6. Response .................................................. 25 - 6.1 Status-Line .......................................... 26 - 6.1.1 Status Code and Reason Phrase ................. 26 - 6.2 Response Header Fields ............................... 28 - 7. 
Entity .................................................... 28 - 7.1 Entity Header Fields ................................. 29 - 7.2 Entity Body .......................................... 29 - 7.2.1 Type .......................................... 29 - 7.2.2 Length ........................................ 30 - 8. Method Definitions ........................................ 30 - 8.1 GET .................................................. 31 - 8.2 HEAD ................................................. 31 - 8.3 POST ................................................. 31 - 9. Status Code Definitions ................................... 32 - 9.1 Informational 1xx .................................... 32 - 9.2 Successful 2xx ....................................... 32 - 9.3 Redirection 3xx ...................................... 34 - 9.4 Client Error 4xx ..................................... 35 - 9.5 Server Error 5xx ..................................... 37 - 10. Header Field Definitions .................................. 37 - 10.1 Allow ............................................... 38 - 10.2 Authorization ....................................... 38 - 10.3 Content-Encoding .................................... 39 - 10.4 Content-Length ...................................... 39 - 10.5 Content-Type ........................................ 40 - 10.6 Date ................................................ 40 - 10.7 Expires ............................................. 41 - 10.8 From ................................................ 42 - - - -Berners-Lee, et al Informational [Page 2] - -RFC 1945 HTTP/1.0 May 1996 - - - 10.9 If-Modified-Since ................................... 42 - 10.10 Last-Modified ....................................... 43 - 10.11 Location ............................................ 44 - 10.12 Pragma .............................................. 44 - 10.13 Referer ............................................. 
44 - 10.14 Server .............................................. 45 - 10.15 User-Agent .......................................... 46 - 10.16 WWW-Authenticate .................................... 46 - 11. Access Authentication ..................................... 47 - 11.1 Basic Authentication Scheme ......................... 48 - 12. Security Considerations ................................... 49 - 12.1 Authentication of Clients ........................... 49 - 12.2 Safe Methods ........................................ 49 - 12.3 Abuse of Server Log Information ..................... 50 - 12.4 Transfer of Sensitive Information ................... 50 - 12.5 Attacks Based On File and Path Names ................ 51 - 13. Acknowledgments ........................................... 51 - 14. References ................................................ 52 - 15. Authors' Addresses ........................................ 54 - Appendix A. Internet Media Type message/http ................ 55 - Appendix B. Tolerant Applications ........................... 55 - Appendix C. Relationship to MIME ............................ 56 - C.1 Conversion to Canonical Form ......................... 56 - C.2 Conversion of Date Formats ........................... 57 - C.3 Introduction of Content-Encoding ..................... 57 - C.4 No Content-Transfer-Encoding ......................... 57 - C.5 HTTP Header Fields in Multipart Body-Parts ........... 57 - Appendix D. Additional Features ............................. 57 - D.1 Additional Request Methods ........................... 58 - D.1.1 PUT ........................................... 58 - D.1.2 DELETE ........................................ 58 - D.1.3 LINK .......................................... 58 - D.1.4 UNLINK ........................................ 58 - D.2 Additional Header Field Definitions .................. 58 - D.2.1 Accept ........................................ 58 - D.2.2 Accept-Charset ................................ 
59 - D.2.3 Accept-Encoding ............................... 59 - D.2.4 Accept-Language ............................... 59 - D.2.5 Content-Language .............................. 59 - D.2.6 Link .......................................... 59 - D.2.7 MIME-Version .................................. 59 - D.2.8 Retry-After ................................... 60 - D.2.9 Title ......................................... 60 - D.2.10 URI ........................................... 60 - - - - - - - -Berners-Lee, et al Informational [Page 3] - -RFC 1945 HTTP/1.0 May 1996 - - -1. Introduction - -1.1 Purpose - - The Hypertext Transfer Protocol (HTTP) is an application-level - protocol with the lightness and speed necessary for distributed, - collaborative, hypermedia information systems. HTTP has been in use - by the World-Wide Web global information initiative since 1990. This - specification reflects common usage of the protocol referred too as - "HTTP/1.0". This specification describes the features that seem to be - consistently implemented in most HTTP/1.0 clients and servers. The - specification is split into two sections. Those features of HTTP for - which implementations are usually consistent are described in the - main body of this document. Those features which have few or - inconsistent implementations are listed in Appendix D. - - Practical information systems require more functionality than simple - retrieval, including search, front-end update, and annotation. HTTP - allows an open-ended set of methods to be used to indicate the - purpose of a request. It builds on the discipline of reference - provided by the Uniform Resource Identifier (URI) [2], as a location - (URL) [4] or name (URN) [16], for indicating the resource on which a - method is to be applied. Messages are passed in a format similar to - that used by Internet Mail [7] and the Multipurpose Internet Mail - Extensions (MIME) [5]. 
- - HTTP is also used as a generic protocol for communication between - user agents and proxies/gateways to other Internet protocols, such as - SMTP [12], NNTP [11], FTP [14], Gopher [1], and WAIS [8], allowing - basic hypermedia access to resources available from diverse - applications and simplifying the implementation of user agents. - -1.2 Terminology - - This specification uses a number of terms to refer to the roles - played by participants in, and objects of, the HTTP communication. - - connection - - A transport layer virtual circuit established between two - application programs for the purpose of communication. - - message - - The basic unit of HTTP communication, consisting of a structured - sequence of octets matching the syntax defined in Section 4 and - transmitted via the connection. - - - - -Berners-Lee, et al Informational [Page 4] - -RFC 1945 HTTP/1.0 May 1996 - - - request - - An HTTP request message (as defined in Section 5). - - response - - An HTTP response message (as defined in Section 6). - - resource - - A network data object or service which can be identified by a - URI (Section 3.2). - - entity - - A particular representation or rendition of a data resource, or - reply from a service resource, that may be enclosed within a - request or response message. An entity consists of - metainformation in the form of entity headers and content in the - form of an entity body. - - client - - An application program that establishes connections for the - purpose of sending requests. - - user agent - - The client which initiates a request. These are often browsers, - editors, spiders (web-traversing robots), or other end user - tools. - - server - - An application program that accepts connections in order to - service requests by sending back responses. - - origin server - - The server on which a given resource resides or is to be created. 
- - proxy - - An intermediary program which acts as both a server and a client - for the purpose of making requests on behalf of other clients. - Requests are serviced internally or by passing them, with - possible translation, on to other servers. A proxy must - interpret and, if necessary, rewrite a request message before - - - -Berners-Lee, et al Informational [Page 5] - -RFC 1945 HTTP/1.0 May 1996 - - - forwarding it. Proxies are often used as client-side portals - through network firewalls and as helper applications for - handling requests via protocols not implemented by the user - agent. - - gateway - - A server which acts as an intermediary for some other server. - Unlike a proxy, a gateway receives requests as if it were the - origin server for the requested resource; the requesting client - may not be aware that it is communicating with a gateway. - Gateways are often used as server-side portals through network - firewalls and as protocol translators for access to resources - stored on non-HTTP systems. - - tunnel - - A tunnel is an intermediary program which is acting as a blind - relay between two connections. Once active, a tunnel is not - considered a party to the HTTP communication, though the tunnel - may have been initiated by an HTTP request. The tunnel ceases to - exist when both ends of the relayed connections are closed. - Tunnels are used when a portal is necessary and the intermediary - cannot, or should not, interpret the relayed communication. - - cache - - A program's local store of response messages and the subsystem - that controls its message storage, retrieval, and deletion. A - cache stores cachable responses in order to reduce the response - time and network bandwidth consumption on future, equivalent - requests. Any client or server may include a cache, though a - cache cannot be used by a server while it is acting as a tunnel. 
- - Any given program may be capable of being both a client and a server; - our use of these terms refers only to the role being performed by the - program for a particular connection, rather than to the program's - capabilities in general. Likewise, any server may act as an origin - server, proxy, gateway, or tunnel, switching behavior based on the - nature of each request. - -1.3 Overall Operation - - The HTTP protocol is based on a request/response paradigm. A client - establishes a connection with a server and sends a request to the - server in the form of a request method, URI, and protocol version, - followed by a MIME-like message containing request modifiers, client - information, and possible body content. The server responds with a - - - -Berners-Lee, et al Informational [Page 6] - -RFC 1945 HTTP/1.0 May 1996 - - - status line, including the message's protocol version and a success - or error code, followed by a MIME-like message containing server - information, entity metainformation, and possible body content. - - Most HTTP communication is initiated by a user agent and consists of - a request to be applied to a resource on some origin server. In the - simplest case, this may be accomplished via a single connection (v) - between the user agent (UA) and the origin server (O). - - request chain ------------------------> - UA -------------------v------------------- O - <----------------------- response chain - - A more complicated situation occurs when one or more intermediaries - are present in the request/response chain. There are three common - forms of intermediary: proxy, gateway, and tunnel. A proxy is a - forwarding agent, receiving requests for a URI in its absolute form, - rewriting all or parts of the message, and forwarding the reformatted - request toward the server identified by the URI. 
A gateway is a - receiving agent, acting as a layer above some other server(s) and, if - necessary, translating the requests to the underlying server's - protocol. A tunnel acts as a relay point between two connections - without changing the messages; tunnels are used when the - communication needs to pass through an intermediary (such as a - firewall) even when the intermediary cannot understand the contents - of the messages. - - request chain --------------------------------------> - UA -----v----- A -----v----- B -----v----- C -----v----- O - <------------------------------------- response chain - - The figure above shows three intermediaries (A, B, and C) between the - user agent and origin server. A request or response message that - travels the whole chain must pass through four separate connections. - This distinction is important because some HTTP communication options - may apply only to the connection with the nearest, non-tunnel - neighbor, only to the end-points of the chain, or to all connections - along the chain. Although the diagram is linear, each participant may - be engaged in multiple, simultaneous communications. For example, B - may be receiving requests from many clients other than A, and/or - forwarding requests to servers other than C, at the same time that it - is handling A's request. - - Any party to the communication which is not acting as a tunnel may - employ an internal cache for handling requests. The effect of a cache - is that the request/response chain is shortened if one of the - participants along the chain has a cached response applicable to that - request. The following illustrates the resulting chain if B has a - - - -Berners-Lee, et al Informational [Page 7] - -RFC 1945 HTTP/1.0 May 1996 - - - cached copy of an earlier response from O (via C) for a request which - has not been cached by UA or A. 
- - request chain ----------> - UA -----v----- A -----v----- B - - - - - - C - - - - - - O - <--------- response chain - - Not all responses are cachable, and some requests may contain - modifiers which place special requirements on cache behavior. Some - HTTP/1.0 applications use heuristics to describe what is or is not a - "cachable" response, but these rules are not standardized. - - On the Internet, HTTP communication generally takes place over TCP/IP - connections. The default port is TCP 80 [15], but other ports can be - used. This does not preclude HTTP from being implemented on top of - any other protocol on the Internet, or on other networks. HTTP only - presumes a reliable transport; any protocol that provides such - guarantees can be used, and the mapping of the HTTP/1.0 request and - response structures onto the transport data units of the protocol in - question is outside the scope of this specification. - - Except for experimental applications, current practice requires that - the connection be established by the client prior to each request and - closed by the server after sending the response. Both clients and - servers should be aware that either party may close the connection - prematurely, due to user action, automated time-out, or program - failure, and should handle such closing in a predictable fashion. In - any case, the closing of the connection by either or both parties - always terminates the current request, regardless of its status. - -1.4 HTTP and MIME - - HTTP/1.0 uses many of the constructs defined for MIME, as defined in - RFC 1521 [5]. Appendix C describes the ways in which the context of - HTTP allows for different use of Internet Media Types than is - typically found in Internet mail, and gives the rationale for those - differences. - -2. 
Notational Conventions and Generic Grammar - -2.1 Augmented BNF - - All of the mechanisms specified in this document are described in - both prose and an augmented Backus-Naur Form (BNF) similar to that - used by RFC 822 [7]. Implementors will need to be familiar with the - notation in order to understand this specification. The augmented BNF - includes the following constructs: - - - - -Berners-Lee, et al Informational [Page 8] - -RFC 1945 HTTP/1.0 May 1996 - - - name = definition - - The name of a rule is simply the name itself (without any - enclosing "<" and ">") and is separated from its definition by - the equal character "=". Whitespace is only significant in that - indentation of continuation lines is used to indicate a rule - definition that spans more than one line. Certain basic rules - are in uppercase, such as SP, LWS, HT, CRLF, DIGIT, ALPHA, etc. - Angle brackets are used within definitions whenever their - presence will facilitate discerning the use of rule names. - - "literal" - - Quotation marks surround literal text. Unless stated otherwise, - the text is case-insensitive. - - rule1 | rule2 - - Elements separated by a bar ("I") are alternatives, - e.g., "yes | no" will accept yes or no. - - (rule1 rule2) - - Elements enclosed in parentheses are treated as a single - element. Thus, "(elem (foo | bar) elem)" allows the token - sequences "elem foo elem" and "elem bar elem". - - *rule - - The character "*" preceding an element indicates repetition. The - full form is "*element" indicating at least and at - most occurrences of element. Default values are 0 and - infinity so that "*(element)" allows any number, including zero; - "1*element" requires at least one; and "1*2element" allows one - or two. - - [rule] - - Square brackets enclose optional elements; "[foo bar]" is - equivalent to "*1(foo bar)". - - N rule - - Specific repetition: "(element)" is equivalent to - "*(element)"; that is, exactly occurrences of - (element). 
Thus 2DIGIT is a 2-digit number, and 3ALPHA is a - string of three alphabetic characters. - - - - -Berners-Lee, et al Informational [Page 9] - -RFC 1945 HTTP/1.0 May 1996 - - - #rule - - A construct "#" is defined, similar to "*", for defining lists - of elements. The full form is "#element" indicating at - least and at most elements, each separated by one or - more commas (",") and optional linear whitespace (LWS). This - makes the usual form of lists very easy; a rule such as - "( *LWS element *( *LWS "," *LWS element ))" can be shown as - "1#element". Wherever this construct is used, null elements are - allowed, but do not contribute to the count of elements present. - That is, "(element), , (element)" is permitted, but counts as - only two elements. Therefore, where at least one element is - required, at least one non-null element must be present. Default - values are 0 and infinity so that "#(element)" allows any - number, including zero; "1#element" requires at least one; and - "1#2element" allows one or two. - - ; comment - - A semi-colon, set off some distance to the right of rule text, - starts a comment that continues to the end of line. This is a - simple way of including useful notes in parallel with the - specifications. - - implied *LWS - - The grammar described by this specification is word-based. - Except where noted otherwise, linear whitespace (LWS) can be - included between any two adjacent words (token or - quoted-string), and between adjacent tokens and delimiters - (tspecials), without changing the interpretation of a field. At - least one delimiter (tspecials) must exist between any two - tokens, since they would otherwise be interpreted as a single - token. However, applications should attempt to follow "common - form" when generating HTTP constructs, since there exist some - implementations that fail to accept anything beyond the common - forms. 
- -2.2 Basic Rules - - The following rules are used throughout this specification to - describe basic parsing constructs. The US-ASCII coded character set - is defined by [17]. - - OCTET = - CHAR = - UPALPHA = - LOALPHA = - - - -Berners-Lee, et al Informational [Page 10] - -RFC 1945 HTTP/1.0 May 1996 - - - ALPHA = UPALPHA | LOALPHA - DIGIT = - CTL = - CR = - LF = - SP = - HT = - <"> = - - HTTP/1.0 defines the octet sequence CR LF as the end-of-line marker - for all protocol elements except the Entity-Body (see Appendix B for - tolerant applications). The end-of-line marker within an Entity-Body - is defined by its associated media type, as described in Section 3.6. - - CRLF = CR LF - - HTTP/1.0 headers may be folded onto multiple lines if each - continuation line begins with a space or horizontal tab. All linear - whitespace, including folding, has the same semantics as SP. - - LWS = [CRLF] 1*( SP | HT ) - - However, folding of header lines is not expected by some - applications, and should not be generated by HTTP/1.0 applications. - - The TEXT rule is only used for descriptive field contents and values - that are not intended to be interpreted by the message parser. Words - of *TEXT may contain octets from character sets other than US-ASCII. - - TEXT = - - Recipients of header field TEXT containing octets outside the US- - ASCII character set may assume that they represent ISO-8859-1 - characters. - - Hexadecimal numeric characters are used in several protocol elements. - - HEX = "A" | "B" | "C" | "D" | "E" | "F" - | "a" | "b" | "c" | "d" | "e" | "f" | DIGIT - - Many HTTP/1.0 header field values consist of words separated by LWS - or special characters. These special characters must be in a quoted - string to be used within a parameter value. 
- - word = token | quoted-string - - - - -Berners-Lee, et al Informational [Page 11] - -RFC 1945 HTTP/1.0 May 1996 - - - token = 1* - - tspecials = "(" | ")" | "<" | ">" | "@" - | "," | ";" | ":" | "\" | <"> - | "/" | "[" | "]" | "?" | "=" - | "{" | "}" | SP | HT - - Comments may be included in some HTTP header fields by surrounding - the comment text with parentheses. Comments are only allowed in - fields containing "comment" as part of their field value definition. - In all other fields, parentheses are considered part of the field - value. - - comment = "(" *( ctext | comment ) ")" - ctext = - - A string of text is parsed as a single word if it is quoted using - double-quote marks. - - quoted-string = ( <"> *(qdtext) <"> ) - - qdtext = and CTLs, - but including LWS> - - Single-character quoting using the backslash ("\") character is not - permitted in HTTP/1.0. - -3. Protocol Parameters - -3.1 HTTP Version - - HTTP uses a "." numbering scheme to indicate versions - of the protocol. The protocol versioning policy is intended to allow - the sender to indicate the format of a message and its capacity for - understanding further HTTP communication, rather than the features - obtained via that communication. No change is made to the version - number for the addition of message components which do not affect - communication behavior or which only add to extensible field values. - The number is incremented when the changes made to the - protocol add features which do not change the general message parsing - algorithm, but which may add to the message semantics and imply - additional capabilities of the sender. The number is - incremented when the format of a message within the protocol is - changed. - - The version of an HTTP message is indicated by an HTTP-Version field - in the first line of the message. 
If the protocol version is not - specified, the recipient must assume that the message is in the - - - -Berners-Lee, et al Informational [Page 12] - -RFC 1945 HTTP/1.0 May 1996 - - - simple HTTP/0.9 format. - - HTTP-Version = "HTTP" "/" 1*DIGIT "." 1*DIGIT - - Note that the major and minor numbers should be treated as separate - integers and that each may be incremented higher than a single digit. - Thus, HTTP/2.4 is a lower version than HTTP/2.13, which in turn is - lower than HTTP/12.3. Leading zeros should be ignored by recipients - and never generated by senders. - - This document defines both the 0.9 and 1.0 versions of the HTTP - protocol. Applications sending Full-Request or Full-Response - messages, as defined by this specification, must include an HTTP- - Version of "HTTP/1.0". - - HTTP/1.0 servers must: - - o recognize the format of the Request-Line for HTTP/0.9 and - HTTP/1.0 requests; - - o understand any valid request in the format of HTTP/0.9 or - HTTP/1.0; - - o respond appropriately with a message in the same protocol - version used by the client. - - HTTP/1.0 clients must: - - o recognize the format of the Status-Line for HTTP/1.0 responses; - - o understand any valid response in the format of HTTP/0.9 or - HTTP/1.0. - - Proxy and gateway applications must be careful in forwarding requests - that are received in a format different than that of the - application's native HTTP version. Since the protocol version - indicates the protocol capability of the sender, a proxy/gateway must - never send a message with a version indicator which is greater than - its native version; if a higher version request is received, the - proxy/gateway must either downgrade the request version or respond - with an error. Requests with a version lower than that of the - application's native format may be upgraded before being forwarded; - the proxy/gateway's response to that request must follow the server - requirements listed above. 
- - - - - - - -Berners-Lee, et al Informational [Page 13] - -RFC 1945 HTTP/1.0 May 1996 - - -3.2 Uniform Resource Identifiers - - URIs have been known by many names: WWW addresses, Universal Document - Identifiers, Universal Resource Identifiers [2], and finally the - combination of Uniform Resource Locators (URL) [4] and Names (URN) - [16]. As far as HTTP is concerned, Uniform Resource Identifiers are - simply formatted strings which identify--via name, location, or any - other characteristic--a network resource. - -3.2.1 General Syntax - - URIs in HTTP can be represented in absolute form or relative to some - known base URI [9], depending upon the context of their use. The two - forms are differentiated by the fact that absolute URIs always begin - with a scheme name followed by a colon. - - URI = ( absoluteURI | relativeURI ) [ "#" fragment ] - - absoluteURI = scheme ":" *( uchar | reserved ) - - relativeURI = net_path | abs_path | rel_path - - net_path = "//" net_loc [ abs_path ] - abs_path = "/" rel_path - rel_path = [ path ] [ ";" params ] [ "?" query ] - - path = fsegment *( "/" segment ) - fsegment = 1*pchar - segment = *pchar - - params = param *( ";" param ) - param = *( pchar | "/" ) - - scheme = 1*( ALPHA | DIGIT | "+" | "-" | "." ) - net_loc = *( pchar | ";" | "?" ) - query = *( uchar | reserved ) - fragment = *( uchar | reserved ) - - pchar = uchar | ":" | "@" | "&" | "=" | "+" - uchar = unreserved | escape - unreserved = ALPHA | DIGIT | safe | extra | national - - escape = "%" HEX HEX - reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" - extra = "!" | "*" | "'" | "(" | ")" | "," - safe = "$" | "-" | "_" | "." - unsafe = CTL | SP | <"> | "#" | "%" | "<" | ">" - national = - - For definitive information on URL syntax and semantics, see RFC 1738 - [4] and RFC 1808 [9]. 
The BNF above includes national characters not - allowed in valid URLs as specified by RFC 1738, since HTTP servers - are not restricted in the set of unreserved characters allowed to - represent the rel_path part of addresses, and HTTP proxies may - receive requests for URIs not defined by RFC 1738. - -3.2.2 http URL - - The "http" scheme is used to locate network resources via the HTTP - protocol. This section defines the scheme-specific syntax and - semantics for http URLs. - - http_URL = "http:" "//" host [ ":" port ] [ abs_path ] - - host = - - port = *DIGIT - - If the port is empty or not given, port 80 is assumed. The semantics - are that the identified resource is located at the server listening - for TCP connections on that port of that host, and the Request-URI - for the resource is abs_path. If the abs_path is not present in the - URL, it must be given as "/" when used as a Request-URI (Section - 5.1.2). - - Note: Although the HTTP protocol is independent of the transport - layer protocol, the http URL only identifies resources by their - TCP location, and thus non-TCP resources must be identified by - some other URI scheme. - - The canonical form for "http" URLs is obtained by converting any - UPALPHA characters in host to their LOALPHA equivalent (hostnames are - case-insensitive), eliding the [ ":" port ] if the port is 80, and - replacing an empty abs_path with "/". - -3.3 Date/Time Formats - - HTTP/1.0 applications have historically allowed three different - formats for the representation of date/time stamps: - - Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 - Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 - Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - - - -Berners-Lee, et al Informational [Page 15] - -RFC 1945 HTTP/1.0 May 1996 - - - The first format is preferred as an Internet standard and represents - a fixed-length subset of that defined by RFC 1123 [6] (an update to - RFC 822 [7]). 
The second format is in common use, but is based on the - obsolete RFC 850 [10] date format and lacks a four-digit year. - HTTP/1.0 clients and servers that parse the date value should accept - all three formats, though they must never generate the third - (asctime) format. - - Note: Recipients of date values are encouraged to be robust in - accepting date values that may have been generated by non-HTTP - applications, as is sometimes the case when retrieving or posting - messages via proxies/gateways to SMTP or NNTP. - - All HTTP/1.0 date/time stamps must be represented in Universal Time - (UT), also known as Greenwich Mean Time (GMT), without exception. - This is indicated in the first two formats by the inclusion of "GMT" - as the three-letter abbreviation for time zone, and should be assumed - when reading the asctime format. - - HTTP-date = rfc1123-date | rfc850-date | asctime-date - - rfc1123-date = wkday "," SP date1 SP time SP "GMT" - rfc850-date = weekday "," SP date2 SP time SP "GMT" - asctime-date = wkday SP date3 SP time SP 4DIGIT - - date1 = 2DIGIT SP month SP 4DIGIT - ; day month year (e.g., 02 Jun 1982) - date2 = 2DIGIT "-" month "-" 2DIGIT - ; day-month-year (e.g., 02-Jun-82) - date3 = month SP ( 2DIGIT | ( SP 1DIGIT )) - ; month day (e.g., Jun 2) - - time = 2DIGIT ":" 2DIGIT ":" 2DIGIT - ; 00:00:00 - 23:59:59 - - wkday = "Mon" | "Tue" | "Wed" - | "Thu" | "Fri" | "Sat" | "Sun" - - weekday = "Monday" | "Tuesday" | "Wednesday" - | "Thursday" | "Friday" | "Saturday" | "Sunday" - - month = "Jan" | "Feb" | "Mar" | "Apr" - | "May" | "Jun" | "Jul" | "Aug" - | "Sep" | "Oct" | "Nov" | "Dec" - - Note: HTTP requirements for the date/time stamp format apply - only to their usage within the protocol stream. Clients and - servers are not required to use these formats for user - - - -Berners-Lee, et al Informational [Page 16] - -RFC 1945 HTTP/1.0 May 1996 - - - presentation, request logging, etc. 
- -3.4 Character Sets - - HTTP uses the same definition of the term "character set" as that - described for MIME: - - The term "character set" is used in this document to refer to a - method used with one or more tables to convert a sequence of - octets into a sequence of characters. Note that unconditional - conversion in the other direction is not required, in that not all - characters may be available in a given character set and a - character set may provide more than one sequence of octets to - represent a particular character. This definition is intended to - allow various kinds of character encodings, from simple single- - table mappings such as US-ASCII to complex table switching methods - such as those that use ISO 2022's techniques. However, the - definition associated with a MIME character set name must fully - specify the mapping to be performed from octets to characters. In - particular, use of external profiling information to determine the - exact mapping is not permitted. - - Note: This use of the term "character set" is more commonly - referred to as a "character encoding." However, since HTTP and - MIME share the same registry, it is important that the terminology - also be shared. - - HTTP character sets are identified by case-insensitive tokens. The - complete set of tokens are defined by the IANA Character Set registry - [15]. However, because that registry does not define a single, - consistent token for each character set, we define here the preferred - names for those character sets most likely to be used with HTTP - entities. These character sets include those registered by RFC 1521 - [5] -- the US-ASCII [17] and ISO-8859 [18] character sets -- and - other names specifically recommended for use within MIME charset - parameters. 
- - charset = "US-ASCII" - | "ISO-8859-1" | "ISO-8859-2" | "ISO-8859-3" - | "ISO-8859-4" | "ISO-8859-5" | "ISO-8859-6" - | "ISO-8859-7" | "ISO-8859-8" | "ISO-8859-9" - | "ISO-2022-JP" | "ISO-2022-JP-2" | "ISO-2022-KR" - | "UNICODE-1-1" | "UNICODE-1-1-UTF-7" | "UNICODE-1-1-UTF-8" - | token - - Although HTTP allows an arbitrary token to be used as a charset - value, any token that has a predefined value within the IANA - Character Set registry [15] must represent the character set defined - - - -Berners-Lee, et al Informational [Page 17] - -RFC 1945 HTTP/1.0 May 1996 - - - by that registry. Applications should limit their use of character - sets to those defined by the IANA registry. - - The character set of an entity body should be labelled as the lowest - common denominator of the character codes used within that body, with - the exception that no label is preferred over the labels US-ASCII or - ISO-8859-1. - -3.5 Content Codings - - Content coding values are used to indicate an encoding transformation - that has been applied to a resource. Content codings are primarily - used to allow a document to be compressed or encrypted without losing - the identity of its underlying media type. Typically, the resource is - stored in this encoding and only decoded before rendering or - analogous usage. - - content-coding = "x-gzip" | "x-compress" | token - - Note: For future compatibility, HTTP/1.0 applications should - consider "gzip" and "compress" to be equivalent to "x-gzip" - and "x-compress", respectively. - - All content-coding values are case-insensitive. HTTP/1.0 uses - content-coding values in the Content-Encoding (Section 10.3) header - field. Although the value describes the content-coding, what is more - important is that it indicates what decoding mechanism will be - required to remove the encoding. Note that a single program may be - capable of decoding multiple content-coding formats. 
Two values are - defined by this specification: - - x-gzip - An encoding format produced by the file compression program - "gzip" (GNU zip) developed by Jean-loup Gailly. This format is - typically a Lempel-Ziv coding (LZ77) with a 32 bit CRC. - - x-compress - The encoding format produced by the file compression program - "compress". This format is an adaptive Lempel-Ziv-Welch coding - (LZW). - - Note: Use of program names for the identification of - encoding formats is not desirable and should be discouraged - for future encodings. Their use here is representative of - historical practice, not good design. - - - - - - -Berners-Lee, et al Informational [Page 18] - -RFC 1945 HTTP/1.0 May 1996 - - -3.6 Media Types - - HTTP uses Internet Media Types [13] in the Content-Type header field - (Section 10.5) in order to provide open and extensible data typing. - - media-type = type "/" subtype *( ";" parameter ) - type = token - subtype = token - - Parameters may follow the type/subtype in the form of attribute/value - pairs. - - parameter = attribute "=" value - attribute = token - value = token | quoted-string - - The type, subtype, and parameter attribute names are case- - insensitive. Parameter values may or may not be case-sensitive, - depending on the semantics of the parameter name. LWS must not be - generated between the type and subtype, nor between an attribute and - its value. Upon receipt of a media type with an unrecognized - parameter, a user agent should treat the media type as if the - unrecognized parameter and its value were not present. - - Some older HTTP applications do not recognize media type parameters. - HTTP/1.0 applications should only use media type parameters when they - are necessary to define the content of a message. - - Media-type values are registered with the Internet Assigned Number - Authority (IANA [15]). The media type registration process is - outlined in RFC 1590 [13]. Use of non-registered media types is - discouraged. 
- -3.6.1 Canonicalization and Text Defaults - - Internet media types are registered with a canonical form. In - general, an Entity-Body transferred via HTTP must be represented in - the appropriate canonical form prior to its transmission. If the body - has been encoded with a Content-Encoding, the underlying data should - be in canonical form prior to being encoded. - - Media subtypes of the "text" type use CRLF as the text line break - when in canonical form. However, HTTP allows the transport of text - media with plain CR or LF alone representing a line break when used - consistently within the Entity-Body. HTTP applications must accept - CRLF, bare CR, and bare LF as being representative of a line break in - text media received via HTTP. - - - - -Berners-Lee, et al Informational [Page 19] - -RFC 1945 HTTP/1.0 May 1996 - - - In addition, if the text media is represented in a character set that - does not use octets 13 and 10 for CR and LF respectively, as is the - case for some multi-byte character sets, HTTP allows the use of - whatever octet sequences are defined by that character set to - represent the equivalent of CR and LF for line breaks. This - flexibility regarding line breaks applies only to text media in the - Entity-Body; a bare CR or LF should not be substituted for CRLF - within any of the HTTP control structures (such as header fields and - multipart boundaries). - - The "charset" parameter is used with some media types to define the - character set (Section 3.4) of the data. When no explicit charset - parameter is provided by the sender, media subtypes of the "text" - type are defined to have a default charset value of "ISO-8859-1" when - received via HTTP. Data in character sets other than "ISO-8859-1" or - its subsets must be labelled with an appropriate charset value in - order to be consistently interpreted by the recipient. - - Note: Many current HTTP servers provide data using charsets other - than "ISO-8859-1" without proper labelling. 
This situation reduces - interoperability and is not recommended. To compensate for this, - some HTTP user agents provide a configuration option to allow the - user to change the default interpretation of the media type - character set when no charset parameter is given. - -3.6.2 Multipart Types - - MIME provides for a number of "multipart" types -- encapsulations of - several entities within a single message's Entity-Body. The multipart - types registered by IANA [15] do not have any special meaning for - HTTP/1.0, though user agents may need to understand each type in - order to correctly interpret the purpose of each body-part. An HTTP - user agent should follow the same or similar behavior as a MIME user - agent does upon receipt of a multipart type. HTTP servers should not - assume that all HTTP clients are prepared to handle multipart types. - - All multipart types share a common syntax and must include a boundary - parameter as part of the media type value. The message body is itself - a protocol element and must therefore use only CRLF to represent line - breaks between body-parts. Multipart body-parts may contain HTTP - header fields which are significant to the meaning of that part. - -3.7 Product Tokens - - Product tokens are used to allow communicating applications to - identify themselves via a simple product token, with an optional - slash and version designator. Most fields using product tokens also - allow subproducts which form a significant part of the application to - - - -Berners-Lee, et al Informational [Page 20] - -RFC 1945 HTTP/1.0 May 1996 - - - be listed, separated by whitespace. By convention, the products are - listed in order of their significance for identifying the - application. 
- - product = token ["/" product-version] - product-version = token - - Examples: - - User-Agent: CERN-LineMode/2.15 libwww/2.17b3 - - Server: Apache/0.8.4 - - Product tokens should be short and to the point -- use of them for - advertizing or other non-essential information is explicitly - forbidden. Although any token character may appear in a product- - version, this token should only be used for a version identifier - (i.e., successive versions of the same product should only differ in - the product-version portion of the product value). - -4. HTTP Message - -4.1 Message Types - - HTTP messages consist of requests from client to server and responses - from server to client. - - HTTP-message = Simple-Request ; HTTP/0.9 messages - | Simple-Response - | Full-Request ; HTTP/1.0 messages - | Full-Response - - Full-Request and Full-Response use the generic message format of RFC - 822 [7] for transferring entities. Both messages may include optional - header fields (also known as "headers") and an entity body. The - entity body is separated from the headers by a null line (i.e., a - line with nothing preceding the CRLF). - - Full-Request = Request-Line ; Section 5.1 - *( General-Header ; Section 4.3 - | Request-Header ; Section 5.2 - | Entity-Header ) ; Section 7.1 - CRLF - [ Entity-Body ] ; Section 7.2 - - Full-Response = Status-Line ; Section 6.1 - *( General-Header ; Section 4.3 - | Response-Header ; Section 6.2 - - - -Berners-Lee, et al Informational [Page 21] - -RFC 1945 HTTP/1.0 May 1996 - - - | Entity-Header ) ; Section 7.1 - CRLF - [ Entity-Body ] ; Section 7.2 - - Simple-Request and Simple-Response do not allow the use of any header - information and are limited to a single request method (GET). - - Simple-Request = "GET" SP Request-URI CRLF - - Simple-Response = [ Entity-Body ] - - Use of the Simple-Request format is discouraged because it prevents - the server from identifying the media type of the returned entity. 
- -4.2 Message Headers - - HTTP header fields, which include General-Header (Section 4.3), - Request-Header (Section 5.2), Response-Header (Section 6.2), and - Entity-Header (Section 7.1) fields, follow the same generic format as - that given in Section 3.1 of RFC 822 [7]. Each header field consists - of a name followed immediately by a colon (":"), a single space (SP) - character, and the field value. Field names are case-insensitive. - Header fields can be extended over multiple lines by preceding each - extra line with at least one SP or HT, though this is not - recommended. - - HTTP-header = field-name ":" [ field-value ] CRLF - - field-name = token - field-value = *( field-content | LWS ) - - field-content = - - The order in which header fields are received is not significant. - However, it is "good practice" to send General-Header fields first, - followed by Request-Header or Response-Header fields prior to the - Entity-Header fields. - - Multiple HTTP-header fields with the same field-name may be present - in a message if and only if the entire field-value for that header - field is defined as a comma-separated list [i.e., #(values)]. It must - be possible to combine the multiple header fields into one "field- - name: field-value" pair, without changing the semantics of the - message, by appending each subsequent field-value to the first, each - separated by a comma. - - - - -Berners-Lee, et al Informational [Page 22] - -RFC 1945 HTTP/1.0 May 1996 - - -4.3 General Header Fields - - There are a few header fields which have general applicability for - both request and response messages, but which do not apply to the - entity being transferred. These headers apply only to the message - being transmitted. - - General-Header = Date ; Section 10.6 - | Pragma ; Section 10.12 - - General header field names can be extended reliably only in - combination with a change in the protocol version. 
However, new or - experimental header fields may be given the semantics of general - header fields if all parties in the communication recognize them to - be general header fields. Unrecognized header fields are treated as - Entity-Header fields. - -5. Request - - A request message from a client to a server includes, within the - first line of that message, the method to be applied to the resource, - the identifier of the resource, and the protocol version in use. For - backwards compatibility with the more limited HTTP/0.9 protocol, - there are two valid formats for an HTTP request: - - Request = Simple-Request | Full-Request - - Simple-Request = "GET" SP Request-URI CRLF - - Full-Request = Request-Line ; Section 5.1 - *( General-Header ; Section 4.3 - | Request-Header ; Section 5.2 - | Entity-Header ) ; Section 7.1 - CRLF - [ Entity-Body ] ; Section 7.2 - - If an HTTP/1.0 server receives a Simple-Request, it must respond with - an HTTP/0.9 Simple-Response. An HTTP/1.0 client capable of receiving - a Full-Response should never generate a Simple-Request. - -5.1 Request-Line - - The Request-Line begins with a method token, followed by the - Request-URI and the protocol version, and ending with CRLF. The - elements are separated by SP characters. No CR or LF are allowed - except in the final CRLF sequence. - - Request-Line = Method SP Request-URI SP HTTP-Version CRLF - - - -Berners-Lee, et al Informational [Page 23] - -RFC 1945 HTTP/1.0 May 1996 - - - Note that the difference between a Simple-Request and the Request- - Line of a Full-Request is the presence of the HTTP-Version field and - the availability of methods other than GET. - -5.1.1 Method - - The Method token indicates the method to be performed on the resource - identified by the Request-URI. The method is case-sensitive. 
- - Method = "GET" ; Section 8.1 - | "HEAD" ; Section 8.2 - | "POST" ; Section 8.3 - | extension-method - - extension-method = token - - The list of methods acceptable by a specific resource can change - dynamically; the client is notified through the return code of the - response if a method is not allowed on a resource. Servers should - return the status code 501 (not implemented) if the method is - unrecognized or not implemented. - - The methods commonly used by HTTP/1.0 applications are fully defined - in Section 8. - -5.1.2 Request-URI - - The Request-URI is a Uniform Resource Identifier (Section 3.2) and - identifies the resource upon which to apply the request. - - Request-URI = absoluteURI | abs_path - - The two options for Request-URI are dependent on the nature of the - request. - - The absoluteURI form is only allowed when the request is being made - to a proxy. The proxy is requested to forward the request and return - the response. If the request is GET or HEAD and a prior response is - cached, the proxy may use the cached message if it passes any - restrictions in the Expires header field. Note that the proxy may - forward the request on to another proxy or directly to the server - specified by the absoluteURI. In order to avoid request loops, a - proxy must be able to recognize all of its server names, including - any aliases, local variations, and the numeric IP address. An example - Request-Line would be: - - GET http://www.w3.org/pub/WWW/TheProject.html HTTP/1.0 - - - - -Berners-Lee, et al Informational [Page 24] - -RFC 1945 HTTP/1.0 May 1996 - - - The most common form of Request-URI is that used to identify a - resource on an origin server or gateway. In this case, only the - absolute path of the URI is transmitted (see Section 3.2.1, - abs_path). 
For example, a client wishing to retrieve the resource - above directly from the origin server would create a TCP connection - to port 80 of the host "www.w3.org" and send the line: - - GET /pub/WWW/TheProject.html HTTP/1.0 - - followed by the remainder of the Full-Request. Note that the absolute - path cannot be empty; if none is present in the original URI, it must - be given as "/" (the server root). - - The Request-URI is transmitted as an encoded string, where some - characters may be escaped using the "% HEX HEX" encoding defined by - RFC 1738 [4]. The origin server must decode the Request-URI in order - to properly interpret the request. - -5.2 Request Header Fields - - The request header fields allow the client to pass additional - information about the request, and about the client itself, to the - server. These fields act as request modifiers, with semantics - equivalent to the parameters on a programming language method - (procedure) invocation. - - Request-Header = Authorization ; Section 10.2 - | From ; Section 10.8 - | If-Modified-Since ; Section 10.9 - | Referer ; Section 10.13 - | User-Agent ; Section 10.15 - - Request-Header field names can be extended reliably only in - combination with a change in the protocol version. However, new or - experimental header fields may be given the semantics of request - header fields if all parties in the communication recognize them to - be request header fields. Unrecognized header fields are treated as - Entity-Header fields. - -6. Response - - After receiving and interpreting a request message, a server responds - in the form of an HTTP response message. 
- - Response = Simple-Response | Full-Response - - Simple-Response = [ Entity-Body ] - - - - -Berners-Lee, et al Informational [Page 25] - -RFC 1945 HTTP/1.0 May 1996 - - - Full-Response = Status-Line ; Section 6.1 - *( General-Header ; Section 4.3 - | Response-Header ; Section 6.2 - | Entity-Header ) ; Section 7.1 - CRLF - [ Entity-Body ] ; Section 7.2 - - A Simple-Response should only be sent in response to an HTTP/0.9 - Simple-Request or if the server only supports the more limited - HTTP/0.9 protocol. If a client sends an HTTP/1.0 Full-Request and - receives a response that does not begin with a Status-Line, it should - assume that the response is a Simple-Response and parse it - accordingly. Note that the Simple-Response consists only of the - entity body and is terminated by the server closing the connection. - -6.1 Status-Line - - The first line of a Full-Response message is the Status-Line, - consisting of the protocol version followed by a numeric status code - and its associated textual phrase, with each element separated by SP - characters. No CR or LF is allowed except in the final CRLF sequence. - - Status-Line = HTTP-Version SP Status-Code SP Reason-Phrase CRLF - - Since a status line always begins with the protocol version and - status code - - "HTTP/" 1*DIGIT "." 1*DIGIT SP 3DIGIT SP - - (e.g., "HTTP/1.0 200 "), the presence of that expression is - sufficient to differentiate a Full-Response from a Simple-Response. - Although the Simple-Response format may allow such an expression to - occur at the beginning of an entity body, and thus cause a - misinterpretation of the message if it was given in response to a - Full-Request, most HTTP/0.9 servers are limited to responses of type - "text/html" and therefore would never generate such a response. - -6.1.1 Status Code and Reason Phrase - - The Status-Code element is a 3-digit integer result code of the - attempt to understand and satisfy the request. 
The Reason-Phrase is - intended to give a short textual description of the Status-Code. The - Status-Code is intended for use by automata and the Reason-Phrase is - intended for the human user. The client is not required to examine or - display the Reason-Phrase. - - - - - - -Berners-Lee, et al Informational [Page 26] - -RFC 1945 HTTP/1.0 May 1996 - - - The first digit of the Status-Code defines the class of response. The - last two digits do not have any categorization role. There are 5 - values for the first digit: - - o 1xx: Informational - Not used, but reserved for future use - - o 2xx: Success - The action was successfully received, - understood, and accepted. - - o 3xx: Redirection - Further action must be taken in order to - complete the request - - o 4xx: Client Error - The request contains bad syntax or cannot - be fulfilled - - o 5xx: Server Error - The server failed to fulfill an apparently - valid request - - The individual values of the numeric status codes defined for - HTTP/1.0, and an example set of corresponding Reason-Phrase's, are - presented below. The reason phrases listed here are only recommended - -- they may be replaced by local equivalents without affecting the - protocol. These codes are fully defined in Section 9. - - Status-Code = "200" ; OK - | "201" ; Created - | "202" ; Accepted - | "204" ; No Content - | "301" ; Moved Permanently - | "302" ; Moved Temporarily - | "304" ; Not Modified - | "400" ; Bad Request - | "401" ; Unauthorized - | "403" ; Forbidden - | "404" ; Not Found - | "500" ; Internal Server Error - | "501" ; Not Implemented - | "502" ; Bad Gateway - | "503" ; Service Unavailable - | extension-code - - extension-code = 3DIGIT - - Reason-Phrase = * - - HTTP status codes are extensible, but the above codes are the only - ones generally recognized in current practice. 
HTTP applications are - not required to understand the meaning of all registered status - - - -Berners-Lee, et al Informational [Page 27] - -RFC 1945 HTTP/1.0 May 1996 - - - codes, though such understanding is obviously desirable. However, - applications must understand the class of any status code, as - indicated by the first digit, and treat any unrecognized response as - being equivalent to the x00 status code of that class, with the - exception that an unrecognized response must not be cached. For - example, if an unrecognized status code of 431 is received by the - client, it can safely assume that there was something wrong with its - request and treat the response as if it had received a 400 status - code. In such cases, user agents should present to the user the - entity returned with the response, since that entity is likely to - include human-readable information which will explain the unusual - status. - -6.2 Response Header Fields - - The response header fields allow the server to pass additional - information about the response which cannot be placed in the Status- - Line. These header fields give information about the server and about - further access to the resource identified by the Request-URI. - - Response-Header = Location ; Section 10.11 - | Server ; Section 10.14 - | WWW-Authenticate ; Section 10.16 - - Response-Header field names can be extended reliably only in - combination with a change in the protocol version. However, new or - experimental header fields may be given the semantics of response - header fields if all parties in the communication recognize them to - be response header fields. Unrecognized header fields are treated as - Entity-Header fields. - -7. Entity - - Full-Request and Full-Response messages may transfer an entity within - some requests and responses. An entity consists of Entity-Header - fields and (usually) an Entity-Body. 
In this section, both sender and - recipient refer to either the client or the server, depending on who - sends and who receives the entity. - - - - - - - - - - - - - -Berners-Lee, et al Informational [Page 28] - -RFC 1945 HTTP/1.0 May 1996 - - -7.1 Entity Header Fields - - Entity-Header fields define optional metainformation about the - Entity-Body or, if no body is present, about the resource identified - by the request. - - Entity-Header = Allow ; Section 10.1 - | Content-Encoding ; Section 10.3 - | Content-Length ; Section 10.4 - | Content-Type ; Section 10.5 - | Expires ; Section 10.7 - | Last-Modified ; Section 10.10 - | extension-header - - extension-header = HTTP-header - - The extension-header mechanism allows additional Entity-Header fields - to be defined without changing the protocol, but these fields cannot - be assumed to be recognizable by the recipient. Unrecognized header - fields should be ignored by the recipient and forwarded by proxies. - -7.2 Entity Body - - The entity body (if any) sent with an HTTP request or response is in - a format and encoding defined by the Entity-Header fields. - - Entity-Body = *OCTET - - An entity body is included with a request message only when the - request method calls for one. The presence of an entity body in a - request is signaled by the inclusion of a Content-Length header field - in the request message headers. HTTP/1.0 requests containing an - entity body must include a valid Content-Length header field. - - For response messages, whether or not an entity body is included with - a message is dependent on both the request method and the response - code. All responses to the HEAD request method must not include a - body, even though the presence of entity header fields may lead one - to believe they do. All 1xx (informational), 204 (no content), and - 304 (not modified) responses must not include a body. 
All other - responses must include an entity body or a Content-Length header - field defined with a value of zero (0). - -7.2.1 Type - - When an Entity-Body is included with a message, the data type of that - body is determined via the header fields Content-Type and Content- - Encoding. These define a two-layer, ordered encoding model: - - - -Berners-Lee, et al Informational [Page 29] - -RFC 1945 HTTP/1.0 May 1996 - - - entity-body := Content-Encoding( Content-Type( data ) ) - - A Content-Type specifies the media type of the underlying data. A - Content-Encoding may be used to indicate any additional content - coding applied to the type, usually for the purpose of data - compression, that is a property of the resource requested. The - default for the content encoding is none (i.e., the identity - function). - - Any HTTP/1.0 message containing an entity body should include a - Content-Type header field defining the media type of that body. If - and only if the media type is not given by a Content-Type header, as - is the case for Simple-Response messages, the recipient may attempt - to guess the media type via inspection of its content and/or the name - extension(s) of the URL used to identify the resource. If the media - type remains unknown, the recipient should treat it as type - "application/octet-stream". - -7.2.2 Length - - When an Entity-Body is included with a message, the length of that - body may be determined in one of two ways. If a Content-Length header - field is present, its value in bytes represents the length of the - Entity-Body. Otherwise, the body length is determined by the closing - of the connection by the server. - - Closing the connection cannot be used to indicate the end of a - request body, since it leaves no possibility for the server to send - back a response. Therefore, HTTP/1.0 requests containing an entity - body must include a valid Content-Length header field. 
If a request - contains an entity body and Content-Length is not specified, and the - server does not recognize or cannot calculate the length from other - fields, then the server should send a 400 (bad request) response. - - Note: Some older servers supply an invalid Content-Length when - sending a document that contains server-side includes dynamically - inserted into the data stream. It must be emphasized that this - will not be tolerated by future versions of HTTP. Unless the - client knows that it is receiving a response from a compliant - server, it should not depend on the Content-Length value being - correct. - -8. Method Definitions - - The set of common methods for HTTP/1.0 is defined below. Although - this set can be expanded, additional methods cannot be assumed to - share the same semantics for separately extended clients and servers. - - - - -Berners-Lee, et al Informational [Page 30] - -RFC 1945 HTTP/1.0 May 1996 - - -8.1 GET - - The GET method means retrieve whatever information (in the form of an - entity) is identified by the Request-URI. If the Request-URI refers - to a data-producing process, it is the produced data which shall be - returned as the entity in the response and not the source text of the - process, unless that text happens to be the output of the process. - - The semantics of the GET method changes to a "conditional GET" if the - request message includes an If-Modified-Since header field. A - conditional GET method requests that the identified resource be - transferred only if it has been modified since the date given by the - If-Modified-Since header, as described in Section 10.9. The - conditional GET method is intended to reduce network usage by - allowing cached entities to be refreshed without requiring multiple - requests or transferring unnecessary data. - -8.2 HEAD - - The HEAD method is identical to GET except that the server must not - return any Entity-Body in the response. 
The metainformation contained - in the HTTP headers in response to a HEAD request should be identical - to the information sent in response to a GET request. This method can - be used for obtaining metainformation about the resource identified - by the Request-URI without transferring the Entity-Body itself. This - method is often used for testing hypertext links for validity, - accessibility, and recent modification. - - There is no "conditional HEAD" request analogous to the conditional - GET. If an If-Modified-Since header field is included with a HEAD - request, it should be ignored. - -8.3 POST - - The POST method is used to request that the destination server accept - the entity enclosed in the request as a new subordinate of the - resource identified by the Request-URI in the Request-Line. POST is - designed to allow a uniform method to cover the following functions: - - o Annotation of existing resources; - - o Posting a message to a bulletin board, newsgroup, mailing list, - or similar group of articles; - - o Providing a block of data, such as the result of submitting a - form [3], to a data-handling process; - - o Extending a database through an append operation. - - - -Berners-Lee, et al Informational [Page 31] - -RFC 1945 HTTP/1.0 May 1996 - - - The actual function performed by the POST method is determined by the - server and is usually dependent on the Request-URI. The posted entity - is subordinate to that URI in the same way that a file is subordinate - to a directory containing it, a news article is subordinate to a - newsgroup to which it is posted, or a record is subordinate to a - database. - - A successful POST does not require that the entity be created as a - resource on the origin server or made accessible for future - reference. That is, the action performed by the POST method might not - result in a resource that can be identified by a URI. 
In this case, - either 200 (ok) or 204 (no content) is the appropriate response - status, depending on whether or not the response includes an entity - that describes the result. - - If a resource has been created on the origin server, the response - should be 201 (created) and contain an entity (preferably of type - "text/html") which describes the status of the request and refers to - the new resource. - - A valid Content-Length is required on all HTTP/1.0 POST requests. An - HTTP/1.0 server should respond with a 400 (bad request) message if it - cannot determine the length of the request message's content. - - Applications must not cache responses to a POST request because the - application has no way of knowing that the server would return an - equivalent response on some future request. - -9. Status Code Definitions - - Each Status-Code is described below, including a description of which - method(s) it can follow and any metainformation required in the - response. - -9.1 Informational 1xx - - This class of status code indicates a provisional response, - consisting only of the Status-Line and optional headers, and is - terminated by an empty line. HTTP/1.0 does not define any 1xx status - codes and they are not a valid response to a HTTP/1.0 request. - However, they may be useful for experimental applications which are - outside the scope of this specification. - -9.2 Successful 2xx - - This class of status code indicates that the client's request was - successfully received, understood, and accepted. - - - - -Berners-Lee, et al Informational [Page 32] - -RFC 1945 HTTP/1.0 May 1996 - - - 200 OK - - The request has succeeded. 
The information returned with the - response is dependent on the method used in the request, as follows: - - GET an entity corresponding to the requested resource is sent - in the response; - - HEAD the response must only contain the header information and - no Entity-Body; - - POST an entity describing or containing the result of the action. - - 201 Created - - The request has been fulfilled and resulted in a new resource being - created. The newly created resource can be referenced by the URI(s) - returned in the entity of the response. The origin server should - create the resource before using this Status-Code. If the action - cannot be carried out immediately, the server must include in the - response body a description of when the resource will be available; - otherwise, the server should respond with 202 (accepted). - - Of the methods defined by this specification, only POST can create a - resource. - - 202 Accepted - - The request has been accepted for processing, but the processing - has not been completed. The request may or may not eventually be - acted upon, as it may be disallowed when processing actually takes - place. There is no facility for re-sending a status code from an - asynchronous operation such as this. - - The 202 response is intentionally non-committal. Its purpose is to - allow a server to accept a request for some other process (perhaps - a batch-oriented process that is only run once per day) without - requiring that the user agent's connection to the server persist - until the process is completed. The entity returned with this - response should include an indication of the request's current - status and either a pointer to a status monitor or some estimate of - when the user can expect the request to be fulfilled. - - 204 No Content - - The server has fulfilled the request but there is no new - information to send back. 
If the client is a user agent, it should - not change its document view from that which caused the request to - - - -Berners-Lee, et al Informational [Page 33] - -RFC 1945 HTTP/1.0 May 1996 - - - be generated. This response is primarily intended to allow input - for scripts or other actions to take place without causing a change - to the user agent's active document view. The response may include - new metainformation in the form of entity headers, which should - apply to the document currently in the user agent's active view. - -9.3 Redirection 3xx - - This class of status code indicates that further action needs to be - taken by the user agent in order to fulfill the request. The action - required may be carried out by the user agent without interaction - with the user if and only if the method used in the subsequent - request is GET or HEAD. A user agent should never automatically - redirect a request more than 5 times, since such redirections usually - indicate an infinite loop. - - 300 Multiple Choices - - This response code is not directly used by HTTP/1.0 applications, - but serves as the default for interpreting the 3xx class of - responses. - - The requested resource is available at one or more locations. - Unless it was a HEAD request, the response should include an entity - containing a list of resource characteristics and locations from - which the user or user agent can choose the one most appropriate. - If the server has a preferred choice, it should include the URL in - a Location field; user agents may use this field value for - automatic redirection. - - 301 Moved Permanently - - The requested resource has been assigned a new permanent URL and - any future references to this resource should be done using that - URL. Clients with link editing capabilities should automatically - relink references to the Request-URI to the new reference returned - by the server, where possible. - - The new URL must be given by the Location field in the response. 
- Unless it was a HEAD request, the Entity-Body of the response - should contain a short note with a hyperlink to the new URL. - - If the 301 status code is received in response to a request using - the POST method, the user agent must not automatically redirect the - request unless it can be confirmed by the user, since this might - change the conditions under which the request was issued. - - - - - -Berners-Lee, et al Informational [Page 34] - -RFC 1945 HTTP/1.0 May 1996 - - - Note: When automatically redirecting a POST request after - receiving a 301 status code, some existing user agents will - erroneously change it into a GET request. - - 302 Moved Temporarily - - The requested resource resides temporarily under a different URL. - Since the redirection may be altered on occasion, the client should - continue to use the Request-URI for future requests. - - The URL must be given by the Location field in the response. Unless - it was a HEAD request, the Entity-Body of the response should - contain a short note with a hyperlink to the new URI(s). - - If the 302 status code is received in response to a request using - the POST method, the user agent must not automatically redirect the - request unless it can be confirmed by the user, since this might - change the conditions under which the request was issued. - - Note: When automatically redirecting a POST request after - receiving a 302 status code, some existing user agents will - erroneously change it into a GET request. - - 304 Not Modified - - If the client has performed a conditional GET request and access is - allowed, but the document has not been modified since the date and - time specified in the If-Modified-Since field, the server must - respond with this status code and not send an Entity-Body to the - client. Header fields contained in the response should only include - information which is relevant to cache managers or which may have - changed independently of the entity's Last-Modified date. 
Examples - of relevant header fields include: Date, Server, and Expires. A - cache should update its cached entity to reflect any new field - values given in the 304 response. - -9.4 Client Error 4xx - - The 4xx class of status code is intended for cases in which the - client seems to have erred. If the client has not completed the - request when a 4xx code is received, it should immediately cease - sending data to the server. Except when responding to a HEAD request, - the server should include an entity containing an explanation of the - error situation, and whether it is a temporary or permanent - condition. These status codes are applicable to any request method. - - - - - - -Berners-Lee, et al Informational [Page 35] - -RFC 1945 HTTP/1.0 May 1996 - - - Note: If the client is sending data, server implementations on TCP - should be careful to ensure that the client acknowledges receipt - of the packet(s) containing the response prior to closing the - input connection. If the client continues sending data to the - server after the close, the server's controller will send a reset - packet to the client, which may erase the client's unacknowledged - input buffers before they can be read and interpreted by the HTTP - application. - - 400 Bad Request - - The request could not be understood by the server due to malformed - syntax. The client should not repeat the request without - modifications. - - 401 Unauthorized - - The request requires user authentication. The response must include - a WWW-Authenticate header field (Section 10.16) containing a - challenge applicable to the requested resource. The client may - repeat the request with a suitable Authorization header field - (Section 10.2). If the request already included Authorization - credentials, then the 401 response indicates that authorization has - been refused for those credentials. 
If the 401 response contains - the same challenge as the prior response, and the user agent has - already attempted authentication at least once, then the user - should be presented the entity that was given in the response, - since that entity may include relevant diagnostic information. HTTP - access authentication is explained in Section 11. - - 403 Forbidden - - The server understood the request, but is refusing to fulfill it. - Authorization will not help and the request should not be repeated. - If the request method was not HEAD and the server wishes to make - public why the request has not been fulfilled, it should describe - the reason for the refusal in the entity body. This status code is - commonly used when the server does not wish to reveal exactly why - the request has been refused, or when no other response is - applicable. - - 404 Not Found - - The server has not found anything matching the Request-URI. No - indication is given of whether the condition is temporary or - permanent. If the server does not wish to make this information - available to the client, the status code 403 (forbidden) can be - used instead. - - - -Berners-Lee, et al Informational [Page 36] - -RFC 1945 HTTP/1.0 May 1996 - - -9.5 Server Error 5xx - - Response status codes beginning with the digit "5" indicate cases in - which the server is aware that it has erred or is incapable of - performing the request. If the client has not completed the request - when a 5xx code is received, it should immediately cease sending data - to the server. Except when responding to a HEAD request, the server - should include an entity containing an explanation of the error - situation, and whether it is a temporary or permanent condition. - These response codes are applicable to any request method and there - are no required header fields. - - 500 Internal Server Error - - The server encountered an unexpected condition which prevented it - from fulfilling the request. 
- - 501 Not Implemented - - The server does not support the functionality required to fulfill - the request. This is the appropriate response when the server does - not recognize the request method and is not capable of supporting - it for any resource. - - 502 Bad Gateway - - The server, while acting as a gateway or proxy, received an invalid - response from the upstream server it accessed in attempting to - fulfill the request. - - 503 Service Unavailable - - The server is currently unable to handle the request due to a - temporary overloading or maintenance of the server. The implication - is that this is a temporary condition which will be alleviated - after some delay. - - Note: The existence of the 503 status code does not imply - that a server must use it when becoming overloaded. Some - servers may wish to simply refuse the connection. - -10. Header Field Definitions - - This section defines the syntax and semantics of all commonly used - HTTP/1.0 header fields. For general and entity header fields, both - sender and recipient refer to either the client or the server, - depending on who sends and who receives the message. - - - - -Berners-Lee, et al Informational [Page 37] - -RFC 1945 HTTP/1.0 May 1996 - - -10.1 Allow - - The Allow entity-header field lists the set of methods supported by - the resource identified by the Request-URI. The purpose of this field - is strictly to inform the recipient of valid methods associated with - the resource. The Allow header field is not permitted in a request - using the POST method, and thus should be ignored if it is received - as part of a POST entity. - - Allow = "Allow" ":" 1#method - - Example of use: - - Allow: GET, HEAD - - This field cannot prevent a client from trying other methods. - However, the indications given by the Allow header field value should - be followed. The actual set of allowed methods is defined by the - origin server at the time of each request. 
- - A proxy must not modify the Allow header field even if it does not - understand all the methods specified, since the user agent may have - other means of communicating with the origin server. - - The Allow header field does not indicate what methods are implemented - by the server. - -10.2 Authorization - - A user agent that wishes to authenticate itself with a server-- - usually, but not necessarily, after receiving a 401 response--may do - so by including an Authorization request-header field with the - request. The Authorization field value consists of credentials - containing the authentication information of the user agent for the - realm of the resource being requested. - - Authorization = "Authorization" ":" credentials - - HTTP access authentication is described in Section 11. If a request - is authenticated and a realm specified, the same credentials should - be valid for all other requests within this realm. - - Responses to requests containing an Authorization field are not - cachable. - - - - - - - -Berners-Lee, et al Informational [Page 38] - -RFC 1945 HTTP/1.0 May 1996 - - -10.3 Content-Encoding - - The Content-Encoding entity-header field is used as a modifier to the - media-type. When present, its value indicates what additional content - coding has been applied to the resource, and thus what decoding - mechanism must be applied in order to obtain the media-type - referenced by the Content-Type header field. The Content-Encoding is - primarily used to allow a document to be compressed without losing - the identity of its underlying media type. - - Content-Encoding = "Content-Encoding" ":" content-coding - - Content codings are defined in Section 3.5. An example of its use is - - Content-Encoding: x-gzip - - The Content-Encoding is a characteristic of the resource identified - by the Request-URI. Typically, the resource is stored with this - encoding and is only decoded before rendering or analogous usage. 
- -10.4 Content-Length - - The Content-Length entity-header field indicates the size of the - Entity-Body, in decimal number of octets, sent to the recipient or, - in the case of the HEAD method, the size of the Entity-Body that - would have been sent had the request been a GET. - - Content-Length = "Content-Length" ":" 1*DIGIT - - An example is - - Content-Length: 3495 - - Applications should use this field to indicate the size of the - Entity-Body to be transferred, regardless of the media type of the - entity. A valid Content-Length field value is required on all - HTTP/1.0 request messages containing an entity body. - - Any Content-Length greater than or equal to zero is a valid value. - Section 7.2.2 describes how to determine the length of a response - entity body if a Content-Length is not given. - - Note: The meaning of this field is significantly different from - the corresponding definition in MIME, where it is an optional - field used within the "message/external-body" content-type. In - HTTP, it should be used whenever the entity's length can be - determined prior to being transferred. - - - - -Berners-Lee, et al Informational [Page 39] - -RFC 1945 HTTP/1.0 May 1996 - - -10.5 Content-Type - - The Content-Type entity-header field indicates the media type of the - Entity-Body sent to the recipient or, in the case of the HEAD method, - the media type that would have been sent had the request been a GET. - - Content-Type = "Content-Type" ":" media-type - - Media types are defined in Section 3.6. An example of the field is - - Content-Type: text/html - - Further discussion of methods for identifying the media type of an - entity is provided in Section 7.2.1. - -10.6 Date - - The Date general-header field represents the date and time at which - the message was originated, having the same semantics as orig-date in - RFC 822. The field value is an HTTP-date, as described in Section - 3.3. 
- - Date = "Date" ":" HTTP-date - - An example is - - Date: Tue, 15 Nov 1994 08:12:31 GMT - - If a message is received via direct connection with the user agent - (in the case of requests) or the origin server (in the case of - responses), then the date can be assumed to be the current date at - the receiving end. However, since the date--as it is believed by the - origin--is important for evaluating cached responses, origin servers - should always include a Date header. Clients should only send a Date - header field in messages that include an entity body, as in the case - of the POST request, and even then it is optional. A received message - which does not have a Date header field should be assigned one by the - recipient if the message will be cached by that recipient or - gatewayed via a protocol which requires a Date. - - In theory, the date should represent the moment just before the - entity is generated. In practice, the date can be generated at any - time during the message origination without affecting its semantic - value. - - Note: An earlier version of this document incorrectly specified - that this field should contain the creation date of the enclosed - Entity-Body. This has been changed to reflect actual (and proper) - - - -Berners-Lee, et al Informational [Page 40] - -RFC 1945 HTTP/1.0 May 1996 - - - usage. - -10.7 Expires - - The Expires entity-header field gives the date/time after which the - entity should be considered stale. This allows information providers - to suggest the volatility of the resource, or a date after which the - information may no longer be valid. Applications must not cache this - entity beyond the date given. The presence of an Expires field does - not imply that the original resource will change or cease to exist - at, before, or after that time. However, information providers that - know or even suspect that a resource will change by a certain date - should include an Expires header with that date. 
The format is an - absolute date and time as defined by HTTP-date in Section 3.3. - - Expires = "Expires" ":" HTTP-date - - An example of its use is - - Expires: Thu, 01 Dec 1994 16:00:00 GMT - - If the date given is equal to or earlier than the value of the Date - header, the recipient must not cache the enclosed entity. If a - resource is dynamic by nature, as is the case with many data- - producing processes, entities from that resource should be given an - appropriate Expires value which reflects that dynamism. - - The Expires field cannot be used to force a user agent to refresh its - display or reload a resource; its semantics apply only to caching - mechanisms, and such mechanisms need only check a resource's - expiration status when a new request for that resource is initiated. - - User agents often have history mechanisms, such as "Back" buttons and - history lists, which can be used to redisplay an entity retrieved - earlier in a session. By default, the Expires field does not apply to - history mechanisms. If the entity is still in storage, a history - mechanism should display it even if the entity has expired, unless - the user has specifically configured the agent to refresh expired - history documents. - - Note: Applications are encouraged to be tolerant of bad or - misinformed implementations of the Expires header. A value of zero - (0) or an invalid date format should be considered equivalent to - an "expires immediately." Although these values are not legitimate - for HTTP/1.0, a robust implementation is always desirable. - - - - - - -Berners-Lee, et al Informational [Page 41] - -RFC 1945 HTTP/1.0 May 1996 - - -10.8 From - - The From request-header field, if given, should contain an Internet - e-mail address for the human user who controls the requesting user - agent. 
The address should be machine-usable, as defined by mailbox in - RFC 822 [7] (as updated by RFC 1123 [6]): - - From = "From" ":" mailbox - - An example is: - - From: webmaster@w3.org - - This header field may be used for logging purposes and as a means for - identifying the source of invalid or unwanted requests. It should not - be used as an insecure form of access protection. The interpretation - of this field is that the request is being performed on behalf of the - person given, who accepts responsibility for the method performed. In - particular, robot agents should include this header so that the - person responsible for running the robot can be contacted if problems - occur on the receiving end. - - The Internet e-mail address in this field may be separate from the - Internet host which issued the request. For example, when a request - is passed through a proxy, the original issuer's address should be - used. - - Note: The client should not send the From header field without the - user's approval, as it may conflict with the user's privacy - interests or their site's security policy. It is strongly - recommended that the user be able to disable, enable, and modify - the value of this field at any time prior to a request. - -10.9 If-Modified-Since - - The If-Modified-Since request-header field is used with the GET - method to make it conditional: if the requested resource has not been - modified since the time specified in this field, a copy of the - resource will not be returned from the server; instead, a 304 (not - modified) response will be returned without any Entity-Body. 
- - If-Modified-Since = "If-Modified-Since" ":" HTTP-date - - An example of the field is: - - If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT - - - - - -Berners-Lee, et al Informational [Page 42] - -RFC 1945 HTTP/1.0 May 1996 - - - A conditional GET method requests that the identified resource be - transferred only if it has been modified since the date given by the - If-Modified-Since header. The algorithm for determining this includes - the following cases: - - a) If the request would normally result in anything other than - a 200 (ok) status, or if the passed If-Modified-Since date - is invalid, the response is exactly the same as for a - normal GET. A date which is later than the server's current - time is invalid. - - b) If the resource has been modified since the - If-Modified-Since date, the response is exactly the same as - for a normal GET. - - c) If the resource has not been modified since a valid - If-Modified-Since date, the server shall return a 304 (not - modified) response. - - The purpose of this feature is to allow efficient updates of cached - information with a minimum amount of transaction overhead. - -10.10 Last-Modified - - The Last-Modified entity-header field indicates the date and time at - which the sender believes the resource was last modified. The exact - semantics of this field are defined in terms of how the recipient - should interpret it: if the recipient has a copy of this resource - which is older than the date given by the Last-Modified field, that - copy should be considered stale. - - Last-Modified = "Last-Modified" ":" HTTP-date - - An example of its use is - - Last-Modified: Tue, 15 Nov 1994 12:45:26 GMT - - The exact meaning of this header field depends on the implementation - of the sender and the nature of the original resource. For files, it - may be just the file system last-modified time. For entities with - dynamically included parts, it may be the most recent of the set of - last-modify times for its component parts. 
For database gateways, it - may be the last-update timestamp of the record. For virtual objects, - it may be the last time the internal state changed. - - An origin server must not send a Last-Modified date which is later - than the server's time of message origination. In such cases, where - the resource's last modification would indicate some time in the - - - -Berners-Lee, et al Informational [Page 43] - -RFC 1945 HTTP/1.0 May 1996 - - - future, the server must replace that date with the message - origination date. - -10.11 Location - - The Location response-header field defines the exact location of the - resource that was identified by the Request-URI. For 3xx responses, - the location must indicate the server's preferred URL for automatic - redirection to the resource. Only one absolute URL is allowed. - - Location = "Location" ":" absoluteURI - - An example is - - Location: http://www.w3.org/hypertext/WWW/NewLocation.html - -10.12 Pragma - - The Pragma general-header field is used to include implementation- - specific directives that may apply to any recipient along the - request/response chain. All pragma directives specify optional - behavior from the viewpoint of the protocol; however, some systems - may require that behavior be consistent with the directives. - - Pragma = "Pragma" ":" 1#pragma-directive - - pragma-directive = "no-cache" | extension-pragma - extension-pragma = token [ "=" word ] - - When the "no-cache" directive is present in a request message, an - application should forward the request toward the origin server even - if it has a cached copy of what is being requested. This allows a - client to insist upon receiving an authoritative response to its - request. It also allows a client to refresh a cached copy which is - known to be corrupted or stale. 
- - Pragma directives must be passed through by a proxy or gateway - application, regardless of their significance to that application, - since the directives may be applicable to all recipients along the - request/response chain. It is not possible to specify a pragma for a - specific recipient; however, any pragma directive not relevant to a - recipient should be ignored by that recipient. - -10.13 Referer - - The Referer request-header field allows the client to specify, for - the server's benefit, the address (URI) of the resource from which - the Request-URI was obtained. This allows a server to generate lists - - - -Berners-Lee, et al Informational [Page 44] - -RFC 1945 HTTP/1.0 May 1996 - - - of back-links to resources for interest, logging, optimized caching, - etc. It also allows obsolete or mistyped links to be traced for - maintenance. The Referer field must not be sent if the Request-URI - was obtained from a source that does not have its own URI, such as - input from the user keyboard. - - Referer = "Referer" ":" ( absoluteURI | relativeURI ) - - Example: - - Referer: http://www.w3.org/hypertext/DataSources/Overview.html - - If a partial URI is given, it should be interpreted relative to the - Request-URI. The URI must not include a fragment. - - Note: Because the source of a link may be private information or - may reveal an otherwise private information source, it is strongly - recommended that the user be able to select whether or not the - Referer field is sent. For example, a browser client could have a - toggle switch for browsing openly/anonymously, which would - respectively enable/disable the sending of Referer and From - information. - -10.14 Server - - The Server response-header field contains information about the - software used by the origin server to handle the request. The field - can contain multiple product tokens (Section 3.7) and comments - identifying the server and any significant subproducts. 
By - convention, the product tokens are listed in order of their - significance for identifying the application. - - Server = "Server" ":" 1*( product | comment ) - - Example: - - Server: CERN/3.0 libwww/2.17 - - If the response is being forwarded through a proxy, the proxy - application must not add its data to the product list. - - Note: Revealing the specific software version of the server may - allow the server machine to become more vulnerable to attacks - against software that is known to contain security holes. Server - implementors are encouraged to make this field a configurable - option. - - - - - -Berners-Lee, et al Informational [Page 45] - -RFC 1945 HTTP/1.0 May 1996 - - - Note: Some existing servers fail to restrict themselves to the - product token syntax within the Server field. - -10.15 User-Agent - - The User-Agent request-header field contains information about the - user agent originating the request. This is for statistical purposes, - the tracing of protocol violations, and automated recognition of user - agents for the sake of tailoring responses to avoid particular user - agent limitations. Although it is not required, user agents should - include this field with requests. The field can contain multiple - product tokens (Section 3.7) and comments identifying the agent and - any subproducts which form a significant part of the user agent. By - convention, the product tokens are listed in order of their - significance for identifying the application. - - User-Agent = "User-Agent" ":" 1*( product | comment ) - - Example: - - User-Agent: CERN-LineMode/2.15 libwww/2.17b3 - - Note: Some current proxy applications append their product - information to the list in the User-Agent field. This is not - recommended, since it makes machine interpretation of these - fields ambiguous. - - Note: Some existing clients fail to restrict themselves to - the product token syntax within the User-Agent field. 
- -10.16 WWW-Authenticate - - The WWW-Authenticate response-header field must be included in 401 - (unauthorized) response messages. The field value consists of at - least one challenge that indicates the authentication scheme(s) and - parameters applicable to the Request-URI. - - WWW-Authenticate = "WWW-Authenticate" ":" 1#challenge - - The HTTP access authentication process is described in Section 11. - User agents must take special care in parsing the WWW-Authenticate - field value if it contains more than one challenge, or if more than - one WWW-Authenticate header field is provided, since the contents of - a challenge may itself contain a comma-separated list of - authentication parameters. - - - - - - -Berners-Lee, et al Informational [Page 46] - -RFC 1945 HTTP/1.0 May 1996 - - -11. Access Authentication - - HTTP provides a simple challenge-response authentication mechanism - which may be used by a server to challenge a client request and by a - client to provide authentication information. It uses an extensible, - case-insensitive token to identify the authentication scheme, - followed by a comma-separated list of attribute-value pairs which - carry the parameters necessary for achieving authentication via that - scheme. - - auth-scheme = token - - auth-param = token "=" quoted-string - - The 401 (unauthorized) response message is used by an origin server - to challenge the authorization of a user agent. This response must - include a WWW-Authenticate header field containing at least one - challenge applicable to the requested resource. - - challenge = auth-scheme 1*SP realm *( "," auth-param ) - - realm = "realm" "=" realm-value - realm-value = quoted-string - - The realm attribute (case-insensitive) is required for all - authentication schemes which issue a challenge. The realm value - (case-sensitive), in combination with the canonical root URL of the - server being accessed, defines the protection space. 
These realms - allow the protected resources on a server to be partitioned into a - set of protection spaces, each with its own authentication scheme - and/or authorization database. The realm value is a string, generally - assigned by the origin server, which may have additional semantics - specific to the authentication scheme. - - A user agent that wishes to authenticate itself with a server-- - usually, but not necessarily, after receiving a 401 response--may do - so by including an Authorization header field with the request. The - Authorization field value consists of credentials containing the - authentication information of the user agent for the realm of the - resource being requested. - - credentials = basic-credentials - | ( auth-scheme #auth-param ) - - The domain over which credentials can be automatically applied by a - user agent is determined by the protection space. If a prior request - has been authorized, the same credentials may be reused for all other - requests within that protection space for a period of time determined - - - -Berners-Lee, et al Informational [Page 47] - -RFC 1945 HTTP/1.0 May 1996 - - - by the authentication scheme, parameters, and/or user preference. - Unless otherwise defined by the authentication scheme, a single - protection space cannot extend outside the scope of its server. - - If the server does not wish to accept the credentials sent with a - request, it should return a 403 (forbidden) response. - - The HTTP protocol does not restrict applications to this simple - challenge-response mechanism for access authentication. Additional - mechanisms may be used, such as encryption at the transport level or - via message encapsulation, and with additional header fields - specifying authentication information. However, these additional - mechanisms are not defined by this specification. - - Proxies must be completely transparent regarding user agent - authentication. 
That is, they must forward the WWW-Authenticate and
- Authorization headers untouched, and must not cache the response to a
- request containing Authorization. HTTP/1.0 does not provide a means
- for a client to be authenticated with a proxy.
-
-11.1 Basic Authentication Scheme
-
- The "basic" authentication scheme is based on the model that the user
- agent must authenticate itself with a user-ID and a password for each
- realm. The realm value should be considered an opaque string which
- can only be compared for equality with other realms on that server.
- The server will authorize the request only if it can validate the
- user-ID and password for the protection space of the Request-URI.
- There are no optional authentication parameters.
-
- Upon receipt of an unauthorized request for a URI within the
- protection space, the server should respond with a challenge like the
- following:
-
- WWW-Authenticate: Basic realm="WallyWorld"
-
- where "WallyWorld" is the string assigned by the server to identify
- the protection space of the Request-URI.
-
- To receive authorization, the client sends the user-ID and password,
- separated by a single colon (":") character, within a base64 [5]
- encoded string in the credentials.
-
- basic-credentials = "Basic" SP basic-cookie
-
- basic-cookie = <base64 [5] encoding of userid-password,
- except not limited to 76 char/line>
-
-
-
-Berners-Lee, et al Informational [Page 48]
-
-RFC 1945 HTTP/1.0 May 1996
-
-
- userid-password = [ token ] ":" *TEXT
-
- If the user agent wishes to send the user-ID "Aladdin" and password
- "open sesame", it would use the following header field:
-
- Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
-
- The basic authentication scheme is a non-secure method of filtering
- unauthorized access to resources on an HTTP server. It is based on
- the assumption that the connection between the client and the server
- can be regarded as a trusted carrier. As this is not generally true
- on an open network, the basic authentication scheme should be used
- accordingly.
In spite of this, clients should implement the scheme in - order to communicate with servers that use it. - -12. Security Considerations - - This section is meant to inform application developers, information - providers, and users of the security limitations in HTTP/1.0 as - described by this document. The discussion does not include - definitive solutions to the problems revealed, though it does make - some suggestions for reducing security risks. - -12.1 Authentication of Clients - - As mentioned in Section 11.1, the Basic authentication scheme is not - a secure method of user authentication, nor does it prevent the - Entity-Body from being transmitted in clear text across the physical - network used as the carrier. HTTP/1.0 does not prevent additional - authentication schemes and encryption mechanisms from being employed - to increase security. - -12.2 Safe Methods - - The writers of client software should be aware that the software - represents the user in their interactions over the Internet, and - should be careful to allow the user to be aware of any actions they - may take which may have an unexpected significance to themselves or - others. - - In particular, the convention has been established that the GET and - HEAD methods should never have the significance of taking an action - other than retrieval. These methods should be considered "safe." This - allows user agents to represent other methods, such as POST, in a - special way, so that the user is made aware of the fact that a - possibly unsafe action is being requested. - - - - - -Berners-Lee, et al Informational [Page 49] - -RFC 1945 HTTP/1.0 May 1996 - - - Naturally, it is not possible to ensure that the server does not - generate side-effects as a result of performing a GET request; in - fact, some dynamic resources consider that a feature. The important - distinction here is that the user did not request the side-effects, - so therefore cannot be held accountable for them. 
- -12.3 Abuse of Server Log Information - - A server is in the position to save personal data about a user's - requests which may identify their reading patterns or subjects of - interest. This information is clearly confidential in nature and its - handling may be constrained by law in certain countries. People using - the HTTP protocol to provide data are responsible for ensuring that - such material is not distributed without the permission of any - individuals that are identifiable by the published results. - -12.4 Transfer of Sensitive Information - - Like any generic data transfer protocol, HTTP cannot regulate the - content of the data that is transferred, nor is there any a priori - method of determining the sensitivity of any particular piece of - information within the context of any given request. Therefore, - applications should supply as much control over this information as - possible to the provider of that information. Three header fields are - worth special mention in this context: Server, Referer and From. - - Revealing the specific software version of the server may allow the - server machine to become more vulnerable to attacks against software - that is known to contain security holes. Implementors should make the - Server header field a configurable option. - - The Referer field allows reading patterns to be studied and reverse - links drawn. Although it can be very useful, its power can be abused - if user details are not separated from the information contained in - the Referer. Even when the personal information has been removed, the - Referer field may indicate a private document's URI whose publication - would be inappropriate. - - The information sent in the From field might conflict with the user's - privacy interests or their site's security policy, and hence it - should not be transmitted without the user being able to disable, - enable, and modify the contents of the field. 
The user must be able - to set the contents of this field within a user preference or - application defaults configuration. - - We suggest, though do not require, that a convenient toggle interface - be provided for the user to enable or disable the sending of From and - Referer information. - - - -Berners-Lee, et al Informational [Page 50] - -RFC 1945 HTTP/1.0 May 1996 - - -12.5 Attacks Based On File and Path Names - - Implementations of HTTP origin servers should be careful to restrict - the documents returned by HTTP requests to be only those that were - intended by the server administrators. If an HTTP server translates - HTTP URIs directly into file system calls, the server must take - special care not to serve files that were not intended to be - delivered to HTTP clients. For example, Unix, Microsoft Windows, and - other operating systems use ".." as a path component to indicate a - directory level above the current one. On such a system, an HTTP - server must disallow any such construct in the Request-URI if it - would otherwise allow access to a resource outside those intended to - be accessible via the HTTP server. Similarly, files intended for - reference only internally to the server (such as access control - files, configuration files, and script code) must be protected from - inappropriate retrieval, since they might contain sensitive - information. Experience has shown that minor bugs in such HTTP server - implementations have turned into security risks. - -13. Acknowledgments - - This specification makes heavy use of the augmented BNF and generic - constructs defined by David H. Crocker for RFC 822 [7]. Similarly, it - reuses many of the definitions provided by Nathaniel Borenstein and - Ned Freed for MIME [5]. We hope that their inclusion in this - specification will help reduce past confusion over the relationship - between HTTP/1.0 and Internet mail message formats. - - The HTTP protocol has evolved considerably over the past four years. 
- It has benefited from a large and active developer community--the - many people who have participated on the www-talk mailing list--and - it is that community which has been most responsible for the success - of HTTP and of the World-Wide Web in general. Marc Andreessen, Robert - Cailliau, Daniel W. Connolly, Bob Denny, Jean-Francois Groff, Phillip - M. Hallam-Baker, Hakon W. Lie, Ari Luotonen, Rob McCool, Lou - Montulli, Dave Raggett, Tony Sanders, and Marc VanHeyningen deserve - special recognition for their efforts in defining aspects of the - protocol for early versions of this specification. - - Paul Hoffman contributed sections regarding the informational status - of this document and Appendices C and D. - - - - - - - - - - -Berners-Lee, et al Informational [Page 51] - -RFC 1945 HTTP/1.0 May 1996 - - - This document has benefited greatly from the comments of all those - participating in the HTTP-WG. In addition to those already mentioned, - the following individuals have contributed to this specification: - - Gary Adams Harald Tveit Alvestrand - Keith Ball Brian Behlendorf - Paul Burchard Maurizio Codogno - Mike Cowlishaw Roman Czyborra - Michael A. Dolan John Franks - Jim Gettys Marc Hedlund - Koen Holtman Alex Hopmann - Bob Jernigan Shel Kaphan - Martijn Koster Dave Kristol - Daniel LaLiberte Paul Leach - Albert Lunde John C. Mallery - Larry Masinter Mitra - Jeffrey Mogul Gavin Nicol - Bill Perry Jeffrey Perry - Owen Rees Luigi Rizzo - David Robinson Marc Salomon - Rich Salz Jim Seidman - Chuck Shotton Eric W. Sink - Simon E. Spero Robert S. Thau - Francois Yergeau Mary Ellen Zurko - Jean-Philippe Martin-Flatin - -14. References - - [1] Anklesaria, F., McCahill, M., Lindner, P., Johnson, D., - Torrey, D., and B. Alberti, "The Internet Gopher Protocol: A - Distributed Document Search and Retrieval Protocol", RFC 1436, - University of Minnesota, March 1993. 
- - [2] Berners-Lee, T., "Universal Resource Identifiers in WWW: A - Unifying Syntax for the Expression of Names and Addresses of - Objects on the Network as used in the World-Wide Web", - RFC 1630, CERN, June 1994. - - [3] Berners-Lee, T., and D. Connolly, "Hypertext Markup Language - - 2.0", RFC 1866, MIT/W3C, November 1995. - - [4] Berners-Lee, T., Masinter, L., and M. McCahill, "Uniform - Resource Locators (URL)", RFC 1738, CERN, Xerox PARC, - University of Minnesota, December 1994. - - - - - - - -Berners-Lee, et al Informational [Page 52] - -RFC 1945 HTTP/1.0 May 1996 - - - [5] Borenstein, N., and N. Freed, "MIME (Multipurpose Internet Mail - Extensions) Part One: Mechanisms for Specifying and Describing - the Format of Internet Message Bodies", RFC 1521, Bellcore, - Innosoft, September 1993. - - [6] Braden, R., "Requirements for Internet hosts - Application and - Support", STD 3, RFC 1123, IETF, October 1989. - - [7] Crocker, D., "Standard for the Format of ARPA Internet Text - Messages", STD 11, RFC 822, UDEL, August 1982. - - [8] F. Davis, B. Kahle, H. Morris, J. Salem, T. Shen, R. Wang, - J. Sui, and M. Grinbaum. "WAIS Interface Protocol Prototype - Functional Specification." (v1.5), Thinking Machines - Corporation, April 1990. - - [9] Fielding, R., "Relative Uniform Resource Locators", RFC 1808, - UC Irvine, June 1995. - - [10] Horton, M., and R. Adams, "Standard for interchange of USENET - Messages", RFC 1036 (Obsoletes RFC 850), AT&T Bell - Laboratories, Center for Seismic Studies, December 1987. - - [11] Kantor, B., and P. Lapsley, "Network News Transfer Protocol: - A Proposed Standard for the Stream-Based Transmission of News", - RFC 977, UC San Diego, UC Berkeley, February 1986. - - [12] Postel, J., "Simple Mail Transfer Protocol." STD 10, RFC 821, - USC/ISI, August 1982. - - [13] Postel, J., "Media Type Registration Procedure." RFC 1590, - USC/ISI, March 1994. - - [14] Postel, J., and J. 
Reynolds, "File Transfer Protocol (FTP)", - STD 9, RFC 959, USC/ISI, October 1985. - - [15] Reynolds, J., and J. Postel, "Assigned Numbers", STD 2, RFC - 1700, USC/ISI, October 1994. - - [16] Sollins, K., and L. Masinter, "Functional Requirements for - Uniform Resource Names", RFC 1737, MIT/LCS, Xerox Corporation, - December 1994. - - [17] US-ASCII. Coded Character Set - 7-Bit American Standard Code - for Information Interchange. Standard ANSI X3.4-1986, ANSI, - 1986. - - - - - -Berners-Lee, et al Informational [Page 53] - -RFC 1945 HTTP/1.0 May 1996 - - - [18] ISO-8859. International Standard -- Information Processing -- - 8-bit Single-Byte Coded Graphic Character Sets -- - Part 1: Latin alphabet No. 1, ISO 8859-1:1987. - Part 2: Latin alphabet No. 2, ISO 8859-2, 1987. - Part 3: Latin alphabet No. 3, ISO 8859-3, 1988. - Part 4: Latin alphabet No. 4, ISO 8859-4, 1988. - Part 5: Latin/Cyrillic alphabet, ISO 8859-5, 1988. - Part 6: Latin/Arabic alphabet, ISO 8859-6, 1987. - Part 7: Latin/Greek alphabet, ISO 8859-7, 1987. - Part 8: Latin/Hebrew alphabet, ISO 8859-8, 1988. - Part 9: Latin alphabet No. 5, ISO 8859-9, 1990. - -15. Authors' Addresses - - Tim Berners-Lee - Director, W3 Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, U.S.A. - - Fax: +1 (617) 258 8682 - EMail: timbl@w3.org - - - Roy T. Fielding - Department of Information and Computer Science - University of California - Irvine, CA 92717-3425, U.S.A. - - Fax: +1 (714) 824-4056 - EMail: fielding@ics.uci.edu - - - Henrik Frystyk Nielsen - W3 Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, U.S.A. - - Fax: +1 (617) 258 8682 - EMail: frystyk@w3.org - - - - - - - - - - -Berners-Lee, et al Informational [Page 54] - -RFC 1945 HTTP/1.0 May 1996 - - -Appendices - - These appendices are provided for informational reasons only -- they - do not form a part of the HTTP/1.0 specification. - -A. 
Internet Media Type message/http - - In addition to defining the HTTP/1.0 protocol, this document serves - as the specification for the Internet media type "message/http". The - following is to be registered with IANA [13]. - - Media Type name: message - - Media subtype name: http - - Required parameters: none - - Optional parameters: version, msgtype - - version: The HTTP-Version number of the enclosed message - (e.g., "1.0"). If not present, the version can be - determined from the first line of the body. - - msgtype: The message type -- "request" or "response". If - not present, the type can be determined from the - first line of the body. - - Encoding considerations: only "7bit", "8bit", or "binary" are - permitted - - Security considerations: none - -B. Tolerant Applications - - Although this document specifies the requirements for the generation - of HTTP/1.0 messages, not all applications will be correct in their - implementation. We therefore recommend that operational applications - be tolerant of deviations whenever those deviations can be - interpreted unambiguously. - - Clients should be tolerant in parsing the Status-Line and servers - tolerant when parsing the Request-Line. In particular, they should - accept any amount of SP or HT characters between fields, even though - only a single SP is required. - - The line terminator for HTTP-header fields is the sequence CRLF. - However, we recommend that applications, when parsing such headers, - recognize a single LF as a line terminator and ignore the leading CR. - - - -Berners-Lee, et al Informational [Page 55] - -RFC 1945 HTTP/1.0 May 1996 - - -C. Relationship to MIME - - HTTP/1.0 uses many of the constructs defined for Internet Mail (RFC - 822 [7]) and the Multipurpose Internet Mail Extensions (MIME [5]) to - allow entities to be transmitted in an open variety of - representations and with extensible mechanisms. 
However, RFC 1521 - discusses mail, and HTTP has a few features that are different than - those described in RFC 1521. These differences were carefully chosen - to optimize performance over binary connections, to allow greater - freedom in the use of new media types, to make date comparisons - easier, and to acknowledge the practice of some early HTTP servers - and clients. - - At the time of this writing, it is expected that RFC 1521 will be - revised. The revisions may include some of the practices found in - HTTP/1.0 but not in RFC 1521. - - This appendix describes specific areas where HTTP differs from RFC - 1521. Proxies and gateways to strict MIME environments should be - aware of these differences and provide the appropriate conversions - where necessary. Proxies and gateways from MIME environments to HTTP - also need to be aware of the differences because some conversions may - be required. - -C.1 Conversion to Canonical Form - - RFC 1521 requires that an Internet mail entity be converted to - canonical form prior to being transferred, as described in Appendix G - of RFC 1521 [5]. Section 3.6.1 of this document describes the forms - allowed for subtypes of the "text" media type when transmitted over - HTTP. - - RFC 1521 requires that content with a Content-Type of "text" - represent line breaks as CRLF and forbids the use of CR or LF outside - of line break sequences. HTTP allows CRLF, bare CR, and bare LF to - indicate a line break within text content when a message is - transmitted over HTTP. - - Where it is possible, a proxy or gateway from HTTP to a strict RFC - 1521 environment should translate all line breaks within the text - media types described in Section 3.6.1 of this document to the RFC - 1521 canonical form of CRLF. 
Note, however, that this may be - complicated by the presence of a Content-Encoding and by the fact - that HTTP allows the use of some character sets which do not use - octets 13 and 10 to represent CR and LF, as is the case for some - multi-byte character sets. - - - - - -Berners-Lee, et al Informational [Page 56] - -RFC 1945 HTTP/1.0 May 1996 - - -C.2 Conversion of Date Formats - - HTTP/1.0 uses a restricted set of date formats (Section 3.3) to - simplify the process of date comparison. Proxies and gateways from - other protocols should ensure that any Date header field present in a - message conforms to one of the HTTP/1.0 formats and rewrite the date - if necessary. - -C.3 Introduction of Content-Encoding - - RFC 1521 does not include any concept equivalent to HTTP/1.0's - Content-Encoding header field. Since this acts as a modifier on the - media type, proxies and gateways from HTTP to MIME-compliant - protocols must either change the value of the Content-Type header - field or decode the Entity-Body before forwarding the message. (Some - experimental applications of Content-Type for Internet mail have used - a media-type parameter of ";conversions=" to perform - an equivalent function as Content-Encoding. However, this parameter - is not part of RFC 1521.) - -C.4 No Content-Transfer-Encoding - - HTTP does not use the Content-Transfer-Encoding (CTE) field of RFC - 1521. Proxies and gateways from MIME-compliant protocols to HTTP must - remove any non-identity CTE ("quoted-printable" or "base64") encoding - prior to delivering the response message to an HTTP client. - - Proxies and gateways from HTTP to MIME-compliant protocols are - responsible for ensuring that the message is in the correct format - and encoding for safe transport on that protocol, where "safe - transport" is defined by the limitations of the protocol being used. 
- Such a proxy or gateway should label the data with an appropriate - Content-Transfer-Encoding if doing so will improve the likelihood of - safe transport over the destination protocol. - -C.5 HTTP Header Fields in Multipart Body-Parts - - In RFC 1521, most header fields in multipart body-parts are generally - ignored unless the field name begins with "Content-". In HTTP/1.0, - multipart body-parts may contain any HTTP header fields which are - significant to the meaning of that part. - -D. Additional Features - - This appendix documents protocol elements used by some existing HTTP - implementations, but not consistently and correctly across most - HTTP/1.0 applications. Implementors should be aware of these - features, but cannot rely upon their presence in, or interoperability - - - -Berners-Lee, et al Informational [Page 57] - -RFC 1945 HTTP/1.0 May 1996 - - - with, other HTTP/1.0 applications. - -D.1 Additional Request Methods - -D.1.1 PUT - - The PUT method requests that the enclosed entity be stored under the - supplied Request-URI. If the Request-URI refers to an already - existing resource, the enclosed entity should be considered as a - modified version of the one residing on the origin server. If the - Request-URI does not point to an existing resource, and that URI is - capable of being defined as a new resource by the requesting user - agent, the origin server can create the resource with that URI. - - The fundamental difference between the POST and PUT requests is - reflected in the different meaning of the Request-URI. The URI in a - POST request identifies the resource that will handle the enclosed - entity as data to be processed. That resource may be a data-accepting - process, a gateway to some other protocol, or a separate entity that - accepts annotations. 
In contrast, the URI in a PUT request identifies - the entity enclosed with the request -- the user agent knows what URI - is intended and the server should not apply the request to some other - resource. - -D.1.2 DELETE - - The DELETE method requests that the origin server delete the resource - identified by the Request-URI. - -D.1.3 LINK - - The LINK method establishes one or more Link relationships between - the existing resource identified by the Request-URI and other - existing resources. - -D.1.4 UNLINK - - The UNLINK method removes one or more Link relationships from the - existing resource identified by the Request-URI. - -D.2 Additional Header Field Definitions - -D.2.1 Accept - - The Accept request-header field can be used to indicate a list of - media ranges which are acceptable as a response to the request. The - asterisk "*" character is used to group media types into ranges, with - "*/*" indicating all media types and "type/*" indicating all subtypes - - - -Berners-Lee, et al Informational [Page 58] - -RFC 1945 HTTP/1.0 May 1996 - - - of that type. The set of ranges given by the client should represent - what types are acceptable given the context of the request. - -D.2.2 Accept-Charset - - The Accept-Charset request-header field can be used to indicate a - list of preferred character sets other than the default US-ASCII and - ISO-8859-1. This field allows clients capable of understanding more - comprehensive or special-purpose character sets to signal that - capability to a server which is capable of representing documents in - those character sets. - -D.2.3 Accept-Encoding - - The Accept-Encoding request-header field is similar to Accept, but - restricts the content-coding values which are acceptable in the - response. - -D.2.4 Accept-Language - - The Accept-Language request-header field is similar to Accept, but - restricts the set of natural languages that are preferred as a - response to the request. 
- -D.2.5 Content-Language - - The Content-Language entity-header field describes the natural - language(s) of the intended audience for the enclosed entity. Note - that this may not be equivalent to all the languages used within the - entity. - -D.2.6 Link - - The Link entity-header field provides a means for describing a - relationship between the entity and some other resource. An entity - may include multiple Link values. Links at the metainformation level - typically indicate relationships like hierarchical structure and - navigation paths. - -D.2.7 MIME-Version - - HTTP messages may include a single MIME-Version general-header field - to indicate what version of the MIME protocol was used to construct - the message. Use of the MIME-Version header field, as defined by RFC - 1521 [5], should indicate that the message is MIME-conformant. - Unfortunately, some older HTTP/1.0 servers send it indiscriminately, - and thus this field should be ignored. - - - - -Berners-Lee, et al Informational [Page 59] - -RFC 1945 HTTP/1.0 May 1996 - - -D.2.8 Retry-After - - The Retry-After response-header field can be used with a 503 (service - unavailable) response to indicate how long the service is expected to - be unavailable to the requesting client. The value of this field can - be either an HTTP-date or an integer number of seconds (in decimal) - after the time of the response. - -D.2.9 Title - - The Title entity-header field indicates the title of the entity. - -D.2.10 URI - - The URI entity-header field may contain some or all of the Uniform - Resource Identifiers (Section 3.2) by which the Request-URI resource - can be identified. There is no guarantee that the resource can be - accessed using the URI(s) specified. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Berners-Lee, et al Informational [Page 60] - diff --git a/docs/specs/rfc2068.txt b/docs/specs/rfc2068.txt deleted file mode 100644 index e16e4fd..0000000 --- a/docs/specs/rfc2068.txt +++ /dev/null @@ -1,9075 +0,0 @@ - - - - - - -Network Working Group R. Fielding -Request for Comments: 2068 UC Irvine -Category: Standards Track J. Gettys - J. Mogul - DEC - H. Frystyk - T. Berners-Lee - MIT/LCS - January 1997 - - - Hypertext Transfer Protocol -- HTTP/1.1 - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Abstract - - The Hypertext Transfer Protocol (HTTP) is an application-level - protocol for distributed, collaborative, hypermedia information - systems. It is a generic, stateless, object-oriented protocol which - can be used for many tasks, such as name servers and distributed - object management systems, through extension of its request methods. - A feature of HTTP is the typing and negotiation of data - representation, allowing systems to be built independently of the - data being transferred. - - HTTP has been in use by the World-Wide Web global information - initiative since 1990. This specification defines the protocol - referred to as "HTTP/1.1". 
- -Table of Contents - - 1 Introduction.............................................7 - 1.1 Purpose ..............................................7 - 1.2 Requirements .........................................7 - 1.3 Terminology ..........................................8 - 1.4 Overall Operation ...................................11 - 2 Notational Conventions and Generic Grammar..............13 - 2.1 Augmented BNF .......................................13 - 2.2 Basic Rules .........................................15 - 3 Protocol Parameters.....................................17 - 3.1 HTTP Version ........................................17 - - - -Fielding, et. al. Standards Track [Page 1] - -RFC 2068 HTTP/1.1 January 1997 - - - 3.2 Uniform Resource Identifiers ........................18 - 3.2.1 General Syntax ...................................18 - 3.2.2 http URL .........................................19 - 3.2.3 URI Comparison ...................................20 - 3.3 Date/Time Formats ...................................21 - 3.3.1 Full Date ........................................21 - 3.3.2 Delta Seconds ....................................22 - 3.4 Character Sets ......................................22 - 3.5 Content Codings .....................................23 - 3.6 Transfer Codings ....................................24 - 3.7 Media Types .........................................25 - 3.7.1 Canonicalization and Text Defaults ...............26 - 3.7.2 Multipart Types ..................................27 - 3.8 Product Tokens ......................................28 - 3.9 Quality Values ......................................28 - 3.10 Language Tags ......................................28 - 3.11 Entity Tags ........................................29 - 3.12 Range Units ........................................30 - 4 HTTP Message............................................30 - 4.1 Message Types .......................................30 - 4.2 Message Headers 
.....................................31 - 4.3 Message Body ........................................32 - 4.4 Message Length ......................................32 - 4.5 General Header Fields ...............................34 - 5 Request.................................................34 - 5.1 Request-Line ........................................34 - 5.1.1 Method ...........................................35 - 5.1.2 Request-URI ......................................35 - 5.2 The Resource Identified by a Request ................37 - 5.3 Request Header Fields ...............................37 - 6 Response................................................38 - 6.1 Status-Line .........................................38 - 6.1.1 Status Code and Reason Phrase ....................39 - 6.2 Response Header Fields ..............................41 - 7 Entity..................................................41 - 7.1 Entity Header Fields ................................41 - 7.2 Entity Body .........................................42 - 7.2.1 Type .............................................42 - 7.2.2 Length ...........................................43 - 8 Connections.............................................43 - 8.1 Persistent Connections ..............................43 - 8.1.1 Purpose ..........................................43 - 8.1.2 Overall Operation ................................44 - 8.1.3 Proxy Servers ....................................45 - 8.1.4 Practical Considerations .........................45 - 8.2 Message Transmission Requirements ...................46 - 9 Method Definitions......................................48 - 9.1 Safe and Idempotent Methods .........................48 - - - -Fielding, et. al. 
Standards Track [Page 2] - -RFC 2068 HTTP/1.1 January 1997 - - - 9.1.1 Safe Methods .....................................48 - 9.1.2 Idempotent Methods ...............................49 - 9.2 OPTIONS .............................................49 - 9.3 GET .................................................50 - 9.4 HEAD ................................................50 - 9.5 POST ................................................51 - 9.6 PUT .................................................52 - 9.7 DELETE ..............................................53 - 9.8 TRACE ...............................................53 - 10 Status Code Definitions................................53 - 10.1 Informational 1xx ..................................54 - 10.1.1 100 Continue ....................................54 - 10.1.2 101 Switching Protocols .........................54 - 10.2 Successful 2xx .....................................54 - 10.2.1 200 OK ..........................................54 - 10.2.2 201 Created .....................................55 - 10.2.3 202 Accepted ....................................55 - 10.2.4 203 Non-Authoritative Information ...............55 - 10.2.5 204 No Content ..................................55 - 10.2.6 205 Reset Content ...............................56 - 10.2.7 206 Partial Content .............................56 - 10.3 Redirection 3xx ....................................56 - 10.3.1 300 Multiple Choices ............................57 - 10.3.2 301 Moved Permanently ...........................57 - 10.3.3 302 Moved Temporarily ...........................58 - 10.3.4 303 See Other ...................................58 - 10.3.5 304 Not Modified ................................58 - 10.3.6 305 Use Proxy ...................................59 - 10.4 Client Error 4xx ...................................59 - 10.4.1 400 Bad Request .................................60 - 10.4.2 401 Unauthorized ................................60 - 10.4.3 402 Payment Required 
............................60 - 10.4.4 403 Forbidden ...................................60 - 10.4.5 404 Not Found ...................................60 - 10.4.6 405 Method Not Allowed ..........................61 - 10.4.7 406 Not Acceptable ..............................61 - 10.4.8 407 Proxy Authentication Required ...............61 - 10.4.9 408 Request Timeout .............................62 - 10.4.10 409 Conflict ...................................62 - 10.4.11 410 Gone .......................................62 - 10.4.12 411 Length Required ............................63 - 10.4.13 412 Precondition Failed ........................63 - 10.4.14 413 Request Entity Too Large ...................63 - 10.4.15 414 Request-URI Too Long .......................63 - 10.4.16 415 Unsupported Media Type .....................63 - 10.5 Server Error 5xx ...................................64 - 10.5.1 500 Internal Server Error .......................64 - 10.5.2 501 Not Implemented .............................64 - - - -Fielding, et. al. 
Standards Track [Page 3] - -RFC 2068 HTTP/1.1 January 1997 - - - 10.5.3 502 Bad Gateway .................................64 - 10.5.4 503 Service Unavailable .........................64 - 10.5.5 504 Gateway Timeout .............................64 - 10.5.6 505 HTTP Version Not Supported ..................65 - 11 Access Authentication..................................65 - 11.1 Basic Authentication Scheme ........................66 - 11.2 Digest Authentication Scheme .......................67 - 12 Content Negotiation....................................67 - 12.1 Server-driven Negotiation ..........................68 - 12.2 Agent-driven Negotiation ...........................69 - 12.3 Transparent Negotiation ............................70 - 13 Caching in HTTP........................................70 - 13.1.1 Cache Correctness ...............................72 - 13.1.2 Warnings ........................................73 - 13.1.3 Cache-control Mechanisms ........................74 - 13.1.4 Explicit User Agent Warnings ....................74 - 13.1.5 Exceptions to the Rules and Warnings ............75 - 13.1.6 Client-controlled Behavior ......................75 - 13.2 Expiration Model ...................................75 - 13.2.1 Server-Specified Expiration .....................75 - 13.2.2 Heuristic Expiration ............................76 - 13.2.3 Age Calculations ................................77 - 13.2.4 Expiration Calculations .........................79 - 13.2.5 Disambiguating Expiration Values ................80 - 13.2.6 Disambiguating Multiple Responses ...............80 - 13.3 Validation Model ...................................81 - 13.3.1 Last-modified Dates .............................82 - 13.3.2 Entity Tag Cache Validators .....................82 - 13.3.3 Weak and Strong Validators ......................82 - 13.3.4 Rules for When to Use Entity Tags and Last- - modified Dates..........................................85 - 13.3.5 Non-validating Conditionals 
.....................86 - 13.4 Response Cachability ...............................86 - 13.5 Constructing Responses From Caches .................87 - 13.5.1 End-to-end and Hop-by-hop Headers ...............88 - 13.5.2 Non-modifiable Headers ..........................88 - 13.5.3 Combining Headers ...............................89 - 13.5.4 Combining Byte Ranges ...........................90 - 13.6 Caching Negotiated Responses .......................90 - 13.7 Shared and Non-Shared Caches .......................91 - 13.8 Errors or Incomplete Response Cache Behavior .......91 - 13.9 Side Effects of GET and HEAD .......................92 - 13.10 Invalidation After Updates or Deletions ...........92 - 13.11 Write-Through Mandatory ...........................93 - 13.12 Cache Replacement .................................93 - 13.13 History Lists .....................................93 - 14 Header Field Definitions...............................94 - 14.1 Accept .............................................95 - - - -Fielding, et. al. 
Standards Track [Page 4] - -RFC 2068 HTTP/1.1 January 1997 - - - 14.2 Accept-Charset .....................................97 - 14.3 Accept-Encoding ....................................97 - 14.4 Accept-Language ....................................98 - 14.5 Accept-Ranges ......................................99 - 14.6 Age ................................................99 - 14.7 Allow .............................................100 - 14.8 Authorization .....................................100 - 14.9 Cache-Control .....................................101 - 14.9.1 What is Cachable ...............................103 - 14.9.2 What May be Stored by Caches ...................103 - 14.9.3 Modifications of the Basic Expiration Mechanism 104 - 14.9.4 Cache Revalidation and Reload Controls .........105 - 14.9.5 No-Transform Directive .........................107 - 14.9.6 Cache Control Extensions .......................108 - 14.10 Connection .......................................109 - 14.11 Content-Base .....................................109 - 14.12 Content-Encoding .................................110 - 14.13 Content-Language .................................110 - 14.14 Content-Length ...................................111 - 14.15 Content-Location .................................112 - 14.16 Content-MD5 ......................................113 - 14.17 Content-Range ....................................114 - 14.18 Content-Type .....................................116 - 14.19 Date .............................................116 - 14.20 ETag .............................................117 - 14.21 Expires ..........................................117 - 14.22 From .............................................118 - 14.23 Host .............................................119 - 14.24 If-Modified-Since ................................119 - 14.25 If-Match .........................................121 - 14.26 If-None-Match ....................................122 - 14.27 If-Range 
.........................................123 - 14.28 If-Unmodified-Since ..............................124 - 14.29 Last-Modified ....................................124 - 14.30 Location .........................................125 - 14.31 Max-Forwards .....................................125 - 14.32 Pragma ...........................................126 - 14.33 Proxy-Authenticate ...............................127 - 14.34 Proxy-Authorization ..............................127 - 14.35 Public ...........................................127 - 14.36 Range ............................................128 - 14.36.1 Byte Ranges ...................................128 - 14.36.2 Range Retrieval Requests ......................130 - 14.37 Referer ..........................................131 - 14.38 Retry-After ......................................131 - 14.39 Server ...........................................132 - 14.40 Transfer-Encoding ................................132 - 14.41 Upgrade ..........................................132 - - - -Fielding, et. al. 
Standards Track [Page 5] - -RFC 2068 HTTP/1.1 January 1997 - - - 14.42 User-Agent .......................................134 - 14.43 Vary .............................................134 - 14.44 Via ..............................................135 - 14.45 Warning ..........................................137 - 14.46 WWW-Authenticate .................................139 - 15 Security Considerations...............................139 - 15.1 Authentication of Clients .........................139 - 15.2 Offering a Choice of Authentication Schemes .......140 - 15.3 Abuse of Server Log Information ...................141 - 15.4 Transfer of Sensitive Information .................141 - 15.5 Attacks Based On File and Path Names ..............142 - 15.6 Personal Information ..............................143 - 15.7 Privacy Issues Connected to Accept Headers ........143 - 15.8 DNS Spoofing ......................................144 - 15.9 Location Headers and Spoofing .....................144 - 16 Acknowledgments.......................................144 - 17 References............................................146 - 18 Authors' Addresses....................................149 - 19 Appendices............................................150 - 19.1 Internet Media Type message/http ..................150 - 19.2 Internet Media Type multipart/byteranges ..........150 - 19.3 Tolerant Applications .............................151 - 19.4 Differences Between HTTP Entities and - MIME Entities...........................................152 - 19.4.1 Conversion to Canonical Form ...................152 - 19.4.2 Conversion of Date Formats .....................153 - 19.4.3 Introduction of Content-Encoding ...............153 - 19.4.4 No Content-Transfer-Encoding ...................153 - 19.4.5 HTTP Header Fields in Multipart Body-Parts .....153 - 19.4.6 Introduction of Transfer-Encoding ..............154 - 19.4.7 MIME-Version ...................................154 - 19.5 Changes from HTTP/1.0 
.............................154 - 19.5.1 Changes to Simplify Multi-homed Web Servers and - Conserve IP Addresses .................................155 - 19.6 Additional Features ...............................156 - 19.6.1 Additional Request Methods .....................156 - 19.6.2 Additional Header Field Definitions ............156 - 19.7 Compatibility with Previous Versions ..............160 - 19.7.1 Compatibility with HTTP/1.0 Persistent - Connections............................................161 - - - - - - - - - - - -Fielding, et. al. Standards Track [Page 6] - -RFC 2068 HTTP/1.1 January 1997 - - -1 Introduction - -1.1 Purpose - - The Hypertext Transfer Protocol (HTTP) is an application-level - protocol for distributed, collaborative, hypermedia information - systems. HTTP has been in use by the World-Wide Web global - information initiative since 1990. The first version of HTTP, - referred to as HTTP/0.9, was a simple protocol for raw data transfer - across the Internet. HTTP/1.0, as defined by RFC 1945 [6], improved - the protocol by allowing messages to be in the format of MIME-like - messages, containing metainformation about the data transferred and - modifiers on the request/response semantics. However, HTTP/1.0 does - not sufficiently take into consideration the effects of hierarchical - proxies, caching, the need for persistent connections, and virtual - hosts. In addition, the proliferation of incompletely-implemented - applications calling themselves "HTTP/1.0" has necessitated a - protocol version change in order for two communicating applications - to determine each other's true capabilities. - - This specification defines the protocol referred to as "HTTP/1.1". - This protocol includes more stringent requirements than HTTP/1.0 in - order to ensure reliable implementation of its features. - - Practical information systems require more functionality than simple - retrieval, including search, front-end update, and annotation. 
HTTP - allows an open-ended set of methods that indicate the purpose of a - request. It builds on the discipline of reference provided by the - Uniform Resource Identifier (URI) [3][20], as a location (URL) [4] or - name (URN) , for indicating the resource to which a method is to be - applied. Messages are passed in a format similar to that used by - Internet mail as defined by the Multipurpose Internet Mail Extensions - (MIME). - - HTTP is also used as a generic protocol for communication between - user agents and proxies/gateways to other Internet systems, including - those supported by the SMTP [16], NNTP [13], FTP [18], Gopher [2], - and WAIS [10] protocols. In this way, HTTP allows basic hypermedia - access to resources available from diverse applications. - -1.2 Requirements - - This specification uses the same words as RFC 1123 [8] for defining - the significance of each particular requirement. These words are: - - MUST - This word or the adjective "required" means that the item is an - absolute requirement of the specification. - - - -Fielding, et. al. Standards Track [Page 7] - -RFC 2068 HTTP/1.1 January 1997 - - - SHOULD - This word or the adjective "recommended" means that there may - exist valid reasons in particular circumstances to ignore this - item, but the full implications should be understood and the case - carefully weighed before choosing a different course. - - MAY - This word or the adjective "optional" means that this item is - truly optional. One vendor may choose to include the item because - a particular marketplace requires it or because it enhances the - product, for example; another vendor may omit the same item. - - An implementation is not compliant if it fails to satisfy one or more - of the MUST requirements for the protocols it implements. 
An - implementation that satisfies all the MUST and all the SHOULD - requirements for its protocols is said to be "unconditionally - compliant"; one that satisfies all the MUST requirements but not all - the SHOULD requirements for its protocols is said to be - "conditionally compliant." - -1.3 Terminology - - This specification uses a number of terms to refer to the roles - played by participants in, and objects of, the HTTP communication. - - connection - A transport layer virtual circuit established between two programs - for the purpose of communication. - - message - The basic unit of HTTP communication, consisting of a structured - sequence of octets matching the syntax defined in section 4 and - transmitted via the connection. - - request - An HTTP request message, as defined in section 5. - - response - An HTTP response message, as defined in section 6. - - resource - A network data object or service that can be identified by a URI, - as defined in section 3.2. Resources may be available in multiple - representations (e.g. multiple languages, data formats, size, - resolutions) or vary in other ways. - - - - - - -Fielding, et. al. Standards Track [Page 8] - -RFC 2068 HTTP/1.1 January 1997 - - - entity - The information transferred as the payload of a request or - response. An entity consists of metainformation in the form of - entity-header fields and content in the form of an entity-body, as - described in section 7. - - representation - An entity included with a response that is subject to content - negotiation, as described in section 12. There may exist multiple - representations associated with a particular response status. - - content negotiation - The mechanism for selecting the appropriate representation when - servicing a request, as described in section 12. The - representation of entities in any response can be negotiated - (including error responses). 
- - variant - A resource may have one, or more than one, representation(s) - associated with it at any given instant. Each of these - representations is termed a `variant.' Use of the term `variant' - does not necessarily imply that the resource is subject to content - negotiation. - - client - A program that establishes connections for the purpose of sending - requests. - - user agent - The client which initiates a request. These are often browsers, - editors, spiders (web-traversing robots), or other end user tools. - - server - An application program that accepts connections in order to - service requests by sending back responses. Any given program may - be capable of being both a client and a server; our use of these - terms refers only to the role being performed by the program for a - particular connection, rather than to the program's capabilities - in general. Likewise, any server may act as an origin server, - proxy, gateway, or tunnel, switching behavior based on the nature - of each request. - - origin server - The server on which a given resource resides or is to be created. - - - - - - - -Fielding, et. al. Standards Track [Page 9] - -RFC 2068 HTTP/1.1 January 1997 - - - proxy - An intermediary program which acts as both a server and a client - for the purpose of making requests on behalf of other clients. - Requests are serviced internally or by passing them on, with - possible translation, to other servers. A proxy must implement - both the client and server requirements of this specification. - - gateway - A server which acts as an intermediary for some other server. - Unlike a proxy, a gateway receives requests as if it were the - origin server for the requested resource; the requesting client - may not be aware that it is communicating with a gateway. - - tunnel - An intermediary program which is acting as a blind relay between - two connections. 
Once active, a tunnel is not considered a party - to the HTTP communication, though the tunnel may have been - initiated by an HTTP request. The tunnel ceases to exist when both - ends of the relayed connections are closed. - - cache - A program's local store of response messages and the subsystem - that controls its message storage, retrieval, and deletion. A - cache stores cachable responses in order to reduce the response - time and network bandwidth consumption on future, equivalent - requests. Any client or server may include a cache, though a cache - cannot be used by a server that is acting as a tunnel. - - cachable - A response is cachable if a cache is allowed to store a copy of - the response message for use in answering subsequent requests. The - rules for determining the cachability of HTTP responses are - defined in section 13. Even if a resource is cachable, there may - be additional constraints on whether a cache can use the cached - copy for a particular request. - - first-hand - A response is first-hand if it comes directly and without - unnecessary delay from the origin server, perhaps via one or more - proxies. A response is also first-hand if its validity has just - been checked directly with the origin server. - - explicit expiration time - The time at which the origin server intends that an entity should - no longer be returned by a cache without further validation. - - - - - - -Fielding, et. al. Standards Track [Page 10] - -RFC 2068 HTTP/1.1 January 1997 - - - heuristic expiration time - An expiration time assigned by a cache when no explicit expiration - time is available. - - age - The age of a response is the time since it was sent by, or - successfully validated with, the origin server. - - freshness lifetime - The length of time between the generation of a response and its - expiration time. - - fresh - A response is fresh if its age has not yet exceeded its freshness - lifetime. 
- - stale - A response is stale if its age has passed its freshness lifetime. - - semantically transparent - A cache behaves in a "semantically transparent" manner, with - respect to a particular response, when its use affects neither the - requesting client nor the origin server, except to improve - performance. When a cache is semantically transparent, the client - receives exactly the same response (except for hop-by-hop headers) - that it would have received had its request been handled directly - by the origin server. - - validator - A protocol element (e.g., an entity tag or a Last-Modified time) - that is used to find out whether a cache entry is an equivalent - copy of an entity. - -1.4 Overall Operation - - The HTTP protocol is a request/response protocol. A client sends a - request to the server in the form of a request method, URI, and - protocol version, followed by a MIME-like message containing request - modifiers, client information, and possible body content over a - connection with a server. The server responds with a status line, - including the message's protocol version and a success or error code, - followed by a MIME-like message containing server information, entity - metainformation, and possible entity-body content. The relationship - between HTTP and MIME is described in appendix 19.4. - - - - - - - -Fielding, et. al. Standards Track [Page 11] - -RFC 2068 HTTP/1.1 January 1997 - - - Most HTTP communication is initiated by a user agent and consists of - a request to be applied to a resource on some origin server. In the - simplest case, this may be accomplished via a single connection (v) - between the user agent (UA) and the origin server (O). - - request chain ------------------------> - UA -------------------v------------------- O - <----------------------- response chain - - A more complicated situation occurs when one or more intermediaries - are present in the request/response chain. 
There are three common - forms of intermediary: proxy, gateway, and tunnel. A proxy is a - forwarding agent, receiving requests for a URI in its absolute form, - rewriting all or part of the message, and forwarding the reformatted - request toward the server identified by the URI. A gateway is a - receiving agent, acting as a layer above some other server(s) and, if - necessary, translating the requests to the underlying server's - protocol. A tunnel acts as a relay point between two connections - without changing the messages; tunnels are used when the - communication needs to pass through an intermediary (such as a - firewall) even when the intermediary cannot understand the contents - of the messages. - - request chain --------------------------------------> - UA -----v----- A -----v----- B -----v----- C -----v----- O - <------------------------------------- response chain - - The figure above shows three intermediaries (A, B, and C) between the - user agent and origin server. A request or response message that - travels the whole chain will pass through four separate connections. - This distinction is important because some HTTP communication options - may apply only to the connection with the nearest, non-tunnel - neighbor, only to the end-points of the chain, or to all connections - along the chain. Although the diagram is linear, each participant - may be engaged in multiple, simultaneous communications. For example, - B may be receiving requests from many clients other than A, and/or - forwarding requests to servers other than C, at the same time that it - is handling A's request. - - Any party to the communication which is not acting as a tunnel may - employ an internal cache for handling requests. The effect of a cache - is that the request/response chain is shortened if one of the - participants along the chain has a cached response applicable to that - request. 
The following illustrates the resulting chain if B has a - cached copy of an earlier response from O (via C) for a request which - has not been cached by UA or A. - - - - - -Fielding, et. al. Standards Track [Page 12] - -RFC 2068 HTTP/1.1 January 1997 - - - request chain ----------> - UA -----v----- A -----v----- B - - - - - - C - - - - - - O - <--------- response chain - - Not all responses are usefully cachable, and some requests may - contain modifiers which place special requirements on cache behavior. - HTTP requirements for cache behavior and cachable responses are - defined in section 13. - - In fact, there are a wide variety of architectures and configurations - of caches and proxies currently being experimented with or deployed - across the World Wide Web; these systems include national hierarchies - of proxy caches to save transoceanic bandwidth, systems that - broadcast or multicast cache entries, organizations that distribute - subsets of cached data via CD-ROM, and so on. HTTP systems are used - in corporate intranets over high-bandwidth links, and for access via - PDAs with low-power radio links and intermittent connectivity. The - goal of HTTP/1.1 is to support the wide diversity of configurations - already deployed while introducing protocol constructs that meet the - needs of those who build web applications that require high - reliability and, failing that, at least reliable indications of - failure. - - HTTP communication usually takes place over TCP/IP connections. The - default port is TCP 80, but other ports can be used. This does not - preclude HTTP from being implemented on top of any other protocol on - the Internet, or on other networks. HTTP only presumes a reliable - transport; any protocol that provides such guarantees can be used; - the mapping of the HTTP/1.1 request and response structures onto the - transport data units of the protocol in question is outside the scope - of this specification. 
- - In HTTP/1.0, most implementations used a new connection for each - request/response exchange. In HTTP/1.1, a connection may be used for - one or more request/response exchanges, although connections may be - closed for a variety of reasons (see section 8.1). - -2 Notational Conventions and Generic Grammar - -2.1 Augmented BNF - - All of the mechanisms specified in this document are described in - both prose and an augmented Backus-Naur Form (BNF) similar to that - used by RFC 822 [9]. Implementers will need to be familiar with the - notation in order to understand this specification. The augmented BNF - includes the following constructs: - - - - - -Fielding, et. al. Standards Track [Page 13] - -RFC 2068 HTTP/1.1 January 1997 - - -name = definition - The name of a rule is simply the name itself (without any enclosing - "<" and ">") and is separated from its definition by the equal "=" - character. Whitespace is only significant in that indentation of - continuation lines is used to indicate a rule definition that spans - more than one line. Certain basic rules are in uppercase, such as - SP, LWS, HT, CRLF, DIGIT, ALPHA, etc. Angle brackets are used - within definitions whenever their presence will facilitate - discerning the use of rule names. - -"literal" - Quotation marks surround literal text. Unless stated otherwise, the - text is case-insensitive. - -rule1 | rule2 - Elements separated by a bar ("|") are alternatives, e.g., "yes | - no" will accept yes or no. - -(rule1 rule2) - Elements enclosed in parentheses are treated as a single element. - Thus, "(elem (foo | bar) elem)" allows the token sequences "elem - foo elem" and "elem bar elem". - -*rule - The character "*" preceding an element indicates repetition. The - full form is "*element" indicating at least and at most - occurrences of element. 
Default values are 0 and infinity so - that "*(element)" allows any number, including zero; "1*element" - requires at least one; and "1*2element" allows one or two. - -[rule] - Square brackets enclose optional elements; "[foo bar]" is - equivalent to "*1(foo bar)". - -N rule - Specific repetition: "(element)" is equivalent to - "*(element)"; that is, exactly occurrences of (element). - Thus 2DIGIT is a 2-digit number, and 3ALPHA is a string of three - alphabetic characters. - -#rule - A construct "#" is defined, similar to "*", for defining lists of - elements. The full form is "#element " indicating at least - and at most elements, each separated by one or more commas - (",") and optional linear whitespace (LWS). This makes the usual - form of lists very easy; a rule such as "( *LWS element *( *LWS "," - *LWS element )) " can be shown as "1#element". Wherever this - construct is used, null elements are allowed, but do not contribute - - - -Fielding, et. al. Standards Track [Page 14] - -RFC 2068 HTTP/1.1 January 1997 - - - to the count of elements present. That is, "(element), , (element) - " is permitted, but counts as only two elements. Therefore, where - at least one element is required, at least one non-null element - must be present. Default values are 0 and infinity so that - "#element" allows any number, including zero; "1#element" requires - at least one; and "1#2element" allows one or two. - -; comment - A semi-colon, set off some distance to the right of rule text, - starts a comment that continues to the end of line. This is a - simple way of including useful notes in parallel with the - specifications. - -implied *LWS - The grammar described by this specification is word-based. Except - where noted otherwise, linear whitespace (LWS) can be included - between any two adjacent words (token or quoted-string), and - between adjacent tokens and delimiters (tspecials), without - changing the interpretation of a field. 
At least one delimiter - (tspecials) must exist between any two tokens, since they would - otherwise be interpreted as a single token. - -2.2 Basic Rules - - The following rules are used throughout this specification to - describe basic parsing constructs. The US-ASCII coded character set - is defined by ANSI X3.4-1986 [21]. - - OCTET = - CHAR = - UPALPHA = - LOALPHA = - ALPHA = UPALPHA | LOALPHA - DIGIT = - CTL = - CR = - LF = - SP = - HT = - <"> = - - - - - - - - - - -Fielding, et. al. Standards Track [Page 15] - -RFC 2068 HTTP/1.1 January 1997 - - - HTTP/1.1 defines the sequence CR LF as the end-of-line marker for all - protocol elements except the entity-body (see appendix 19.3 for - tolerant applications). The end-of-line marker within an entity-body - is defined by its associated media type, as described in section 3.7. - - CRLF = CR LF - - HTTP/1.1 headers can be folded onto multiple lines if the - continuation line begins with a space or horizontal tab. All linear - white space, including folding, has the same semantics as SP. - - LWS = [CRLF] 1*( SP | HT ) - - The TEXT rule is only used for descriptive field contents and values - that are not intended to be interpreted by the message parser. Words - of *TEXT may contain characters from character sets other than ISO - 8859-1 [22] only when encoded according to the rules of RFC 1522 - [14]. - - TEXT = - - Hexadecimal numeric characters are used in several protocol elements. - - HEX = "A" | "B" | "C" | "D" | "E" | "F" - | "a" | "b" | "c" | "d" | "e" | "f" | DIGIT - - Many HTTP/1.1 header field values consist of words separated by LWS - or special characters. These special characters MUST be in a quoted - string to be used within a parameter value. - - token = 1* - - tspecials = "(" | ")" | "<" | ">" | "@" - | "," | ";" | ":" | "\" | <"> - | "/" | "[" | "]" | "?" | "=" - | "{" | "}" | SP | HT - - Comments can be included in some HTTP header fields by surrounding - the comment text with parentheses. 
Comments are only allowed in - fields containing "comment" as part of their field value definition. - In all other fields, parentheses are considered part of the field - value. - - comment = "(" *( ctext | comment ) ")" - ctext = - - - - - -Fielding, et. al. Standards Track [Page 16] - -RFC 2068 HTTP/1.1 January 1997 - - - A string of text is parsed as a single word if it is quoted using - double-quote marks. - - quoted-string = ( <"> *(qdtext) <"> ) - - qdtext = > - - The backslash character ("\") may be used as a single-character quoting - mechanism only within quoted-string and comment constructs. - - quoted-pair = "\" CHAR - -3 Protocol Parameters - -3.1 HTTP Version - - HTTP uses a "." numbering scheme to indicate versions - of the protocol. The protocol versioning policy is intended to allow - the sender to indicate the format of a message and its capacity for - understanding further HTTP communication, rather than the features - obtained via that communication. No change is made to the version - number for the addition of message components which do not affect - communication behavior or which only add to extensible field values. - The number is incremented when the changes made to the - protocol add features which do not change the general message parsing - algorithm, but which may add to the message semantics and imply - additional capabilities of the sender. The number is - incremented when the format of a message within the protocol is - changed. - - The version of an HTTP message is indicated by an HTTP-Version field - in the first line of the message. - - HTTP-Version = "HTTP" "/" 1*DIGIT "." 1*DIGIT - - Note that the major and minor numbers MUST be treated as separate - integers and that each may be incremented higher than a single digit. - Thus, HTTP/2.4 is a lower version than HTTP/2.13, which in turn is - lower than HTTP/12.3. Leading zeros MUST be ignored by recipients and - MUST NOT be sent. 
- - Applications sending Request or Response messages, as defined by this - specification, MUST include an HTTP-Version of "HTTP/1.1". Use of - this version number indicates that the sending application is at - least conditionally compliant with this specification. - - The HTTP version of an application is the highest HTTP version for - which the application is at least conditionally compliant. - - - -Fielding, et. al. Standards Track [Page 17] - -RFC 2068 HTTP/1.1 January 1997 - - - Proxy and gateway applications must be careful when forwarding - messages in protocol versions different from that of the application. - Since the protocol version indicates the protocol capability of the - sender, a proxy/gateway MUST never send a message with a version - indicator which is greater than its actual version; if a higher - version request is received, the proxy/gateway MUST either downgrade - the request version, respond with an error, or switch to tunnel - behavior. Requests with a version lower than that of the - proxy/gateway's version MAY be upgraded before being forwarded; the - proxy/gateway's response to that request MUST be in the same major - version as the request. - - Note: Converting between versions of HTTP may involve modification - of header fields required or forbidden by the versions involved. - -3.2 Uniform Resource Identifiers - - URIs have been known by many names: WWW addresses, Universal Document - Identifiers, Universal Resource Identifiers , and finally the - combination of Uniform Resource Locators (URL) and Names (URN). As - far as HTTP is concerned, Uniform Resource Identifiers are simply - formatted strings which identify--via name, location, or any other - characteristic--a resource. - -3.2.1 General Syntax - - URIs in HTTP can be represented in absolute form or relative to some - known base URI, depending upon the context of their use. 
The two - forms are differentiated by the fact that absolute URIs always begin - with a scheme name followed by a colon. - - URI = ( absoluteURI | relativeURI ) [ "#" fragment ] - - absoluteURI = scheme ":" *( uchar | reserved ) - - relativeURI = net_path | abs_path | rel_path - - net_path = "//" net_loc [ abs_path ] - abs_path = "/" rel_path - rel_path = [ path ] [ ";" params ] [ "?" query ] - - path = fsegment *( "/" segment ) - fsegment = 1*pchar - segment = *pchar - - params = param *( ";" param ) - param = *( pchar | "/" ) - - - - -Fielding, et. al. Standards Track [Page 18] - -RFC 2068 HTTP/1.1 January 1997 - - - scheme = 1*( ALPHA | DIGIT | "+" | "-" | "." ) - net_loc = *( pchar | ";" | "?" ) - - query = *( uchar | reserved ) - fragment = *( uchar | reserved ) - - pchar = uchar | ":" | "@" | "&" | "=" | "+" - uchar = unreserved | escape - unreserved = ALPHA | DIGIT | safe | extra | national - - escape = "%" HEX HEX - reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" - extra = "!" | "*" | "'" | "(" | ")" | "," - safe = "$" | "-" | "_" | "." - unsafe = CTL | SP | <"> | "#" | "%" | "<" | ">" - national = - - For definitive information on URL syntax and semantics, see RFC 1738 - [4] and RFC 1808 [11]. The BNF above includes national characters not - allowed in valid URLs as specified by RFC 1738, since HTTP servers - are not restricted in the set of unreserved characters allowed to - represent the rel_path part of addresses, and HTTP proxies may - receive requests for URIs not defined by RFC 1738. - - The HTTP protocol does not place any a priori limit on the length of - a URI. Servers MUST be able to handle the URI of any resource they - serve, and SHOULD be able to handle URIs of unbounded length if they - provide GET-based forms that could generate such URIs. A server - SHOULD return 414 (Request-URI Too Long) status if a URI is longer - than the server can handle (see section 10.4.15). 
- - Note: Servers should be cautious about depending on URI lengths - above 255 bytes, because some older client or proxy implementations - may not properly support these lengths. - -3.2.2 http URL - - The "http" scheme is used to locate network resources via the HTTP - protocol. This section defines the scheme-specific syntax and - semantics for http URLs. - - - - - - - - - - -Fielding, et. al. Standards Track [Page 19] - -RFC 2068 HTTP/1.1 January 1997 - - - http_URL = "http:" "//" host [ ":" port ] [ abs_path ] - - host = - - port = *DIGIT - - If the port is empty or not given, port 80 is assumed. The semantics - are that the identified resource is located at the server listening - for TCP connections on that port of that host, and the Request-URI - for the resource is abs_path. The use of IP addresses in URL's SHOULD - be avoided whenever possible (see RFC 1900 [24]). If the abs_path is - not present in the URL, it MUST be given as "/" when used as a - Request-URI for a resource (section 5.1.2). - -3.2.3 URI Comparison - - When comparing two URIs to decide if they match or not, a client - SHOULD use a case-sensitive octet-by-octet comparison of the entire - URIs, with these exceptions: - - o A port that is empty or not given is equivalent to the default - port for that URI; - - o Comparisons of host names MUST be case-insensitive; - - o Comparisons of scheme names MUST be case-insensitive; - - o An empty abs_path is equivalent to an abs_path of "/". - - Characters other than those in the "reserved" and "unsafe" sets (see - section 3.2) are equivalent to their ""%" HEX HEX" encodings. - - For example, the following three URIs are equivalent: - - http://abc.com:80/~smith/home.html - http://ABC.com/%7Esmith/home.html - http://ABC.com:/%7esmith/home.html - - - - - - - - - - - - -Fielding, et. al. 
Standards Track [Page 20] - -RFC 2068 HTTP/1.1 January 1997 - - -3.3 Date/Time Formats - -3.3.1 Full Date - - HTTP applications have historically allowed three different formats - for the representation of date/time stamps: - - Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 - Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 - Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - - The first format is preferred as an Internet standard and represents - a fixed-length subset of that defined by RFC 1123 (an update to RFC - 822). The second format is in common use, but is based on the - obsolete RFC 850 [12] date format and lacks a four-digit year. - HTTP/1.1 clients and servers that parse the date value MUST accept - all three formats (for compatibility with HTTP/1.0), though they MUST - only generate the RFC 1123 format for representing HTTP-date values - in header fields. - - Note: Recipients of date values are encouraged to be robust in - accepting date values that may have been sent by non-HTTP - applications, as is sometimes the case when retrieving or posting - messages via proxies/gateways to SMTP or NNTP. - - All HTTP date/time stamps MUST be represented in Greenwich Mean Time - (GMT), without exception. This is indicated in the first two formats - by the inclusion of "GMT" as the three-letter abbreviation for time - zone, and MUST be assumed when reading the asctime format. 
- - HTTP-date = rfc1123-date | rfc850-date | asctime-date - - rfc1123-date = wkday "," SP date1 SP time SP "GMT" - rfc850-date = weekday "," SP date2 SP time SP "GMT" - asctime-date = wkday SP date3 SP time SP 4DIGIT - - date1 = 2DIGIT SP month SP 4DIGIT - ; day month year (e.g., 02 Jun 1982) - date2 = 2DIGIT "-" month "-" 2DIGIT - ; day-month-year (e.g., 02-Jun-82) - date3 = month SP ( 2DIGIT | ( SP 1DIGIT )) - ; month day (e.g., Jun 2) - - time = 2DIGIT ":" 2DIGIT ":" 2DIGIT - ; 00:00:00 - 23:59:59 - - wkday = "Mon" | "Tue" | "Wed" - | "Thu" | "Fri" | "Sat" | "Sun" - - - -Fielding, et. al. Standards Track [Page 21] - -RFC 2068 HTTP/1.1 January 1997 - - - weekday = "Monday" | "Tuesday" | "Wednesday" - | "Thursday" | "Friday" | "Saturday" | "Sunday" - - month = "Jan" | "Feb" | "Mar" | "Apr" - | "May" | "Jun" | "Jul" | "Aug" - | "Sep" | "Oct" | "Nov" | "Dec" - - Note: HTTP requirements for the date/time stamp format apply only - to their usage within the protocol stream. Clients and servers are - not required to use these formats for user presentation, request - logging, etc. - -3.3.2 Delta Seconds - - Some HTTP header fields allow a time value to be specified as an - integer number of seconds, represented in decimal, after the time - that the message was received. - - delta-seconds = 1*DIGIT - -3.4 Character Sets - - HTTP uses the same definition of the term "character set" as that - described for MIME: - - The term "character set" is used in this document to refer to a - method used with one or more tables to convert a sequence of octets - into a sequence of characters. Note that unconditional conversion - in the other direction is not required, in that not all characters - may be available in a given character set and a character set may - provide more than one sequence of octets to represent a particular - character. 
This definition is intended to allow various kinds of - character encodings, from simple single-table mappings such as US- - ASCII to complex table switching methods such as those that use ISO - 2022's techniques. However, the definition associated with a MIME - character set name MUST fully specify the mapping to be performed - from octets to characters. In particular, use of external profiling - information to determine the exact mapping is not permitted. - - Note: This use of the term "character set" is more commonly - referred to as a "character encoding." However, since HTTP and MIME - share the same registry, it is important that the terminology also - be shared. - - - - - - - - -Fielding, et. al. Standards Track [Page 22] - -RFC 2068 HTTP/1.1 January 1997 - - - HTTP character sets are identified by case-insensitive tokens. The - complete set of tokens is defined by the IANA Character Set registry - [19]. - - charset = token - - Although HTTP allows an arbitrary token to be used as a charset - value, any token that has a predefined value within the IANA - Character Set registry MUST represent the character set defined by - that registry. Applications SHOULD limit their use of character sets - to those defined by the IANA registry. - -3.5 Content Codings - - Content coding values indicate an encoding transformation that has - been or can be applied to an entity. Content codings are primarily - used to allow a document to be compressed or otherwise usefully - transformed without losing the identity of its underlying media type - and without loss of information. Frequently, the entity is stored in - coded form, transmitted directly, and only decoded by the recipient. - - content-coding = token - - All content-coding values are case-insensitive. HTTP/1.1 uses - content-coding values in the Accept-Encoding (section 14.3) and - Content-Encoding (section 14.12) header fields. 
Although the value - describes the content-coding, what is more important is that it - indicates what decoding mechanism will be required to remove the - encoding. - - The Internet Assigned Numbers Authority (IANA) acts as a registry for - content-coding value tokens. Initially, the registry contains the - following tokens: - - gzip An encoding format produced by the file compression program "gzip" - (GNU zip) as described in RFC 1952 [25]. This format is a Lempel- - Ziv coding (LZ77) with a 32 bit CRC. - - compress - The encoding format produced by the common UNIX file compression - program "compress". This format is an adaptive Lempel-Ziv-Welch - coding (LZW). - - - - - - - - - -Fielding, et. al. Standards Track [Page 23] - -RFC 2068 HTTP/1.1 January 1997 - - - Note: Use of program names for the identification of encoding - formats is not desirable and should be discouraged for future - encodings. Their use here is representative of historical practice, - not good design. For compatibility with previous implementations of - HTTP, applications should consider "x-gzip" and "x-compress" to be - equivalent to "gzip" and "compress" respectively. - - deflate The "zlib" format defined in RFC 1950[31] in combination with - the "deflate" compression mechanism described in RFC 1951[29]. - - New content-coding value tokens should be registered; to allow - interoperability between clients and servers, specifications of the - content coding algorithms needed to implement a new value should be - publicly available and adequate for independent implementation, and - conform to the purpose of content coding defined in this section. - -3.6 Transfer Codings - - Transfer coding values are used to indicate an encoding - transformation that has been, can be, or may need to be applied to an - entity-body in order to ensure "safe transport" through the network. - This differs from a content coding in that the transfer coding is a - property of the message, not of the original entity. 
- - transfer-coding = "chunked" | transfer-extension - - transfer-extension = token - - All transfer-coding values are case-insensitive. HTTP/1.1 uses - transfer coding values in the Transfer-Encoding header field (section - 14.40). - - Transfer codings are analogous to the Content-Transfer-Encoding - values of MIME , which were designed to enable safe transport of - binary data over a 7-bit transport service. However, safe transport - has a different focus for an 8bit-clean transfer protocol. In HTTP, - the only unsafe characteristic of message-bodies is the difficulty in - determining the exact body length (section 7.2.2), or the desire to - encrypt data over a shared transport. - - The chunked encoding modifies the body of a message in order to - transfer it as a series of chunks, each with its own size indicator, - followed by an optional footer containing entity-header fields. This - allows dynamically-produced content to be transferred along with the - information necessary for the recipient to verify that it has - received the full message. - - - - - -Fielding, et. al. Standards Track [Page 24] - -RFC 2068 HTTP/1.1 January 1997 - - - Chunked-Body = *chunk - "0" CRLF - footer - CRLF - - chunk = chunk-size [ chunk-ext ] CRLF - chunk-data CRLF - - hex-no-zero = - - chunk-size = hex-no-zero *HEX - chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-value ] ) - chunk-ext-name = token - chunk-ext-val = token | quoted-string - chunk-data = chunk-size(OCTET) - - footer = *entity-header - - The chunked encoding is ended by a zero-sized chunk followed by the - footer, which is terminated by an empty line. The purpose of the - footer is to provide an efficient way to supply information about an - entity that is generated dynamically; applications MUST NOT send - header fields in the footer which are not explicitly defined as being - appropriate for the footer, such as Content-MD5 or future extensions - to HTTP for digital signatures or other facilities. 
- - An example process for decoding a Chunked-Body is presented in - appendix 19.4.6. - - All HTTP/1.1 applications MUST be able to receive and decode the - "chunked" transfer coding, and MUST ignore transfer coding extensions - they do not understand. A server which receives an entity-body with a - transfer-coding it does not understand SHOULD return 501 - (Unimplemented), and close the connection. A server MUST NOT send - transfer-codings to an HTTP/1.0 client. - -3.7 Media Types - - HTTP uses Internet Media Types in the Content-Type (section 14.18) - and Accept (section 14.1) header fields in order to provide open and - extensible data typing and type negotiation. - - media-type = type "/" subtype *( ";" parameter ) - type = token - subtype = token - - Parameters may follow the type/subtype in the form of attribute/value - pairs. - - - -Fielding, et. al. Standards Track [Page 25] - -RFC 2068 HTTP/1.1 January 1997 - - - parameter = attribute "=" value - attribute = token - value = token | quoted-string - - The type, subtype, and parameter attribute names are case- - insensitive. Parameter values may or may not be case-sensitive, - depending on the semantics of the parameter name. Linear white space - (LWS) MUST NOT be used between the type and subtype, nor between an - attribute and its value. User agents that recognize the media-type - MUST process (or arrange to be processed by any external applications - used to process that type/subtype by the user agent) the parameters - for that MIME type as described by that type/subtype definition to - the and inform the user of any problems discovered. - - Note: some older HTTP applications do not recognize media type - parameters. When sending data to older HTTP applications, - implementations should only use media type parameters when they are - required by that type/subtype definition. - - Media-type values are registered with the Internet Assigned Number - Authority (IANA). 
The media type registration process is outlined in - RFC 2048 [17]. Use of non-registered media types is discouraged. - -3.7.1 Canonicalization and Text Defaults - - Internet media types are registered with a canonical form. In - general, an entity-body transferred via HTTP messages MUST be - represented in the appropriate canonical form prior to its - transmission; the exception is "text" types, as defined in the next - paragraph. - - When in canonical form, media subtypes of the "text" type use CRLF as - the text line break. HTTP relaxes this requirement and allows the - transport of text media with plain CR or LF alone representing a line - break when it is done consistently for an entire entity-body. HTTP - applications MUST accept CRLF, bare CR, and bare LF as being - representative of a line break in text media received via HTTP. In - addition, if the text is represented in a character set that does not - use octets 13 and 10 for CR and LF respectively, as is the case for - some multi-byte character sets, HTTP allows the use of whatever octet - sequences are defined by that character set to represent the - equivalent of CR and LF for line breaks. This flexibility regarding - line breaks applies only to text media in the entity-body; a bare CR - or LF MUST NOT be substituted for CRLF within any of the HTTP control - structures (such as header fields and multipart boundaries). - - If an entity-body is encoded with a Content-Encoding, the underlying - data MUST be in a form defined above prior to being encoded. - - - -Fielding, et. al. Standards Track [Page 26] - -RFC 2068 HTTP/1.1 January 1997 - - - The "charset" parameter is used with some media types to define the - character set (section 3.4) of the data. When no explicit charset - parameter is provided by the sender, media subtypes of the "text" - type are defined to have a default charset value of "ISO-8859-1" when - received via HTTP. 
Data in character sets other than "ISO-8859-1" or - its subsets MUST be labeled with an appropriate charset value. - - Some HTTP/1.0 software has interpreted a Content-Type header without - charset parameter incorrectly to mean "recipient should guess." - Senders wishing to defeat this behavior MAY include a charset - parameter even when the charset is ISO-8859-1 and SHOULD do so when - it is known that it will not confuse the recipient. - - Unfortunately, some older HTTP/1.0 clients did not deal properly with - an explicit charset parameter. HTTP/1.1 recipients MUST respect the - charset label provided by the sender; and those user agents that have - a provision to "guess" a charset MUST use the charset from the - content-type field if they support that charset, rather than the - recipient's preference, when initially displaying a document. - -3.7.2 Multipart Types - - MIME provides for a number of "multipart" types -- encapsulations of - one or more entities within a single message-body. All multipart - types share a common syntax, as defined in MIME [7], and MUST - include a boundary parameter as part of the media type value. The - message body is itself a protocol element and MUST therefore use only - CRLF to represent line breaks between body-parts. Unlike in MIME, the - epilogue of any multipart message MUST be empty; HTTP applications - MUST NOT transmit the epilogue (even if the original multipart - contains an epilogue). - - In HTTP, multipart body-parts MAY contain header fields which are - significant to the meaning of that part. A Content-Location header - field (section 14.15) SHOULD be included in the body-part of each - enclosed entity that can be identified by a URL. - - In general, an HTTP user agent SHOULD follow the same or similar - behavior as a MIME user agent would upon receipt of a multipart type. - If an application receives an unrecognized multipart subtype, the - application MUST treat it as being equivalent to "multipart/mixed". 
- - Note: The "multipart/form-data" type has been specifically defined - for carrying form data suitable for processing via the POST request - method, as described in RFC 1867 [15]. - - - - - - -Fielding, et. al. Standards Track [Page 27] - -RFC 2068 HTTP/1.1 January 1997 - - -3.8 Product Tokens - - Product tokens are used to allow communicating applications to - identify themselves by software name and version. Most fields using - product tokens also allow sub-products which form a significant part - of the application to be listed, separated by whitespace. By - convention, the products are listed in order of their significance - for identifying the application. - - product = token ["/" product-version] - product-version = token - - Examples: - - User-Agent: CERN-LineMode/2.15 libwww/2.17b3 - Server: Apache/0.8.4 - - Product tokens should be short and to the point -- use of them for - advertising or other non-essential information is explicitly - forbidden. Although any token character may appear in a product- - version, this token SHOULD only be used for a version identifier - (i.e., successive versions of the same product SHOULD only differ in - the product-version portion of the product value). - -3.9 Quality Values - - HTTP content negotiation (section 12) uses short "floating point" - numbers to indicate the relative importance ("weight") of various - negotiable parameters. A weight is normalized to a real number in the - range 0 through 1, where 0 is the minimum and 1 the maximum value. - HTTP/1.1 applications MUST NOT generate more than three digits after - the decimal point. User configuration of these values SHOULD also be - limited in this fashion. - - qvalue = ( "0" [ "." 0*3DIGIT ] ) - | ( "1" [ "." 0*3("0") ] ) - - "Quality values" is a misnomer, since these values merely represent - relative degradation in desired quality. 
- -3.10 Language Tags - - A language tag identifies a natural language spoken, written, or - otherwise conveyed by human beings for communication of information - to other human beings. Computer languages are explicitly excluded. - HTTP uses language tags within the Accept-Language and Content- - Language fields. - - - - -Fielding, et. al. Standards Track [Page 28] - -RFC 2068 HTTP/1.1 January 1997 - - - The syntax and registry of HTTP language tags is the same as that - defined by RFC 1766 [1]. In summary, a language tag is composed of 1 - or more parts: A primary language tag and a possibly empty series of - subtags: - - language-tag = primary-tag *( "-" subtag ) - - primary-tag = 1*8ALPHA - subtag = 1*8ALPHA - - Whitespace is not allowed within the tag and all tags are case- - insensitive. The name space of language tags is administered by the - IANA. Example tags include: - - en, en-US, en-cockney, i-cherokee, x-pig-latin - - where any two-letter primary-tag is an ISO 639 language abbreviation - and any two-letter initial subtag is an ISO 3166 country code. (The - last three tags above are not registered tags; all but the last are - examples of tags which could be registered in future.) - -3.11 Entity Tags - - Entity tags are used for comparing two or more entities from the same - requested resource. HTTP/1.1 uses entity tags in the ETag (section - 14.20), If-Match (section 14.25), If-None-Match (section 14.26), and - If-Range (section 14.27) header fields. The definition of how they - are used and compared as cache validators is in section 13.3.3. An - entity tag consists of an opaque quoted string, possibly prefixed by - a weakness indicator. - - entity-tag = [ weak ] opaque-tag - - weak = "W/" - opaque-tag = quoted-string - - A "strong entity tag" may be shared by two entities of a resource - only if they are equivalent by octet equality. 
- - A "weak entity tag," indicated by the "W/" prefix, may be shared by - two entities of a resource only if the entities are equivalent and - could be substituted for each other with no significant change in - semantics. A weak entity tag can only be used for weak comparison. - - An entity tag MUST be unique across all versions of all entities - associated with a particular resource. A given entity tag value may - be used for entities obtained by requests on different URIs without - implying anything about the equivalence of those entities. - - - -Fielding, et. al. Standards Track [Page 29] - -RFC 2068 HTTP/1.1 January 1997 - - -3.12 Range Units - - HTTP/1.1 allows a client to request that only part (a range of) the - response entity be included within the response. HTTP/1.1 uses range - units in the Range (section 14.36) and Content-Range (section 14.17) - header fields. An entity may be broken down into subranges according - to various structural units. - - range-unit = bytes-unit | other-range-unit - - bytes-unit = "bytes" - other-range-unit = token - -The only range unit defined by HTTP/1.1 is "bytes". HTTP/1.1 - implementations may ignore ranges specified using other units. - HTTP/1.1 has been designed to allow implementations of applications - that do not depend on knowledge of ranges. - -4 HTTP Message - -4.1 Message Types - - HTTP messages consist of requests from client to server and responses - from server to client. - - HTTP-message = Request | Response ; HTTP/1.1 messages - - Request (section 5) and Response (section 6) messages use the generic - message format of RFC 822 [9] for transferring entities (the payload - of the message). Both types of message consist of a start-line, one - or more header fields (also known as "headers"), an empty line (i.e., - a line with nothing preceding the CRLF) indicating the end of the - header fields, and an optional message-body. 
- - generic-message = start-line - *message-header - CRLF - [ message-body ] - - start-line = Request-Line | Status-Line - - In the interest of robustness, servers SHOULD ignore any empty - line(s) received where a Request-Line is expected. In other words, if - the server is reading the protocol stream at the beginning of a - message and receives a CRLF first, it should ignore the CRLF. - - - - - - -Fielding, et. al. Standards Track [Page 30] - -RFC 2068 HTTP/1.1 January 1997 - - - Note: certain buggy HTTP/1.0 client implementations generate an - extra CRLF's after a POST request. To restate what is explicitly - forbidden by the BNF, an HTTP/1.1 client must not preface or follow - a request with an extra CRLF. - -4.2 Message Headers - - HTTP header fields, which include general-header (section 4.5), - request-header (section 5.3), response-header (section 6.2), and - entity-header (section 7.1) fields, follow the same generic format as - that given in Section 3.1 of RFC 822 [9]. Each header field consists - of a name followed by a colon (":") and the field value. Field names - are case-insensitive. The field value may be preceded by any amount - of LWS, though a single SP is preferred. Header fields can be - extended over multiple lines by preceding each extra line with at - least one SP or HT. Applications SHOULD follow "common form" when - generating HTTP constructs, since there might exist some - implementations that fail to accept anything beyond the common forms. - - message-header = field-name ":" [ field-value ] CRLF - - field-name = token - field-value = *( field-content | LWS ) - - field-content = - - The order in which header fields with differing field names are - received is not significant. However, it is "good practice" to send - general-header fields first, followed by request-header or response- - header fields, and ending with the entity-header fields. 
- - Multiple message-header fields with the same field-name may be - present in a message if and only if the entire field-value for that - header field is defined as a comma-separated list [i.e., #(values)]. - It MUST be possible to combine the multiple header fields into one - "field-name: field-value" pair, without changing the semantics of the - message, by appending each subsequent field-value to the first, each - separated by a comma. The order in which header fields with the same - field-name are received is therefore significant to the - interpretation of the combined field value, and thus a proxy MUST NOT - change the order of these field values when a message is forwarded. - - - - - - - - -Fielding, et. al. Standards Track [Page 31] - -RFC 2068 HTTP/1.1 January 1997 - - -4.3 Message Body - - The message-body (if any) of an HTTP message is used to carry the - entity-body associated with the request or response. The message-body - differs from the entity-body only when a transfer coding has been - applied, as indicated by the Transfer-Encoding header field (section - 14.40). - - message-body = entity-body - | - - Transfer-Encoding MUST be used to indicate any transfer codings - applied by an application to ensure safe and proper transfer of the - message. Transfer-Encoding is a property of the message, not of the - entity, and thus can be added or removed by any application along the - request/response chain. - - The rules for when a message-body is allowed in a message differ for - requests and responses. - - The presence of a message-body in a request is signaled by the - inclusion of a Content-Length or Transfer-Encoding header field in - the request's message-headers. A message-body MAY be included in a - request only when the request method (section 5.1.1) allows an - entity-body. - - For response messages, whether or not a message-body is included with - a message is dependent on both the request method and the response - status code (section 6.1.1). 
All responses to the HEAD request method - MUST NOT include a message-body, even though the presence of entity- - header fields might lead one to believe they do. All 1xx - (informational), 204 (no content), and 304 (not modified) responses - MUST NOT include a message-body. All other responses do include a - message-body, although it may be of zero length. - -4.4 Message Length - - When a message-body is included with a message, the length of that - body is determined by one of the following (in order of precedence): - - 1. Any response message which MUST NOT include a message-body - (such as the 1xx, 204, and 304 responses and any response to a HEAD - request) is always terminated by the first empty line after the - header fields, regardless of the entity-header fields present in the - message. - - 2. If a Transfer-Encoding header field (section 14.40) is present and - indicates that the "chunked" transfer coding has been applied, then - - - -Fielding, et. al. Standards Track [Page 32] - -RFC 2068 HTTP/1.1 January 1997 - - - the length is defined by the chunked encoding (section 3.6). - - 3. If a Content-Length header field (section 14.14) is present, its - value in bytes represents the length of the message-body. - - 4. If the message uses the media type "multipart/byteranges", which is - self-delimiting, then that defines the length. This media type MUST - NOT be used unless the sender knows that the recipient can parse it; - the presence in a request of a Range header with multiple byte-range - specifiers implies that the client can parse multipart/byteranges - responses. - - 5. By the server closing the connection. (Closing the connection - cannot be used to indicate the end of a request body, since that - would leave no possibility for the server to send back a response.) 
- - For compatibility with HTTP/1.0 applications, HTTP/1.1 requests - containing a message-body MUST include a valid Content-Length header - field unless the server is known to be HTTP/1.1 compliant. If a - request contains a message-body and a Content-Length is not given, - the server SHOULD respond with 400 (bad request) if it cannot - determine the length of the message, or with 411 (length required) if - it wishes to insist on receiving a valid Content-Length. - - All HTTP/1.1 applications that receive entities MUST accept the - "chunked" transfer coding (section 3.6), thus allowing this mechanism - to be used for messages when the message length cannot be determined - in advance. - - Messages MUST NOT include both a Content-Length header field and the - "chunked" transfer coding. If both are received, the Content-Length - MUST be ignored. - - When a Content-Length is given in a message where a message-body is - allowed, its field value MUST exactly match the number of OCTETs in - the message-body. HTTP/1.1 user agents MUST notify the user when an - invalid length is received and detected. - - - - - - - - - - - - - - -Fielding, et. al. Standards Track [Page 33] - -RFC 2068 HTTP/1.1 January 1997 - - -4.5 General Header Fields - - There are a few header fields which have general applicability for - both request and response messages, but which do not apply to the - entity being transferred. These header fields apply only to the - message being transmitted. - - general-header = Cache-Control ; Section 14.9 - | Connection ; Section 14.10 - | Date ; Section 14.19 - | Pragma ; Section 14.32 - | Transfer-Encoding ; Section 14.40 - | Upgrade ; Section 14.41 - | Via ; Section 14.44 - - General-header field names can be extended reliably only in - combination with a change in the protocol version. 
However, new or - experimental header fields may be given the semantics of general - header fields if all parties in the communication recognize them to - be general-header fields. Unrecognized header fields are treated as - entity-header fields. - -5 Request - - A request message from a client to a server includes, within the - first line of that message, the method to be applied to the resource, - the identifier of the resource, and the protocol version in use. - - Request = Request-Line ; Section 5.1 - *( general-header ; Section 4.5 - | request-header ; Section 5.3 - | entity-header ) ; Section 7.1 - CRLF - [ message-body ] ; Section 7.2 - -5.1 Request-Line - - The Request-Line begins with a method token, followed by the - Request-URI and the protocol version, and ending with CRLF. The - elements are separated by SP characters. No CR or LF are allowed - except in the final CRLF sequence. - - Request-Line = Method SP Request-URI SP HTTP-Version CRLF - - - - - - - - -Fielding, et. al. Standards Track [Page 34] - -RFC 2068 HTTP/1.1 January 1997 - - -5.1.1 Method - - The Method token indicates the method to be performed on the resource - identified by the Request-URI. The method is case-sensitive. - - Method = "OPTIONS" ; Section 9.2 - | "GET" ; Section 9.3 - | "HEAD" ; Section 9.4 - | "POST" ; Section 9.5 - | "PUT" ; Section 9.6 - | "DELETE" ; Section 9.7 - | "TRACE" ; Section 9.8 - | extension-method - - extension-method = token - - The list of methods allowed by a resource can be specified in an - Allow header field (section 14.7). The return code of the response - always notifies the client whether a method is currently allowed on a - resource, since the set of allowed methods can change dynamically. - Servers SHOULD return the status code 405 (Method Not Allowed) if the - method is known by the server but not allowed for the requested - resource, and 501 (Not Implemented) if the method is unrecognized or - not implemented by the server. 
The list of methods known by a server - can be listed in a Public response-header field (section 14.35). - - The methods GET and HEAD MUST be supported by all general-purpose - servers. All other methods are optional; however, if the above - methods are implemented, they MUST be implemented with the same - semantics as those specified in section 9. - -5.1.2 Request-URI - - The Request-URI is a Uniform Resource Identifier (section 3.2) and - identifies the resource upon which to apply the request. - - Request-URI = "*" | absoluteURI | abs_path - - The three options for Request-URI are dependent on the nature of the - request. The asterisk "*" means that the request does not apply to a - particular resource, but to the server itself, and is only allowed - when the method used does not necessarily apply to a resource. One - example would be - - OPTIONS * HTTP/1.1 - - The absoluteURI form is required when the request is being made to a - proxy. The proxy is requested to forward the request or service it - - - -Fielding, et. al. Standards Track [Page 35] - -RFC 2068 HTTP/1.1 January 1997 - - - from a valid cache, and return the response. Note that the proxy MAY - forward the request on to another proxy or directly to the server - specified by the absoluteURI. In order to avoid request loops, a - proxy MUST be able to recognize all of its server names, including - any aliases, local variations, and the numeric IP address. An example - Request-Line would be: - - GET http://www.w3.org/pub/WWW/TheProject.html HTTP/1.1 - - To allow for transition to absoluteURIs in all requests in future - versions of HTTP, all HTTP/1.1 servers MUST accept the absoluteURI - form in requests, even though HTTP/1.1 clients will only generate - them in requests to proxies. - - The most common form of Request-URI is that used to identify a - resource on an origin server or gateway. 
In this case the absolute - path of the URI MUST be transmitted (see section 3.2.1, abs_path) as - the Request-URI, and the network location of the URI (net_loc) MUST - be transmitted in a Host header field. For example, a client wishing - to retrieve the resource above directly from the origin server would - create a TCP connection to port 80 of the host "www.w3.org" and send - the lines: - - GET /pub/WWW/TheProject.html HTTP/1.1 - Host: www.w3.org - - followed by the remainder of the Request. Note that the absolute path - cannot be empty; if none is present in the original URI, it MUST be - given as "/" (the server root). - - If a proxy receives a request without any path in the Request-URI and - the method specified is capable of supporting the asterisk form of - request, then the last proxy on the request chain MUST forward the - request with "*" as the final Request-URI. For example, the request - - OPTIONS http://www.ics.uci.edu:8001 HTTP/1.1 - - would be forwarded by the proxy as - - OPTIONS * HTTP/1.1 - Host: www.ics.uci.edu:8001 - - after connecting to port 8001 of host "www.ics.uci.edu". - - The Request-URI is transmitted in the format specified in section - 3.2.1. The origin server MUST decode the Request-URI in order to - properly interpret the request. Servers SHOULD respond to invalid - Request-URIs with an appropriate status code. - - - -Fielding, et. al. Standards Track [Page 36] - -RFC 2068 HTTP/1.1 January 1997 - - - In requests that they forward, proxies MUST NOT rewrite the - "abs_path" part of a Request-URI in any way except as noted above to - replace a null abs_path with "*", no matter what the proxy does in - its internal implementation. - - Note: The "no rewrite" rule prevents the proxy from changing the - meaning of the request when the origin server is improperly using a - non-reserved URL character for a reserved purpose. Implementers - should be aware that some pre-HTTP/1.1 proxies have been known to - rewrite the Request-URI. 
- -5.2 The Resource Identified by a Request - - HTTP/1.1 origin servers SHOULD be aware that the exact resource - identified by an Internet request is determined by examining both the - Request-URI and the Host header field. - - An origin server that does not allow resources to differ by the - requested host MAY ignore the Host header field value. (But see - section 19.5.1 for other requirements on Host support in HTTP/1.1.) - - An origin server that does differentiate resources based on the host - requested (sometimes referred to as virtual hosts or vanity - hostnames) MUST use the following rules for determining the requested - resource on an HTTP/1.1 request: - - 1. If Request-URI is an absoluteURI, the host is part of the - Request-URI. Any Host header field value in the request MUST be - ignored. - - 2. If the Request-URI is not an absoluteURI, and the request - includes a Host header field, the host is determined by the Host - header field value. - - 3. If the host as determined by rule 1 or 2 is not a valid host on - the server, the response MUST be a 400 (Bad Request) error - message. - - Recipients of an HTTP/1.0 request that lacks a Host header field MAY - attempt to use heuristics (e.g., examination of the URI path for - something unique to a particular host) in order to determine what - exact resource is being requested. - -5.3 Request Header Fields - - The request-header fields allow the client to pass additional - information about the request, and about the client itself, to the - server. These fields act as request modifiers, with semantics - - - -Fielding, et. al. Standards Track [Page 37] - -RFC 2068 HTTP/1.1 January 1997 - - - equivalent to the parameters on a programming language method - invocation. 
- - request-header = Accept ; Section 14.1 - | Accept-Charset ; Section 14.2 - | Accept-Encoding ; Section 14.3 - | Accept-Language ; Section 14.4 - | Authorization ; Section 14.8 - | From ; Section 14.22 - | Host ; Section 14.23 - | If-Modified-Since ; Section 14.24 - | If-Match ; Section 14.25 - | If-None-Match ; Section 14.26 - | If-Range ; Section 14.27 - | If-Unmodified-Since ; Section 14.28 - | Max-Forwards ; Section 14.31 - | Proxy-Authorization ; Section 14.34 - | Range ; Section 14.36 - | Referer ; Section 14.37 - | User-Agent ; Section 14.42 - - Request-header field names can be extended reliably only in - combination with a change in the protocol version. However, new or - experimental header fields MAY be given the semantics of request- - header fields if all parties in the communication recognize them to - be request-header fields. Unrecognized header fields are treated as - entity-header fields. - -6 Response - - After receiving and interpreting a request message, a server responds - with an HTTP response message. - - Response = Status-Line ; Section 6.1 - *( general-header ; Section 4.5 - | response-header ; Section 6.2 - | entity-header ) ; Section 7.1 - CRLF - [ message-body ] ; Section 7.2 - -6.1 Status-Line - - The first line of a Response message is the Status-Line, consisting - of the protocol version followed by a numeric status code and its - associated textual phrase, with each element separated by SP - characters. No CR or LF is allowed except in the final CRLF - sequence. - - - - -Fielding, et. al. Standards Track [Page 38] - -RFC 2068 HTTP/1.1 January 1997 - - - Status-Line = HTTP-Version SP Status-Code SP Reason-Phrase CRLF - -6.1.1 Status Code and Reason Phrase - - The Status-Code element is a 3-digit integer result code of the - attempt to understand and satisfy the request. These codes are fully - defined in section 10. The Reason-Phrase is intended to give a short - textual description of the Status-Code. 
The Status-Code is intended - for use by automata and the Reason-Phrase is intended for the human - user. The client is not required to examine or display the Reason- - Phrase. - - The first digit of the Status-Code defines the class of response. The - last two digits do not have any categorization role. There are 5 - values for the first digit: - - o 1xx: Informational - Request received, continuing process - - o 2xx: Success - The action was successfully received, understood, - and accepted - - o 3xx: Redirection - Further action must be taken in order to - complete the request - - o 4xx: Client Error - The request contains bad syntax or cannot be - fulfilled - - o 5xx: Server Error - The server failed to fulfill an apparently - valid request - - The individual values of the numeric status codes defined for - HTTP/1.1, and an example set of corresponding Reason-Phrase's, are - presented below. The reason phrases listed here are only recommended - -- they may be replaced by local equivalents without affecting the - protocol. - - Status-Code = "100" ; Continue - | "101" ; Switching Protocols - | "200" ; OK - | "201" ; Created - | "202" ; Accepted - | "203" ; Non-Authoritative Information - | "204" ; No Content - | "205" ; Reset Content - | "206" ; Partial Content - | "300" ; Multiple Choices - | "301" ; Moved Permanently - | "302" ; Moved Temporarily - - - -Fielding, et. al. 
Standards Track [Page 39] - -RFC 2068 HTTP/1.1 January 1997 - - - | "303" ; See Other - | "304" ; Not Modified - | "305" ; Use Proxy - | "400" ; Bad Request - | "401" ; Unauthorized - | "402" ; Payment Required - | "403" ; Forbidden - | "404" ; Not Found - | "405" ; Method Not Allowed - | "406" ; Not Acceptable - | "407" ; Proxy Authentication Required - | "408" ; Request Time-out - | "409" ; Conflict - | "410" ; Gone - | "411" ; Length Required - | "412" ; Precondition Failed - | "413" ; Request Entity Too Large - | "414" ; Request-URI Too Large - | "415" ; Unsupported Media Type - | "500" ; Internal Server Error - | "501" ; Not Implemented - | "502" ; Bad Gateway - | "503" ; Service Unavailable - | "504" ; Gateway Time-out - | "505" ; HTTP Version not supported - | extension-code - - extension-code = 3DIGIT - - Reason-Phrase = * - - HTTP status codes are extensible. HTTP applications are not required - to understand the meaning of all registered status codes, though such - understanding is obviously desirable. However, applications MUST - understand the class of any status code, as indicated by the first - digit, and treat any unrecognized response as being equivalent to the - x00 status code of that class, with the exception that an - unrecognized response MUST NOT be cached. For example, if an - unrecognized status code of 431 is received by the client, it can - safely assume that there was something wrong with its request and - treat the response as if it had received a 400 status code. In such - cases, user agents SHOULD present to the user the entity returned - with the response, since that entity is likely to include human- - readable information which will explain the unusual status. - - - - - - - -Fielding, et. al. Standards Track [Page 40] - -RFC 2068 HTTP/1.1 January 1997 - - -6.2 Response Header Fields - - The response-header fields allow the server to pass additional - information about the response which cannot be placed in the Status- - Line. 
These header fields give information about the server and about - further access to the resource identified by the Request-URI. - - response-header = Age ; Section 14.6 - | Location ; Section 14.30 - | Proxy-Authenticate ; Section 14.33 - | Public ; Section 14.35 - | Retry-After ; Section 14.38 - | Server ; Section 14.39 - | Vary ; Section 14.43 - | Warning ; Section 14.45 - | WWW-Authenticate ; Section 14.46 - - Response-header field names can be extended reliably only in - combination with a change in the protocol version. However, new or - experimental header fields MAY be given the semantics of response- - header fields if all parties in the communication recognize them to - be response-header fields. Unrecognized header fields are treated as - entity-header fields. - -7 Entity - - Request and Response messages MAY transfer an entity if not otherwise - restricted by the request method or response status code. An entity - consists of entity-header fields and an entity-body, although some - responses will only include the entity-headers. - - In this section, both sender and recipient refer to either the client - or the server, depending on who sends and who receives the entity. - -7.1 Entity Header Fields - - Entity-header fields define optional metainformation about the - entity-body or, if no body is present, about the resource identified - by the request. - - - - - - - - - - - - -Fielding, et. al. 
Standards Track [Page 41] - -RFC 2068 HTTP/1.1 January 1997 - - - entity-header = Allow ; Section 14.7 - | Content-Base ; Section 14.11 - | Content-Encoding ; Section 14.12 - | Content-Language ; Section 14.13 - | Content-Length ; Section 14.14 - | Content-Location ; Section 14.15 - | Content-MD5 ; Section 14.16 - | Content-Range ; Section 14.17 - | Content-Type ; Section 14.18 - | ETag ; Section 14.20 - | Expires ; Section 14.21 - | Last-Modified ; Section 14.29 - | extension-header - - extension-header = message-header - - The extension-header mechanism allows additional entity-header fields - to be defined without changing the protocol, but these fields cannot - be assumed to be recognizable by the recipient. Unrecognized header - fields SHOULD be ignored by the recipient and forwarded by proxies. - -7.2 Entity Body - - The entity-body (if any) sent with an HTTP request or response is in - a format and encoding defined by the entity-header fields. - - entity-body = *OCTET - - An entity-body is only present in a message when a message-body is - present, as described in section 4.3. The entity-body is obtained - from the message-body by decoding any Transfer-Encoding that may have - been applied to ensure safe and proper transfer of the message. - -7.2.1 Type - - When an entity-body is included with a message, the data type of that - body is determined via the header fields Content-Type and Content- - Encoding. These define a two-layer, ordered encoding model: - - entity-body := Content-Encoding( Content-Type( data ) ) - - Content-Type specifies the media type of the underlying data. - Content-Encoding may be used to indicate any additional content - codings applied to the data, usually for the purpose of data - compression, that are a property of the requested resource. There is - no default encoding. - - - - - -Fielding, et. al. 
Standards Track [Page 42] - -RFC 2068 HTTP/1.1 January 1997 - - - Any HTTP/1.1 message containing an entity-body SHOULD include a - Content-Type header field defining the media type of that body. If - and only if the media type is not given by a Content-Type field, the - recipient MAY attempt to guess the media type via inspection of its - content and/or the name extension(s) of the URL used to identify the - resource. If the media type remains unknown, the recipient SHOULD - treat it as type "application/octet-stream". - -7.2.2 Length - - The length of an entity-body is the length of the message-body after - any transfer codings have been removed. Section 4.4 defines how the - length of a message-body is determined. - -8 Connections - -8.1 Persistent Connections - -8.1.1 Purpose - - Prior to persistent connections, a separate TCP connection was - established to fetch each URL, increasing the load on HTTP servers - and causing congestion on the Internet. The use of inline images and - other associated data often requires a client to make multiple - requests of the same server in a short amount of time. Analyses of - these performance problems are available [30][27]; analysis and - results from a prototype implementation are in [26]. - - Persistent HTTP connections have a number of advantages: - - o By opening and closing fewer TCP connections, CPU time is saved, - and memory used for TCP protocol control blocks is also saved. - o HTTP requests and responses can be pipelined on a connection. - Pipelining allows a client to make multiple requests without - waiting for each response, allowing a single TCP connection to be - used much more efficiently, with much lower elapsed time. - o Network congestion is reduced by reducing the number of packets - caused by TCP opens, and by allowing TCP sufficient time to - determine the congestion state of the network. 
- o HTTP can evolve more gracefully; since errors can be reported - without the penalty of closing the TCP connection. Clients using - future versions of HTTP might optimistically try a new feature, but - if communicating with an older server, retry with old semantics - after an error is reported. - - HTTP implementations SHOULD implement persistent connections. - - - - - -Fielding, et. al. Standards Track [Page 43] - -RFC 2068 HTTP/1.1 January 1997 - - -8.1.2 Overall Operation - - A significant difference between HTTP/1.1 and earlier versions of - HTTP is that persistent connections are the default behavior of any - HTTP connection. That is, unless otherwise indicated, the client may - assume that the server will maintain a persistent connection. - - Persistent connections provide a mechanism by which a client and a - server can signal the close of a TCP connection. This signaling takes - place using the Connection header field. Once a close has been - signaled, the client MUST not send any more requests on that - connection. - -8.1.2.1 Negotiation - - An HTTP/1.1 server MAY assume that a HTTP/1.1 client intends to - maintain a persistent connection unless a Connection header including - the connection-token "close" was sent in the request. If the server - chooses to close the connection immediately after sending the - response, it SHOULD send a Connection header including the - connection-token close. - - An HTTP/1.1 client MAY expect a connection to remain open, but would - decide to keep it open based on whether the response from a server - contains a Connection header with the connection-token close. In case - the client does not want to maintain a connection for more than that - request, it SHOULD send a Connection header including the - connection-token close. - - If either the client or the server sends the close token in the - Connection header, that request becomes the last one for the - connection. 
- - Clients and servers SHOULD NOT assume that a persistent connection is - maintained for HTTP versions less than 1.1 unless it is explicitly - signaled. See section 19.7.1 for more information on backwards - compatibility with HTTP/1.0 clients. - - In order to remain persistent, all messages on the connection must - have a self-defined message length (i.e., one not defined by closure - of the connection), as described in section 4.4. - -8.1.2.2 Pipelining - - A client that supports persistent connections MAY "pipeline" its - requests (i.e., send multiple requests without waiting for each - response). A server MUST send its responses to those requests in the - same order that the requests were received. - - - -Fielding, et. al. Standards Track [Page 44] - -RFC 2068 HTTP/1.1 January 1997 - - - Clients which assume persistent connections and pipeline immediately - after connection establishment SHOULD be prepared to retry their - connection if the first pipelined attempt fails. If a client does - such a retry, it MUST NOT pipeline before it knows the connection is - persistent. Clients MUST also be prepared to resend their requests if - the server closes the connection before sending all of the - corresponding responses. - -8.1.3 Proxy Servers - - It is especially important that proxies correctly implement the - properties of the Connection header field as specified in 14.2.1. - - The proxy server MUST signal persistent connections separately with - its clients and the origin servers (or other proxy servers) that it - connects to. Each persistent connection applies to only one transport - link. - - A proxy server MUST NOT establish a persistent connection with an - HTTP/1.0 client. - -8.1.4 Practical Considerations - - Servers will usually have some time-out value beyond which they will - no longer maintain an inactive connection. 
Proxy servers might make - this a higher value since it is likely that the client will be making - more connections through the same server. The use of persistent - connections places no requirements on the length of this time-out for - either the client or the server. - - When a client or server wishes to time-out it SHOULD issue a graceful - close on the transport connection. Clients and servers SHOULD both - constantly watch for the other side of the transport close, and - respond to it as appropriate. If a client or server does not detect - the other side's close promptly it could cause unnecessary resource - drain on the network. - - A client, server, or proxy MAY close the transport connection at any - time. For example, a client MAY have started to send a new request at - the same time that the server has decided to close the "idle" - connection. From the server's point of view, the connection is being - closed while it was idle, but from the client's point of view, a - request is in progress. - - This means that clients, servers, and proxies MUST be able to recover - from asynchronous close events. Client software SHOULD reopen the - transport connection and retransmit the aborted request without user - interaction so long as the request method is idempotent (see section - - - -Fielding, et. al. Standards Track [Page 45] - -RFC 2068 HTTP/1.1 January 1997 - - - 9.1.2); other methods MUST NOT be automatically retried, although - user agents MAY offer a human operator the choice of retrying the - request. - - However, this automatic retry SHOULD NOT be repeated if the second - request fails. - - Servers SHOULD always respond to at least one request per connection, - if at all possible. Servers SHOULD NOT close a connection in the - middle of transmitting a response, unless a network or client failure - is suspected. - - Clients that use persistent connections SHOULD limit the number of - simultaneous connections that they maintain to a given server. 
A - single-user client SHOULD maintain AT MOST 2 connections with any - server or proxy. A proxy SHOULD use up to 2*N connections to another - server or proxy, where N is the number of simultaneously active - users. These guidelines are intended to improve HTTP response times - and avoid congestion of the Internet or other networks. - -8.2 Message Transmission Requirements - -General requirements: - -o HTTP/1.1 servers SHOULD maintain persistent connections and use - TCP's flow control mechanisms to resolve temporary overloads, - rather than terminating connections with the expectation that - clients will retry. The latter technique can exacerbate network - congestion. - -o An HTTP/1.1 (or later) client sending a message-body SHOULD monitor - the network connection for an error status while it is transmitting - the request. If the client sees an error status, it SHOULD - immediately cease transmitting the body. If the body is being sent - using a "chunked" encoding (section 3.6), a zero length chunk and - empty footer MAY be used to prematurely mark the end of the - message. If the body was preceded by a Content-Length header, the - client MUST close the connection. - -o An HTTP/1.1 (or later) client MUST be prepared to accept a 100 - (Continue) status followed by a regular response. - -o An HTTP/1.1 (or later) server that receives a request from a - HTTP/1.0 (or earlier) client MUST NOT transmit the 100 (continue) - response; it SHOULD either wait for the request to be completed - normally (thus avoiding an interrupted request) or close the - connection prematurely. - - - - -Fielding, et. al. Standards Track [Page 46] - -RFC 2068 HTTP/1.1 January 1997 - - - Upon receiving a method subject to these requirements from an - HTTP/1.1 (or later) client, an HTTP/1.1 (or later) server MUST either - respond with 100 (Continue) status and continue to read from the - input stream, or respond with an error status. 
If it responds with an - error status, it MAY close the transport (TCP) connection or it MAY - continue to read and discard the rest of the request. It MUST NOT - perform the requested method if it returns an error status. - - Clients SHOULD remember the version number of at least the most - recently used server; if an HTTP/1.1 client has seen an HTTP/1.1 or - later response from the server, and it sees the connection close - before receiving any status from the server, the client SHOULD retry - the request without user interaction so long as the request method is - idempotent (see section 9.1.2); other methods MUST NOT be - automatically retried, although user agents MAY offer a human - operator the choice of retrying the request.. If the client does - retry the request, the client - - o MUST first send the request header fields, and then - - o MUST wait for the server to respond with either a 100 (Continue) - response, in which case the client should continue, or with an - error status. - - If an HTTP/1.1 client has not seen an HTTP/1.1 or later response from - the server, it should assume that the server implements HTTP/1.0 or - older and will not use the 100 (Continue) response. If in this case - the client sees the connection close before receiving any status from - the server, the client SHOULD retry the request. If the client does - retry the request to this HTTP/1.0 server, it should use the - following "binary exponential backoff" algorithm to be assured of - obtaining a reliable response: - - 1. Initiate a new connection to the server - - 2. Transmit the request-headers - - 3. Initialize a variable R to the estimated round-trip time to the - server (e.g., based on the time it took to establish the - connection), or to a constant value of 5 seconds if the round-trip - time is not available. - - 4. Compute T = R * (2**N), where N is the number of previous retries - of this request. - - 5. 
Wait either for an error response from the server, or for T seconds - (whichever comes first) - - - - -Fielding, et. al. Standards Track [Page 47] - -RFC 2068 HTTP/1.1 January 1997 - - - 6. If no error response is received, after T seconds transmit the body - of the request. - - 7. If client sees that the connection is closed prematurely, repeat - from step 1 until the request is accepted, an error response is - received, or the user becomes impatient and terminates the retry - process. - - No matter what the server version, if an error status is received, - the client - - o MUST NOT continue and - - o MUST close the connection if it has not completed sending the - message. - - An HTTP/1.1 (or later) client that sees the connection close after - receiving a 100 (Continue) but before receiving any other status - SHOULD retry the request, and need not wait for 100 (Continue) - response (but MAY do so if this simplifies the implementation). - -9 Method Definitions - - The set of common methods for HTTP/1.1 is defined below. Although - this set can be expanded, additional methods cannot be assumed to - share the same semantics for separately extended clients and servers. - - The Host request-header field (section 14.23) MUST accompany all - HTTP/1.1 requests. - -9.1 Safe and Idempotent Methods - -9.1.1 Safe Methods - - Implementers should be aware that the software represents the user in - their interactions over the Internet, and should be careful to allow - the user to be aware of any actions they may take which may have an - unexpected significance to themselves or others. - - In particular, the convention has been established that the GET and - HEAD methods should never have the significance of taking an action - other than retrieval. These methods should be considered "safe." 
This - allows user agents to represent other methods, such as POST, PUT and - DELETE, in a special way, so that the user is made aware of the fact - that a possibly unsafe action is being requested. - - Naturally, it is not possible to ensure that the server does not - generate side-effects as a result of performing a GET request; in - - - -Fielding, et. al. Standards Track [Page 48] - -RFC 2068 HTTP/1.1 January 1997 - - - fact, some dynamic resources consider that a feature. The important - distinction here is that the user did not request the side-effects, - so therefore cannot be held accountable for them. - -9.1.2 Idempotent Methods - - Methods may also have the property of "idempotence" in that (aside - from error or expiration issues) the side-effects of N > 0 identical - requests is the same as for a single request. The methods GET, HEAD, - PUT and DELETE share this property. - -9.2 OPTIONS - - The OPTIONS method represents a request for information about the - communication options available on the request/response chain - identified by the Request-URI. This method allows the client to - determine the options and/or requirements associated with a resource, - or the capabilities of a server, without implying a resource action - or initiating a resource retrieval. - - Unless the server's response is an error, the response MUST NOT - include entity information other than what can be considered as - communication options (e.g., Allow is appropriate, but Content-Type - is not). Responses to this method are not cachable. - - If the Request-URI is an asterisk ("*"), the OPTIONS request is - intended to apply to the server as a whole. A 200 response SHOULD - include any header fields which indicate optional features - implemented by the server (e.g., Public), including any extensions - not defined by this specification, in addition to any applicable - general or response-header fields. 
As described in section 5.1.2, an - "OPTIONS *" request can be applied through a proxy by specifying the - destination server in the Request-URI without any path information. - - If the Request-URI is not an asterisk, the OPTIONS request applies - only to the options that are available when communicating with that - resource. A 200 response SHOULD include any header fields which - indicate optional features implemented by the server and applicable - to that resource (e.g., Allow), including any extensions not defined - by this specification, in addition to any applicable general or - response-header fields. If the OPTIONS request passes through a - proxy, the proxy MUST edit the response to exclude those options - which apply to a proxy's capabilities and which are known to be - unavailable through that proxy. - - - - - - - -Fielding, et. al. Standards Track [Page 49] - -RFC 2068 HTTP/1.1 January 1997 - - -9.3 GET - - The GET method means retrieve whatever information (in the form of an - entity) is identified by the Request-URI. If the Request-URI refers - to a data-producing process, it is the produced data which shall be - returned as the entity in the response and not the source text of the - process, unless that text happens to be the output of the process. - - The semantics of the GET method change to a "conditional GET" if the - request message includes an If-Modified-Since, If-Unmodified-Since, - If-Match, If-None-Match, or If-Range header field. A conditional GET - method requests that the entity be transferred only under the - circumstances described by the conditional header field(s). The - conditional GET method is intended to reduce unnecessary network - usage by allowing cached entities to be refreshed without requiring - multiple requests or transferring data already held by the client. - - The semantics of the GET method change to a "partial GET" if the - request message includes a Range header field. 
A partial GET requests - that only part of the entity be transferred, as described in section - 14.36. The partial GET method is intended to reduce unnecessary - network usage by allowing partially-retrieved entities to be - completed without transferring data already held by the client. - - The response to a GET request is cachable if and only if it meets the - requirements for HTTP caching described in section 13. - -9.4 HEAD - - The HEAD method is identical to GET except that the server MUST NOT - return a message-body in the response. The metainformation contained - in the HTTP headers in response to a HEAD request SHOULD be identical - to the information sent in response to a GET request. This method can - be used for obtaining metainformation about the entity implied by the - request without transferring the entity-body itself. This method is - often used for testing hypertext links for validity, accessibility, - and recent modification. - - The response to a HEAD request may be cachable in the sense that the - information contained in the response may be used to update a - previously cached entity from that resource. If the new field values - indicate that the cached entity differs from the current entity (as - would be indicated by a change in Content-Length, Content-MD5, ETag - or Last-Modified), then the cache MUST treat the cache entry as - stale. - - - - - - -Fielding, et. al. Standards Track [Page 50] - -RFC 2068 HTTP/1.1 January 1997 - - -9.5 POST - - The POST method is used to request that the destination server accept - the entity enclosed in the request as a new subordinate of the - resource identified by the Request-URI in the Request-Line. 
POST is - designed to allow a uniform method to cover the following functions: - - o Annotation of existing resources; - - o Posting a message to a bulletin board, newsgroup, mailing list, - or similar group of articles; - - o Providing a block of data, such as the result of submitting a - form, to a data-handling process; - - o Extending a database through an append operation. - - The actual function performed by the POST method is determined by the - server and is usually dependent on the Request-URI. The posted entity - is subordinate to that URI in the same way that a file is subordinate - to a directory containing it, a news article is subordinate to a - newsgroup to which it is posted, or a record is subordinate to a - database. - - The action performed by the POST method might not result in a - resource that can be identified by a URI. In this case, either 200 - (OK) or 204 (No Content) is the appropriate response status, - depending on whether or not the response includes an entity that - describes the result. - - If a resource has been created on the origin server, the response - SHOULD be 201 (Created) and contain an entity which describes the - status of the request and refers to the new resource, and a Location - header (see section 14.30). - - Responses to this method are not cachable, unless the response - includes appropriate Cache-Control or Expires header fields. However, - the 303 (See Other) response can be used to direct the user agent to - retrieve a cachable resource. - - POST requests must obey the message transmission requirements set out - in section 8.2. - - - - - - - - - -Fielding, et. al. Standards Track [Page 51] - -RFC 2068 HTTP/1.1 January 1997 - - -9.6 PUT - - The PUT method requests that the enclosed entity be stored under the - supplied Request-URI. If the Request-URI refers to an already - existing resource, the enclosed entity SHOULD be considered as a - modified version of the one residing on the origin server. 
If the - Request-URI does not point to an existing resource, and that URI is - capable of being defined as a new resource by the requesting user - agent, the origin server can create the resource with that URI. If a - new resource is created, the origin server MUST inform the user agent - via the 201 (Created) response. If an existing resource is modified, - either the 200 (OK) or 204 (No Content) response codes SHOULD be sent - to indicate successful completion of the request. If the resource - could not be created or modified with the Request-URI, an appropriate - error response SHOULD be given that reflects the nature of the - problem. The recipient of the entity MUST NOT ignore any Content-* - (e.g. Content-Range) headers that it does not understand or implement - and MUST return a 501 (Not Implemented) response in such cases. - - If the request passes through a cache and the Request-URI identifies - one or more currently cached entities, those entries should be - treated as stale. Responses to this method are not cachable. - - The fundamental difference between the POST and PUT requests is - reflected in the different meaning of the Request-URI. The URI in a - POST request identifies the resource that will handle the enclosed - entity. That resource may be a data-accepting process, a gateway to - some other protocol, or a separate entity that accepts annotations. - In contrast, the URI in a PUT request identifies the entity enclosed - with the request -- the user agent knows what URI is intended and the - server MUST NOT attempt to apply the request to some other resource. - If the server desires that the request be applied to a different URI, - it MUST send a 301 (Moved Permanently) response; the user agent MAY - then make its own decision regarding whether or not to redirect the - request. - - A single resource MAY be identified by many different URIs. 
For - example, an article may have a URI for identifying "the current - version" which is separate from the URI identifying each particular - version. In this case, a PUT request on a general URI may result in - several other URIs being defined by the origin server. - - HTTP/1.1 does not define how a PUT method affects the state of an - origin server. - - PUT requests must obey the message transmission requirements set out - in section 8.2. - - - - -Fielding, et. al. Standards Track [Page 52] - -RFC 2068 HTTP/1.1 January 1997 - - -9.7 DELETE - - The DELETE method requests that the origin server delete the resource - identified by the Request-URI. This method MAY be overridden by human - intervention (or other means) on the origin server. The client cannot - be guaranteed that the operation has been carried out, even if the - status code returned from the origin server indicates that the action - has been completed successfully. However, the server SHOULD not - indicate success unless, at the time the response is given, it - intends to delete the resource or move it to an inaccessible - location. - - A successful response SHOULD be 200 (OK) if the response includes an - entity describing the status, 202 (Accepted) if the action has not - yet been enacted, or 204 (No Content) if the response is OK but does - not include an entity. - - If the request passes through a cache and the Request-URI identifies - one or more currently cached entities, those entries should be - treated as stale. Responses to this method are not cachable. - -9.8 TRACE - - The TRACE method is used to invoke a remote, application-layer loop- - back of the request message. The final recipient of the request - SHOULD reflect the message received back to the client as the - entity-body of a 200 (OK) response. The final recipient is either the - origin server or the first proxy or gateway to receive a Max-Forwards - value of zero (0) in the request (see section 14.31). 
A TRACE request - MUST NOT include an entity. - - TRACE allows the client to see what is being received at the other - end of the request chain and use that data for testing or diagnostic - information. The value of the Via header field (section 14.44) is of - particular interest, since it acts as a trace of the request chain. - Use of the Max-Forwards header field allows the client to limit the - length of the request chain, which is useful for testing a chain of - proxies forwarding messages in an infinite loop. - - If successful, the response SHOULD contain the entire request message - in the entity-body, with a Content-Type of "message/http". Responses - to this method MUST NOT be cached. - -10 Status Code Definitions - - Each Status-Code is described below, including a description of which - method(s) it can follow and any metainformation required in the - response. - - - -Fielding, et. al. Standards Track [Page 53] - -RFC 2068 HTTP/1.1 January 1997 - - -10.1 Informational 1xx - - This class of status code indicates a provisional response, - consisting only of the Status-Line and optional headers, and is - terminated by an empty line. Since HTTP/1.0 did not define any 1xx - status codes, servers MUST NOT send a 1xx response to an HTTP/1.0 - client except under experimental conditions. - -10.1.1 100 Continue - - The client may continue with its request. This interim response is - used to inform the client that the initial part of the request has - been received and has not yet been rejected by the server. The client - SHOULD continue by sending the remainder of the request or, if the - request has already been completed, ignore this response. The server - MUST send a final response after the request has been completed. - -10.1.2 101 Switching Protocols - - The server understands and is willing to comply with the client's - request, via the Upgrade message header field (section 14.41), for a - change in the application protocol being used on this connection. 
The - server will switch protocols to those defined by the response's - Upgrade header field immediately after the empty line which - terminates the 101 response. - - The protocol should only be switched when it is advantageous to do - so. For example, switching to a newer version of HTTP is - advantageous over older versions, and switching to a real-time, - synchronous protocol may be advantageous when delivering resources - that use such features. - -10.2 Successful 2xx - - This class of status code indicates that the client's request was - successfully received, understood, and accepted. - -10.2.1 200 OK - - The request has succeeded. The information returned with the response - is dependent on the method used in the request, for example: - - GET an entity corresponding to the requested resource is sent in the - response; - - HEAD the entity-header fields corresponding to the requested resource - are sent in the response without any message-body; - - - - -Fielding, et. al. Standards Track [Page 54] - -RFC 2068 HTTP/1.1 January 1997 - - - POST an entity describing or containing the result of the action; - - TRACE an entity containing the request message as received by the end - server. - -10.2.2 201 Created - - The request has been fulfilled and resulted in a new resource being - created. The newly created resource can be referenced by the URI(s) - returned in the entity of the response, with the most specific URL - for the resource given by a Location header field. The origin server - MUST create the resource before returning the 201 status code. If the - action cannot be carried out immediately, the server should respond - with 202 (Accepted) response instead. - -10.2.3 202 Accepted - - The request has been accepted for processing, but the processing has - not been completed. The request MAY or MAY NOT eventually be acted - upon, as it MAY be disallowed when processing actually takes place. 
- There is no facility for re-sending a status code from an - asynchronous operation such as this. - - The 202 response is intentionally non-committal. Its purpose is to - allow a server to accept a request for some other process (perhaps a - batch-oriented process that is only run once per day) without - requiring that the user agent's connection to the server persist - until the process is completed. The entity returned with this - response SHOULD include an indication of the request's current status - and either a pointer to a status monitor or some estimate of when the - user can expect the request to be fulfilled. - -10.2.4 203 Non-Authoritative Information - - The returned metainformation in the entity-header is not the - definitive set as available from the origin server, but is gathered - from a local or a third-party copy. The set presented MAY be a subset - or superset of the original version. For example, including local - annotation information about the resource MAY result in a superset of - the metainformation known by the origin server. Use of this response - code is not required and is only appropriate when the response would - otherwise be 200 (OK). - -10.2.5 204 No Content - - The server has fulfilled the request but there is no new information - to send back. If the client is a user agent, it SHOULD NOT change its - document view from that which caused the request to be sent. This - - - -Fielding, et. al. Standards Track [Page 55] - -RFC 2068 HTTP/1.1 January 1997 - - - response is primarily intended to allow input for actions to take - place without causing a change to the user agent's active document - view. The response MAY include new metainformation in the form of - entity-headers, which SHOULD apply to the document currently in the - user agent's active view. - - The 204 response MUST NOT include a message-body, and thus is always - terminated by the first empty line after the header fields. 
- -10.2.6 205 Reset Content - - The server has fulfilled the request and the user agent SHOULD reset - the document view which caused the request to be sent. This response - is primarily intended to allow input for actions to take place via - user input, followed by a clearing of the form in which the input is - given so that the user can easily initiate another input action. The - response MUST NOT include an entity. - -10.2.7 206 Partial Content - - The server has fulfilled the partial GET request for the resource. - The request must have included a Range header field (section 14.36) - indicating the desired range. The response MUST include either a - Content-Range header field (section 14.17) indicating the range - included with this response, or a multipart/byteranges Content-Type - including Content-Range fields for each part. If multipart/byteranges - is not used, the Content-Length header field in the response MUST - match the actual number of OCTETs transmitted in the message-body. - - A cache that does not support the Range and Content-Range headers - MUST NOT cache 206 (Partial) responses. - -10.3 Redirection 3xx - - This class of status code indicates that further action needs to be - taken by the user agent in order to fulfill the request. The action - required MAY be carried out by the user agent without interaction - with the user if and only if the method used in the second request is - GET or HEAD. A user agent SHOULD NOT automatically redirect a request - more than 5 times, since such redirections usually indicate an - infinite loop. - - - - - - - - - - -Fielding, et. al. 
Standards Track [Page 56] - -RFC 2068 HTTP/1.1 January 1997 - - -10.3.1 300 Multiple Choices - - The requested resource corresponds to any one of a set of - representations, each with its own specific location, and agent- - driven negotiation information (section 12) is being provided so that - the user (or user agent) can select a preferred representation and - redirect its request to that location. - - Unless it was a HEAD request, the response SHOULD include an entity - containing a list of resource characteristics and location(s) from - which the user or user agent can choose the one most appropriate. The - entity format is specified by the media type given in the Content- - Type header field. Depending upon the format and the capabilities of - the user agent, selection of the most appropriate choice may be - performed automatically. However, this specification does not define - any standard for such automatic selection. - - If the server has a preferred choice of representation, it SHOULD - include the specific URL for that representation in the Location - field; user agents MAY use the Location field value for automatic - redirection. This response is cachable unless indicated otherwise. - -10.3.2 301 Moved Permanently - - The requested resource has been assigned a new permanent URI and any - future references to this resource SHOULD be done using one of the - returned URIs. Clients with link editing capabilities SHOULD - automatically re-link references to the Request-URI to one or more of - the new references returned by the server, where possible. This - response is cachable unless indicated otherwise. - - If the new URI is a location, its URL SHOULD be given by the Location - field in the response. Unless the request method was HEAD, the entity - of the response SHOULD contain a short hypertext note with a - hyperlink to the new URI(s). 
- - If the 301 status code is received in response to a request other - than GET or HEAD, the user agent MUST NOT automatically redirect the - request unless it can be confirmed by the user, since this might - change the conditions under which the request was issued. - - Note: When automatically redirecting a POST request after receiving - a 301 status code, some existing HTTP/1.0 user agents will - erroneously change it into a GET request. - - - - - - - -Fielding, et. al. Standards Track [Page 57] - -RFC 2068 HTTP/1.1 January 1997 - - -10.3.3 302 Moved Temporarily - - The requested resource resides temporarily under a different URI. - Since the redirection may be altered on occasion, the client SHOULD - continue to use the Request-URI for future requests. This response is - only cachable if indicated by a Cache-Control or Expires header - field. - - If the new URI is a location, its URL SHOULD be given by the Location - field in the response. Unless the request method was HEAD, the entity - of the response SHOULD contain a short hypertext note with a - hyperlink to the new URI(s). - - If the 302 status code is received in response to a request other - than GET or HEAD, the user agent MUST NOT automatically redirect the - request unless it can be confirmed by the user, since this might - change the conditions under which the request was issued. - - Note: When automatically redirecting a POST request after receiving - a 302 status code, some existing HTTP/1.0 user agents will - erroneously change it into a GET request. - -10.3.4 303 See Other - - The response to the request can be found under a different URI and - SHOULD be retrieved using a GET method on that resource. This method - exists primarily to allow the output of a POST-activated script to - redirect the user agent to a selected resource. The new URI is not a - substitute reference for the originally requested resource. 
The 303 - response is not cachable, but the response to the second (redirected) - request MAY be cachable. - - If the new URI is a location, its URL SHOULD be given by the Location - field in the response. Unless the request method was HEAD, the entity - of the response SHOULD contain a short hypertext note with a - hyperlink to the new URI(s). - -10.3.5 304 Not Modified - - If the client has performed a conditional GET request and access is - allowed, but the document has not been modified, the server SHOULD - respond with this status code. The response MUST NOT contain a - message-body. - - - - - - - - -Fielding, et. al. Standards Track [Page 58] - -RFC 2068 HTTP/1.1 January 1997 - - - The response MUST include the following header fields: - - o Date - - o ETag and/or Content-Location, if the header would have been sent in - a 200 response to the same request - - o Expires, Cache-Control, and/or Vary, if the field-value might - differ from that sent in any previous response for the same variant - - If the conditional GET used a strong cache validator (see section - 13.3.3), the response SHOULD NOT include other entity-headers. - Otherwise (i.e., the conditional GET used a weak validator), the - response MUST NOT include other entity-headers; this prevents - inconsistencies between cached entity-bodies and updated headers. - - If a 304 response indicates an entity not currently cached, then the - cache MUST disregard the response and repeat the request without the - conditional. - - If a cache uses a received 304 response to update a cache entry, the - cache MUST update the entry to reflect any new field values given in - the response. - - The 304 response MUST NOT include a message-body, and thus is always - terminated by the first empty line after the header fields. - -10.3.6 305 Use Proxy - - The requested resource MUST be accessed through the proxy given by - the Location field. The Location field gives the URL of the proxy. 
- The recipient is expected to repeat the request via the proxy. - -10.4 Client Error 4xx - - The 4xx class of status code is intended for cases in which the - client seems to have erred. Except when responding to a HEAD request, - the server SHOULD include an entity containing an explanation of the - error situation, and whether it is a temporary or permanent - condition. These status codes are applicable to any request method. - User agents SHOULD display any included entity to the user. - - Note: If the client is sending data, a server implementation using - TCP should be careful to ensure that the client acknowledges - receipt of the packet(s) containing the response, before the server - closes the input connection. If the client continues sending data - to the server after the close, the server's TCP stack will send a - reset packet to the client, which may erase the client's - - - -Fielding, et. al. Standards Track [Page 59] - -RFC 2068 HTTP/1.1 January 1997 - - - unacknowledged input buffers before they can be read and - interpreted by the HTTP application. - -10.4.1 400 Bad Request - - The request could not be understood by the server due to malformed - syntax. The client SHOULD NOT repeat the request without - modifications. - -10.4.2 401 Unauthorized - - The request requires user authentication. The response MUST include a - WWW-Authenticate header field (section 14.46) containing a challenge - applicable to the requested resource. The client MAY repeat the - request with a suitable Authorization header field (section 14.8). If - the request already included Authorization credentials, then the 401 - response indicates that authorization has been refused for those - credentials. 
If the 401 response contains the same challenge as the - prior response, and the user agent has already attempted - authentication at least once, then the user SHOULD be presented the - entity that was given in the response, since that entity MAY include - relevant diagnostic information. HTTP access authentication is - explained in section 11. - -10.4.3 402 Payment Required - - This code is reserved for future use. - -10.4.4 403 Forbidden - - The server understood the request, but is refusing to fulfill it. - Authorization will not help and the request SHOULD NOT be repeated. - If the request method was not HEAD and the server wishes to make - public why the request has not been fulfilled, it SHOULD describe the - reason for the refusal in the entity. This status code is commonly - used when the server does not wish to reveal exactly why the request - has been refused, or when no other response is applicable. - -10.4.5 404 Not Found - - The server has not found anything matching the Request-URI. No - indication is given of whether the condition is temporary or - permanent. - - - - - - - - -Fielding, et. al. Standards Track [Page 60] - -RFC 2068 HTTP/1.1 January 1997 - - - If the server does not wish to make this information available to the - client, the status code 403 (Forbidden) can be used instead. The 410 - (Gone) status code SHOULD be used if the server knows, through some - internally configurable mechanism, that an old resource is - permanently unavailable and has no forwarding address. - -10.4.6 405 Method Not Allowed - - The method specified in the Request-Line is not allowed for the - resource identified by the Request-URI. The response MUST include an - Allow header containing a list of valid methods for the requested - resource. - -10.4.7 406 Not Acceptable - - The resource identified by the request is only capable of generating - response entities which have content characteristics not acceptable - according to the accept headers sent in the request. 
- - Unless it was a HEAD request, the response SHOULD include an entity - containing a list of available entity characteristics and location(s) - from which the user or user agent can choose the one most - appropriate. The entity format is specified by the media type given - in the Content-Type header field. Depending upon the format and the - capabilities of the user agent, selection of the most appropriate - choice may be performed automatically. However, this specification - does not define any standard for such automatic selection. - - Note: HTTP/1.1 servers are allowed to return responses which are - not acceptable according to the accept headers sent in the request. - In some cases, this may even be preferable to sending a 406 - response. User agents are encouraged to inspect the headers of an - incoming response to determine if it is acceptable. If the response - could be unacceptable, a user agent SHOULD temporarily stop receipt - of more data and query the user for a decision on further actions. - -10.4.8 407 Proxy Authentication Required - - This code is similar to 401 (Unauthorized), but indicates that the - client MUST first authenticate itself with the proxy. The proxy MUST - return a Proxy-Authenticate header field (section 14.33) containing a - challenge applicable to the proxy for the requested resource. The - client MAY repeat the request with a suitable Proxy-Authorization - header field (section 14.34). HTTP access authentication is explained - in section 11. - - - - - - -Fielding, et. al. Standards Track [Page 61] - -RFC 2068 HTTP/1.1 January 1997 - - -10.4.9 408 Request Timeout - - The client did not produce a request within the time that the server - was prepared to wait. The client MAY repeat the request without - modifications at any later time. - -10.4.10 409 Conflict - - The request could not be completed due to a conflict with the current - state of the resource. 
This code is only allowed in situations where - it is expected that the user might be able to resolve the conflict - and resubmit the request. The response body SHOULD include enough - information for the user to recognize the source of the conflict. - Ideally, the response entity would include enough information for the - user or user agent to fix the problem; however, that may not be - possible and is not required. - - Conflicts are most likely to occur in response to a PUT request. If - versioning is being used and the entity being PUT includes changes to - a resource which conflict with those made by an earlier (third-party) - request, the server MAY use the 409 response to indicate that it - can't complete the request. In this case, the response entity SHOULD - contain a list of the differences between the two versions in a - format defined by the response Content-Type. - -10.4.11 410 Gone - - The requested resource is no longer available at the server and no - forwarding address is known. This condition SHOULD be considered - permanent. Clients with link editing capabilities SHOULD delete - references to the Request-URI after user approval. If the server does - not know, or has no facility to determine, whether or not the - condition is permanent, the status code 404 (Not Found) SHOULD be - used instead. This response is cachable unless indicated otherwise. - - The 410 response is primarily intended to assist the task of web - maintenance by notifying the recipient that the resource is - intentionally unavailable and that the server owners desire that - remote links to that resource be removed. Such an event is common for - limited-time, promotional services and for resources belonging to - individuals no longer working at the server's site. It is not - necessary to mark all permanently unavailable resources as "gone" or - to keep the mark for any length of time -- that is left to the - discretion of the server owner. - - - - - - - -Fielding, et. al. 
Standards Track [Page 62] - -RFC 2068 HTTP/1.1 January 1997 - - -10.4.12 411 Length Required - - The server refuses to accept the request without a defined Content- - Length. The client MAY repeat the request if it adds a valid - Content-Length header field containing the length of the message-body - in the request message. - -10.4.13 412 Precondition Failed - - The precondition given in one or more of the request-header fields - evaluated to false when it was tested on the server. This response - code allows the client to place preconditions on the current resource - metainformation (header field data) and thus prevent the requested - method from being applied to a resource other than the one intended. - -10.4.14 413 Request Entity Too Large - - The server is refusing to process a request because the request - entity is larger than the server is willing or able to process. The - server may close the connection to prevent the client from continuing - the request. - - If the condition is temporary, the server SHOULD include a Retry- - After header field to indicate that it is temporary and after what - time the client may try again. - -10.4.15 414 Request-URI Too Long - - The server is refusing to service the request because the Request-URI - is longer than the server is willing to interpret. This rare - condition is only likely to occur when a client has improperly - converted a POST request to a GET request with long query - information, when the client has descended into a URL "black hole" of - redirection (e.g., a redirected URL prefix that points to a suffix of - itself), or when the server is under attack by a client attempting to - exploit security holes present in some servers using fixed-length - buffers for reading or manipulating the Request-URI. - -10.4.16 415 Unsupported Media Type - - The server is refusing to service the request because the entity of - the request is in a format not supported by the requested resource - for the requested method. 
- - - - - - - - -Fielding, et. al. Standards Track [Page 63] - -RFC 2068 HTTP/1.1 January 1997 - - -10.5 Server Error 5xx - - Response status codes beginning with the digit "5" indicate cases in - which the server is aware that it has erred or is incapable of - performing the request. Except when responding to a HEAD request, the - server SHOULD include an entity containing an explanation of the - error situation, and whether it is a temporary or permanent - condition. User agents SHOULD display any included entity to the - user. These response codes are applicable to any request method. - -10.5.1 500 Internal Server Error - - The server encountered an unexpected condition which prevented it - from fulfilling the request. - -10.5.2 501 Not Implemented - - The server does not support the functionality required to fulfill the - request. This is the appropriate response when the server does not - recognize the request method and is not capable of supporting it for - any resource. - -10.5.3 502 Bad Gateway - - The server, while acting as a gateway or proxy, received an invalid - response from the upstream server it accessed in attempting to - fulfill the request. - -10.5.4 503 Service Unavailable - - The server is currently unable to handle the request due to a - temporary overloading or maintenance of the server. The implication - is that this is a temporary condition which will be alleviated after - some delay. If known, the length of the delay may be indicated in a - Retry-After header. If no Retry-After is given, the client SHOULD - handle the response as it would for a 500 response. - - Note: The existence of the 503 status code does not imply that a - server must use it when becoming overloaded. Some servers may wish - to simply refuse the connection. - -10.5.5 504 Gateway Timeout - - The server, while acting as a gateway or proxy, did not receive a - timely response from the upstream server it accessed in attempting to - complete the request. 
- - - - - -Fielding, et. al. Standards Track [Page 64] - -RFC 2068 HTTP/1.1 January 1997 - - -10.5.6 505 HTTP Version Not Supported - - The server does not support, or refuses to support, the HTTP protocol - version that was used in the request message. The server is - indicating that it is unable or unwilling to complete the request - using the same major version as the client, as described in section - 3.1, other than with this error message. The response SHOULD contain - an entity describing why that version is not supported and what other - protocols are supported by that server. - -11 Access Authentication - - HTTP provides a simple challenge-response authentication mechanism - which MAY be used by a server to challenge a client request and by a - client to provide authentication information. It uses an extensible, - case-insensitive token to identify the authentication scheme, - followed by a comma-separated list of attribute-value pairs which - carry the parameters necessary for achieving authentication via that - scheme. - - auth-scheme = token - - auth-param = token "=" quoted-string - - The 401 (Unauthorized) response message is used by an origin server - to challenge the authorization of a user agent. This response MUST - include a WWW-Authenticate header field containing at least one - challenge applicable to the requested resource. - - challenge = auth-scheme 1*SP realm *( "," auth-param ) - - realm = "realm" "=" realm-value - realm-value = quoted-string - - The realm attribute (case-insensitive) is required for all - authentication schemes which issue a challenge. The realm value - (case-sensitive), in combination with the canonical root URL (see - section 5.1.2) of the server being accessed, defines the protection - space. These realms allow the protected resources on a server to be - partitioned into a set of protection spaces, each with its own - authentication scheme and/or authorization database. 
The realm value - is a string, generally assigned by the origin server, which may have - additional semantics specific to the authentication scheme. - - A user agent that wishes to authenticate itself with a server-- - usually, but not necessarily, after receiving a 401 or 411 response- - -MAY do so by including an Authorization header field with the - request. The Authorization field value consists of credentials - - - -Fielding, et. al. Standards Track [Page 65] - -RFC 2068 HTTP/1.1 January 1997 - - - containing the authentication information of the user agent for the - realm of the resource being requested. - - credentials = basic-credentials - | auth-scheme #auth-param - - The domain over which credentials can be automatically applied by a - user agent is determined by the protection space. If a prior request - has been authorized, the same credentials MAY be reused for all other - requests within that protection space for a period of time determined - by the authentication scheme, parameters, and/or user preference. - Unless otherwise defined by the authentication scheme, a single - protection space cannot extend outside the scope of its server. - - If the server does not wish to accept the credentials sent with a - request, it SHOULD return a 401 (Unauthorized) response. The response - MUST include a WWW-Authenticate header field containing the (possibly - new) challenge applicable to the requested resource and an entity - explaining the refusal. - - The HTTP protocol does not restrict applications to this simple - challenge-response mechanism for access authentication. Additional - mechanisms MAY be used, such as encryption at the transport level or - via message encapsulation, and with additional header fields - specifying authentication information. However, these additional - mechanisms are not defined by this specification. - - Proxies MUST be completely transparent regarding user agent - authentication. 
That is, they MUST forward the WWW-Authenticate and
- Authorization headers untouched, and follow the rules found in
- section 14.8.
-
- HTTP/1.1 allows a client to pass authentication information to and
- from a proxy via the Proxy-Authenticate and Proxy-Authorization
- headers.
-
-11.1 Basic Authentication Scheme
-
- The "basic" authentication scheme is based on the model that the user
- agent must authenticate itself with a user-ID and a password for each
- realm. The realm value should be considered an opaque string which
- can only be compared for equality with other realms on that server.
- The server will service the request only if it can validate the
- user-ID and password for the protection space of the Request-URI.
- There are no optional authentication parameters.
-
-
-
-
-
-
-Fielding, et. al. Standards Track [Page 66]
-
-RFC 2068 HTTP/1.1 January 1997
-
-
- Upon receipt of an unauthorized request for a URI within the
- protection space, the server MAY respond with a challenge like the
- following:
-
- WWW-Authenticate: Basic realm="WallyWorld"
-
- where "WallyWorld" is the string assigned by the server to identify
- the protection space of the Request-URI.
-
- To receive authorization, the client sends the userid and password,
- separated by a single colon (":") character, within a base64 encoded
- string in the credentials.
-
- basic-credentials = "Basic" SP basic-cookie
-
- basic-cookie = <base64 [5] encoding of user-pass,
- except not limited to 76 char/line>
-
- user-pass = userid ":" password
-
- userid = <TEXT excluding ":">
-
- password = *TEXT
-
- Userids might be case sensitive.
-
- If the user agent wishes to send the userid "Aladdin" and password
- "open sesame", it would use the following header field:
-
- Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
-
- See section 15 for security considerations associated with Basic
- authentication.
-
-11.2 Digest Authentication Scheme
-
- A digest authentication for HTTP is specified in RFC 2069 [32].
- -12 Content Negotiation - - Most HTTP responses include an entity which contains information for - interpretation by a human user. Naturally, it is desirable to supply - the user with the "best available" entity corresponding to the - request. Unfortunately for servers and caches, not all users have - the same preferences for what is "best," and not all user agents are - equally capable of rendering all entity types. For that reason, HTTP - has provisions for several mechanisms for "content negotiation" -- - the process of selecting the best representation for a given response - - - -Fielding, et. al. Standards Track [Page 67] - -RFC 2068 HTTP/1.1 January 1997 - - - when there are multiple representations available. - - Note: This is not called "format negotiation" because the alternate - representations may be of the same media type, but use different - capabilities of that type, be in different languages, etc. - - Any response containing an entity-body MAY be subject to negotiation, - including error responses. - - There are two kinds of content negotiation which are possible in - HTTP: server-driven and agent-driven negotiation. These two kinds of - negotiation are orthogonal and thus may be used separately or in - combination. One method of combination, referred to as transparent - negotiation, occurs when a cache uses the agent-driven negotiation - information provided by the origin server in order to provide - server-driven negotiation for subsequent requests. - -12.1 Server-driven Negotiation - - If the selection of the best representation for a response is made by - an algorithm located at the server, it is called server-driven - negotiation. Selection is based on the available representations of - the response (the dimensions over which it can vary; e.g. language, - content-coding, etc.) and the contents of particular header fields in - the request message or on other information pertaining to the request - (such as the network address of the client). 
- - Server-driven negotiation is advantageous when the algorithm for - selecting from among the available representations is difficult to - describe to the user agent, or when the server desires to send its - "best guess" to the client along with the first response (hoping to - avoid the round-trip delay of a subsequent request if the "best - guess" is good enough for the user). In order to improve the server's - guess, the user agent MAY include request header fields (Accept, - Accept-Language, Accept-Encoding, etc.) which describe its - preferences for such a response. - - Server-driven negotiation has disadvantages: - -1. It is impossible for the server to accurately determine what might be - "best" for any given user, since that would require complete - knowledge of both the capabilities of the user agent and the intended - use for the response (e.g., does the user want to view it on screen - or print it on paper?). - -2. Having the user agent describe its capabilities in every request can - be both very inefficient (given that only a small percentage of - responses have multiple representations) and a potential violation of - - - -Fielding, et. al. Standards Track [Page 68] - -RFC 2068 HTTP/1.1 January 1997 - - - the user's privacy. - -3. It complicates the implementation of an origin server and the - algorithms for generating responses to a request. - -4. It may limit a public cache's ability to use the same response for - multiple user's requests. - - HTTP/1.1 includes the following request-header fields for enabling - server-driven negotiation through description of user agent - capabilities and user preferences: Accept (section 14.1), Accept- - Charset (section 14.2), Accept-Encoding (section 14.3), Accept- - Language (section 14.4), and User-Agent (section 14.42). 
However, an - origin server is not limited to these dimensions and MAY vary the - response based on any aspect of the request, including information - outside the request-header fields or within extension header fields - not defined by this specification. - - HTTP/1.1 origin servers MUST include an appropriate Vary header field - (section 14.43) in any cachable response based on server-driven - negotiation. The Vary header field describes the dimensions over - which the response might vary (i.e. the dimensions over which the - origin server picks its "best guess" response from multiple - representations). - - HTTP/1.1 public caches MUST recognize the Vary header field when it - is included in a response and obey the requirements described in - section 13.6 that describes the interactions between caching and - content negotiation. - -12.2 Agent-driven Negotiation - - With agent-driven negotiation, selection of the best representation - for a response is performed by the user agent after receiving an - initial response from the origin server. Selection is based on a list - of the available representations of the response included within the - header fields (this specification reserves the field-name Alternates, - as described in appendix 19.6.2.1) or entity-body of the initial - response, with each representation identified by its own URI. - Selection from among the representations may be performed - automatically (if the user agent is capable of doing so) or manually - by the user selecting from a generated (possibly hypertext) menu. - - Agent-driven negotiation is advantageous when the response would vary - over commonly-used dimensions (such as type, language, or encoding), - when the origin server is unable to determine a user agent's - capabilities from examining the request, and generally when public - caches are used to distribute server load and reduce network usage. - - - -Fielding, et. al. 
Standards Track [Page 69] - -RFC 2068 HTTP/1.1 January 1997 - - - Agent-driven negotiation suffers from the disadvantage of needing a - second request to obtain the best alternate representation. This - second request is only efficient when caching is used. In addition, - this specification does not define any mechanism for supporting - automatic selection, though it also does not prevent any such - mechanism from being developed as an extension and used within - HTTP/1.1. - - HTTP/1.1 defines the 300 (Multiple Choices) and 406 (Not Acceptable) - status codes for enabling agent-driven negotiation when the server is - unwilling or unable to provide a varying response using server-driven - negotiation. - -12.3 Transparent Negotiation - - Transparent negotiation is a combination of both server-driven and - agent-driven negotiation. When a cache is supplied with a form of the - list of available representations of the response (as in agent-driven - negotiation) and the dimensions of variance are completely understood - by the cache, then the cache becomes capable of performing server- - driven negotiation on behalf of the origin server for subsequent - requests on that resource. - - Transparent negotiation has the advantage of distributing the - negotiation work that would otherwise be required of the origin - server and also removing the second request delay of agent-driven - negotiation when the cache is able to correctly guess the right - response. - - This specification does not define any mechanism for transparent - negotiation, though it also does not prevent any such mechanism from - being developed as an extension and used within HTTP/1.1. An HTTP/1.1 - cache performing transparent negotiation MUST include a Vary header - field in the response (defining the dimensions of its variance) if it - is cachable to ensure correct interoperation with all HTTP/1.1 - clients. 
The agent-driven negotiation information supplied by the - origin server SHOULD be included with the transparently negotiated - response. - -13 Caching in HTTP - - HTTP is typically used for distributed information systems, where - performance can be improved by the use of response caches. The - HTTP/1.1 protocol includes a number of elements intended to make - caching work as well as possible. Because these elements are - inextricable from other aspects of the protocol, and because they - interact with each other, it is useful to describe the basic caching - design of HTTP separately from the detailed descriptions of methods, - - - -Fielding, et. al. Standards Track [Page 70] - -RFC 2068 HTTP/1.1 January 1997 - - - headers, response codes, etc. - - Caching would be useless if it did not significantly improve - performance. The goal of caching in HTTP/1.1 is to eliminate the need - to send requests in many cases, and to eliminate the need to send - full responses in many other cases. The former reduces the number of - network round-trips required for many operations; we use an - "expiration" mechanism for this purpose (see section 13.2). The - latter reduces network bandwidth requirements; we use a "validation" - mechanism for this purpose (see section 13.3). - - Requirements for performance, availability, and disconnected - operation require us to be able to relax the goal of semantic - transparency. The HTTP/1.1 protocol allows origin servers, caches, - and clients to explicitly reduce transparency when necessary. 
- However, because non-transparent operation may confuse non-expert - users, and may be incompatible with certain server applications (such - as those for ordering merchandise), the protocol requires that - transparency be relaxed - - o only by an explicit protocol-level request when relaxed by client - or origin server - - o only with an explicit warning to the end user when relaxed by cache - or client - - - - - - - - - - - - - - - - - - - - - - - - - - -Fielding, et. al. Standards Track [Page 71] - -RFC 2068 HTTP/1.1 January 1997 - - - Therefore, the HTTP/1.1 protocol provides these important elements: - - 1. Protocol features that provide full semantic transparency when this - is required by all parties. - - 2. Protocol features that allow an origin server or user agent to - explicitly request and control non-transparent operation. - - 3. Protocol features that allow a cache to attach warnings to - responses that do not preserve the requested approximation of - semantic transparency. - - A basic principle is that it must be possible for the clients to - detect any potential relaxation of semantic transparency. - - Note: The server, cache, or client implementer may be faced with - design decisions not explicitly discussed in this specification. If - a decision may affect semantic transparency, the implementer ought - to err on the side of maintaining transparency unless a careful and - complete analysis shows significant benefits in breaking - transparency. - -13.1.1 Cache Correctness - - A correct cache MUST respond to a request with the most up-to-date - response held by the cache that is appropriate to the request (see - sections 13.2.5, 13.2.6, and 13.12) which meets one of the following - conditions: - - 1. It has been checked for equivalence with what the origin server - would have returned by revalidating the response with the origin - server (section 13.3); - - 2. It is "fresh enough" (see section 13.2). 
In the default case, this - means it meets the least restrictive freshness requirement of the - client, server, and cache (see section 14.9); if the origin server - so specifies, it is the freshness requirement of the origin server - alone. - - 3. It includes a warning if the freshness demand of the client or the - origin server is violated (see section 13.1.5 and 14.45). - - 4. It is an appropriate 304 (Not Modified), 305 (Proxy Redirect), or - error (4xx or 5xx) response message. - - If the cache can not communicate with the origin server, then a - correct cache SHOULD respond as above if the response can be - correctly served from the cache; if not it MUST return an error or - - - -Fielding, et. al. Standards Track [Page 72] - -RFC 2068 HTTP/1.1 January 1997 - - - warning indicating that there was a communication failure. - - If a cache receives a response (either an entire response, or a 304 - (Not Modified) response) that it would normally forward to the - requesting client, and the received response is no longer fresh, the - cache SHOULD forward it to the requesting client without adding a new - Warning (but without removing any existing Warning headers). A cache - SHOULD NOT attempt to revalidate a response simply because that - response became stale in transit; this might lead to an infinite - loop. An user agent that receives a stale response without a Warning - MAY display a warning indication to the user. - -13.1.2 Warnings - - Whenever a cache returns a response that is neither first-hand nor - "fresh enough" (in the sense of condition 2 in section 13.1.1), it - must attach a warning to that effect, using a Warning response- - header. This warning allows clients to take appropriate action. - - Warnings may be used for other purposes, both cache-related and - otherwise. The use of a warning, rather than an error status code, - distinguish these responses from true failures. 
- - Warnings are always cachable, because they never weaken the - transparency of a response. This means that warnings can be passed to - HTTP/1.0 caches without danger; such caches will simply pass the - warning along as an entity-header in the response. - - Warnings are assigned numbers between 0 and 99. This specification - defines the code numbers and meanings of each currently assigned - warnings, allowing a client or cache to take automated action in some - (but not all) cases. - - Warnings also carry a warning text. The text may be in any - appropriate natural language (perhaps based on the client's Accept - headers), and include an optional indication of what character set is - used. - - Multiple warnings may be attached to a response (either by the origin - server or by a cache), including multiple warnings with the same code - number. For example, a server may provide the same warning with texts - in both English and Basque. - - When multiple warnings are attached to a response, it may not be - practical or reasonable to display all of them to the user. This - version of HTTP does not specify strict priority rules for deciding - which warnings to display and in what order, but does suggest some - heuristics. - - - -Fielding, et. al. Standards Track [Page 73] - -RFC 2068 HTTP/1.1 January 1997 - - - The Warning header and the currently defined warnings are described - in section 14.45. - -13.1.3 Cache-control Mechanisms - - The basic cache mechanisms in HTTP/1.1 (server-specified expiration - times and validators) are implicit directives to caches. In some - cases, a server or client may need to provide explicit directives to - the HTTP caches. We use the Cache-Control header for this purpose. - - The Cache-Control header allows a client or server to transmit a - variety of directives in either requests or responses. These - directives typically override the default caching algorithms. 
As a - general rule, if there is any apparent conflict between header - values, the most restrictive interpretation should be applied (that - is, the one that is most likely to preserve semantic transparency). - However, in some cases, Cache-Control directives are explicitly - specified as weakening the approximation of semantic transparency - (for example, "max-stale" or "public"). - - The Cache-Control directives are described in detail in section 14.9. - -13.1.4 Explicit User Agent Warnings - - Many user agents make it possible for users to override the basic - caching mechanisms. For example, the user agent may allow the user to - specify that cached entities (even explicitly stale ones) are never - validated. Or the user agent might habitually add "Cache-Control: - max-stale=3600" to every request. The user should have to explicitly - request either non-transparent behavior, or behavior that results in - abnormally ineffective caching. - - If the user has overridden the basic caching mechanisms, the user - agent should explicitly indicate to the user whenever this results in - the display of information that might not meet the server's - transparency requirements (in particular, if the displayed entity is - known to be stale). Since the protocol normally allows the user agent - to determine if responses are stale or not, this indication need only - be displayed when this actually happens. The indication need not be a - dialog box; it could be an icon (for example, a picture of a rotting - fish) or some other visual indicator. - - If the user has overridden the caching mechanisms in a way that would - abnormally reduce the effectiveness of caches, the user agent should - continually display an indication (for example, a picture of currency - in flames) so that the user does not inadvertently consume excess - resources or suffer from excessive latency. - - - - -Fielding, et. al. 
Standards Track [Page 74] - -RFC 2068 HTTP/1.1 January 1997 - - -13.1.5 Exceptions to the Rules and Warnings - - In some cases, the operator of a cache may choose to configure it to - return stale responses even when not requested by clients. This - decision should not be made lightly, but may be necessary for reasons - of availability or performance, especially when the cache is poorly - connected to the origin server. Whenever a cache returns a stale - response, it MUST mark it as such (using a Warning header). This - allows the client software to alert the user that there may be a - potential problem. - - It also allows the user agent to take steps to obtain a first-hand or - fresh response. For this reason, a cache SHOULD NOT return a stale - response if the client explicitly requests a first-hand or fresh one, - unless it is impossible to comply for technical or policy reasons. - -13.1.6 Client-controlled Behavior - - While the origin server (and to a lesser extent, intermediate caches, - by their contribution to the age of a response) are the primary - source of expiration information, in some cases the client may need - to control a cache's decision about whether to return a cached - response without validating it. Clients do this using several - directives of the Cache-Control header. - - A client's request may specify the maximum age it is willing to - accept of an unvalidated response; specifying a value of zero forces - the cache(s) to revalidate all responses. A client may also specify - the minimum time remaining before a response expires. Both of these - options increase constraints on the behavior of caches, and so cannot - further relax the cache's approximation of semantic transparency. - - A client may also specify that it will accept stale responses, up to - some maximum amount of staleness. 
This loosens the constraints on the - caches, and so may violate the origin server's specified constraints - on semantic transparency, but may be necessary to support - disconnected operation, or high availability in the face of poor - connectivity. - -13.2 Expiration Model - -13.2.1 Server-Specified Expiration - - HTTP caching works best when caches can entirely avoid making - requests to the origin server. The primary mechanism for avoiding - requests is for an origin server to provide an explicit expiration - time in the future, indicating that a response may be used to satisfy - subsequent requests. In other words, a cache can return a fresh - - - -Fielding, et. al. Standards Track [Page 75] - -RFC 2068 HTTP/1.1 January 1997 - - - response without first contacting the server. - - Our expectation is that servers will assign future explicit - expiration times to responses in the belief that the entity is not - likely to change, in a semantically significant way, before the - expiration time is reached. This normally preserves semantic - transparency, as long as the server's expiration times are carefully - chosen. - - The expiration mechanism applies only to responses taken from a cache - and not to first-hand responses forwarded immediately to the - requesting client. - - If an origin server wishes to force a semantically transparent cache - to validate every request, it may assign an explicit expiration time - in the past. This means that the response is always stale, and so the - cache SHOULD validate it before using it for subsequent requests. See - section 14.9.4 for a more restrictive way to force revalidation. - - If an origin server wishes to force any HTTP/1.1 cache, no matter how - it is configured, to validate every request, it should use the - "must-revalidate" Cache-Control directive (see section 14.9). - - Servers specify explicit expiration times using either the Expires - header, or the max-age directive of the Cache-Control header. 
- - An expiration time cannot be used to force a user agent to refresh - its display or reload a resource; its semantics apply only to caching - mechanisms, and such mechanisms need only check a resource's - expiration status when a new request for that resource is initiated. - See section 13.13 for explanation of the difference between caches - and history mechanisms. - -13.2.2 Heuristic Expiration - - Since origin servers do not always provide explicit expiration times, - HTTP caches typically assign heuristic expiration times, employing - algorithms that use other header values (such as the Last-Modified - time) to estimate a plausible expiration time. The HTTP/1.1 - specification does not provide specific algorithms, but does impose - worst-case constraints on their results. Since heuristic expiration - times may compromise semantic transparency, they should be used - cautiously, and we encourage origin servers to provide explicit - expiration times as much as possible. - - - - - - - -Fielding, et. al. Standards Track [Page 76] - -RFC 2068 HTTP/1.1 January 1997 - - -13.2.3 Age Calculations - - In order to know if a cached entry is fresh, a cache needs to know if - its age exceeds its freshness lifetime. We discuss how to calculate - the latter in section 13.2.4; this section describes how to calculate - the age of a response or cache entry. - - In this discussion, we use the term "now" to mean "the current value - of the clock at the host performing the calculation." Hosts that use - HTTP, but especially hosts running origin servers and caches, should - use NTP [28] or some similar protocol to synchronize their clocks to - a globally accurate time standard. - - Also note that HTTP/1.1 requires origin servers to send a Date header - with every response, giving the time at which the response was - generated. We use the term "date_value" to denote the value of the - Date header, in a form appropriate for arithmetic operations. 
- - HTTP/1.1 uses the Age response-header to help convey age information - between caches. The Age header value is the sender's estimate of the - amount of time since the response was generated at the origin server. - In the case of a cached response that has been revalidated with the - origin server, the Age value is based on the time of revalidation, - not of the original response. - - In essence, the Age value is the sum of the time that the response - has been resident in each of the caches along the path from the - origin server, plus the amount of time it has been in transit along - network paths. - - We use the term "age_value" to denote the value of the Age header, in - a form appropriate for arithmetic operations. - - A response's age can be calculated in two entirely independent ways: - - 1. now minus date_value, if the local clock is reasonably well - synchronized to the origin server's clock. If the result is - negative, the result is replaced by zero. - - 2. age_value, if all of the caches along the response path - implement HTTP/1.1. - - Given that we have two independent ways to compute the age of a - response when it is received, we can combine these as - - corrected_received_age = max(now - date_value, age_value) - - and as long as we have either nearly synchronized clocks or all- - - - -Fielding, et. al. Standards Track [Page 77] - -RFC 2068 HTTP/1.1 January 1997 - - - HTTP/1.1 paths, one gets a reliable (conservative) result. - - Note that this correction is applied at each HTTP/1.1 cache along the - path, so that if there is an HTTP/1.0 cache in the path, the correct - received age is computed as long as the receiving cache's clock is - nearly in sync. We don't need end-to-end clock synchronization - (although it is good to have), and there is no explicit clock - synchronization step. 
- - Because of network-imposed delays, some significant interval may pass - from the time that a server generates a response and the time it is - received at the next outbound cache or client. If uncorrected, this - delay could result in improperly low ages. - - Because the request that resulted in the returned Age value must have - been initiated prior to that Age value's generation, we can correct - for delays imposed by the network by recording the time at which the - request was initiated. Then, when an Age value is received, it MUST - be interpreted relative to the time the request was initiated, not - the time that the response was received. This algorithm results in - conservative behavior no matter how much delay is experienced. So, we - compute: - - corrected_initial_age = corrected_received_age - + (now - request_time) - - where "request_time" is the time (according to the local clock) when - the request that elicited this response was sent. - - Summary of age calculation algorithm, when a cache receives a - response: - - /* - * age_value - * is the value of Age: header received by the cache with - * this response. - * date_value - * is the value of the origin server's Date: header - * request_time - * is the (local) time when the cache made the request - * that resulted in this cached response - * response_time - * is the (local) time when the cache received the - * response - * now - * is the current (local) time - */ - apparent_age = max(0, response_time - date_value); - - - -Fielding, et. al. 
Standards Track [Page 78] - -RFC 2068 HTTP/1.1 January 1997 - - - corrected_received_age = max(apparent_age, age_value); - response_delay = response_time - request_time; - corrected_initial_age = corrected_received_age + response_delay; - resident_time = now - response_time; - current_age = corrected_initial_age + resident_time; - - When a cache sends a response, it must add to the - corrected_initial_age the amount of time that the response was - resident locally. It must then transmit this total age, using the Age - header, to the next recipient cache. - - Note that a client cannot reliably tell that a response is first- - hand, but the presence of an Age header indicates that a response - is definitely not first-hand. Also, if the Date in a response is - earlier than the client's local request time, the response is - probably not first-hand (in the absence of serious clock skew). - -13.2.4 Expiration Calculations - - In order to decide whether a response is fresh or stale, we need to - compare its freshness lifetime to its age. The age is calculated as - described in section 13.2.3; this section describes how to calculate - the freshness lifetime, and to determine if a response has expired. - In the discussion below, the values can be represented in any form - appropriate for arithmetic operations. - - We use the term "expires_value" to denote the value of the Expires - header. We use the term "max_age_value" to denote an appropriate - value of the number of seconds carried by the max-age directive of - the Cache-Control header in a response (see section 14.10. 
- - The max-age directive takes priority over Expires, so if max-age is - present in a response, the calculation is simply: - - freshness_lifetime = max_age_value - - Otherwise, if Expires is present in the response, the calculation is: - - freshness_lifetime = expires_value - date_value - - Note that neither of these calculations is vulnerable to clock skew, - since all of the information comes from the origin server. - - If neither Expires nor Cache-Control: max-age appears in the - response, and the response does not include other restrictions on - caching, the cache MAY compute a freshness lifetime using a - heuristic. If the value is greater than 24 hours, the cache must - attach Warning 13 to any response whose age is more than 24 hours if - - - -Fielding, et. al. Standards Track [Page 79] - -RFC 2068 HTTP/1.1 January 1997 - - - such warning has not already been added. - - Also, if the response does have a Last-Modified time, the heuristic - expiration value SHOULD be no more than some fraction of the interval - since that time. A typical setting of this fraction might be 10%. - - The calculation to determine if a response has expired is quite - simple: - - response_is_fresh = (freshness_lifetime > current_age) - -13.2.5 Disambiguating Expiration Values - - Because expiration values are assigned optimistically, it is possible - for two caches to contain fresh values for the same resource that are - different. - - If a client performing a retrieval receives a non-first-hand response - for a request that was already fresh in its own cache, and the Date - header in its existing cache entry is newer than the Date on the new - response, then the client MAY ignore the response. If so, it MAY - retry the request with a "Cache-Control: max-age=0" directive (see - section 14.9), to force a check with the origin server. - - If a cache has two fresh responses for the same representation with - different validators, it MUST use the one with the more recent Date - header. 
This situation may arise because the cache is pooling - responses from other caches, or because a client has asked for a - reload or a revalidation of an apparently fresh cache entry. - -13.2.6 Disambiguating Multiple Responses - - Because a client may be receiving responses via multiple paths, so - that some responses flow through one set of caches and other - responses flow through a different set of caches, a client may - receive responses in an order different from that in which the origin - server sent them. We would like the client to use the most recently - generated response, even if older responses are still apparently - fresh. - - Neither the entity tag nor the expiration value can impose an - ordering on responses, since it is possible that a later response - intentionally carries an earlier expiration time. However, the - HTTP/1.1 specification requires the transmission of Date headers on - every response, and the Date values are ordered to a granularity of - one second. - - - - - -Fielding, et. al. Standards Track [Page 80] - -RFC 2068 HTTP/1.1 January 1997 - - - When a client tries to revalidate a cache entry, and the response it - receives contains a Date header that appears to be older than the one - for the existing entry, then the client SHOULD repeat the request - unconditionally, and include - - Cache-Control: max-age=0 - - to force any intermediate caches to validate their copies directly - with the origin server, or - - Cache-Control: no-cache - - to force any intermediate caches to obtain a new copy from the origin - server. - - If the Date values are equal, then the client may use either response - (or may, if it is being extremely prudent, request a new response). - Servers MUST NOT depend on clients being able to choose - deterministically between responses generated during the same second, - if their expiration times overlap. 
- -13.3 Validation Model - - When a cache has a stale entry that it would like to use as a - response to a client's request, it first has to check with the origin - server (or possibly an intermediate cache with a fresh response) to - see if its cached entry is still usable. We call this "validating" - the cache entry. Since we do not want to have to pay the overhead of - retransmitting the full response if the cached entry is good, and we - do not want to pay the overhead of an extra round trip if the cached - entry is invalid, the HTTP/1.1 protocol supports the use of - conditional methods. - - The key protocol features for supporting conditional methods are - those concerned with "cache validators." When an origin server - generates a full response, it attaches some sort of validator to it, - which is kept with the cache entry. When a client (user agent or - proxy cache) makes a conditional request for a resource for which it - has a cache entry, it includes the associated validator in the - request. - - The server then checks that validator against the current validator - for the entity, and, if they match, it responds with a special status - code (usually, 304 (Not Modified)) and no entity-body. Otherwise, it - returns a full response (including entity-body). Thus, we avoid - transmitting the full response if the validator matches, and we avoid - an extra round trip if it does not match. - - - - -Fielding, et. al. Standards Track [Page 81] - -RFC 2068 HTTP/1.1 January 1997 - - - Note: the comparison functions used to decide if validators match - are defined in section 13.3.3. - - In HTTP/1.1, a conditional request looks exactly the same as a normal - request for the same resource, except that it carries a special - header (which includes the validator) that implicitly turns the - method (usually, GET) into a conditional. - - The protocol includes both positive and negative senses of cache- - validating conditions. 
That is, it is possible to request either that - a method be performed if and only if a validator matches or if and - only if no validators match. - - Note: a response that lacks a validator may still be cached, and - served from cache until it expires, unless this is explicitly - prohibited by a Cache-Control directive. However, a cache cannot do - a conditional retrieval if it does not have a validator for the - entity, which means it will not be refreshable after it expires. - -13.3.1 Last-modified Dates - - The Last-Modified entity-header field value is often used as a cache - validator. In simple terms, a cache entry is considered to be valid - if the entity has not been modified since the Last-Modified value. - -13.3.2 Entity Tag Cache Validators - - The ETag entity-header field value, an entity tag, provides for an - "opaque" cache validator. This may allow more reliable validation in - situations where it is inconvenient to store modification dates, - where the one-second resolution of HTTP date values is not - sufficient, or where the origin server wishes to avoid certain - paradoxes that may arise from the use of modification dates. - - Entity Tags are described in section 3.11. The headers used with - entity tags are described in sections 14.20, 14.25, 14.26 and 14.43. - -13.3.3 Weak and Strong Validators - - Since both origin servers and caches will compare two validators to - decide if they represent the same or different entities, one normally - would expect that if the entity (the entity-body or any entity- - headers) changes in any way, then the associated validator would - change as well. If this is true, then we call this validator a - "strong validator." - - However, there may be cases when a server prefers to change the - validator only on semantically significant changes, and not when - - - -Fielding, et. al. Standards Track [Page 82] - -RFC 2068 HTTP/1.1 January 1997 - - - insignificant aspects of the entity change. 
A validator that does not - always change when the resource changes is a "weak validator." - - Entity tags are normally "strong validators," but the protocol - provides a mechanism to tag an entity tag as "weak." One can think of - a strong validator as one that changes whenever the bits of an entity - changes, while a weak value changes whenever the meaning of an entity - changes. Alternatively, one can think of a strong validator as part - of an identifier for a specific entity, while a weak validator is - part of an identifier for a set of semantically equivalent entities. - - Note: One example of a strong validator is an integer that is - incremented in stable storage every time an entity is changed. - - An entity's modification time, if represented with one-second - resolution, could be a weak validator, since it is possible that - the resource may be modified twice during a single second. - - Support for weak validators is optional; however, weak validators - allow for more efficient caching of equivalent objects; for - example, a hit counter on a site is probably good enough if it is - updated every few days or weeks, and any value during that period - is likely "good enough" to be equivalent. - - A "use" of a validator is either when a client generates a request - and includes the validator in a validating header field, or when a - server compares two validators. - - Strong validators are usable in any context. Weak validators are only - usable in contexts that do not depend on exact equality of an entity. - For example, either kind is usable for a conditional GET of a full - entity. However, only a strong validator is usable for a sub-range - retrieval, since otherwise the client may end up with an internally - inconsistent entity. - - The only function that the HTTP/1.1 protocol defines on validators is - comparison. 
There are two validator comparison functions, depending - on whether the comparison context allows the use of weak validators - or not: - - o The strong comparison function: in order to be considered equal, - both validators must be identical in every way, and neither may be - weak. - o The weak comparison function: in order to be considered equal, both - validators must be identical in every way, but either or both of - them may be tagged as "weak" without affecting the result. - - The weak comparison function MAY be used for simple (non-subrange) - - - -Fielding, et. al. Standards Track [Page 83] - -RFC 2068 HTTP/1.1 January 1997 - - - GET requests. The strong comparison function MUST be used in all - other cases. - - An entity tag is strong unless it is explicitly tagged as weak. - Section 3.11 gives the syntax for entity tags. - - A Last-Modified time, when used as a validator in a request, is - implicitly weak unless it is possible to deduce that it is strong, - using the following rules: - - o The validator is being compared by an origin server to the actual - current validator for the entity and, - o That origin server reliably knows that the associated entity did - not change twice during the second covered by the presented - validator. -or - - o The validator is about to be used by a client in an If-Modified- - Since or If-Unmodified-Since header, because the client has a cache - entry for the associated entity, and - o That cache entry includes a Date value, which gives the time when - the origin server sent the original response, and - o The presented Last-Modified time is at least 60 seconds before the - Date value. -or - - o The validator is being compared by an intermediate cache to the - validator stored in its cache entry for the entity, and - o That cache entry includes a Date value, which gives the time when - the origin server sent the original response, and - o The presented Last-Modified time is at least 60 seconds before the - Date value. 
- - This method relies on the fact that if two different responses were - sent by the origin server during the same second, but both had the - same Last-Modified time, then at least one of those responses would - have a Date value equal to its Last-Modified time. The arbitrary 60- - second limit guards against the possibility that the Date and Last- - Modified values are generated from different clocks, or at somewhat - different times during the preparation of the response. An - implementation may use a value larger than 60 seconds, if it is - believed that 60 seconds is too short. - - If a client wishes to perform a sub-range retrieval on a value for - which it has only a Last-Modified time and no opaque validator, it - may do this only if the Last-Modified time is strong in the sense - described here. - - - - -Fielding, et. al. Standards Track [Page 84] - -RFC 2068 HTTP/1.1 January 1997 - - - A cache or origin server receiving a cache-conditional request, other - than a full-body GET request, MUST use the strong comparison function - to evaluate the condition. - - These rules allow HTTP/1.1 caches and clients to safely perform sub- - range retrievals on values that have been obtained from HTTP/1.0 - servers. - -13.3.4 Rules for When to Use Entity Tags and Last-modified Dates - - We adopt a set of rules and recommendations for origin servers, - clients, and caches regarding when various validator types should be - used, and for what purposes. - - HTTP/1.1 origin servers: - - o SHOULD send an entity tag validator unless it is not feasible to - generate one. - o MAY send a weak entity tag instead of a strong entity tag, if - performance considerations support the use of weak entity tags, or - if it is unfeasible to send a strong entity tag. 
- o SHOULD send a Last-Modified value if it is feasible to send one, - unless the risk of a breakdown in semantic transparency that could - result from using this date in an If-Modified-Since header would - lead to serious problems. - - In other words, the preferred behavior for an HTTP/1.1 origin server - is to send both a strong entity tag and a Last-Modified value. - - In order to be legal, a strong entity tag MUST change whenever the - associated entity value changes in any way. A weak entity tag SHOULD - change whenever the associated entity changes in a semantically - significant way. - - Note: in order to provide semantically transparent caching, an - origin server must avoid reusing a specific strong entity tag value - for two different entities, or reusing a specific weak entity tag - value for two semantically different entities. Cache entries may - persist for arbitrarily long periods, regardless of expiration - times, so it may be inappropriate to expect that a cache will never - again attempt to validate an entry using a validator that it - obtained at some point in the past. - - HTTP/1.1 clients: - - o If an entity tag has been provided by the origin server, MUST - use that entity tag in any cache-conditional request (using - If-Match or If-None-Match). - - - -Fielding, et. al. Standards Track [Page 85] - -RFC 2068 HTTP/1.1 January 1997 - - - o If only a Last-Modified value has been provided by the origin - server, SHOULD use that value in non-subrange cache-conditional - requests (using If-Modified-Since). - o If only a Last-Modified value has been provided by an HTTP/1.0 - origin server, MAY use that value in subrange cache-conditional - requests (using If-Unmodified-Since:). The user agent should - provide a way to disable this, in case of difficulty. - o If both an entity tag and a Last-Modified value have been - provided by the origin server, SHOULD use both validators in - cache-conditional requests. 
This allows both HTTP/1.0 and - HTTP/1.1 caches to respond appropriately. - - An HTTP/1.1 cache, upon receiving a request, MUST use the most - restrictive validator when deciding whether the client's cache entry - matches the cache's own cache entry. This is only an issue when the - request contains both an entity tag and a last-modified-date - validator (If-Modified-Since or If-Unmodified-Since). - - A note on rationale: The general principle behind these rules is - that HTTP/1.1 servers and clients should transmit as much non- - redundant information as is available in their responses and - requests. HTTP/1.1 systems receiving this information will make the - most conservative assumptions about the validators they receive. - - HTTP/1.0 clients and caches will ignore entity tags. Generally, - last-modified values received or used by these systems will support - transparent and efficient caching, and so HTTP/1.1 origin servers - should provide Last-Modified values. In those rare cases where the - use of a Last-Modified value as a validator by an HTTP/1.0 system - could result in a serious problem, then HTTP/1.1 origin servers - should not provide one. - -13.3.5 Non-validating Conditionals - - The principle behind entity tags is that only the service author - knows the semantics of a resource well enough to select an - appropriate cache validation mechanism, and the specification of any - validator comparison function more complex than byte-equality would - open up a can of worms. Thus, comparisons of any other headers - (except Last-Modified, for compatibility with HTTP/1.0) are never - used for purposes of validating a cache entry. - -13.4 Response Cachability - - Unless specifically constrained by a Cache-Control (section 14.9) - directive, a caching system may always store a successful response - (see section 13.8) as a cache entry, may return it without validation - if it is fresh, and may return it after successful validation. If - - - -Fielding, et. al. 
Standards Track [Page 86] - -RFC 2068 HTTP/1.1 January 1997 - - - there is neither a cache validator nor an explicit expiration time - associated with a response, we do not expect it to be cached, but - certain caches may violate this expectation (for example, when little - or no network connectivity is available). A client can usually detect - that such a response was taken from a cache by comparing the Date - header to the current time. - - Note that some HTTP/1.0 caches are known to violate this - expectation without providing any Warning. - - However, in some cases it may be inappropriate for a cache to retain - an entity, or to return it in response to a subsequent request. This - may be because absolute semantic transparency is deemed necessary by - the service author, or because of security or privacy considerations. - Certain Cache-Control directives are therefore provided so that the - server can indicate that certain resource entities, or portions - thereof, may not be cached regardless of other considerations. - - Note that section 14.8 normally prevents a shared cache from saving - and returning a response to a previous request if that request - included an Authorization header. - - A response received with a status code of 200, 203, 206, 300, 301 or - 410 may be stored by a cache and used in reply to a subsequent - request, subject to the expiration mechanism, unless a Cache-Control - directive prohibits caching. However, a cache that does not support - the Range and Content-Range headers MUST NOT cache 206 (Partial - Content) responses. - - A response received with any other status code MUST NOT be returned - in a reply to a subsequent request unless there are Cache-Control - directives or another header(s) that explicitly allow it. For - example, these include the following: an Expires header (section - 14.21); a "max-age", "must-revalidate", "proxy-revalidate", "public" - or "private" Cache-Control directive (section 14.9). 
- -13.5 Constructing Responses From Caches - - The purpose of an HTTP cache is to store information received in - response to requests, for use in responding to future requests. In - many cases, a cache simply returns the appropriate parts of a - response to the requester. However, if the cache holds a cache entry - based on a previous response, it may have to combine parts of a new - response with what is held in the cache entry. - - - - - - - -Fielding, et. al. Standards Track [Page 87] - -RFC 2068 HTTP/1.1 January 1997 - - -13.5.1 End-to-end and Hop-by-hop Headers - - For the purpose of defining the behavior of caches and non-caching - proxies, we divide HTTP headers into two categories: - - o End-to-end headers, which must be transmitted to the - ultimate recipient of a request or response. End-to-end - headers in responses must be stored as part of a cache entry - and transmitted in any response formed from a cache entry. - o Hop-by-hop headers, which are meaningful only for a single - transport-level connection, and are not stored by caches or - forwarded by proxies. - - The following HTTP/1.1 headers are hop-by-hop headers: - - o Connection - o Keep-Alive - o Public - o Proxy-Authenticate - o Transfer-Encoding - o Upgrade - - All other headers defined by HTTP/1.1 are end-to-end headers. - - Hop-by-hop headers introduced in future versions of HTTP MUST be - listed in a Connection header, as described in section 14.10. - -13.5.2 Non-modifiable Headers - - Some features of the HTTP/1.1 protocol, such as Digest - Authentication, depend on the value of certain end-to-end headers. A - cache or non-caching proxy SHOULD NOT modify an end-to-end header - unless the definition of that header requires or specifically allows - that. 
- - A cache or non-caching proxy MUST NOT modify any of the following - fields in a request or response, nor may it add any of these fields - if not already present: - - o Content-Location - o ETag - o Expires - o Last-Modified - - - - - - - - -Fielding, et. al. Standards Track [Page 88] - -RFC 2068 HTTP/1.1 January 1997 - - - A cache or non-caching proxy MUST NOT modify or add any of the - following fields in a response that contains the no-transform Cache- - Control directive, or in any request: - - o Content-Encoding - o Content-Length - o Content-Range - o Content-Type - - A cache or non-caching proxy MAY modify or add these fields in a - response that does not include no-transform, but if it does so, it - MUST add a Warning 14 (Transformation applied) if one does not - already appear in the response. - - Warning: unnecessary modification of end-to-end headers may cause - authentication failures if stronger authentication mechanisms are - introduced in later versions of HTTP. Such authentication - mechanisms may rely on the values of header fields not listed here. - -13.5.3 Combining Headers - - When a cache makes a validating request to a server, and the server - provides a 304 (Not Modified) response, the cache must construct a - response to send to the requesting client. The cache uses the - entity-body stored in the cache entry as the entity-body of this - outgoing response. The end-to-end headers stored in the cache entry - are used for the constructed response, except that any end-to-end - headers provided in the 304 response MUST replace the corresponding - headers from the cache entry. Unless the cache decides to remove the - cache entry, it MUST also replace the end-to-end headers stored with - the cache entry with corresponding headers received in the incoming - response. - - In other words, the set of end-to-end headers received in the - incoming response overrides all corresponding end-to-end headers - stored with the cache entry. 
The cache may add Warning headers (see - section 14.45) to this set. - - If a header field-name in the incoming response matches more than one - header in the cache entry, all such old headers are replaced. - - Note: this rule allows an origin server to use a 304 (Not Modified) - response to update any header associated with a previous response - for the same entity, although it might not always be meaningful or - correct to do so. This rule does not allow an origin server to use - a 304 (not Modified) response to entirely delete a header that it - had provided with a previous response. - - - - -Fielding, et. al. Standards Track [Page 89] - -RFC 2068 HTTP/1.1 January 1997 - - -13.5.4 Combining Byte Ranges - - A response may transfer only a subrange of the bytes of an entity- - body, either because the request included one or more Range - specifications, or because a connection was broken prematurely. After - several such transfers, a cache may have received several ranges of - the same entity-body. - - If a cache has a stored non-empty set of subranges for an entity, and - an incoming response transfers another subrange, the cache MAY - combine the new subrange with the existing set if both the following - conditions are met: - - o Both the incoming response and the cache entry must have a cache - validator. - o The two cache validators must match using the strong comparison - function (see section 13.3.3). - - If either requirement is not meant, the cache must use only the most - recent partial response (based on the Date values transmitted with - every response, and using the incoming response if these values are - equal or missing), and must discard the other partial information. - -13.6 Caching Negotiated Responses - - Use of server-driven content negotiation (section 12), as indicated - by the presence of a Vary header field in a response, alters the - conditions and procedure by which a cache can use the response for - subsequent requests. 
- - A server MUST use the Vary header field (section 14.43) to inform a - cache of what header field dimensions are used to select among - multiple representations of a cachable response. A cache may use the - selected representation (the entity included with that particular - response) for replying to subsequent requests on that resource only - when the subsequent requests have the same or equivalent values for - all header fields specified in the Vary response-header. Requests - with a different value for one or more of those header fields would - be forwarded toward the origin server. - - If an entity tag was assigned to the representation, the forwarded - request SHOULD be conditional and include the entity tags in an If- - None-Match header field from all its cache entries for the Request- - URI. This conveys to the server the set of entities currently held by - the cache, so that if any one of these entities matches the requested - entity, the server can use the ETag header in its 304 (Not Modified) - response to tell the cache which entry is appropriate. If the - entity-tag of the new response matches that of an existing entry, the - - - -Fielding, et. al. Standards Track [Page 90] - -RFC 2068 HTTP/1.1 January 1997 - - - new response SHOULD be used to update the header fields of the - existing entry, and the result MUST be returned to the client. - - The Vary header field may also inform the cache that the - representation was selected using criteria not limited to the - request-headers; in this case, a cache MUST NOT use the response in a - reply to a subsequent request unless the cache relays the new request - to the origin server in a conditional request and the server responds - with 304 (Not Modified), including an entity tag or Content-Location - that indicates which entity should be used. 
- - If any of the existing cache entries contains only partial content - for the associated entity, its entity-tag SHOULD NOT be included in - the If-None-Match header unless the request is for a range that would - be fully satisfied by that entry. - - If a cache receives a successful response whose Content-Location - field matches that of an existing cache entry for the same Request- - URI, whose entity-tag differs from that of the existing entry, and - whose Date is more recent than that of the existing entry, the - existing entry SHOULD NOT be returned in response to future requests, - and should be deleted from the cache. - -13.7 Shared and Non-Shared Caches - - For reasons of security and privacy, it is necessary to make a - distinction between "shared" and "non-shared" caches. A non-shared - cache is one that is accessible only to a single user. Accessibility - in this case SHOULD be enforced by appropriate security mechanisms. - All other caches are considered to be "shared." Other sections of - this specification place certain constraints on the operation of - shared caches in order to prevent loss of privacy or failure of - access controls. - -13.8 Errors or Incomplete Response Cache Behavior - - A cache that receives an incomplete response (for example, with fewer - bytes of data than specified in a Content-Length header) may store - the response. However, the cache MUST treat this as a partial - response. Partial responses may be combined as described in section - 13.5.4; the result might be a full response or might still be - partial. A cache MUST NOT return a partial response to a client - without explicitly marking it as such, using the 206 (Partial - Content) status code. A cache MUST NOT return a partial response - using a status code of 200 (OK). - - If a cache receives a 5xx response while attempting to revalidate an - entry, it may either forward this response to the requesting client, - - - -Fielding, et. al. 
Standards Track [Page 91] - -RFC 2068 HTTP/1.1 January 1997 - - - or act as if the server failed to respond. In the latter case, it MAY - return a previously received response unless the cached entry - includes the "must-revalidate" Cache-Control directive (see section - 14.9). - -13.9 Side Effects of GET and HEAD - - Unless the origin server explicitly prohibits the caching of their - responses, the application of GET and HEAD methods to any resources - SHOULD NOT have side effects that would lead to erroneous behavior if - these responses are taken from a cache. They may still have side - effects, but a cache is not required to consider such side effects in - its caching decisions. Caches are always expected to observe an - origin server's explicit restrictions on caching. - - We note one exception to this rule: since some applications have - traditionally used GETs and HEADs with query URLs (those containing a - "?" in the rel_path part) to perform operations with significant side - effects, caches MUST NOT treat responses to such URLs as fresh unless - the server provides an explicit expiration time. This specifically - means that responses from HTTP/1.0 servers for such URIs should not - be taken from a cache. See section 9.1.1 for related information. - -13.10 Invalidation After Updates or Deletions - - The effect of certain methods at the origin server may cause one or - more existing cache entries to become non-transparently invalid. That - is, although they may continue to be "fresh," they do not accurately - reflect what the origin server would return for a new request. - - There is no way for the HTTP protocol to guarantee that all such - cache entries are marked invalid. For example, the request that - caused the change at the origin server may not have gone through the - proxy where a cache entry is stored. However, several rules help - reduce the likelihood of erroneous behavior. 
- - In this section, the phrase "invalidate an entity" means that the - cache should either remove all instances of that entity from its - storage, or should mark these as "invalid" and in need of a mandatory - revalidation before they can be returned in response to a subsequent - request. - - - - - - - - - - -Fielding, et. al. Standards Track [Page 92] - -RFC 2068 HTTP/1.1 January 1997 - - - Some HTTP methods may invalidate an entity. This is either the entity - referred to by the Request-URI, or by the Location or Content- - Location response-headers (if present). These methods are: - - o PUT - o DELETE - o POST - - In order to prevent denial of service attacks, an invalidation based - on the URI in a Location or Content-Location header MUST only be - performed if the host part is the same as in the Request-URI. - -13.11 Write-Through Mandatory - - All methods that may be expected to cause modifications to the origin - server's resources MUST be written through to the origin server. This - currently includes all methods except for GET and HEAD. A cache MUST - NOT reply to such a request from a client before having transmitted - the request to the inbound server, and having received a - corresponding response from the inbound server. This does not prevent - a cache from sending a 100 (Continue) response before the inbound - server has replied. - - The alternative (known as "write-back" or "copy-back" caching) is not - allowed in HTTP/1.1, due to the difficulty of providing consistent - updates and the problems arising from server, cache, or network - failure prior to write-back. - -13.12 Cache Replacement - - If a new cachable (see sections 14.9.2, 13.2.5, 13.2.6 and 13.8) - response is received from a resource while any existing responses for - the same resource are cached, the cache SHOULD use the new response - to reply to the current request. 
It may insert it into cache storage - and may, if it meets all other requirements, use it to respond to any - future requests that would previously have caused the old response to - be returned. If it inserts the new response into cache storage it - should follow the rules in section 13.5.3. - - Note: a new response that has an older Date header value than - existing cached responses is not cachable. - -13.13 History Lists - - User agents often have history mechanisms, such as "Back" buttons and - history lists, which can be used to redisplay an entity retrieved - earlier in a session. - - - - -Fielding, et. al. Standards Track [Page 93] - -RFC 2068 HTTP/1.1 January 1997 - - - History mechanisms and caches are different. In particular history - mechanisms SHOULD NOT try to show a semantically transparent view of - the current state of a resource. Rather, a history mechanism is meant - to show exactly what the user saw at the time when the resource was - retrieved. - - By default, an expiration time does not apply to history mechanisms. - If the entity is still in storage, a history mechanism should display - it even if the entity has expired, unless the user has specifically - configured the agent to refresh expired history documents. - - This should not be construed to prohibit the history mechanism from - telling the user that a view may be stale. - - Note: if history list mechanisms unnecessarily prevent users from - viewing stale resources, this will tend to force service authors to - avoid using HTTP expiration controls and cache controls when they - would otherwise like to. Service authors may consider it important - that users not be presented with error messages or warning messages - when they use navigation controls (such as BACK) to view previously - fetched resources. 
Even though sometimes such resources ought not - to cached, or ought to expire quickly, user interface - considerations may force service authors to resort to other means - of preventing caching (e.g. "once-only" URLs) in order not to - suffer the effects of improperly functioning history mechanisms. - -14 Header Field Definitions - - This section defines the syntax and semantics of all standard - HTTP/1.1 header fields. For entity-header fields, both sender and - recipient refer to either the client or the server, depending on who - sends and who receives the entity. - - - - - - - - - - - - - - - - - - - -Fielding, et. al. Standards Track [Page 94] - -RFC 2068 HTTP/1.1 January 1997 - - -14.1 Accept - - The Accept request-header field can be used to specify certain media - types which are acceptable for the response. Accept headers can be - used to indicate that the request is specifically limited to a small - set of desired types, as in the case of a request for an in-line - image. - - Accept = "Accept" ":" - #( media-range [ accept-params ] ) - - media-range = ( "*/*" - | ( type "/" "*" ) - | ( type "/" subtype ) - ) *( ";" parameter ) - - accept-params = ";" "q" "=" qvalue *( accept-extension ) - - accept-extension = ";" token [ "=" ( token | quoted-string ) ] - - The asterisk "*" character is used to group media types into ranges, - with "*/*" indicating all media types and "type/*" indicating all - subtypes of that type. The media-range MAY include media type - parameters that are applicable to that range. - - Each media-range MAY be followed by one or more accept-params, - beginning with the "q" parameter for indicating a relative quality - factor. The first "q" parameter (if any) separates the media-range - parameter(s) from the accept-params. Quality factors allow the user - or user agent to indicate the relative degree of preference for that - media-range, using the qvalue scale from 0 to 1 (section 3.9). The - default value is q=1. 
- - Note: Use of the "q" parameter name to separate media type - parameters from Accept extension parameters is due to historical - practice. Although this prevents any media type parameter named - "q" from being used with a media range, such an event is believed - to be unlikely given the lack of any "q" parameters in the IANA - media type registry and the rare usage of any media type parameters - in Accept. Future media types should be discouraged from - registering any parameter named "q". - - The example - - Accept: audio/*; q=0.2, audio/basic - - SHOULD be interpreted as "I prefer audio/basic, but send me any audio - type if it is the best available after an 80% mark-down in quality." - - - -Fielding, et. al. Standards Track [Page 95] - -RFC 2068 HTTP/1.1 January 1997 - - - If no Accept header field is present, then it is assumed that the - client accepts all media types. If an Accept header field is present, - and if the server cannot send a response which is acceptable - according to the combined Accept field value, then the server SHOULD - send a 406 (not acceptable) response. - - A more elaborate example is - - Accept: text/plain; q=0.5, text/html, - text/x-dvi; q=0.8, text/x-c - - Verbally, this would be interpreted as "text/html and text/x-c are - the preferred media types, but if they do not exist, then send the - text/x-dvi entity, and if that does not exist, send the text/plain - entity." - - Media ranges can be overridden by more specific media ranges or - specific media types. If more than one media range applies to a given - type, the most specific reference has precedence. For example, - - Accept: text/*, text/html, text/html;level=1, */* - - have the following precedence: - - 1) text/html;level=1 - 2) text/html - 3) text/* - 4) */* - - The media type quality factor associated with a given type is - determined by finding the media range with the highest precedence - which matches that type. 
For example, - - Accept: text/*;q=0.3, text/html;q=0.7, text/html;level=1, - text/html;level=2;q=0.4, */*;q=0.5 - - would cause the following values to be associated: - - text/html;level=1 = 1 - text/html = 0.7 - text/plain = 0.3 - image/jpeg = 0.5 - text/html;level=2 = 0.4 - text/html;level=3 = 0.7 - - Note: A user agent may be provided with a default set of quality - values for certain media ranges. However, unless the user agent is - a closed system which cannot interact with other rendering agents, - - - -Fielding, et. al. Standards Track [Page 96] - -RFC 2068 HTTP/1.1 January 1997 - - - this default set should be configurable by the user. - -14.2 Accept-Charset - - The Accept-Charset request-header field can be used to indicate what - character sets are acceptable for the response. This field allows - clients capable of understanding more comprehensive or special- - purpose character sets to signal that capability to a server which is - capable of representing documents in those character sets. The ISO- - 8859-1 character set can be assumed to be acceptable to all user - agents. - - Accept-Charset = "Accept-Charset" ":" - 1#( charset [ ";" "q" "=" qvalue ] ) - - Character set values are described in section 3.4. Each charset may - be given an associated quality value which represents the user's - preference for that charset. The default value is q=1. An example is - - Accept-Charset: iso-8859-5, unicode-1-1;q=0.8 - - If no Accept-Charset header is present, the default is that any - character set is acceptable. If an Accept-Charset header is present, - and if the server cannot send a response which is acceptable - according to the Accept-Charset header, then the server SHOULD send - an error response with the 406 (not acceptable) status code, though - the sending of an unacceptable response is also allowed. 
- -14.3 Accept-Encoding - - The Accept-Encoding request-header field is similar to Accept, but - restricts the content-coding values (section 14.12) which are - acceptable in the response. - - Accept-Encoding = "Accept-Encoding" ":" - #( content-coding ) - - An example of its use is - - Accept-Encoding: compress, gzip - - If no Accept-Encoding header is present in a request, the server MAY - assume that the client will accept any content coding. If an Accept- - Encoding header is present, and if the server cannot send a response - which is acceptable according to the Accept-Encoding header, then the - server SHOULD send an error response with the 406 (Not Acceptable) - status code. - - - - -Fielding, et. al. Standards Track [Page 97] - -RFC 2068 HTTP/1.1 January 1997 - - - An empty Accept-Encoding value indicates none are acceptable. - -14.4 Accept-Language - - The Accept-Language request-header field is similar to Accept, but - restricts the set of natural languages that are preferred as a - response to the request. - - Accept-Language = "Accept-Language" ":" - 1#( language-range [ ";" "q" "=" qvalue ] ) - - language-range = ( ( 1*8ALPHA *( "-" 1*8ALPHA ) ) | "*" ) - - Each language-range MAY be given an associated quality value which - represents an estimate of the user's preference for the languages - specified by that range. The quality value defaults to "q=1". For - example, - - Accept-Language: da, en-gb;q=0.8, en;q=0.7 - - would mean: "I prefer Danish, but will accept British English and - other types of English." A language-range matches a language-tag if - it exactly equals the tag, or if it exactly equals a prefix of the - tag such that the first tag character following the prefix is "-". - The special range "*", if present in the Accept-Language field, - matches every tag not matched by any other range present in the - Accept-Language field. 
- - Note: This use of a prefix matching rule does not imply that - language tags are assigned to languages in such a way that it is - always true that if a user understands a language with a certain - tag, then this user will also understand all languages with tags - for which this tag is a prefix. The prefix rule simply allows the - use of prefix tags if this is the case. - - The language quality factor assigned to a language-tag by the - Accept-Language field is the quality value of the longest language- - range in the field that matches the language-tag. If no language- - range in the field matches the tag, the language quality factor - assigned is 0. If no Accept-Language header is present in the - request, the server SHOULD assume that all languages are equally - acceptable. If an Accept-Language header is present, then all - languages which are assigned a quality factor greater than 0 are - acceptable. - - It may be contrary to the privacy expectations of the user to send an - Accept-Language header with the complete linguistic preferences of - the user in every request. For a discussion of this issue, see - - - -Fielding, et. al. Standards Track [Page 98] - -RFC 2068 HTTP/1.1 January 1997 - - - section 15.7. - - Note: As intelligibility is highly dependent on the individual - user, it is recommended that client applications make the choice of - linguistic preference available to the user. If the choice is not - made available, then the Accept-Language header field must not be - given in the request. - -14.5 Accept-Ranges - - The Accept-Ranges response-header field allows the server to indicate - its acceptance of range requests for a resource: - - Accept-Ranges = "Accept-Ranges" ":" acceptable-ranges - - acceptable-ranges = 1#range-unit | "none" - - Origin servers that accept byte-range requests MAY send - - Accept-Ranges: bytes - - but are not required to do so. 
Clients MAY generate byte-range - requests without having received this header for the resource - involved. - - Servers that do not accept any kind of range request for a resource - MAY send - - Accept-Ranges: none - - to advise the client not to attempt a range request. - -14.6 Age - - The Age response-header field conveys the sender's estimate of the - amount of time since the response (or its revalidation) was generated - at the origin server. A cached response is "fresh" if its age does - not exceed its freshness lifetime. Age values are calculated as - specified in section 13.2.3. - - Age = "Age" ":" age-value - - age-value = delta-seconds - - Age values are non-negative decimal integers, representing time in - seconds. - - - - - -Fielding, et. al. Standards Track [Page 99] - -RFC 2068 HTTP/1.1 January 1997 - - - If a cache receives a value larger than the largest positive integer - it can represent, or if any of its age calculations overflows, it - MUST transmit an Age header with a value of 2147483648 (2^31). - HTTP/1.1 caches MUST send an Age header in every response. Caches - SHOULD use an arithmetic type of at least 31 bits of range. - -14.7 Allow - - The Allow entity-header field lists the set of methods supported by - the resource identified by the Request-URI. The purpose of this field - is strictly to inform the recipient of valid methods associated with - the resource. An Allow header field MUST be present in a 405 (Method - Not Allowed) response. - - Allow = "Allow" ":" 1#method - - Example of use: - - Allow: GET, HEAD, PUT - - This field cannot prevent a client from trying other methods. - However, the indications given by the Allow header field value SHOULD - be followed. The actual set of allowed methods is defined by the - origin server at the time of each request. - - The Allow header field MAY be provided with a PUT request to - recommend the methods to be supported by the new or modified - resource. 
The server is not required to support these methods and - SHOULD include an Allow header in the response giving the actual - supported methods. - - A proxy MUST NOT modify the Allow header field even if it does not - understand all the methods specified, since the user agent MAY have - other means of communicating with the origin server. - - The Allow header field does not indicate what methods are implemented - at the server level. Servers MAY use the Public response-header field - (section 14.35) to describe what methods are implemented on the - server as a whole. - -14.8 Authorization - - A user agent that wishes to authenticate itself with a server-- - usually, but not necessarily, after receiving a 401 response--MAY do - so by including an Authorization request-header field with the - request. The Authorization field value consists of credentials - containing the authentication information of the user agent for the - realm of the resource being requested. - - - -Fielding, et. al. Standards Track [Page 100] - -RFC 2068 HTTP/1.1 January 1997 - - - Authorization = "Authorization" ":" credentials - - HTTP access authentication is described in section 11. If a request - is authenticated and a realm specified, the same credentials SHOULD - be valid for all other requests within this realm. - - When a shared cache (see section 13.7) receives a request containing - an Authorization field, it MUST NOT return the corresponding response - as a reply to any other request, unless one of the following specific - exceptions holds: - - 1. If the response includes the "proxy-revalidate" Cache-Control - directive, the cache MAY use that response in replying to a - subsequent request, but a proxy cache MUST first revalidate it with - the origin server, using the request-headers from the new request - to allow the origin server to authenticate the new request. - 2. 
If the response includes the "must-revalidate" Cache-Control - directive, the cache MAY use that response in replying to a - subsequent request, but all caches MUST first revalidate it with - the origin server, using the request-headers from the new request - to allow the origin server to authenticate the new request. - 3. If the response includes the "public" Cache-Control directive, it - may be returned in reply to any subsequent request. - -14.9 Cache-Control - - The Cache-Control general-header field is used to specify directives - that MUST be obeyed by all caching mechanisms along the - request/response chain. The directives specify behavior intended to - prevent caches from adversely interfering with the request or - response. These directives typically override the default caching - algorithms. Cache directives are unidirectional in that the presence - of a directive in a request does not imply that the same directive - should be given in the response. - - Note that HTTP/1.0 caches may not implement Cache-Control and may - only implement Pragma: no-cache (see section 14.32). - - Cache directives must be passed through by a proxy or gateway - application, regardless of their significance to that application, - since the directives may be applicable to all recipients along the - request/response chain. It is not possible to specify a cache- - directive for a specific cache. - - Cache-Control = "Cache-Control" ":" 1#cache-directive - - cache-directive = cache-request-directive - | cache-response-directive - - - -Fielding, et. al. 
Standards Track [Page 101] - -RFC 2068 HTTP/1.1 January 1997 - - - cache-request-directive = - "no-cache" [ "=" <"> 1#field-name <"> ] - | "no-store" - | "max-age" "=" delta-seconds - | "max-stale" [ "=" delta-seconds ] - | "min-fresh" "=" delta-seconds - | "only-if-cached" - | cache-extension - - cache-response-directive = - "public" - | "private" [ "=" <"> 1#field-name <"> ] - | "no-cache" [ "=" <"> 1#field-name <"> ] - | "no-store" - | "no-transform" - | "must-revalidate" - | "proxy-revalidate" - | "max-age" "=" delta-seconds - | cache-extension - - cache-extension = token [ "=" ( token | quoted-string ) ] - - When a directive appears without any 1#field-name parameter, the - directive applies to the entire request or response. When such a - directive appears with a 1#field-name parameter, it applies only to - the named field or fields, and not to the rest of the request or - response. This mechanism supports extensibility; implementations of - future versions of the HTTP protocol may apply these directives to - header fields not defined in HTTP/1.1. - - The cache-control directives can be broken down into these general - categories: - - o Restrictions on what is cachable; these may only be imposed by the - origin server. - o Restrictions on what may be stored by a cache; these may be imposed - by either the origin server or the user agent. - o Modifications of the basic expiration mechanism; these may be - imposed by either the origin server or the user agent. - o Controls over cache revalidation and reload; these may only be - imposed by a user agent. - o Control over transformation of entities. - o Extensions to the caching system. - - - - - - - - -Fielding, et. al. Standards Track [Page 102] - -RFC 2068 HTTP/1.1 January 1997 - - -14.9.1 What is Cachable - - By default, a response is cachable if the requirements of the request - method, request header fields, and the response status indicate that - it is cachable. 
Section 13.4 summarizes these defaults for - cachability. The following Cache-Control response directives allow an - origin server to override the default cachability of a response: - -public - Indicates that the response is cachable by any cache, even if it - would normally be non-cachable or cachable only within a non-shared - cache. (See also Authorization, section 14.8, for additional - details.) - -private - Indicates that all or part of the response message is intended for a - single user and MUST NOT be cached by a shared cache. This allows an - origin server to state that the specified parts of the response are - intended for only one user and are not a valid response for requests - by other users. A private (non-shared) cache may cache the response. - - Note: This usage of the word private only controls where the - response may be cached, and cannot ensure the privacy of the - message content. - -no-cache - Indicates that all or part of the response message MUST NOT be cached - anywhere. This allows an origin server to prevent caching even by - caches that have been configured to return stale responses to client - requests. - - Note: Most HTTP/1.0 caches will not recognize or obey this - directive. - -14.9.2 What May be Stored by Caches - - The purpose of the no-store directive is to prevent the inadvertent - release or retention of sensitive information (for example, on backup - tapes). The no-store directive applies to the entire message, and may - be sent either in a response or in a request. If sent in a request, a - cache MUST NOT store any part of either this request or any response - to it. If sent in a response, a cache MUST NOT store any part of - either this response or the request that elicited it. This directive - applies to both non-shared and shared caches. 
"MUST NOT store" in - this context means that the cache MUST NOT intentionally store the - information in non-volatile storage, and MUST make a best-effort - attempt to remove the information from volatile storage as promptly - as possible after forwarding it. - - - -Fielding, et. al. Standards Track [Page 103] - -RFC 2068 HTTP/1.1 January 1997 - - - Even when this directive is associated with a response, users may - explicitly store such a response outside of the caching system (e.g., - with a "Save As" dialog). History buffers may store such responses as - part of their normal operation. - - The purpose of this directive is to meet the stated requirements of - certain users and service authors who are concerned about accidental - releases of information via unanticipated accesses to cache data - structures. While the use of this directive may improve privacy in - some cases, we caution that it is NOT in any way a reliable or - sufficient mechanism for ensuring privacy. In particular, malicious - or compromised caches may not recognize or obey this directive; and - communications networks may be vulnerable to eavesdropping. - -14.9.3 Modifications of the Basic Expiration Mechanism - - The expiration time of an entity may be specified by the origin - server using the Expires header (see section 14.21). Alternatively, - it may be specified using the max-age directive in a response. - - If a response includes both an Expires header and a max-age - directive, the max-age directive overrides the Expires header, even - if the Expires header is more restrictive. This rule allows an origin - server to provide, for a given response, a longer expiration time to - an HTTP/1.1 (or later) cache than to an HTTP/1.0 cache. This may be - useful if certain HTTP/1.0 caches improperly calculate ages or - expiration times, perhaps due to desynchronized clocks. - - Note: most older caches, not compliant with this specification, do - not implement any Cache-Control directives. 
An origin server - wishing to use a Cache-Control directive that restricts, but does - not prevent, caching by an HTTP/1.1-compliant cache may exploit the - requirement that the max-age directive overrides the Expires - header, and the fact that non-HTTP/1.1-compliant caches do not - observe the max-age directive. - - Other directives allow an user agent to modify the basic expiration - mechanism. These directives may be specified on a request: - - max-age - Indicates that the client is willing to accept a response whose age - is no greater than the specified time in seconds. Unless max-stale - directive is also included, the client is not willing to accept a - stale response. - - min-fresh - Indicates that the client is willing to accept a response whose - freshness lifetime is no less than its current age plus the - - - -Fielding, et. al. Standards Track [Page 104] - -RFC 2068 HTTP/1.1 January 1997 - - - specified time in seconds. That is, the client wants a response - that will still be fresh for at least the specified number of - seconds. - - max-stale - Indicates that the client is willing to accept a response that has - exceeded its expiration time. If max-stale is assigned a value, - then the client is willing to accept a response that has exceeded - its expiration time by no more than the specified number of - seconds. If no value is assigned to max-stale, then the client is - willing to accept a stale response of any age. - - If a cache returns a stale response, either because of a max-stale - directive on a request, or because the cache is configured to - override the expiration time of a response, the cache MUST attach a - Warning header to the stale response, using Warning 10 (Response is - stale). 
- -14.9.4 Cache Revalidation and Reload Controls - - Sometimes an user agent may want or need to insist that a cache - revalidate its cache entry with the origin server (and not just with - the next cache along the path to the origin server), or to reload its - cache entry from the origin server. End-to-end revalidation may be - necessary if either the cache or the origin server has overestimated - the expiration time of the cached response. End-to-end reload may be - necessary if the cache entry has become corrupted for some reason. - - End-to-end revalidation may be requested either when the client does - not have its own local cached copy, in which case we call it - "unspecified end-to-end revalidation", or when the client does have a - local cached copy, in which case we call it "specific end-to-end - revalidation." - - The client can specify these three kinds of action using Cache- - Control request directives: - - End-to-end reload - The request includes a "no-cache" Cache-Control directive or, for - compatibility with HTTP/1.0 clients, "Pragma: no-cache". No field - names may be included with the no-cache directive in a request. The - server MUST NOT use a cached copy when responding to such a - request. - - Specific end-to-end revalidation - The request includes a "max-age=0" Cache-Control directive, which - forces each cache along the path to the origin server to revalidate - its own entry, if any, with the next cache or server. The initial - - - -Fielding, et. al. Standards Track [Page 105] - -RFC 2068 HTTP/1.1 January 1997 - - - request includes a cache-validating conditional with the client's - current validator. - - Unspecified end-to-end revalidation - The request includes "max-age=0" Cache-Control directive, which - forces each cache along the path to the origin server to revalidate - its own entry, if any, with the next cache or server. 
The initial - request does not include a cache-validating conditional; the first - cache along the path (if any) that holds a cache entry for this - resource includes a cache-validating conditional with its current - validator. - - When an intermediate cache is forced, by means of a max-age=0 - directive, to revalidate its own cache entry, and the client has - supplied its own validator in the request, the supplied validator may - differ from the validator currently stored with the cache entry. In - this case, the cache may use either validator in making its own - request without affecting semantic transparency. - - However, the choice of validator may affect performance. The best - approach is for the intermediate cache to use its own validator when - making its request. If the server replies with 304 (Not Modified), - then the cache should return its now validated copy to the client - with a 200 (OK) response. If the server replies with a new entity and - cache validator, however, the intermediate cache should compare the - returned validator with the one provided in the client's request, - using the strong comparison function. If the client's validator is - equal to the origin server's, then the intermediate cache simply - returns 304 (Not Modified). Otherwise, it returns the new entity with - a 200 (OK) response. - - If a request includes the no-cache directive, it should not include - min-fresh, max-stale, or max-age. - - In some cases, such as times of extremely poor network connectivity, - a client may want a cache to return only those responses that it - currently has stored, and not to reload or revalidate with the origin - server. To do this, the client may include the only-if-cached - directive in a request. If it receives this directive, a cache SHOULD - either respond using a cached entry that is consistent with the other - constraints of the request, or respond with a 504 (Gateway Timeout) - status. 
However, if a group of caches is being operated as a unified - system with good internal connectivity, such a request MAY be - forwarded within that group of caches. - - Because a cache may be configured to ignore a server's specified - expiration time, and because a client request may include a max-stale - directive (which has a similar effect), the protocol also includes a - - - -Fielding, et. al. Standards Track [Page 106] - -RFC 2068 HTTP/1.1 January 1997 - - - mechanism for the origin server to require revalidation of a cache - entry on any subsequent use. When the must-revalidate directive is - present in a response received by a cache, that cache MUST NOT use - the entry after it becomes stale to respond to a subsequent request - without first revalidating it with the origin server. (I.e., the - cache must do an end-to-end revalidation every time, if, based solely - on the origin server's Expires or max-age value, the cached response - is stale.) - - The must-revalidate directive is necessary to support reliable - operation for certain protocol features. In all circumstances an - HTTP/1.1 cache MUST obey the must-revalidate directive; in - particular, if the cache cannot reach the origin server for any - reason, it MUST generate a 504 (Gateway Timeout) response. - - Servers should send the must-revalidate directive if and only if - failure to revalidate a request on the entity could result in - incorrect operation, such as a silently unexecuted financial - transaction. Recipients MUST NOT take any automated action that - violates this directive, and MUST NOT automatically provide an - unvalidated copy of the entity if revalidation fails. - - Although this is not recommended, user agents operating under severe - connectivity constraints may violate this directive but, if so, MUST - explicitly warn the user that an unvalidated response has been - provided. The warning MUST be provided on each unvalidated access, - and SHOULD require explicit user confirmation. 
- - The proxy-revalidate directive has the same meaning as the must- - revalidate directive, except that it does not apply to non-shared - user agent caches. It can be used on a response to an authenticated - request to permit the user's cache to store and later return the - response without needing to revalidate it (since it has already been - authenticated once by that user), while still requiring proxies that - service many users to revalidate each time (in order to make sure - that each user has been authenticated). Note that such authenticated - responses also need the public cache control directive in order to - allow them to be cached at all. - -14.9.5 No-Transform Directive - - Implementers of intermediate caches (proxies) have found it useful to - convert the media type of certain entity bodies. A proxy might, for - example, convert between image formats in order to save cache space - or to reduce the amount of traffic on a slow link. HTTP has to date - been silent on these transformations. - - - - - -Fielding, et. al. Standards Track [Page 107] - -RFC 2068 HTTP/1.1 January 1997 - - - Serious operational problems have already occurred, however, when - these transformations have been applied to entity bodies intended for - certain kinds of applications. For example, applications for medical - imaging, scientific data analysis and those using end-to-end - authentication, all depend on receiving an entity body that is bit - for bit identical to the original entity-body. - - Therefore, if a response includes the no-transform directive, an - intermediate cache or proxy MUST NOT change those headers that are - listed in section 13.5.2 as being subject to the no-transform - directive. This implies that the cache or proxy must not change any - aspect of the entity-body that is specified by these headers. 
- -14.9.6 Cache Control Extensions - - The Cache-Control header field can be extended through the use of one - or more cache-extension tokens, each with an optional assigned value. - Informational extensions (those which do not require a change in - cache behavior) may be added without changing the semantics of other - directives. Behavioral extensions are designed to work by acting as - modifiers to the existing base of cache directives. Both the new - directive and the standard directive are supplied, such that - applications which do not understand the new directive will default - to the behavior specified by the standard directive, and those that - understand the new directive will recognize it as modifying the - requirements associated with the standard directive. In this way, - extensions to the Cache-Control directives can be made without - requiring changes to the base protocol. - - This extension mechanism depends on a HTTP cache obeying all of the - cache-control directives defined for its native HTTP-version, obeying - certain extensions, and ignoring all directives that it does not - understand. - - For example, consider a hypothetical new response directive called - "community" which acts as a modifier to the "private" directive. We - define this new directive to mean that, in addition to any non-shared - cache, any cache which is shared only by members of the community - named within its value may cache the response. An origin server - wishing to allow the "UCI" community to use an otherwise private - response in their shared cache(s) may do so by including - - Cache-Control: private, community="UCI" - - A cache seeing this header field will act correctly even if the cache - does not understand the "community" cache-extension, since it will - also see and understand the "private" directive and thus default to - the safe behavior. - - - -Fielding, et. al. 
Standards Track [Page 108] - -RFC 2068 HTTP/1.1 January 1997 - - - Unrecognized cache-directives MUST be ignored; it is assumed that any - cache-directive likely to be unrecognized by an HTTP/1.1 cache will - be combined with standard directives (or the response's default - cachability) such that the cache behavior will remain minimally - correct even if the cache does not understand the extension(s). - -14.10 Connection - - The Connection general-header field allows the sender to specify - options that are desired for that particular connection and MUST NOT - be communicated by proxies over further connections. - - The Connection header has the following grammar: - - Connection-header = "Connection" ":" 1#(connection-token) - connection-token = token - - HTTP/1.1 proxies MUST parse the Connection header field before a - message is forwarded and, for each connection-token in this field, - remove any header field(s) from the message with the same name as the - connection-token. Connection options are signaled by the presence of - a connection-token in the Connection header field, not by any - corresponding additional header field(s), since the additional header - field may not be sent if there are no parameters associated with that - connection option. HTTP/1.1 defines the "close" connection option - for the sender to signal that the connection will be closed after - completion of the response. For example, - - Connection: close - - in either the request or the response header fields indicates that - the connection should not be considered `persistent' (section 8.1) - after the current request/response is complete. - - HTTP/1.1 applications that do not support persistent connections MUST - include the "close" connection option in every message. - -14.11 Content-Base - - The Content-Base entity-header field may be used to specify the base - URI for resolving relative URLs within the entity. 
This header field - is described as Base in RFC 1808, which is expected to be revised. - - Content-Base = "Content-Base" ":" absoluteURI - - If no Content-Base field is present, the base URI of an entity is - defined either by its Content-Location (if that Content-Location URI - is an absolute URI) or the URI used to initiate the request, in that - - - -Fielding, et. al. Standards Track [Page 109] - -RFC 2068 HTTP/1.1 January 1997 - - - order of precedence. Note, however, that the base URI of the contents - within the entity-body may be redefined within that entity-body. - -14.12 Content-Encoding - - The Content-Encoding entity-header field is used as a modifier to the - media-type. When present, its value indicates what additional content - codings have been applied to the entity-body, and thus what decoding - mechanisms MUST be applied in order to obtain the media-type - referenced by the Content-Type header field. Content-Encoding is - primarily used to allow a document to be compressed without losing - the identity of its underlying media type. - - Content-Encoding = "Content-Encoding" ":" 1#content-coding - - Content codings are defined in section 3.5. An example of its use is - - Content-Encoding: gzip - - The Content-Encoding is a characteristic of the entity identified by - the Request-URI. Typically, the entity-body is stored with this - encoding and is only decoded before rendering or analogous usage. - - If multiple encodings have been applied to an entity, the content - codings MUST be listed in the order in which they were applied. - - Additional information about the encoding parameters MAY be provided - by other entity-header fields not defined by this specification. - -14.13 Content-Language - - The Content-Language entity-header field describes the natural - language(s) of the intended audience for the enclosed entity. Note - that this may not be equivalent to all the languages used within the - entity-body. 
- - Content-Language = "Content-Language" ":" 1#language-tag - - Language tags are defined in section 3.10. The primary purpose of - Content-Language is to allow a user to identify and differentiate - entities according to the user's own preferred language. Thus, if the - body content is intended only for a Danish-literate audience, the - appropriate field is - - Content-Language: da - - If no Content-Language is specified, the default is that the content - is intended for all language audiences. This may mean that the sender - - - -Fielding, et. al. Standards Track [Page 110] - -RFC 2068 HTTP/1.1 January 1997 - - - does not consider it to be specific to any natural language, or that - the sender does not know for which language it is intended. - - Multiple languages MAY be listed for content that is intended for - multiple audiences. For example, a rendition of the "Treaty of - Waitangi," presented simultaneously in the original Maori and English - versions, would call for - - Content-Language: mi, en - - However, just because multiple languages are present within an entity - does not mean that it is intended for multiple linguistic audiences. - An example would be a beginner's language primer, such as "A First - Lesson in Latin," which is clearly intended to be used by an - English-literate audience. In this case, the Content-Language should - only include "en". - - Content-Language may be applied to any media type -- it is not - limited to textual documents. - -14.14 Content-Length - - The Content-Length entity-header field indicates the size of the - message-body, in decimal number of octets, sent to the recipient or, - in the case of the HEAD method, the size of the entity-body that - would have been sent had the request been a GET. 
- - Content-Length = "Content-Length" ":" 1*DIGIT - - An example is - - Content-Length: 3495 - - Applications SHOULD use this field to indicate the size of the - message-body to be transferred, regardless of the media type of the - entity. It must be possible for the recipient to reliably determine - the end of HTTP/1.1 requests containing an entity-body, e.g., because - the request has a valid Content-Length field, uses Transfer-Encoding: - chunked or a multipart body. - - Any Content-Length greater than or equal to zero is a valid value. - Section 4.4 describes how to determine the length of a message-body - if a Content-Length is not given. - - - - - - - - -Fielding, et. al. Standards Track [Page 111] - -RFC 2068 HTTP/1.1 January 1997 - - - Note: The meaning of this field is significantly different from the - corresponding definition in MIME, where it is an optional field - used within the "message/external-body" content-type. In HTTP, it - SHOULD be sent whenever the message's length can be determined - prior to being transferred. - -14.15 Content-Location - - The Content-Location entity-header field may be used to supply the - resource location for the entity enclosed in the message. In the case - where a resource has multiple entities associated with it, and those - entities actually have separate locations by which they might be - individually accessed, the server should provide a Content-Location - for the particular variant which is returned. In addition, a server - SHOULD provide a Content-Location for the resource corresponding to - the response entity. - - Content-Location = "Content-Location" ":" - ( absoluteURI | relativeURI ) - - If no Content-Base header field is present, the value of Content- - Location also defines the base URL for the entity (see section - 14.11). 
- - The Content-Location value is not a replacement for the original - requested URI; it is only a statement of the location of the resource - corresponding to this particular entity at the time of the request. - Future requests MAY use the Content-Location URI if the desire is to - identify the source of that particular entity. - - A cache cannot assume that an entity with a Content-Location - different from the URI used to retrieve it can be used to respond to - later requests on that Content-Location URI. However, the Content- - Location can be used to differentiate between multiple entities - retrieved from a single requested resource, as described in section - 13.6. - - If the Content-Location is a relative URI, the URI is interpreted - relative to any Content-Base URI provided in the response. If no - Content-Base is provided, the relative URI is interpreted relative to - the Request-URI. - - - - - - - - - - -Fielding, et. al. Standards Track [Page 112] - -RFC 2068 HTTP/1.1 January 1997 - - -14.16 Content-MD5 - - The Content-MD5 entity-header field, as defined in RFC 1864 [23], is - an MD5 digest of the entity-body for the purpose of providing an - end-to-end message integrity check (MIC) of the entity-body. (Note: a - MIC is good for detecting accidental modification of the entity-body - in transit, but is not proof against malicious attacks.) - - Content-MD5 = "Content-MD5" ":" md5-digest - - md5-digest = - - The Content-MD5 header field may be generated by an origin server to - function as an integrity check of the entity-body. Only origin - servers may generate the Content-MD5 header field; proxies and - gateways MUST NOT generate it, as this would defeat its value as an - end-to-end integrity check. Any recipient of the entity-body, - including gateways and proxies, MAY check that the digest value in - this header field matches that of the entity-body as received. 
- - The MD5 digest is computed based on the content of the entity-body, - including any Content-Encoding that has been applied, but not - including any Transfer-Encoding that may have been applied to the - message-body. If the message is received with a Transfer-Encoding, - that encoding must be removed prior to checking the Content-MD5 value - against the received entity. - - This has the result that the digest is computed on the octets of the - entity-body exactly as, and in the order that, they would be sent if - no Transfer-Encoding were being applied. - - HTTP extends RFC 1864 to permit the digest to be computed for MIME - composite media-types (e.g., multipart/* and message/rfc822), but - this does not change how the digest is computed as defined in the - preceding paragraph. - - Note: There are several consequences of this. The entity-body for - composite types may contain many body-parts, each with its own MIME - and HTTP headers (including Content-MD5, Content-Transfer-Encoding, - and Content-Encoding headers). If a body-part has a Content- - Transfer-Encoding or Content-Encoding header, it is assumed that - the content of the body-part has had the encoding applied, and the - body-part is included in the Content-MD5 digest as is -- i.e., - after the application. The Transfer-Encoding header field is not - allowed within body-parts. - - Note: while the definition of Content-MD5 is exactly the same for - HTTP as in RFC 1864 for MIME entity-bodies, there are several ways - - - -Fielding, et. al. Standards Track [Page 113] - -RFC 2068 HTTP/1.1 January 1997 - - - in which the application of Content-MD5 to HTTP entity-bodies - differs from its application to MIME entity-bodies. One is that - HTTP, unlike MIME, does not use Content-Transfer-Encoding, and does - use Transfer-Encoding and Content-Encoding. 
Another is that HTTP - more frequently uses binary content types than MIME, so it is worth - noting that, in such cases, the byte order used to compute the - digest is the transmission byte order defined for the type. Lastly, - HTTP allows transmission of text types with any of several line - break conventions and not just the canonical form using CRLF. - Conversion of all line breaks to CRLF should not be done before - computing or checking the digest: the line break convention used in - the text actually transmitted should be left unaltered when - computing the digest. - -14.17 Content-Range - - The Content-Range entity-header is sent with a partial entity-body to - specify where in the full entity-body the partial body should be - inserted. It also indicates the total size of the full entity-body. - When a server returns a partial response to a client, it must - describe both the extent of the range covered by the response, and - the length of the entire entity-body. - - Content-Range = "Content-Range" ":" content-range-spec - - content-range-spec = byte-content-range-spec - - byte-content-range-spec = bytes-unit SP first-byte-pos "-" - last-byte-pos "/" entity-length - - entity-length = 1*DIGIT - - Unlike byte-ranges-specifier values, a byte-content-range-spec may - only specify one range, and must contain absolute byte positions for - both the first and last byte of the range. - - A byte-content-range-spec whose last-byte-pos value is less than its - first-byte-pos value, or whose entity-length value is less than or - equal to its last-byte-pos value, is invalid. The recipient of an - invalid byte-content-range-spec MUST ignore it and any content - transferred along with it. - - - - - - - - - - -Fielding, et. al. 
Standards Track [Page 114] - -RFC 2068 HTTP/1.1 January 1997 - - - Examples of byte-content-range-spec values, assuming that the entity - contains a total of 1234 bytes: - - o The first 500 bytes: - - bytes 0-499/1234 - - o The second 500 bytes: - - bytes 500-999/1234 - - o All except for the first 500 bytes: - - bytes 500-1233/1234 - - o The last 500 bytes: - - bytes 734-1233/1234 - - When an HTTP message includes the content of a single range (for - example, a response to a request for a single range, or to a request - for a set of ranges that overlap without any holes), this content is - transmitted with a Content-Range header, and a Content-Length header - showing the number of bytes actually transferred. For example, - - HTTP/1.1 206 Partial content - Date: Wed, 15 Nov 1995 06:25:24 GMT - Last-modified: Wed, 15 Nov 1995 04:58:08 GMT - Content-Range: bytes 21010-47021/47022 - Content-Length: 26012 - Content-Type: image/gif - - When an HTTP message includes the content of multiple ranges (for - example, a response to a request for multiple non-overlapping - ranges), these are transmitted as a multipart MIME message. The - multipart MIME content-type used for this purpose is defined in this - specification to be "multipart/byteranges". See appendix 19.2 for its - definition. - - A client that cannot decode a MIME multipart/byteranges message - should not ask for multiple byte-ranges in a single request. - - When a client requests multiple byte-ranges in one request, the - server SHOULD return them in the order that they appeared in the - request. - - If the server ignores a byte-range-spec because it is invalid, the - server should treat the request as if the invalid Range header field - - - -Fielding, et. al. Standards Track [Page 115] - -RFC 2068 HTTP/1.1 January 1997 - - - did not exist. (Normally, this means return a 200 response containing - the full entity). 
The reason is that the only time a client will make - such an invalid request is when the entity is smaller than the entity - retrieved by a prior request. - -14.18 Content-Type - - The Content-Type entity-header field indicates the media type of the - entity-body sent to the recipient or, in the case of the HEAD method, - the media type that would have been sent had the request been a GET. - - Content-Type = "Content-Type" ":" media-type - Media types are defined in section 3.7. An example of the field is - - Content-Type: text/html; charset=ISO-8859-4 - - Further discussion of methods for identifying the media type of an - entity is provided in section 7.2.1. - -14.19 Date - - The Date general-header field represents the date and time at which - the message was originated, having the same semantics as orig-date in - RFC 822. The field value is an HTTP-date, as described in section - 3.3.1. - - Date = "Date" ":" HTTP-date - - An example is - - Date: Tue, 15 Nov 1994 08:12:31 GMT - - If a message is received via direct connection with the user agent - (in the case of requests) or the origin server (in the case of - responses), then the date can be assumed to be the current date at - the receiving end. However, since the date--as it is believed by the - origin--is important for evaluating cached responses, origin servers - MUST include a Date header field in all responses. Clients SHOULD - only send a Date header field in messages that include an entity- - body, as in the case of the PUT and POST requests, and even then it - is optional. A received message which does not have a Date header - field SHOULD be assigned one by the recipient if the message will be - cached by that recipient or gatewayed via a protocol which requires a - Date. - - - - - - - -Fielding, et. al. Standards Track [Page 116] - -RFC 2068 HTTP/1.1 January 1997 - - - In theory, the date SHOULD represent the moment just before the - entity is generated. 
In practice, the date can be generated at any - time during the message origination without affecting its semantic - value. - - The format of the Date is an absolute date and time as defined by - HTTP-date in section 3.3; it MUST be sent in RFC1123 [8]-date format. - -14.20 ETag - - The ETag entity-header field defines the entity tag for the - associated entity. The headers used with entity tags are described in - sections 14.20, 14.25, 14.26 and 14.43. The entity tag may be used - for comparison with other entities from the same resource (see - section 13.3.2). - - ETag = "ETag" ":" entity-tag - - Examples: - - ETag: "xyzzy" - ETag: W/"xyzzy" - ETag: "" - -14.21 Expires - - The Expires entity-header field gives the date/time after which the - response should be considered stale. A stale cache entry may not - normally be returned by a cache (either a proxy cache or an user - agent cache) unless it is first validated with the origin server (or - with an intermediate cache that has a fresh copy of the entity). See - section 13.2 for further discussion of the expiration model. - - The presence of an Expires field does not imply that the original - resource will change or cease to exist at, before, or after that - time. - - The format is an absolute date and time as defined by HTTP-date in - section 3.3; it MUST be in RFC1123-date format: - - Expires = "Expires" ":" HTTP-date - - - - - - - - - - -Fielding, et. al. Standards Track [Page 117] - -RFC 2068 HTTP/1.1 January 1997 - - - An example of its use is - - Expires: Thu, 01 Dec 1994 16:00:00 GMT - - Note: if a response includes a Cache-Control field with the max-age - directive, that directive overrides the Expires field. - - HTTP/1.1 clients and caches MUST treat other invalid date formats, - especially including the value "0", as in the past (i.e., "already - expired"). - - To mark a response as "already expired," an origin server should use - an Expires date that is equal to the Date header value. 
(See the - rules for expiration calculations in section 13.2.4.) - - To mark a response as "never expires," an origin server should use an - Expires date approximately one year from the time the response is - sent. HTTP/1.1 servers should not send Expires dates more than one - year in the future. - - The presence of an Expires header field with a date value of some - time in the future on an response that otherwise would by default be - non-cacheable indicates that the response is cachable, unless - indicated otherwise by a Cache-Control header field (section 14.9). - -14.22 From - - The From request-header field, if given, SHOULD contain an Internet - e-mail address for the human user who controls the requesting user - agent. The address SHOULD be machine-usable, as defined by mailbox - in RFC 822 (as updated by RFC 1123 ): - - From = "From" ":" mailbox - - An example is: - - From: webmaster@w3.org - - This header field MAY be used for logging purposes and as a means for - identifying the source of invalid or unwanted requests. It SHOULD NOT - be used as an insecure form of access protection. The interpretation - of this field is that the request is being performed on behalf of the - person given, who accepts responsibility for the method performed. In - particular, robot agents SHOULD include this header so that the - person responsible for running the robot can be contacted if problems - occur on the receiving end. - - - - - -Fielding, et. al. Standards Track [Page 118] - -RFC 2068 HTTP/1.1 January 1997 - - - The Internet e-mail address in this field MAY be separate from the - Internet host which issued the request. For example, when a request - is passed through a proxy the original issuer's address SHOULD be - used. - - Note: The client SHOULD not send the From header field without the - user's approval, as it may conflict with the user's privacy - interests or their site's security policy. 
It is strongly - recommended that the user be able to disable, enable, and modify - the value of this field at any time prior to a request. - -14.23 Host - - The Host request-header field specifies the Internet host and port - number of the resource being requested, as obtained from the original - URL given by the user or referring resource (generally an HTTP URL, - as described in section 3.2.2). The Host field value MUST represent - the network location of the origin server or gateway given by the - original URL. This allows the origin server or gateway to - differentiate between internally-ambiguous URLs, such as the root "/" - URL of a server for multiple host names on a single IP address. - - Host = "Host" ":" host [ ":" port ] ; Section 3.2.2 - - A "host" without any trailing port information implies the default - port for the service requested (e.g., "80" for an HTTP URL). For - example, a request on the origin server for - MUST include: - - GET /pub/WWW/ HTTP/1.1 - Host: www.w3.org - - A client MUST include a Host header field in all HTTP/1.1 request - messages on the Internet (i.e., on any message corresponding to a - request for a URL which includes an Internet host address for the - service being requested). If the Host field is not already present, - an HTTP/1.1 proxy MUST add a Host field to the request message prior - to forwarding it on the Internet. All Internet-based HTTP/1.1 servers - MUST respond with a 400 status code to any HTTP/1.1 request message - which lacks a Host header field. - - See sections 5.2 and 19.5.1 for other requirements relating to Host. - -14.24 If-Modified-Since - - The If-Modified-Since request-header field is used with the GET - method to make it conditional: if the requested variant has not been - modified since the time specified in this field, an entity will not - - - -Fielding, et. al. 
Standards Track [Page 119] - -RFC 2068 HTTP/1.1 January 1997 - - - be returned from the server; instead, a 304 (not modified) response - will be returned without any message-body. - - If-Modified-Since = "If-Modified-Since" ":" HTTP-date - - An example of the field is: - - If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT - - A GET method with an If-Modified-Since header and no Range header - requests that the identified entity be transferred only if it has - been modified since the date given by the If-Modified-Since header. - The algorithm for determining this includes the following cases: - - a)If the request would normally result in anything other than a 200 - (OK) status, or if the passed If-Modified-Since date is invalid, the - response is exactly the same as for a normal GET. A date which is - later than the server's current time is invalid. - - b)If the variant has been modified since the If-Modified-Since date, - the response is exactly the same as for a normal GET. - - c)If the variant has not been modified since a valid If-Modified-Since - date, the server MUST return a 304 (Not Modified) response. - - The purpose of this feature is to allow efficient updates of cached - information with a minimum amount of transaction overhead. - - Note that the Range request-header field modifies the meaning of - If-Modified-Since; see section 14.36 for full details. - - Note that If-Modified-Since times are interpreted by the server, - whose clock may not be synchronized with the client. - - Note that if a client uses an arbitrary date in the If-Modified-Since - header instead of a date taken from the Last-Modified header for the - same request, the client should be aware of the fact that this date - is interpreted in the server's understanding of time. The client - should consider unsynchronized clocks and rounding problems due to - the different encodings of time between the client and server. 
This - includes the possibility of race conditions if the document has - changed between the time it was first requested and the If-Modified- - Since date of a subsequent request, and the possibility of clock- - skew-related problems if the If-Modified-Since date is derived from - the client's clock without correction to the server's clock. - Corrections for different time bases between client and server are at - best approximate due to network latency. - - - - -Fielding, et. al. Standards Track [Page 120] - -RFC 2068 HTTP/1.1 January 1997 - - -14.25 If-Match - - The If-Match request-header field is used with a method to make it - conditional. A client that has one or more entities previously - obtained from the resource can verify that one of those entities is - current by including a list of their associated entity tags in the - If-Match header field. The purpose of this feature is to allow - efficient updates of cached information with a minimum amount of - transaction overhead. It is also used, on updating requests, to - prevent inadvertent modification of the wrong version of a resource. - As a special case, the value "*" matches any current entity of the - resource. - - If-Match = "If-Match" ":" ( "*" | 1#entity-tag ) - - If any of the entity tags match the entity tag of the entity that - would have been returned in the response to a similar GET request - (without the If-Match header) on that resource, or if "*" is given - and any current entity exists for that resource, then the server MAY - perform the requested method as if the If-Match header field did not - exist. - - A server MUST use the strong comparison function (see section 3.11) - to compare the entity tags in If-Match. - - If none of the entity tags match, or if "*" is given and no current - entity exists, the server MUST NOT perform the requested method, and - MUST return a 412 (Precondition Failed) response. 
This behavior is - most useful when the client wants to prevent an updating method, such - as PUT, from modifying a resource that has changed since the client - last retrieved it. - - If the request would, without the If-Match header field, result in - anything other than a 2xx status, then the If-Match header MUST be - ignored. - - The meaning of "If-Match: *" is that the method SHOULD be performed - if the representation selected by the origin server (or by a cache, - possibly using the Vary mechanism, see section 14.43) exists, and - MUST NOT be performed if the representation does not exist. - - - - - - - - - - - -Fielding, et. al. Standards Track [Page 121] - -RFC 2068 HTTP/1.1 January 1997 - - - A request intended to update a resource (e.g., a PUT) MAY include an - If-Match header field to signal that the request method MUST NOT be - applied if the entity corresponding to the If-Match value (a single - entity tag) is no longer a representation of that resource. This - allows the user to indicate that they do not wish the request to be - successful if the resource has been changed without their knowledge. - Examples: - - If-Match: "xyzzy" - If-Match: "xyzzy", "r2d2xxxx", "c3piozzzz" - If-Match: * - -14.26 If-None-Match - - The If-None-Match request-header field is used with a method to make - it conditional. A client that has one or more entities previously - obtained from the resource can verify that none of those entities is - current by including a list of their associated entity tags in the - If-None-Match header field. The purpose of this feature is to allow - efficient updates of cached information with a minimum amount of - transaction overhead. It is also used, on updating requests, to - prevent inadvertent modification of a resource which was not known to - exist. - - As a special case, the value "*" matches any current entity of the - resource. 
- - If-None-Match = "If-None-Match" ":" ( "*" | 1#entity-tag ) - - If any of the entity tags match the entity tag of the entity that - would have been returned in the response to a similar GET request - (without the If-None-Match header) on that resource, or if "*" is - given and any current entity exists for that resource, then the - server MUST NOT perform the requested method. Instead, if the request - method was GET or HEAD, the server SHOULD respond with a 304 (Not - Modified) response, including the cache-related entity-header fields - (particularly ETag) of one of the entities that matched. For all - other request methods, the server MUST respond with a status of 412 - (Precondition Failed). - - See section 13.3.3 for rules on how to determine if two entity tags - match. The weak comparison function can only be used with GET or HEAD - requests. - - If none of the entity tags match, or if "*" is given and no current - entity exists, then the server MAY perform the requested method as if - the If-None-Match header field did not exist. - - - - -Fielding, et. al. Standards Track [Page 122] - -RFC 2068 HTTP/1.1 January 1997 - - - If the request would, without the If-None-Match header field, result - in anything other than a 2xx status, then the If-None-Match header - MUST be ignored. - - The meaning of "If-None-Match: *" is that the method MUST NOT be - performed if the representation selected by the origin server (or by - a cache, possibly using the Vary mechanism, see section 14.43) - exists, and SHOULD be performed if the representation does not exist. - This feature may be useful in preventing races between PUT - operations. 
- - Examples: - - If-None-Match: "xyzzy" - If-None-Match: W/"xyzzy" - If-None-Match: "xyzzy", "r2d2xxxx", "c3piozzzz" - If-None-Match: W/"xyzzy", W/"r2d2xxxx", W/"c3piozzzz" - If-None-Match: * - -14.27 If-Range - - If a client has a partial copy of an entity in its cache, and wishes - to have an up-to-date copy of the entire entity in its cache, it - could use the Range request-header with a conditional GET (using - either or both of If-Unmodified-Since and If-Match.) However, if the - condition fails because the entity has been modified, the client - would then have to make a second request to obtain the entire current - entity-body. - - The If-Range header allows a client to "short-circuit" the second - request. Informally, its meaning is `if the entity is unchanged, send - me the part(s) that I am missing; otherwise, send me the entire new - entity.' - - If-Range = "If-Range" ":" ( entity-tag | HTTP-date ) - - If the client has no entity tag for an entity, but does have a Last- - Modified date, it may use that date in a If-Range header. (The server - can distinguish between a valid HTTP-date and any form of entity-tag - by examining no more than two characters.) The If-Range header should - only be used together with a Range header, and must be ignored if the - request does not include a Range header, or if the server does not - support the sub-range operation. - - - - - - - - -Fielding, et. al. Standards Track [Page 123] - -RFC 2068 HTTP/1.1 January 1997 - - - If the entity tag given in the If-Range header matches the current - entity tag for the entity, then the server should provide the - specified sub-range of the entity using a 206 (Partial content) - response. If the entity tag does not match, then the server should - return the entire entity using a 200 (OK) response. - -14.28 If-Unmodified-Since - - The If-Unmodified-Since request-header field is used with a method to - make it conditional. 
If the requested resource has not been modified - since the time specified in this field, the server should perform the - requested operation as if the If-Unmodified-Since header were not - present. - - If the requested variant has been modified since the specified time, - the server MUST NOT perform the requested operation, and MUST return - a 412 (Precondition Failed). - - If-Unmodified-Since = "If-Unmodified-Since" ":" HTTP-date - - An example of the field is: - - If-Unmodified-Since: Sat, 29 Oct 1994 19:43:31 GMT - - If the request normally (i.e., without the If-Unmodified-Since - header) would result in anything other than a 2xx status, the If- - Unmodified-Since header should be ignored. - - If the specified date is invalid, the header is ignored. - -14.29 Last-Modified - - The Last-Modified entity-header field indicates the date and time at - which the origin server believes the variant was last modified. - - Last-Modified = "Last-Modified" ":" HTTP-date - - An example of its use is - - Last-Modified: Tue, 15 Nov 1994 12:45:26 GMT - - The exact meaning of this header field depends on the implementation - of the origin server and the nature of the original resource. For - files, it may be just the file system last-modified time. For - entities with dynamically included parts, it may be the most recent - of the set of last-modify times for its component parts. For database - gateways, it may be the last-update time stamp of the record. For - virtual objects, it may be the last time the internal state changed. - - - -Fielding, et. al. Standards Track [Page 124] - -RFC 2068 HTTP/1.1 January 1997 - - - An origin server MUST NOT send a Last-Modified date which is later - than the server's time of message origination. In such cases, where - the resource's last modification would indicate some time in the - future, the server MUST replace that date with the message - origination date. 
- - An origin server should obtain the Last-Modified value of the entity - as close as possible to the time that it generates the Date value of - its response. This allows a recipient to make an accurate assessment - of the entity's modification time, especially if the entity changes - near the time that the response is generated. - - HTTP/1.1 servers SHOULD send Last-Modified whenever feasible. - -14.30 Location - - The Location response-header field is used to redirect the recipient - to a location other than the Request-URI for completion of the - request or identification of a new resource. For 201 (Created) - responses, the Location is that of the new resource which was created - by the request. For 3xx responses, the location SHOULD indicate the - server's preferred URL for automatic redirection to the resource. The - field value consists of a single absolute URL. - - Location = "Location" ":" absoluteURI - - An example is - - Location: http://www.w3.org/pub/WWW/People.html - - Note: The Content-Location header field (section 14.15) differs - from Location in that the Content-Location identifies the original - location of the entity enclosed in the request. It is therefore - possible for a response to contain header fields for both Location - and Content-Location. Also see section 13.10 for cache requirements - of some methods. - -14.31 Max-Forwards - - The Max-Forwards request-header field may be used with the TRACE - method (section 14.31) to limit the number of proxies or gateways - that can forward the request to the next inbound server. This can be - useful when the client is attempting to trace a request chain which - appears to be failing or looping in mid-chain. - - Max-Forwards = "Max-Forwards" ":" 1*DIGIT - - - - - -Fielding, et. al. Standards Track [Page 125] - -RFC 2068 HTTP/1.1 January 1997 - - - The Max-Forwards value is a decimal integer indicating the remaining - number of times this request message may be forwarded. 
- - Each proxy or gateway recipient of a TRACE request containing a Max- - Forwards header field SHOULD check and update its value prior to - forwarding the request. If the received value is zero (0), the - recipient SHOULD NOT forward the request; instead, it SHOULD respond - as the final recipient with a 200 (OK) response containing the - received request message as the response entity-body (as described in - section 9.8). If the received Max-Forwards value is greater than - zero, then the forwarded message SHOULD contain an updated Max- - Forwards field with a value decremented by one (1). - - The Max-Forwards header field SHOULD be ignored for all other methods - defined by this specification and for any extension methods for which - it is not explicitly referred to as part of that method definition. - -14.32 Pragma - - The Pragma general-header field is used to include implementation- - specific directives that may apply to any recipient along the - request/response chain. All pragma directives specify optional - behavior from the viewpoint of the protocol; however, some systems - MAY require that behavior be consistent with the directives. - - Pragma = "Pragma" ":" 1#pragma-directive - - pragma-directive = "no-cache" | extension-pragma - extension-pragma = token [ "=" ( token | quoted-string ) ] - - When the no-cache directive is present in a request message, an - application SHOULD forward the request toward the origin server even - if it has a cached copy of what is being requested. This pragma - directive has the same semantics as the no-cache cache-directive (see - section 14.9) and is defined here for backwards compatibility with - HTTP/1.0. Clients SHOULD include both header fields when a no-cache - request is sent to a server not known to be HTTP/1.1 compliant. 
- - Pragma directives MUST be passed through by a proxy or gateway - application, regardless of their significance to that application, - since the directives may be applicable to all recipients along the - request/response chain. It is not possible to specify a pragma for a - specific recipient; however, any pragma directive not relevant to a - recipient SHOULD be ignored by that recipient. - - - - - - - -Fielding, et. al. Standards Track [Page 126] - -RFC 2068 HTTP/1.1 January 1997 - - - HTTP/1.1 clients SHOULD NOT send the Pragma request-header. HTTP/1.1 - caches SHOULD treat "Pragma: no-cache" as if the client had sent - "Cache-Control: no-cache". No new Pragma directives will be defined - in HTTP. - -14.33 Proxy-Authenticate - - The Proxy-Authenticate response-header field MUST be included as part - of a 407 (Proxy Authentication Required) response. The field value - consists of a challenge that indicates the authentication scheme and - parameters applicable to the proxy for this Request-URI. - - Proxy-Authenticate = "Proxy-Authenticate" ":" challenge - - The HTTP access authentication process is described in section 11. - Unlike WWW-Authenticate, the Proxy-Authenticate header field applies - only to the current connection and SHOULD NOT be passed on to - downstream clients. However, an intermediate proxy may need to obtain - its own credentials by requesting them from the downstream client, - which in some circumstances will appear as if the proxy is forwarding - the Proxy-Authenticate header field. - -14.34 Proxy-Authorization - - The Proxy-Authorization request-header field allows the client to - identify itself (or its user) to a proxy which requires - authentication. The Proxy-Authorization field value consists of - credentials containing the authentication information of the user - agent for the proxy and/or realm of the resource being requested. 
- - Proxy-Authorization = "Proxy-Authorization" ":" credentials - - The HTTP access authentication process is described in section 11. - Unlike Authorization, the Proxy-Authorization header field applies - only to the next outbound proxy that demanded authentication using - the Proxy-Authenticate field. When multiple proxies are used in a - chain, the Proxy-Authorization header field is consumed by the first - outbound proxy that was expecting to receive credentials. A proxy MAY - relay the credentials from the client request to the next proxy if - that is the mechanism by which the proxies cooperatively authenticate - a given request. - -14.35 Public - - The Public response-header field lists the set of methods supported - by the server. The purpose of this field is strictly to inform the - recipient of the capabilities of the server regarding unusual - methods. The methods listed may or may not be applicable to the - - - -Fielding, et. al. Standards Track [Page 127] - -RFC 2068 HTTP/1.1 January 1997 - - - Request-URI; the Allow header field (section 14.7) MAY be used to - indicate methods allowed for a particular URI. - - Public = "Public" ":" 1#method - - Example of use: - - Public: OPTIONS, MGET, MHEAD, GET, HEAD - - This header field applies only to the server directly connected to - the client (i.e., the nearest neighbor in a chain of connections). If - the response passes through a proxy, the proxy MUST either remove the - Public header field or replace it with one applicable to its own - capabilities. - -14.36 Range - -14.36.1 Byte Ranges - - Since all HTTP entities are represented in HTTP messages as sequences - of bytes, the concept of a byte range is meaningful for any HTTP - entity. (However, not all clients and servers need to support byte- - range operations.) - - Byte range specifications in HTTP apply to the sequence of bytes in - the entity-body (not necessarily the same as the message-body). 
- - A byte range operation may specify a single range of bytes, or a set - of ranges within a single entity. - - ranges-specifier = byte-ranges-specifier - - byte-ranges-specifier = bytes-unit "=" byte-range-set - - byte-range-set = 1#( byte-range-spec | suffix-byte-range-spec ) - - byte-range-spec = first-byte-pos "-" [last-byte-pos] - - first-byte-pos = 1*DIGIT - - last-byte-pos = 1*DIGIT - - The first-byte-pos value in a byte-range-spec gives the byte-offset - of the first byte in a range. The last-byte-pos value gives the - byte-offset of the last byte in the range; that is, the byte - positions specified are inclusive. Byte offsets start at zero. - - - - - -Fielding, et. al. Standards Track [Page 128] - -RFC 2068 HTTP/1.1 January 1997 - - - If the last-byte-pos value is present, it must be greater than or - equal to the first-byte-pos in that byte-range-spec, or the byte- - range-spec is invalid. The recipient of an invalid byte-range-spec - must ignore it. - - If the last-byte-pos value is absent, or if the value is greater than - or equal to the current length of the entity-body, last-byte-pos is - taken to be equal to one less than the current length of the entity- - body in bytes. - - By its choice of last-byte-pos, a client can limit the number of - bytes retrieved without knowing the size of the entity. - - suffix-byte-range-spec = "-" suffix-length - - suffix-length = 1*DIGIT - - A suffix-byte-range-spec is used to specify the suffix of the - entity-body, of a length given by the suffix-length value. (That is, - this form specifies the last N bytes of an entity-body.) If the - entity is shorter than the specified suffix-length, the entire - entity-body is used. 
- - Examples of byte-ranges-specifier values (assuming an entity-body of - length 10000): - - o The first 500 bytes (byte offsets 0-499, inclusive): - - bytes=0-499 - - o The second 500 bytes (byte offsets 500-999, inclusive): - - bytes=500-999 - - o The final 500 bytes (byte offsets 9500-9999, inclusive): - - bytes=-500 - - o Or - - bytes=9500- - - o The first and last bytes only (bytes 0 and 9999): - - bytes=0-0,-1 - - - - - - -Fielding, et. al. Standards Track [Page 129] - -RFC 2068 HTTP/1.1 January 1997 - - - o Several legal but not canonical specifications of the second - 500 bytes (byte offsets 500-999, inclusive): - - bytes=500-600,601-999 - - bytes=500-700,601-999 - -14.36.2 Range Retrieval Requests - - HTTP retrieval requests using conditional or unconditional GET - methods may request one or more sub-ranges of the entity, instead of - the entire entity, using the Range request header, which applies to - the entity returned as the result of the request: - - Range = "Range" ":" ranges-specifier - - A server MAY ignore the Range header. However, HTTP/1.1 origin - servers and intermediate caches SHOULD support byte ranges when - possible, since Range supports efficient recovery from partially - failed transfers, and supports efficient partial retrieval of large - entities. - - If the server supports the Range header and the specified range or - ranges are appropriate for the entity: - - o The presence of a Range header in an unconditional GET modifies - what is returned if the GET is otherwise successful. In other - words, the response carries a status code of 206 (Partial - Content) instead of 200 (OK). - - o The presence of a Range header in a conditional GET (a request - using one or both of If-Modified-Since and If-None-Match, or - one or both of If-Unmodified-Since and If-Match) modifies what - is returned if the GET is otherwise successful and the condition - is true. 
It does not affect the 304 (Not Modified) response - returned if the conditional is false. - - In some cases, it may be more appropriate to use the If-Range header - (see section 14.27) in addition to the Range header. - - If a proxy that supports ranges receives a Range request, forwards - the request to an inbound server, and receives an entire entity in - reply, it SHOULD only return the requested range to its client. It - SHOULD store the entire received response in its cache, if that is - consistent with its cache allocation policies. - - - - - - -Fielding, et. al. Standards Track [Page 130] - -RFC 2068 HTTP/1.1 January 1997 - - -14.37 Referer - - The Referer[sic] request-header field allows the client to specify, - for the server's benefit, the address (URI) of the resource from - which the Request-URI was obtained (the "referrer", although the - header field is misspelled.) The Referer request-header allows a - server to generate lists of back-links to resources for interest, - logging, optimized caching, etc. It also allows obsolete or mistyped - links to be traced for maintenance. The Referer field MUST NOT be - sent if the Request-URI was obtained from a source that does not have - its own URI, such as input from the user keyboard. - - Referer = "Referer" ":" ( absoluteURI | relativeURI ) - - Example: - - Referer: http://www.w3.org/hypertext/DataSources/Overview.html - - If the field value is a partial URI, it SHOULD be interpreted - relative to the Request-URI. The URI MUST NOT include a fragment. - - Note: Because the source of a link may be private information or - may reveal an otherwise private information source, it is strongly - recommended that the user be able to select whether or not the - Referer field is sent. For example, a browser client could have a - toggle switch for browsing openly/anonymously, which would - respectively enable/disable the sending of Referer and From - information. 
- -14.38 Retry-After - - The Retry-After response-header field can be used with a 503 (Service - Unavailable) response to indicate how long the service is expected to - be unavailable to the requesting client. The value of this field can - be either an HTTP-date or an integer number of seconds (in decimal) - after the time of the response. - - Retry-After = "Retry-After" ":" ( HTTP-date | delta-seconds ) - - Two examples of its use are - - Retry-After: Fri, 31 Dec 1999 23:59:59 GMT - Retry-After: 120 - - In the latter example, the delay is 2 minutes. - - - - - - -Fielding, et. al. Standards Track [Page 131] - -RFC 2068 HTTP/1.1 January 1997 - - -14.39 Server - - The Server response-header field contains information about the - software used by the origin server to handle the request. The field - can contain multiple product tokens (section 3.8) and comments - identifying the server and any significant subproducts. The product - tokens are listed in order of their significance for identifying the - application. - - Server = "Server" ":" 1*( product | comment ) - - Example: - - Server: CERN/3.0 libwww/2.17 - - If the response is being forwarded through a proxy, the proxy - application MUST NOT modify the Server response-header. Instead, it - SHOULD include a Via field (as described in section 14.44). - - Note: Revealing the specific software version of the server may - allow the server machine to become more vulnerable to attacks - against software that is known to contain security holes. Server - implementers are encouraged to make this field a configurable - option. - -14.40 Transfer-Encoding - - The Transfer-Encoding general-header field indicates what (if any) - type of transformation has been applied to the message body in order - to safely transfer it between the sender and the recipient. This - differs from the Content-Encoding in that the transfer coding is a - property of the message, not of the entity. 
- - Transfer-Encoding = "Transfer-Encoding" ":" 1#transfer- - coding - - Transfer codings are defined in section 3.6. An example is: - - Transfer-Encoding: chunked - - Many older HTTP/1.0 applications do not understand the Transfer- - Encoding header. - -14.41 Upgrade - - The Upgrade general-header allows the client to specify what - additional communication protocols it supports and would like to use - if the server finds it appropriate to switch protocols. The server - - - -Fielding, et. al. Standards Track [Page 132] - -RFC 2068 HTTP/1.1 January 1997 - - - MUST use the Upgrade header field within a 101 (Switching Protocols) - response to indicate which protocol(s) are being switched. - - Upgrade = "Upgrade" ":" 1#product - - For example, - - Upgrade: HTTP/2.0, SHTTP/1.3, IRC/6.9, RTA/x11 - - The Upgrade header field is intended to provide a simple mechanism - for transition from HTTP/1.1 to some other, incompatible protocol. It - does so by allowing the client to advertise its desire to use another - protocol, such as a later version of HTTP with a higher major version - number, even though the current request has been made using HTTP/1.1. - This eases the difficult transition between incompatible protocols by - allowing the client to initiate a request in the more commonly - supported protocol while indicating to the server that it would like - to use a "better" protocol if available (where "better" is determined - by the server, possibly according to the nature of the method and/or - resource being requested). - - The Upgrade header field only applies to switching application-layer - protocols upon the existing transport-layer connection. Upgrade - cannot be used to insist on a protocol change; its acceptance and use - by the server is optional. 
The capabilities and nature of the - application-layer communication after the protocol change is entirely - dependent upon the new protocol chosen, although the first action - after changing the protocol MUST be a response to the initial HTTP - request containing the Upgrade header field. - - The Upgrade header field only applies to the immediate connection. - Therefore, the upgrade keyword MUST be supplied within a Connection - header field (section 14.10) whenever Upgrade is present in an - HTTP/1.1 message. - - The Upgrade header field cannot be used to indicate a switch to a - protocol on a different connection. For that purpose, it is more - appropriate to use a 301, 302, 303, or 305 redirection response. - - This specification only defines the protocol name "HTTP" for use by - the family of Hypertext Transfer Protocols, as defined by the HTTP - version rules of section 3.1 and future updates to this - specification. Any token can be used as a protocol name; however, it - will only be useful if both the client and server associate the name - with the same protocol. - - - - - - -Fielding, et. al. Standards Track [Page 133] - -RFC 2068 HTTP/1.1 January 1997 - - -14.42 User-Agent - - The User-Agent request-header field contains information about the - user agent originating the request. This is for statistical purposes, - the tracing of protocol violations, and automated recognition of user - agents for the sake of tailoring responses to avoid particular user - agent limitations. User agents SHOULD include this field with - requests. The field can contain multiple product tokens (section 3.8) - and comments identifying the agent and any subproducts which form a - significant part of the user agent. By convention, the product tokens - are listed in order of their significance for identifying the - application. 
- - User-Agent = "User-Agent" ":" 1*( product | comment ) - - Example: - - User-Agent: CERN-LineMode/2.15 libwww/2.17b3 - -14.43 Vary - - The Vary response-header field is used by a server to signal that the - response entity was selected from the available representations of - the response using server-driven negotiation (section 12). Field- - names listed in Vary headers are those of request-headers. The Vary - field value indicates either that the given set of header fields - encompass the dimensions over which the representation might vary, or - that the dimensions of variance are unspecified ("*") and thus may - vary over any aspect of future requests. - - Vary = "Vary" ":" ( "*" | 1#field-name ) - - An HTTP/1.1 server MUST include an appropriate Vary header field with - any cachable response that is subject to server-driven negotiation. - Doing so allows a cache to properly interpret future requests on that - resource and informs the user agent about the presence of negotiation - on that resource. A server SHOULD include an appropriate Vary header - field with a non-cachable response that is subject to server-driven - negotiation, since this might provide the user agent with useful - information about the dimensions over which the response might vary. - - The set of header fields named by the Vary field value is known as - the "selecting" request-headers. - - When the cache receives a subsequent request whose Request-URI - specifies one or more cache entries including a Vary header, the - cache MUST NOT use such a cache entry to construct a response to the - new request unless all of the headers named in the cached Vary header - - - -Fielding, et. al. Standards Track [Page 134] - -RFC 2068 HTTP/1.1 January 1997 - - - are present in the new request, and all of the stored selecting - request-headers from the previous request match the corresponding - headers in the new request. 
- - The selecting request-headers from two requests are defined to match - if and only if the selecting request-headers in the first request can - be transformed to the selecting request-headers in the second request - by adding or removing linear whitespace (LWS) at places where this is - allowed by the corresponding BNF, and/or combining multiple message- - header fields with the same field name following the rules about - message headers in section 4.2. - - A Vary field value of "*" signals that unspecified parameters, - possibly other than the contents of request-header fields (e.g., the - network address of the client), play a role in the selection of the - response representation. Subsequent requests on that resource can - only be properly interpreted by the origin server, and thus a cache - MUST forward a (possibly conditional) request even when it has a - fresh response cached for the resource. See section 13.6 for use of - the Vary header by caches. - - A Vary field value consisting of a list of field-names signals that - the representation selected for the response is based on a selection - algorithm which considers ONLY the listed request-header field values - in selecting the most appropriate representation. A cache MAY assume - that the same selection will be made for future requests with the - same values for the listed field names, for the duration of time in - which the response is fresh. - - The field-names given are not limited to the set of standard - request-header fields defined by this specification. Field names are - case-insensitive. - -14.44 Via - - The Via general-header field MUST be used by gateways and proxies to - indicate the intermediate protocols and recipients between the user - agent and the server on requests, and between the origin server and - the client on responses. 
It is analogous to the "Received" field of - RFC 822 and is intended to be used for tracking message forwards, - avoiding request loops, and identifying the protocol capabilities of - all senders along the request/response chain. - - - - - - - - - -Fielding, et. al. Standards Track [Page 135] - -RFC 2068 HTTP/1.1 January 1997 - - - Via = "Via" ":" 1#( received-protocol received-by [ comment ] ) - - received-protocol = [ protocol-name "/" ] protocol-version - protocol-name = token - protocol-version = token - received-by = ( host [ ":" port ] ) | pseudonym - pseudonym = token - - The received-protocol indicates the protocol version of the message - received by the server or client along each segment of the - request/response chain. The received-protocol version is appended to - the Via field value when the message is forwarded so that information - about the protocol capabilities of upstream applications remains - visible to all recipients. - - The protocol-name is optional if and only if it would be "HTTP". The - received-by field is normally the host and optional port number of a - recipient server or client that subsequently forwarded the message. - However, if the real host is considered to be sensitive information, - it MAY be replaced by a pseudonym. If the port is not given, it MAY - be assumed to be the default port of the received-protocol. - - Multiple Via field values represent each proxy or gateway that has - forwarded the message. Each recipient MUST append its information - such that the end result is ordered according to the sequence of - forwarding applications. - - Comments MAY be used in the Via header field to identify the software - of the recipient proxy or gateway, analogous to the User-Agent and - Server header fields. However, all comments in the Via field are - optional and MAY be removed by any recipient prior to forwarding the - message. 
- - For example, a request message could be sent from an HTTP/1.0 user - agent to an internal proxy code-named "fred", which uses HTTP/1.1 to - forward the request to a public proxy at nowhere.com, which completes - the request by forwarding it to the origin server at www.ics.uci.edu. - The request received by www.ics.uci.edu would then have the following - Via header field: - - Via: 1.0 fred, 1.1 nowhere.com (Apache/1.1) - - Proxies and gateways used as a portal through a network firewall - SHOULD NOT, by default, forward the names and ports of hosts within - the firewall region. This information SHOULD only be propagated if - explicitly enabled. If not enabled, the received-by host of any host - behind the firewall SHOULD be replaced by an appropriate pseudonym - for that host. - - - -Fielding, et. al. Standards Track [Page 136] - -RFC 2068 HTTP/1.1 January 1997 - - - For organizations that have strong privacy requirements for hiding - internal structures, a proxy MAY combine an ordered subsequence of - Via header field entries with identical received-protocol values into - a single such entry. For example, - - Via: 1.0 ricky, 1.1 ethel, 1.1 fred, 1.0 lucy - - could be collapsed to - - Via: 1.0 ricky, 1.1 mertz, 1.0 lucy - - Applications SHOULD NOT combine multiple entries unless they are all - under the same organizational control and the hosts have already been - replaced by pseudonyms. Applications MUST NOT combine entries which - have different received-protocol values. - -14.45 Warning - - The Warning response-header field is used to carry additional - information about the status of a response which may not be reflected - by the response status code. This information is typically, though - not exclusively, used to warn about a possible lack of semantic - transparency from caching operations. 
- - Warning headers are sent with responses using: - - Warning = "Warning" ":" 1#warning-value - - warning-value = warn-code SP warn-agent SP warn-text - warn-code = 2DIGIT - warn-agent = ( host [ ":" port ] ) | pseudonym - ; the name or pseudonym of the server adding - ; the Warning header, for use in debugging - warn-text = quoted-string - - A response may carry more than one Warning header. - - The warn-text should be in a natural language and character set that - is most likely to be intelligible to the human user receiving the - response. This decision may be based on any available knowledge, - such as the location of the cache or user, the Accept-Language field - in a request, the Content-Language field in a response, etc. The - default language is English and the default character set is ISO- - 8859-1. - - If a character set other than ISO-8859-1 is used, it MUST be encoded - in the warn-text using the method described in RFC 1522 [14]. - - - - -Fielding, et. al. Standards Track [Page 137] - -RFC 2068 HTTP/1.1 January 1997 - - - Any server or cache may add Warning headers to a response. New - Warning headers should be added after any existing Warning headers. A - cache MUST NOT delete any Warning header that it received with a - response. However, if a cache successfully validates a cache entry, - it SHOULD remove any Warning headers previously attached to that - entry except as specified for specific Warning codes. It MUST then - add any Warning headers received in the validating response. In other - words, Warning headers are those that would be attached to the most - recent relevant response. - - When multiple Warning headers are attached to a response, the user - agent SHOULD display as many of them as possible, in the order that - they appear in the response. 
If it is not possible to display all of - the warnings, the user agent should follow these heuristics: - - o Warnings that appear early in the response take priority over those - appearing later in the response. - o Warnings in the user's preferred character set take priority over - warnings in other character sets but with identical warn-codes and - warn-agents. - - Systems that generate multiple Warning headers should order them with - this user agent behavior in mind. - - This is a list of the currently-defined warn-codes, each with a - recommended warn-text in English, and a description of its meaning. - -10 Response is stale - MUST be included whenever the returned response is stale. A cache may - add this warning to any response, but may never remove it until the - response is known to be fresh. - -11 Revalidation failed - MUST be included if a cache returns a stale response because an - attempt to revalidate the response failed, due to an inability to - reach the server. A cache may add this warning to any response, but - may never remove it until the response is successfully revalidated. - -12 Disconnected operation - SHOULD be included if the cache is intentionally disconnected from - the rest of the network for a period of time. - -13 Heuristic expiration - MUST be included if the cache heuristically chose a freshness - lifetime greater than 24 hours and the response's age is greater than - 24 hours. - - - - - -Fielding, et. al. Standards Track [Page 138] - -RFC 2068 HTTP/1.1 January 1997 - - -14 Transformation applied - MUST be added by an intermediate cache or proxy if it applies any - transformation changing the content-coding (as specified in the - Content-Encoding header) or media-type (as specified in the - Content-Type header) of the response, unless this Warning code - already appears in the response. MUST NOT be deleted from a response - even after revalidation. 
- -99 Miscellaneous warning - The warning text may include arbitrary information to be presented to - a human user, or logged. A system receiving this warning MUST NOT - take any automated action. - -14.46 WWW-Authenticate - - The WWW-Authenticate response-header field MUST be included in 401 - (Unauthorized) response messages. The field value consists of at - least one challenge that indicates the authentication scheme(s) and - parameters applicable to the Request-URI. - - WWW-Authenticate = "WWW-Authenticate" ":" 1#challenge - - The HTTP access authentication process is described in section 11. - User agents MUST take special care in parsing the WWW-Authenticate - field value if it contains more than one challenge, or if more than - one WWW-Authenticate header field is provided, since the contents of - a challenge may itself contain a comma-separated list of - authentication parameters. - -15 Security Considerations - - This section is meant to inform application developers, information - providers, and users of the security limitations in HTTP/1.1 as - described by this document. The discussion does not include - definitive solutions to the problems revealed, though it does make - some suggestions for reducing security risks. - -15.1 Authentication of Clients - - The Basic authentication scheme is not a secure method of user - authentication, nor does it in any way protect the entity, which is - transmitted in clear text across the physical network used as the - carrier. HTTP does not prevent additional authentication schemes and - encryption mechanisms from being employed to increase security or the - addition of enhancements (such as schemes to use one-time passwords) - to Basic authentication. - - - - - -Fielding, et. al. Standards Track [Page 139] - -RFC 2068 HTTP/1.1 January 1997 - - - The most serious flaw in Basic authentication is that it results in - the essentially clear text transmission of the user's password over - the physical network. 
It is this problem which Digest Authentication - attempts to address. - - Because Basic authentication involves the clear text transmission of - passwords it SHOULD never be used (without enhancements) to protect - sensitive or valuable information. - - A common use of Basic authentication is for identification purposes - -- requiring the user to provide a user name and password as a means - of identification, for example, for purposes of gathering accurate - usage statistics on a server. When used in this way it is tempting to - think that there is no danger in its use if illicit access to the - protected documents is not a major concern. This is only correct if - the server issues both user name and password to the users and in - particular does not allow the user to choose his or her own password. - The danger arises because naive users frequently reuse a single - password to avoid the task of maintaining multiple passwords. - - If a server permits users to select their own passwords, then the - threat is not only illicit access to documents on the server but also - illicit access to the accounts of all users who have chosen to use - their account password. If users are allowed to choose their own - password that also means the server must maintain files containing - the (presumably encrypted) passwords. Many of these may be the - account passwords of users perhaps at distant sites. The owner or - administrator of such a system could conceivably incur liability if - this information is not maintained in a secure fashion. - - Basic Authentication is also vulnerable to spoofing by counterfeit - servers. If a user can be led to believe that he is connecting to a - host containing information protected by basic authentication when in - fact he is connecting to a hostile server or gateway then the - attacker can request a password, store it for later use, and feign an - error. This type of attack is not possible with Digest Authentication - [32]. 
Server implementers SHOULD guard against the possibility of - this sort of counterfeiting by gateways or CGI scripts. In particular - it is very dangerous for a server to simply turn over a connection to - a gateway since that gateway can then use the persistent connection - mechanism to engage in multiple transactions with the client while - impersonating the original server in a way that is not detectable by - the client. - -15.2 Offering a Choice of Authentication Schemes - - An HTTP/1.1 server may return multiple challenges with a 401 - (Unauthorized) response, and each challenge may use a different - - - -Fielding, et. al. Standards Track [Page 140] - -RFC 2068 HTTP/1.1 January 1997 - - - scheme. The order of the challenges returned to the user agent is in - the order that the server would prefer they be chosen. The server - should order its challenges with the "most secure" authentication - scheme first. A user agent should choose as the challenge to be made - to the user the first one that the user agent understands. - - When the server offers choices of authentication schemes using the - WWW-Authenticate header, the "security" of the authentication is only - as strong as that of the weakest of the offered schemes, since a - malicious user could capture the set of challenges and try to - authenticate him/herself using the weakest of the authentication - schemes. Thus, the ordering serves more to protect the user's - credentials than the server's information. - - A possible man-in-the-middle (MITM) attack would be to add a weak - authentication scheme to the set of choices, hoping that the client - will use one that exposes the user's credentials (e.g. password). For - this reason, the client should always use the strongest scheme that - it understands from the choices accepted. - - An even better MITM attack would be to remove all offered choices, - and to insert a challenge that requests Basic authentication. 
For - this reason, user agents that are concerned about this kind of attack - could remember the strongest authentication scheme ever requested by - a server and produce a warning message that requires user - confirmation before using a weaker one. A particularly insidious way - to mount such a MITM attack would be to offer a "free" proxy caching - service to gullible users. - -15.3 Abuse of Server Log Information - - A server is in the position to save personal data about a user's - requests which may identify their reading patterns or subjects of - interest. This information is clearly confidential in nature and its - handling may be constrained by law in certain countries. People using - the HTTP protocol to provide data are responsible for ensuring that - such material is not distributed without the permission of any - individuals that are identifiable by the published results. - -15.4 Transfer of Sensitive Information - - Like any generic data transfer protocol, HTTP cannot regulate the - content of the data that is transferred, nor is there any a priori - method of determining the sensitivity of any particular piece of - information within the context of any given request. Therefore, - applications SHOULD supply as much control over this information as - possible to the provider of that information. Four header fields are - worth special mention in this context: Server, Via, Referer and From. - - - - -Fielding, et. al. Standards Track [Page 141] - -RFC 2068 HTTP/1.1 January 1997 - - - Revealing the specific software version of the server may allow the - server machine to become more vulnerable to attacks against software - that is known to contain security holes. Implementers SHOULD make the - Server header field a configurable option. - - Proxies which serve as a portal through a network firewall SHOULD - take special precautions regarding the transfer of header information - that identifies the hosts behind the firewall. 
In particular, they - SHOULD remove, or replace with sanitized versions, any Via fields - generated behind the firewall. - - The Referer field allows reading patterns to be studied and reverse - links drawn. Although it can be very useful, its power can be abused - if user details are not separated from the information contained in - the Referer. Even when the personal information has been removed, the - Referer field may indicate a private document's URI whose publication - would be inappropriate. - - The information sent in the From field might conflict with the user's - privacy interests or their site's security policy, and hence it - SHOULD NOT be transmitted without the user being able to disable, - enable, and modify the contents of the field. The user MUST be able - to set the contents of this field within a user preference or - application defaults configuration. - - We suggest, though do not require, that a convenient toggle interface - be provided for the user to enable or disable the sending of From and - Referer information. - -15.5 Attacks Based On File and Path Names - - Implementations of HTTP origin servers SHOULD be careful to restrict - the documents returned by HTTP requests to be only those that were - intended by the server administrators. If an HTTP server translates - HTTP URIs directly into file system calls, the server MUST take - special care not to serve files that were not intended to be - delivered to HTTP clients. For example, UNIX, Microsoft Windows, and - other operating systems use ".." as a path component to indicate a - directory level above the current one. On such a system, an HTTP - server MUST disallow any such construct in the Request-URI if it - would otherwise allow access to a resource outside those intended to - be accessible via the HTTP server. 
Similarly, files intended for - reference only internally to the server (such as access control - files, configuration files, and script code) MUST be protected from - inappropriate retrieval, since they might contain sensitive - information. Experience has shown that minor bugs in such HTTP server - implementations have turned into security risks. - - - - -Fielding, et. al. Standards Track [Page 142] - -RFC 2068 HTTP/1.1 January 1997 - - -15.6 Personal Information - - HTTP clients are often privy to large amounts of personal information - (e.g. the user's name, location, mail address, passwords, encryption - keys, etc.), and SHOULD be very careful to prevent unintentional - leakage of this information via the HTTP protocol to other sources. - We very strongly recommend that a convenient interface be provided - for the user to control dissemination of such information, and that - designers and implementers be particularly careful in this area. - History shows that errors in this area are often both serious - security and/or privacy problems, and often generate highly adverse - publicity for the implementer's company. - -15.7 Privacy Issues Connected to Accept Headers - - Accept request-headers can reveal information about the user to all - servers which are accessed. The Accept-Language header in particular - can reveal information the user would consider to be of a private - nature, because the understanding of particular languages is often - strongly correlated to the membership of a particular ethnic group. - User agents which offer the option to configure the contents of an - Accept-Language header to be sent in every request are strongly - encouraged to let the configuration process include a message which - makes the user aware of the loss of privacy involved. 
- - An approach that limits the loss of privacy would be for a user agent - to omit the sending of Accept-Language headers by default, and to ask - the user whether it should start sending Accept-Language headers to a - server if it detects, by looking for any Vary response-header fields - generated by the server, that such sending could improve the quality - of service. - - Elaborate user-customized accept header fields sent in every request, - in particular if these include quality values, can be used by servers - as relatively reliable and long-lived user identifiers. Such user - identifiers would allow content providers to do click-trail tracking, - and would allow collaborating content providers to match cross-server - click-trails or form submissions of individual users. Note that for - many users not behind a proxy, the network address of the host - running the user agent will also serve as a long-lived user - identifier. In environments where proxies are used to enhance - privacy, user agents should be conservative in offering accept header - configuration options to end users. As an extreme privacy measure, - proxies could filter the accept headers in relayed requests. General - purpose user agents which provide a high degree of header - configurability should warn users about the loss of privacy which can - be involved. - - - - -Fielding, et. al. Standards Track [Page 143] - -RFC 2068 HTTP/1.1 January 1997 - - -15.8 DNS Spoofing - - Clients using HTTP rely heavily on the Domain Name Service, and are - thus generally prone to security attacks based on the deliberate - mis-association of IP addresses and DNS names. Clients need to be - cautious in assuming the continuing validity of an IP number/DNS name - association. - - In particular, HTTP clients SHOULD rely on their name resolver for - confirmation of an IP number/DNS name association, rather than - caching the result of previous host name lookups. 
Many platforms - already can cache host name lookups locally when appropriate, and - they SHOULD be configured to do so. These lookups should be cached, - however, only when the TTL (Time To Live) information reported by the - name server makes it likely that the cached information will remain - useful. - - If HTTP clients cache the results of host name lookups in order to - achieve a performance improvement, they MUST observe the TTL - information reported by DNS. - - If HTTP clients do not observe this rule, they could be spoofed when - a previously-accessed server's IP address changes. As network - renumbering is expected to become increasingly common, the - possibility of this form of attack will grow. Observing this - requirement thus reduces this potential security vulnerability. - - This requirement also improves the load-balancing behavior of clients - for replicated servers using the same DNS name and reduces the - likelihood of a user's experiencing failure in accessing sites which - use that strategy. - -15.9 Location Headers and Spoofing - - If a single server supports multiple organizations that do not trust - one another, then it must check the values of Location and Content- - Location headers in responses that are generated under control of - said organizations to make sure that they do not attempt to - invalidate resources over which they have no authority. - -16 Acknowledgments - - This specification makes heavy use of the augmented BNF and generic - constructs defined by David H. Crocker for RFC 822. Similarly, it - reuses many of the definitions provided by Nathaniel Borenstein and - Ned Freed for MIME. We hope that their inclusion in this - specification will help reduce past confusion over the relationship - between HTTP and Internet mail message formats. - - - -Fielding, et. al. Standards Track [Page 144] - -RFC 2068 HTTP/1.1 January 1997 - - - The HTTP protocol has evolved considerably over the past four years. 
- It has benefited from a large and active developer community--the - many people who have participated on the www-talk mailing list--and - it is that community which has been most responsible for the success - of HTTP and of the World-Wide Web in general. Marc Andreessen, Robert - Cailliau, Daniel W. Connolly, Bob Denny, John Franks, Jean-Francois - Groff, Phillip M. Hallam-Baker, Hakon W. Lie, Ari Luotonen, Rob - McCool, Lou Montulli, Dave Raggett, Tony Sanders, and Marc - VanHeyningen deserve special recognition for their efforts in - defining early aspects of the protocol. - - This document has benefited greatly from the comments of all those - participating in the HTTP-WG. In addition to those already mentioned, - the following individuals have contributed to this specification: - - Gary Adams Albert Lunde - Harald Tveit Alvestrand John C. Mallery - Keith Ball Jean-Philippe Martin-Flatin - Brian Behlendorf Larry Masinter - Paul Burchard Mitra - Maurizio Codogno David Morris - Mike Cowlishaw Gavin Nicol - Roman Czyborra Bill Perry - Michael A. Dolan Jeffrey Perry - David J. Fiander Scott Powers - Alan Freier Owen Rees - Marc Hedlund Luigi Rizzo - Greg Herlihy David Robinson - Koen Holtman Marc Salomon - Alex Hopmann Rich Salz - Bob Jernigan Allan M. Schiffman - Shel Kaphan Jim Seidman - Rohit Khare Chuck Shotton - John Klensin Eric W. Sink - Martijn Koster Simon E. Spero - Alexei Kosut Richard N. Taylor - David M. Kristol Robert S. Thau - Daniel LaLiberte Bill (BearHeart) Weinman - Ben Laurie Francois Yergeau - Paul J. Leach Mary Ellen Zurko - Daniel DuBois - - Much of the content and presentation of the caching design is due to - suggestions and comments from individuals including: Shel Kaphan, - Paul Leach, Koen Holtman, David Morris, and Larry Masinter. - - - - - - -Fielding, et. al. 
Standards Track [Page 145] - -RFC 2068 HTTP/1.1 January 1997 - - - Most of the specification of ranges is based on work originally done - by Ari Luotonen and John Franks, with additional input from Steve - Zilles. - - Thanks to the "cave men" of Palo Alto. You know who you are. - - Jim Gettys (the current editor of this document) wishes particularly - to thank Roy Fielding, the previous editor of this document, along - with John Klensin, Jeff Mogul, Paul Leach, Dave Kristol, Koen - Holtman, John Franks, Alex Hopmann, and Larry Masinter for their - help. - -17 References - - [1] Alvestrand, H., "Tags for the identification of languages", RFC - 1766, UNINETT, March 1995. - - [2] Anklesaria, F., McCahill, M., Lindner, P., Johnson, D., Torrey, - D., and B. Alberti. "The Internet Gopher Protocol: (a distributed - document search and retrieval protocol)", RFC 1436, University of - Minnesota, March 1993. - - [3] Berners-Lee, T., "Universal Resource Identifiers in WWW", A - Unifying Syntax for the Expression of Names and Addresses of Objects - on the Network as used in the World-Wide Web", RFC 1630, CERN, June - 1994. - - [4] Berners-Lee, T., Masinter, L., and M. McCahill, "Uniform Resource - Locators (URL)", RFC 1738, CERN, Xerox PARC, University of Minnesota, - December 1994. - - [5] Berners-Lee, T., and D. Connolly, "HyperText Markup Language - Specification - 2.0", RFC 1866, MIT/LCS, November 1995. - - [6] Berners-Lee, T., Fielding, R., and H. Frystyk, "Hypertext - Transfer Protocol -- HTTP/1.0.", RFC 1945 MIT/LCS, UC Irvine, May - 1996. - - [7] Freed, N., and N. Borenstein, "Multipurpose Internet Mail - Extensions (MIME) Part One: Format of Internet Message Bodies", RFC - 2045, Innosoft, First Virtual, November 1996. - - [8] Braden, R., "Requirements for Internet hosts - application and - support", STD 3, RFC 1123, IETF, October 1989. - - [9] Crocker, D., "Standard for the Format of ARPA Internet Text - Messages", STD 11, RFC 822, UDEL, August 1982. 
- - - - -Fielding, et. al. Standards Track [Page 146] - -RFC 2068 HTTP/1.1 January 1997 - - - [10] Davis, F., Kahle, B., Morris, H., Salem, J., Shen, T., Wang, R., - Sui, J., and M. Grinbaum. "WAIS Interface Protocol Prototype - Functional Specification", (v1.5), Thinking Machines Corporation, - April 1990. - - [11] Fielding, R., "Relative Uniform Resource Locators", RFC 1808, UC - Irvine, June 1995. - - [12] Horton, M., and R. Adams. "Standard for interchange of USENET - messages", RFC 1036, AT&T Bell Laboratories, Center for Seismic - Studies, December 1987. - - [13] Kantor, B., and P. Lapsley. "Network News Transfer Protocol." A - Proposed Standard for the Stream-Based Transmission of News", RFC - 977, UC San Diego, UC Berkeley, February 1986. - - [14] Moore, K., "MIME (Multipurpose Internet Mail Extensions) Part - Three: Message Header Extensions for Non-ASCII Text", RFC 2047, - University of Tennessee, November 1996. - - [15] Nebel, E., and L. Masinter. "Form-based File Upload in HTML", - RFC 1867, Xerox Corporation, November 1995. - - [16] Postel, J., "Simple Mail Transfer Protocol", STD 10, RFC 821, - USC/ISI, August 1982. - - [17] Postel, J., "Media Type Registration Procedure", RFC 2048, - USC/ISI, November 1996. - - [18] Postel, J., and J. Reynolds, "File Transfer Protocol (FTP)", STD - 9, RFC 959, USC/ISI, October 1985. - - [19] Reynolds, J., and J. Postel, "Assigned Numbers", STD 2, RFC - 1700, USC/ISI, October 1994. - - [20] Sollins, K., and L. Masinter, "Functional Requirements for - Uniform Resource Names", RFC 1737, MIT/LCS, Xerox Corporation, - December 1994. - - [21] US-ASCII. Coded Character Set - 7-Bit American Standard Code for - Information Interchange. Standard ANSI X3.4-1986, ANSI, 1986. - - [22] ISO-8859. International Standard -- Information Processing -- - 8-bit Single-Byte Coded Graphic Character Sets -- - Part 1: Latin alphabet No. 1, ISO 8859-1:1987. - Part 2: Latin alphabet No. 2, ISO 8859-2, 1987. - Part 3: Latin alphabet No. 
3, ISO 8859-3, 1988. - Part 4: Latin alphabet No. 4, ISO 8859-4, 1988. - - - -Fielding, et. al. Standards Track [Page 147] - -RFC 2068 HTTP/1.1 January 1997 - - - Part 5: Latin/Cyrillic alphabet, ISO 8859-5, 1988. - Part 6: Latin/Arabic alphabet, ISO 8859-6, 1987. - Part 7: Latin/Greek alphabet, ISO 8859-7, 1987. - Part 8: Latin/Hebrew alphabet, ISO 8859-8, 1988. - Part 9: Latin alphabet No. 5, ISO 8859-9, 1990. - - [23] Meyers, J., and M. Rose "The Content-MD5 Header Field", RFC - 1864, Carnegie Mellon, Dover Beach Consulting, October, 1995. - - [24] Carpenter, B., and Y. Rekhter, "Renumbering Needs Work", RFC - 1900, IAB, February 1996. - - [25] Deutsch, P., "GZIP file format specification version 4.3." RFC - 1952, Aladdin Enterprises, May 1996. - - [26] Venkata N. Padmanabhan and Jeffrey C. Mogul. Improving HTTP - Latency. Computer Networks and ISDN Systems, v. 28, pp. 25-35, Dec. - 1995. Slightly revised version of paper in Proc. 2nd International - WWW Conf. '94: Mosaic and the Web, Oct. 1994, which is available at - http://www.ncsa.uiuc.edu/SDG/IT94/Proceedings/DDay/mogul/ - HTTPLatency.html. - - [27] Joe Touch, John Heidemann, and Katia Obraczka, "Analysis of HTTP - Performance", , - USC/Information Sciences Institute, June 1996 - - [28] Mills, D., "Network Time Protocol, Version 3, Specification, - Implementation and Analysis", RFC 1305, University of Delaware, March - 1992. - - [29] Deutsch, P., "DEFLATE Compressed Data Format Specification - version 1.3." RFC 1951, Aladdin Enterprises, May 1996. - - [30] Spero, S., "Analysis of HTTP Performance Problems" - . - - [31] Deutsch, P., and J-L. Gailly, "ZLIB Compressed Data Format - Specification version 3.3", RFC 1950, Aladdin Enterprises, Info-ZIP, - May 1996. - - [32] Franks, J., Hallam-Baker, P., Hostetler, J., Leach, P., - Luotonen, A., Sink, E., and L. Stewart, "An Extension to HTTP : - Digest Access Authentication", RFC 2069, January 1997. - - - - - - - - -Fielding, et. al. 
Standards Track [Page 148] - -RFC 2068 HTTP/1.1 January 1997 - - -18 Authors' Addresses - - Roy T. Fielding - Department of Information and Computer Science - University of California - Irvine, CA 92717-3425, USA - - Fax: +1 (714) 824-4056 - EMail: fielding@ics.uci.edu - - - Jim Gettys - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - - Fax: +1 (617) 258 8682 - EMail: jg@w3.org - - - Jeffrey C. Mogul - Western Research Laboratory - Digital Equipment Corporation - 250 University Avenue - Palo Alto, California, 94305, USA - - EMail: mogul@wrl.dec.com - - - Henrik Frystyk Nielsen - W3 Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - - Fax: +1 (617) 258 8682 - EMail: frystyk@w3.org - - - Tim Berners-Lee - Director, W3 Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - - Fax: +1 (617) 258 8682 - EMail: timbl@w3.org - - - - -Fielding, et. al. Standards Track [Page 149] - -RFC 2068 HTTP/1.1 January 1997 - - -19 Appendices - -19.1 Internet Media Type message/http - - In addition to defining the HTTP/1.1 protocol, this document serves - as the specification for the Internet media type "message/http". The - following is to be registered with IANA. - - Media Type name: message - Media subtype name: http - Required parameters: none - Optional parameters: version, msgtype - - version: The HTTP-Version number of the enclosed message - (e.g., "1.1"). If not present, the version can be - determined from the first line of the body. - - msgtype: The message type -- "request" or "response". If not - present, the type can be determined from the first - line of the body. 
- - Encoding considerations: only "7bit", "8bit", or "binary" are - permitted - - Security considerations: none - -19.2 Internet Media Type multipart/byteranges - - When an HTTP message includes the content of multiple ranges (for - example, a response to a request for multiple non-overlapping - ranges), these are transmitted as a multipart MIME message. The - multipart media type for this purpose is called - "multipart/byteranges". - - The multipart/byteranges media type includes two or more parts, each - with its own Content-Type and Content-Range fields. The parts are - separated using a MIME boundary parameter. - - Media Type name: multipart - Media subtype name: byteranges - Required parameters: boundary - Optional parameters: none - - Encoding considerations: only "7bit", "8bit", or "binary" are - permitted - - Security considerations: none - - - - -Fielding, et. al. Standards Track [Page 150] - -RFC 2068 HTTP/1.1 January 1997 - - -For example: - - HTTP/1.1 206 Partial content - Date: Wed, 15 Nov 1995 06:25:24 GMT - Last-modified: Wed, 15 Nov 1995 04:58:08 GMT - Content-type: multipart/byteranges; boundary=THIS_STRING_SEPARATES - - --THIS_STRING_SEPARATES - Content-type: application/pdf - Content-range: bytes 500-999/8000 - - ...the first range... - --THIS_STRING_SEPARATES - Content-type: application/pdf - Content-range: bytes 7000-7999/8000 - - ...the second range - --THIS_STRING_SEPARATES-- - -19.3 Tolerant Applications - - Although this document specifies the requirements for the generation - of HTTP/1.1 messages, not all applications will be correct in their - implementation. We therefore recommend that operational applications - be tolerant of deviations whenever those deviations can be - interpreted unambiguously. - - Clients SHOULD be tolerant in parsing the Status-Line and servers - tolerant when parsing the Request-Line. In particular, they SHOULD - accept any amount of SP or HT characters between fields, even though - only a single SP is required. 
- - The line terminator for message-header fields is the sequence CRLF. - However, we recommend that applications, when parsing such headers, - recognize a single LF as a line terminator and ignore the leading CR. - - The character set of an entity-body should be labeled as the lowest - common denominator of the character codes used within that body, with - the exception that no label is preferred over the labels US-ASCII or - ISO-8859-1. - - Additional rules for requirements on parsing and encoding of dates - and other potential problems with date encodings include: - - o HTTP/1.1 clients and caches should assume that an RFC-850 date - which appears to be more than 50 years in the future is in fact - in the past (this helps solve the "year 2000" problem). - - - - -Fielding, et. al. Standards Track [Page 151] - -RFC 2068 HTTP/1.1 January 1997 - - - o An HTTP/1.1 implementation may internally represent a parsed - Expires date as earlier than the proper value, but MUST NOT - internally represent a parsed Expires date as later than the - proper value. - - o All expiration-related calculations must be done in GMT. The - local time zone MUST NOT influence the calculation or comparison - of an age or expiration time. - - o If an HTTP header incorrectly carries a date value with a time - zone other than GMT, it must be converted into GMT using the most - conservative possible conversion. - -19.4 Differences Between HTTP Entities and MIME Entities - - HTTP/1.1 uses many of the constructs defined for Internet Mail (RFC - 822) and the Multipurpose Internet Mail Extensions (MIME ) to allow - entities to be transmitted in an open variety of representations and - with extensible mechanisms. However, MIME [7] discusses mail, and - HTTP has a few features that are different from those described in - MIME. 
These differences were carefully chosen to optimize - performance over binary connections, to allow greater freedom in the - use of new media types, to make date comparisons easier, and to - acknowledge the practice of some early HTTP servers and clients. - - This appendix describes specific areas where HTTP differs from MIME. - Proxies and gateways to strict MIME environments SHOULD be aware of - these differences and provide the appropriate conversions where - necessary. Proxies and gateways from MIME environments to HTTP also - need to be aware of the differences because some conversions may be - required. - -19.4.1 Conversion to Canonical Form - - MIME requires that an Internet mail entity be converted to canonical - form prior to being transferred. Section 3.7.1 of this document - describes the forms allowed for subtypes of the "text" media type - when transmitted over HTTP. MIME requires that content with a type of - "text" represent line breaks as CRLF and forbids the use of CR or LF - outside of line break sequences. HTTP allows CRLF, bare CR, and bare - LF to indicate a line break within text content when a message is - transmitted over HTTP. - - Where it is possible, a proxy or gateway from HTTP to a strict MIME - environment SHOULD translate all line breaks within the text media - types described in section 3.7.1 of this document to the MIME - canonical form of CRLF. Note, however, that this may be complicated - by the presence of a Content-Encoding and by the fact that HTTP - - - -Fielding, et. al. Standards Track [Page 152] - -RFC 2068 HTTP/1.1 January 1997 - - - allows the use of some character sets which do not use octets 13 and - 10 to represent CR and LF, as is the case for some multi-byte - character sets. - -19.4.2 Conversion of Date Formats - - HTTP/1.1 uses a restricted set of date formats (section 3.3.1) to - simplify the process of date comparison. 
Proxies and gateways from - other protocols SHOULD ensure that any Date header field present in a - message conforms to one of the HTTP/1.1 formats and rewrite the date - if necessary. - -19.4.3 Introduction of Content-Encoding - - MIME does not include any concept equivalent to HTTP/1.1's Content- - Encoding header field. Since this acts as a modifier on the media - type, proxies and gateways from HTTP to MIME-compliant protocols MUST - either change the value of the Content-Type header field or decode - the entity-body before forwarding the message. (Some experimental - applications of Content-Type for Internet mail have used a media-type - parameter of ";conversions=" to perform an equivalent - function as Content-Encoding. However, this parameter is not part of - MIME.) - -19.4.4 No Content-Transfer-Encoding - - HTTP does not use the Content-Transfer-Encoding (CTE) field of MIME. - Proxies and gateways from MIME-compliant protocols to HTTP MUST - remove any non-identity CTE ("quoted-printable" or "base64") encoding - prior to delivering the response message to an HTTP client. - - Proxies and gateways from HTTP to MIME-compliant protocols are - responsible for ensuring that the message is in the correct format - and encoding for safe transport on that protocol, where "safe - transport" is defined by the limitations of the protocol being used. - Such a proxy or gateway SHOULD label the data with an appropriate - Content-Transfer-Encoding if doing so will improve the likelihood of - safe transport over the destination protocol. - -19.4.5 HTTP Header Fields in Multipart Body-Parts - - In MIME, most header fields in multipart body-parts are generally - ignored unless the field name begins with "Content-". In HTTP/1.1, - multipart body-parts may contain any HTTP header fields which are - significant to the meaning of that part. - - - - - - -Fielding, et. al. 
Standards Track [Page 153] - -RFC 2068 HTTP/1.1 January 1997 - - -19.4.6 Introduction of Transfer-Encoding - - HTTP/1.1 introduces the Transfer-Encoding header field (section - 14.40). Proxies/gateways MUST remove any transfer coding prior to - forwarding a message via a MIME-compliant protocol. - - A process for decoding the "chunked" transfer coding (section 3.6) - can be represented in pseudo-code as: - - length := 0 - read chunk-size, chunk-ext (if any) and CRLF - while (chunk-size > 0) { - read chunk-data and CRLF - append chunk-data to entity-body - length := length + chunk-size - read chunk-size and CRLF - } - read entity-header - while (entity-header not empty) { - append entity-header to existing header fields - read entity-header - } - Content-Length := length - Remove "chunked" from Transfer-Encoding - -19.4.7 MIME-Version - - HTTP is not a MIME-compliant protocol (see appendix 19.4). However, - HTTP/1.1 messages may include a single MIME-Version general-header - field to indicate what version of the MIME protocol was used to - construct the message. Use of the MIME-Version header field indicates - that the message is in full compliance with the MIME protocol. - Proxies/gateways are responsible for ensuring full compliance (where - possible) when exporting HTTP messages to strict MIME environments. - - MIME-Version = "MIME-Version" ":" 1*DIGIT "." 1*DIGIT - - MIME version "1.0" is the default for use in HTTP/1.1. However, - HTTP/1.1 message parsing and semantics are defined by this document - and not the MIME specification. - -19.5 Changes from HTTP/1.0 - - This section summarizes major differences between versions HTTP/1.0 - and HTTP/1.1. - - - - - - -Fielding, et. al. 
Standards Track [Page 154] - -RFC 2068 HTTP/1.1 January 1997 - - -19.5.1 Changes to Simplify Multi-homed Web Servers and Conserve IP - Addresses - - The requirements that clients and servers support the Host request- - header, report an error if the Host request-header (section 14.23) is - missing from an HTTP/1.1 request, and accept absolute URIs (section - 5.1.2) are among the most important changes defined by this - specification. - - Older HTTP/1.0 clients assumed a one-to-one relationship of IP - addresses and servers; there was no other established mechanism for - distinguishing the intended server of a request than the IP address - to which that request was directed. The changes outlined above will - allow the Internet, once older HTTP clients are no longer common, to - support multiple Web sites from a single IP address, greatly - simplifying large operational Web servers, where allocation of many - IP addresses to a single host has created serious problems. The - Internet will also be able to recover the IP addresses that have been - allocated for the sole purpose of allowing special-purpose domain - names to be used in root-level HTTP URLs. Given the rate of growth of - the Web, and the number of servers already deployed, it is extremely - important that all implementations of HTTP (including updates to - existing HTTP/1.0 applications) correctly implement these - requirements: - - o Both clients and servers MUST support the Host request-header. - - o Host request-headers are required in HTTP/1.1 requests. - - o Servers MUST report a 400 (Bad Request) error if an HTTP/1.1 - request does not include a Host request-header. - - o Servers MUST accept absolute URIs. - - - - - - - - - - - - - - - - - - -Fielding, et. al. 
Standards Track [Page 155] - -RFC 2068 HTTP/1.1 January 1997 - - -19.6 Additional Features - - This appendix documents protocol elements used by some existing HTTP - implementations, but not consistently and correctly across most - HTTP/1.1 applications. Implementers should be aware of these - features, but cannot rely upon their presence in, or interoperability - with, other HTTP/1.1 applications. Some of these describe proposed - experimental features, and some describe features that experimental - deployment found lacking that are now addressed in the base HTTP/1.1 - specification. - -19.6.1 Additional Request Methods - -19.6.1.1 PATCH - - The PATCH method is similar to PUT except that the entity contains a - list of differences between the original version of the resource - identified by the Request-URI and the desired content of the resource - after the PATCH action has been applied. The list of differences is - in a format defined by the media type of the entity (e.g., - "application/diff") and MUST include sufficient information to allow - the server to recreate the changes necessary to convert the original - version of the resource to the desired version. - - If the request passes through a cache and the Request-URI identifies - a currently cached entity, that entity MUST be removed from the - cache. Responses to this method are not cachable. - - The actual method for determining how the patched resource is placed, - and what happens to its predecessor, is defined entirely by the - origin server. If the original version of the resource being patched - included a Content-Version header field, the request entity MUST - include a Derived-From header field corresponding to the value of the - original Content-Version header field. Applications are encouraged to - use these fields for constructing versioning relationships and - resolving version conflicts. - - PATCH requests must obey the message transmission requirements set - out in section 8.2. 
- - Caches that implement PATCH should invalidate cached responses as - defined in section 13.10 for PUT. - -19.6.1.2 LINK - - The LINK method establishes one or more Link relationships between - the existing resource identified by the Request-URI and other - existing resources. The difference between LINK and other methods - - - -Fielding, et. al. Standards Track [Page 156] - -RFC 2068 HTTP/1.1 January 1997 - - - allowing links to be established between resources is that the LINK - method does not allow any message-body to be sent in the request and - does not directly result in the creation of new resources. - - If the request passes through a cache and the Request-URI identifies - a currently cached entity, that entity MUST be removed from the - cache. Responses to this method are not cachable. - - Caches that implement LINK should invalidate cached responses as - defined in section 13.10 for PUT. - -19.6.1.3 UNLINK - - The UNLINK method removes one or more Link relationships from the - existing resource identified by the Request-URI. These relationships - may have been established using the LINK method or by any other - method supporting the Link header. The removal of a link to a - resource does not imply that the resource ceases to exist or becomes - inaccessible for future references. - - If the request passes through a cache and the Request-URI identifies - a currently cached entity, that entity MUST be removed from the - cache. Responses to this method are not cachable. - - Caches that implement UNLINK should invalidate cached responses as - defined in section 13.10 for PUT. 
- -19.6.2 Additional Header Field Definitions - -19.6.2.1 Alternates - - The Alternates response-header field has been proposed as a means for - the origin server to inform the client about other available - representations of the requested resource, along with their - distinguishing attributes, and thus providing a more reliable means - for a user agent to perform subsequent selection of another - representation which better fits the desires of its user (described - as agent-driven negotiation in section 12). - - - - - - - - - - - - - -Fielding, et. al. Standards Track [Page 157] - -RFC 2068 HTTP/1.1 January 1997 - - - The Alternates header field is orthogonal to the Vary header field in - that both may coexist in a message without affecting the - interpretation of the response or the available representations. It - is expected that Alternates will provide a significant improvement - over the server-driven negotiation provided by the Vary field for - those resources that vary over common dimensions like type and - language. - - The Alternates header field will be defined in a future - specification. - -19.6.2.2 Content-Version - - The Content-Version entity-header field defines the version tag - associated with a rendition of an evolving entity. Together with the - Derived-From field described in section 19.6.2.3, it allows a group - of people to work simultaneously on the creation of a work as an - iterative process. The field should be used to allow evolution of a - particular work along a single path rather than derived works or - renditions in different representations. 
- - Content-Version = "Content-Version" ":" quoted-string - - Examples of the Content-Version field include: - - Content-Version: "2.1.2" - Content-Version: "Fred 19950116-12:26:48" - Content-Version: "2.5a4-omega7" - -19.6.2.3 Derived-From - - The Derived-From entity-header field can be used to indicate the - version tag of the resource from which the enclosed entity was - derived before modifications were made by the sender. This field is - used to help manage the process of merging successive changes to a - resource, particularly when such changes are being made in parallel - and from multiple sources. - - Derived-From = "Derived-From" ":" quoted-string - - An example use of the field is: - - Derived-From: "2.1.1" - - The Derived-From field is required for PUT and PATCH requests if the - entity being sent was previously retrieved from the same URI and a - Content-Version header was included with the entity when it was last - retrieved. - - - -Fielding, et. al. Standards Track [Page 158] - -RFC 2068 HTTP/1.1 January 1997 - - -19.6.2.4 Link - - The Link entity-header field provides a means for describing a - relationship between two resources, generally between the requested - resource and some other resource. An entity MAY include multiple Link - values. Links at the metainformation level typically indicate - relationships like hierarchical structure and navigation paths. The - Link field is semantically equivalent to the element in - HTML.[5] - - Link = "Link" ":" #("<" URI ">" *( ";" link-param ) - - link-param = ( ( "rel" "=" relationship ) - | ( "rev" "=" relationship ) - | ( "title" "=" quoted-string ) - | ( "anchor" "=" <"> URI <"> ) - | ( link-extension ) ) - - link-extension = token [ "=" ( token | quoted-string ) ] - - relationship = sgml-name - | ( <"> sgml-name *( SP sgml-name) <"> ) - - sgml-name = ALPHA *( ALPHA | DIGIT | "." | "-" ) - - Relationship values are case-insensitive and MAY be extended within - the constraints of the sgml-name syntax. 
The title parameter MAY be - used to label the destination of a link such that it can be used as - identification within a human-readable menu. The anchor parameter MAY - be used to indicate a source anchor other than the entire current - resource, such as a fragment of this resource or a third resource. - - Examples of usage include: - - Link: ; rel="Previous" - - Link: ; rev="Made"; title="Tim Berners-Lee" - - The first example indicates that chapter2 is previous to this - resource in a logical navigation path. The second indicates that the - person responsible for making the resource available is identified by - the given e-mail address. - -19.6.2.5 URI - - The URI header field has, in past versions of this specification, - been used as a combination of the existing Location, Content- - Location, and Vary header fields as well as the future Alternates - - - -Fielding, et. al. Standards Track [Page 159] - -RFC 2068 HTTP/1.1 January 1997 - - - field (above). Its primary purpose has been to include a list of - additional URIs for the resource, including names and mirror - locations. However, it has become clear that the combination of many - different functions within this single field has been a barrier to - consistently and correctly implementing any of those functions. - Furthermore, we believe that the identification of names and mirror - locations would be better performed via the Link header field. The - URI header field is therefore deprecated in favor of those other - fields. - - URI-header = "URI" ":" 1#( "<" URI ">" ) - -19.7 Compatibility with Previous Versions - - It is beyond the scope of a protocol specification to mandate - compliance with previous versions. HTTP/1.1 was deliberately - designed, however, to make supporting previous versions easy. 
It is - worth noting that at the time of composing this specification, we - would expect commercial HTTP/1.1 servers to: - - o recognize the format of the Request-Line for HTTP/0.9, 1.0, and 1.1 - requests; - - o understand any valid request in the format of HTTP/0.9, 1.0, or - 1.1; - - o respond appropriately with a message in the same major version used - by the client. - - And we would expect HTTP/1.1 clients to: - - o recognize the format of the Status-Line for HTTP/1.0 and 1.1 - responses; - - o understand any valid response in the format of HTTP/0.9, 1.0, or - 1.1. - - For most implementations of HTTP/1.0, each connection is established - by the client prior to the request and closed by the server after - sending the response. A few implementations implement the Keep-Alive - version of persistent connections described in section 19.7.1.1. - - - - - - - - - - -Fielding, et. al. Standards Track [Page 160] - -RFC 2068 HTTP/1.1 January 1997 - - -19.7.1 Compatibility with HTTP/1.0 Persistent Connections - - Some clients and servers may wish to be compatible with some previous - implementations of persistent connections in HTTP/1.0 clients and - servers. Persistent connections in HTTP/1.0 must be explicitly - negotiated as they are not the default behavior. HTTP/1.0 - experimental implementations of persistent connections are faulty, - and the new facilities in HTTP/1.1 are designed to rectify these - problems. The problem was that some existing 1.0 clients may be - sending Keep-Alive to a proxy server that doesn't understand - Connection, which would then erroneously forward it to the next - inbound server, which would establish the Keep-Alive connection and - result in a hung HTTP/1.0 proxy waiting for the close on the - response. The result is that HTTP/1.0 clients must be prevented from - using Keep-Alive when talking to proxies. - - However, talking to proxies is the most important use of persistent - connections, so that prohibition is clearly unacceptable. 
Therefore, - we need some other mechanism for indicating a persistent connection - is desired, which is safe to use even when talking to an old proxy - that ignores Connection. Persistent connections are the default for - HTTP/1.1 messages; we introduce a new keyword (Connection: close) for - declaring non-persistence. - - The following describes the original HTTP/1.0 form of persistent - connections. - - When it connects to an origin server, an HTTP client MAY send the - Keep-Alive connection-token in addition to the Persist connection- - token: - - Connection: Keep-Alive - - An HTTP/1.0 server would then respond with the Keep-Alive connection - token and the client may proceed with an HTTP/1.0 (or Keep-Alive) - persistent connection. - - An HTTP/1.1 server may also establish persistent connections with - HTTP/1.0 clients upon receipt of a Keep-Alive connection token. - However, a persistent connection with an HTTP/1.0 client cannot make - use of the chunked transfer-coding, and therefore MUST use a - Content-Length for marking the ending boundary of each message. - - A client MUST NOT send the Keep-Alive connection token to a proxy - server as HTTP/1.0 proxy servers do not obey the rules of HTTP/1.1 - for parsing the Connection header field. - - - - - -Fielding, et. al. Standards Track [Page 161] - -RFC 2068 HTTP/1.1 January 1997 - - -19.7.1.1 The Keep-Alive Header - - When the Keep-Alive connection-token has been transmitted with a - request or a response, a Keep-Alive header field MAY also be - included. The Keep-Alive header field takes the following form: - - Keep-Alive-header = "Keep-Alive" ":" 0# keepalive-param - - keepalive-param = param-name "=" value - - The Keep-Alive header itself is optional, and is used only if a - parameter is being sent. HTTP/1.1 does not define any parameters. - - If the Keep-Alive header is sent, the corresponding connection token - MUST be transmitted. 
The Keep-Alive header MUST be ignored if - received without the connection token. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Fielding, et. al. Standards Track [Page 162] - diff --git a/docs/specs/rfc2109.txt b/docs/specs/rfc2109.txt deleted file mode 100644 index 432fdcc..0000000 --- a/docs/specs/rfc2109.txt +++ /dev/null @@ -1,1179 +0,0 @@ - - - - - - -Network Working Group D. Kristol -Request for Comments: 2109 Bell Laboratories, Lucent Technologies -Category: Standards Track L. Montulli - Netscape Communications - February 1997 - - - HTTP State Management Mechanism - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -1. ABSTRACT - - This document specifies a way to create a stateful session with HTTP - requests and responses. It describes two new headers, Cookie and - Set-Cookie, which carry state information between participating - origin servers and user agents. The method described here differs - from Netscape's Cookie proposal, but it can interoperate with - HTTP/1.0 user agents that use Netscape's method. (See the HISTORICAL - section.) - -2. TERMINOLOGY - - The terms user agent, client, server, proxy, and origin server have - the same meaning as in the HTTP/1.0 specification. - - Fully-qualified host name (FQHN) means either the fully-qualified - domain name (FQDN) of a host (i.e., a completely specified domain - name ending in a top-level domain such as .com or .uk), or the - numeric Internet Protocol (IP) address of a host. The fully - qualified domain name is preferred; use of numeric IP addresses is - strongly discouraged. 
- - The terms request-host and request-URI refer to the values the client - would send to the server as, respectively, the host (but not port) - and abs_path portions of the absoluteURI (http_URL) of the HTTP - request line. Note that request-host must be a FQHN. - - - - - - - - -Kristol & Montulli Standards Track [Page 1] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - Hosts names can be specified either as an IP address or a FQHN - string. Sometimes we compare one host name with another. Host A's - name domain-matches host B's if - - * both host names are IP addresses and their host name strings match - exactly; or - - * both host names are FQDN strings and their host name strings match - exactly; or - - * A is a FQDN string and has the form NB, where N is a non-empty name - string, B has the form .B', and B' is a FQDN string. (So, x.y.com - domain-matches .y.com but not y.com.) - - Note that domain-match is not a commutative operation: a.b.c.com - domain-matches .c.com, but not the reverse. - - Because it was used in Netscape's original implementation of state - management, we will use the term cookie to refer to the state - information that passes between an origin server and user agent, and - that gets stored by the user agent. - -3. STATE AND SESSIONS - - This document describes a way to create stateful sessions with HTTP - requests and responses. Currently, HTTP servers respond to each - client request without relating that request to previous or - subsequent requests; the technique allows clients and servers that - wish to exchange state information to place HTTP requests and - responses within a larger context, which we term a "session". This - context might be used to create, for example, a "shopping cart", in - which user selections can be aggregated before purchase, or a - magazine browsing system, in which a user's previous reading affects - which offerings are presented. 
- - There are, of course, many different potential contexts and thus many - different potential types of session. The designers' paradigm for - sessions created by the exchange of cookies has these key attributes: - - 1. Each session has a beginning and an end. - - 2. Each session is relatively short-lived. - - 3. Either the user agent or the origin server may terminate a - session. - - 4. The session is implicit in the exchange of state information. - - - - -Kristol & Montulli Standards Track [Page 2] - -RFC 2109 HTTP State Management Mechanism February 1997 - - -4. OUTLINE - - We outline here a way for an origin server to send state information - to the user agent, and for the user agent to return the state - information to the origin server. The goal is to have a minimal - impact on HTTP and user agents. Only origin servers that need to - maintain sessions would suffer any significant impact, and that - impact can largely be confined to Common Gateway Interface (CGI) - programs, unless the server provides more sophisticated state - management support. (See Implementation Considerations, below.) - -4.1 Syntax: General - - The two state management headers, Set-Cookie and Cookie, have common - syntactic properties involving attribute-value pairs. The following - grammar uses the notation, and tokens DIGIT (decimal digits) and - token (informally, a sequence of non-special, non-white space - characters) from the HTTP/1.1 specification [RFC 2068] to describe - their syntax. - - av-pairs = av-pair *(";" av-pair) - av-pair = attr ["=" value] ; optional value - attr = token - value = word - word = token | quoted-string - - Attributes (names) (attr) are case-insensitive. White space is - permitted between tokens. Note that while the above syntax - description shows value as optional, most attrs require them. - - NOTE: The syntax above allows whitespace between the attribute and - the = sign. 
- -4.2 Origin Server Role - -4.2.1 General - - The origin server initiates a session, if it so desires. (Note that - "session" here does not refer to a persistent network connection but - to a logical session created from HTTP requests and responses. The - presence or absence of a persistent connection should have no effect - on the use of cookie-derived sessions). To initiate a session, the - origin server returns an extra response header to the client, Set- - Cookie. (The details follow later.) - - A user agent returns a Cookie request header (see below) to the - origin server if it chooses to continue a session. The origin server - may ignore it or use it to determine the current state of the - - - -Kristol & Montulli Standards Track [Page 3] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - session. It may send back to the client a Set-Cookie response header - with the same or different information, or it may send no Set-Cookie - header at all. The origin server effectively ends a session by - sending the client a Set-Cookie header with Max-Age=0. - - Servers may return a Set-Cookie response headers with any response. - User agents should send Cookie request headers, subject to other - rules detailed below, with every request. - - An origin server may include multiple Set-Cookie headers in a - response. Note that an intervening gateway could fold multiple such - headers into a single header. - -4.2.2 Set-Cookie Syntax - - The syntax for the Set-Cookie response header is - - set-cookie = "Set-Cookie:" cookies - cookies = 1#cookie - cookie = NAME "=" VALUE *(";" cookie-av) - NAME = attr - VALUE = value - cookie-av = "Comment" "=" value - | "Domain" "=" value - | "Max-Age" "=" value - | "Path" "=" value - | "Secure" - | "Version" "=" 1*DIGIT - - Informally, the Set-Cookie response header comprises the token Set- - Cookie:, followed by a comma-separated list of one or more cookies. 
- Each cookie begins with a NAME=VALUE pair, followed by zero or more - semi-colon-separated attribute-value pairs. The syntax for - attribute-value pairs was shown earlier. The specific attributes and - the semantics of their values follows. The NAME=VALUE attribute- - value pair must come first in each cookie. The others, if present, - can occur in any order. If an attribute appears more than once in a - cookie, the behavior is undefined. - - NAME=VALUE - Required. The name of the state information ("cookie") is NAME, - and its value is VALUE. NAMEs that begin with $ are reserved for - other uses and must not be used by applications. - - - - - - - - -Kristol & Montulli Standards Track [Page 4] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - The VALUE is opaque to the user agent and may be anything the - origin server chooses to send, possibly in a server-selected - printable ASCII encoding. "Opaque" implies that the content is of - interest and relevance only to the origin server. The content - may, in fact, be readable by anyone that examines the Set-Cookie - header. - - Comment=comment - Optional. Because cookies can contain private information about a - user, the Cookie attribute allows an origin server to document its - intended use of a cookie. The user can inspect the information to - decide whether to initiate or continue a session with this cookie. - - Domain=domain - Optional. The Domain attribute specifies the domain for which the - cookie is valid. An explicitly specified domain must always start - with a dot. - - Max-Age=delta-seconds - Optional. The Max-Age attribute defines the lifetime of the - cookie, in seconds. The delta-seconds value is a decimal non- - negative integer. After delta-seconds seconds elapse, the client - should discard the cookie. A value of zero means the cookie - should be discarded immediately. - - Path=path - Optional. The Path attribute specifies the subset of URLs to - which this cookie applies. 
- - Secure - Optional. The Secure attribute (with no value) directs the user - agent to use only (unspecified) secure means to contact the origin - server whenever it sends back this cookie. - - The user agent (possibly under the user's control) may determine - what level of security it considers appropriate for "secure" - cookies. The Secure attribute should be considered security - advice from the server to the user agent, indicating that it is in - the session's interest to protect the cookie contents. - - Version=version - Required. The Version attribute, a decimal integer, identifies to - which version of the state management specification the cookie - conforms. For this specification, Version=1 applies. - - - - - - - -Kristol & Montulli Standards Track [Page 5] - -RFC 2109 HTTP State Management Mechanism February 1997 - - -4.2.3 Controlling Caching - - An origin server must be cognizant of the effect of possible caching - of both the returned resource and the Set-Cookie header. Caching - "public" documents is desirable. For example, if the origin server - wants to use a public document such as a "front door" page as a - sentinel to indicate the beginning of a session for which a Set- - Cookie response header must be generated, the page should be stored - in caches "pre-expired" so that the origin server will see further - requests. "Private documents", for example those that contain - information strictly private to a session, should not be cached in - shared caches. - - If the cookie is intended for use by a single user, the Set-cookie - header should not be cached. A Set-cookie header that is intended to - be shared by multiple users may be cached. - - The origin server should send the following additional HTTP/1.1 - response headers, depending on circumstances: - - * To suppress caching of the Set-Cookie header: Cache-control: no- - cache="set-cookie". 
- - and one of the following: - - * To suppress caching of a private document in shared caches: Cache- - control: private. - - * To allow caching of a document and require that it be validated - before returning it to the client: Cache-control: must-revalidate. - - * To allow caching of a document, but to require that proxy caches - (not user agent caches) validate it before returning it to the - client: Cache-control: proxy-revalidate. - - * To allow caching of a document and request that it be validated - before returning it to the client (by "pre-expiring" it): - Cache-control: max-age=0. Not all caches will revalidate the - document in every case. - - HTTP/1.1 servers must send Expires: old-date (where old-date is a - date long in the past) on responses containing Set-Cookie response - headers unless they know for certain (by out of band means) that - there are no downsteam HTTP/1.0 proxies. HTTP/1.1 servers may send - other Cache-Control directives that permit caching by HTTP/1.1 - proxies in addition to the Expires: old-date directive; the Cache- - Control directive will override the Expires: old-date for HTTP/1.1 - proxies. - - - -Kristol & Montulli Standards Track [Page 6] - -RFC 2109 HTTP State Management Mechanism February 1997 - - -4.3 User Agent Role - -4.3.1 Interpreting Set-Cookie - - The user agent keeps separate track of state information that arrives - via Set-Cookie response headers from each origin server (as - distinguished by name or IP address and port). The user agent - applies these defaults for optional attributes that are missing: - - VersionDefaults to "old cookie" behavior as originally specified by - Netscape. See the HISTORICAL section. - - Domain Defaults to the request-host. (Note that there is no dot at - the beginning of request-host.) - - Max-AgeThe default behavior is to discard the cookie when the user - agent exits. 
- - Path Defaults to the path of the request URL that generated the - Set-Cookie response, up to, but not including, the - right-most /. - - Secure If absent, the user agent may send the cookie over an - insecure channel. - -4.3.2 Rejecting Cookies - - To prevent possible security or privacy violations, a user agent - rejects a cookie (shall not store its information) if any of the - following is true: - - * The value for the Path attribute is not a prefix of the request- - URI. - - * The value for the Domain attribute contains no embedded dots or - does not start with a dot. - - * The value for the request-host does not domain-match the Domain - attribute. - - * The request-host is a FQDN (not IP address) and has the form HD, - where D is the value of the Domain attribute, and H is a string - that contains one or more dots. - - Examples: - - * A Set-Cookie from request-host y.x.foo.com for Domain=.foo.com - would be rejected, because H is y.x and contains a dot. - - - -Kristol & Montulli Standards Track [Page 7] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - * A Set-Cookie from request-host x.foo.com for Domain=.foo.com would - be accepted. - - * A Set-Cookie with Domain=.com or Domain=.com., will always be - rejected, because there is no embedded dot. - - * A Set-Cookie with Domain=ajax.com will be rejected because the - value for Domain does not begin with a dot. - -4.3.3 Cookie Management - - If a user agent receives a Set-Cookie response header whose NAME is - the same as a pre-existing cookie, and whose Domain and Path - attribute values exactly (string) match those of a pre-existing - cookie, the new cookie supersedes the old. However, if the Set- - Cookie has a value for Max-Age of zero, the (old and new) cookie is - discarded. Otherwise cookies accumulate until they expire (resources - permitting), at which time they are discarded. 
- - Because user agents have finite space in which to store cookies, they - may also discard older cookies to make space for newer ones, using, - for example, a least-recently-used algorithm, along with constraints - on the maximum number of cookies that each origin server may set. - - If a Set-Cookie response header includes a Comment attribute, the - user agent should store that information in a human-readable form - with the cookie and should display the comment text as part of a - cookie inspection user interface. - - User agents should allow the user to control cookie destruction. An - infrequently-used cookie may function as a "preferences file" for - network applications, and a user may wish to keep it even if it is - the least-recently-used cookie. One possible implementation would be - an interface that allows the permanent storage of a cookie through a - checkbox (or, conversely, its immediate destruction). - - Privacy considerations dictate that the user have considerable - control over cookie management. The PRIVACY section contains more - information. - -4.3.4 Sending Cookies to the Origin Server - - When it sends a request to an origin server, the user agent sends a - Cookie request header to the origin server if it has cookies that are - applicable to the request, based on - - * the request-host; - - - - -Kristol & Montulli Standards Track [Page 8] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - * the request-URI; - - * the cookie's age. - - The syntax for the header is: - - cookie = "Cookie:" cookie-version - 1*((";" | ",") cookie-value) - cookie-value = NAME "=" VALUE [";" path] [";" domain] - cookie-version = "$Version" "=" value - NAME = attr - VALUE = value - path = "$Path" "=" value - domain = "$Domain" "=" value - - The value of the cookie-version attribute must be the value from the - Version attribute, if any, of the corresponding Set-Cookie response - header. Otherwise the value for cookie-version is 0. 
The value for - the path attribute must be the value from the Path attribute, if any, - of the corresponding Set-Cookie response header. Otherwise the - attribute should be omitted from the Cookie request header. The - value for the domain attribute must be the value from the Domain - attribute, if any, of the corresponding Set-Cookie response header. - Otherwise the attribute should be omitted from the Cookie request - header. - - Note that there is no Comment attribute in the Cookie request header - corresponding to the one in the Set-Cookie response header. The user - agent does not return the comment information to the origin server. - - The following rules apply to choosing applicable cookie-values from - among all the cookies the user agent has. - - Domain Selection - The origin server's fully-qualified host name must domain-match - the Domain attribute of the cookie. - - Path Selection - The Path attribute of the cookie must match a prefix of the - request-URI. - - Max-Age Selection - Cookies that have expired should have been discarded and thus - are not forwarded to an origin server. - - - - - - - -Kristol & Montulli Standards Track [Page 9] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - If multiple cookies satisfy the criteria above, they are ordered in - the Cookie header such that those with more specific Path attributes - precede those with less specific. Ordering with respect to other - attributes (e.g., Domain) is unspecified. - - Note: For backward compatibility, the separator in the Cookie header - is semi-colon (;) everywhere. A server should also accept comma (,) - as the separator between cookie-values for future compatibility. - -4.3.5 Sending Cookies in Unverifiable Transactions - - Users must have control over sessions in order to ensure privacy. - (See PRIVACY section below.) 
To simplify implementation and to - prevent an additional layer of complexity where adequate safeguards - exist, however, this document distinguishes between transactions that - are verifiable and those that are unverifiable. A transaction is - verifiable if the user has the option to review the request-URI prior - to its use in the transaction. A transaction is unverifiable if the - user does not have that option. Unverifiable transactions typically - arise when a user agent automatically requests inlined or embedded - entities or when it resolves redirection (3xx) responses from an - origin server. Typically the origin transaction, the transaction - that the user initiates, is verifiable, and that transaction may - directly or indirectly induce the user agent to make unverifiable - transactions. - - When it makes an unverifiable transaction, a user agent must enable a - session only if a cookie with a domain attribute D was sent or - received in its origin transaction, such that the host name in the - Request-URI of the unverifiable transaction domain-matches D. - - This restriction prevents a malicious service author from using - unverifiable transactions to induce a user agent to start or continue - a session with a server in a different domain. The starting or - continuation of such sessions could be contrary to the privacy - expectations of the user, and could also be a security problem. - - User agents may offer configurable options that allow the user agent, - or any autonomous programs that the user agent executes, to ignore - the above rule, so long as these override options default to "off". - - Many current user agents already provide a review option that would - render many links verifiable. For instance, some user agents display - the URL that would be referenced for a particular link when the mouse - pointer is placed over that link. The user can therefore determine - whether to visit that site before causing the browser to do so. 
- (Though not implemented on current user agents, a similar technique - could be used for a button used to submit a form -- the user agent - - - -Kristol & Montulli Standards Track [Page 10] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - could display the action to be taken if the user were to select that - button.) However, even this would not make all links verifiable; for - example, links to automatically loaded images would not normally be - subject to "mouse pointer" verification. - - Many user agents also provide the option for a user to view the HTML - source of a document, or to save the source to an external file where - it can be viewed by another application. While such an option does - provide a crude review mechanism, some users might not consider it - acceptable for this purpose. - -4.4 How an Origin Server Interprets the Cookie Header - - A user agent returns much of the information in the Set-Cookie header - to the origin server when the Path attribute matches that of a new - request. When it receives a Cookie header, the origin server should - treat cookies with NAMEs whose prefix is $ specially, as an attribute - for the adjacent cookie. The value for such a NAME is to be - interpreted as applying to the lexically (left-to-right) most recent - cookie whose name does not have the $ prefix. If there is no - previous cookie, the value applies to the cookie mechanism as a - whole. For example, consider the cookie - - Cookie: $Version="1"; Customer="WILE_E_COYOTE"; - $Path="/acme" - - $Version applies to the cookie mechanism as a whole (and gives the - version number for the cookie mechanism). $Path is an attribute - whose value (/acme) defines the Path attribute that was used when the - Customer cookie was defined in a Set-Cookie response header. - -4.5 Caching Proxy Role - - One reason for separating state information from both a URL and - document content is to facilitate the scaling that caching permits. 
- To support cookies, a caching proxy must obey these rules already in - the HTTP specification: - - * Honor requests from the cache, if possible, based on cache validity - rules. - - * Pass along a Cookie request header in any request that the proxy - must make of another server. - - * Return the response to the client. Include any Set-Cookie response - header. - - - - - -Kristol & Montulli Standards Track [Page 11] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - * Cache the received response subject to the control of the usual - headers, such as Expires, Cache-control: no-cache, and Cache- - control: private, - - * Cache the Set-Cookie subject to the control of the usual header, - Cache-control: no-cache="set-cookie". (The Set-Cookie header - should usually not be cached.) - - Proxies must not introduce Set-Cookie (Cookie) headers of their own - in proxy responses (requests). - -5. EXAMPLES - -5.1 Example 1 - - Most detail of request and response headers has been omitted. Assume - the user agent has no stored cookies. - - 1. User Agent -> Server - - POST /acme/login HTTP/1.1 - [form data] - - User identifies self via a form. - - 2. Server -> User Agent - - HTTP/1.1 200 OK - Set-Cookie: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme" - - Cookie reflects user's identity. - - 3. User Agent -> Server - - POST /acme/pickitem HTTP/1.1 - Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme" - [form data] - - User selects an item for "shopping basket." - - 4. Server -> User Agent - - HTTP/1.1 200 OK - Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1"; - Path="/acme" - - Shopping basket contains an item. - - - - -Kristol & Montulli Standards Track [Page 12] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - 5. 
User Agent -> Server - - POST /acme/shipping HTTP/1.1 - Cookie: $Version="1"; - Customer="WILE_E_COYOTE"; $Path="/acme"; - Part_Number="Rocket_Launcher_0001"; $Path="/acme" - [form data] - - User selects shipping method from form. - - 6. Server -> User Agent - - HTTP/1.1 200 OK - Set-Cookie: Shipping="FedEx"; Version="1"; Path="/acme" - - New cookie reflects shipping method. - - 7. User Agent -> Server - - POST /acme/process HTTP/1.1 - Cookie: $Version="1"; - Customer="WILE_E_COYOTE"; $Path="/acme"; - Part_Number="Rocket_Launcher_0001"; $Path="/acme"; - Shipping="FedEx"; $Path="/acme" - [form data] - - User chooses to process order. - - 8. Server -> User Agent - - HTTP/1.1 200 OK - - Transaction is complete. - - The user agent makes a series of requests on the origin server, after - each of which it receives a new cookie. All the cookies have the - same Path attribute and (default) domain. Because the request URLs - all have /acme as a prefix, and that matches the Path attribute, each - request contains all the cookies received so far. - -5.2 Example 2 - - This example illustrates the effect of the Path attribute. All - detail of request and response headers has been omitted. Assume the - user agent has no stored cookies. - - Imagine the user agent has received, in response to earlier requests, - the response headers - - - -Kristol & Montulli Standards Track [Page 13] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1"; - Path="/acme" - - and - - Set-Cookie: Part_Number="Riding_Rocket_0023"; Version="1"; - Path="/acme/ammo" - - A subsequent request by the user agent to the (same) server for URLs - of the form /acme/ammo/... 
would include the following request - header: - - Cookie: $Version="1"; - Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo"; - Part_Number="Rocket_Launcher_0001"; $Path="/acme" - - Note that the NAME=VALUE pair for the cookie with the more specific - Path attribute, /acme/ammo, comes before the one with the less - specific Path attribute, /acme. Further note that the same cookie - name appears more than once. - - A subsequent request by the user agent to the (same) server for a URL - of the form /acme/parts/ would include the following request header: - - Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme" - - Here, the second cookie's Path attribute /acme/ammo is not a prefix - of the request URL, /acme/parts/, so the cookie does not get - forwarded to the server. - -6. IMPLEMENTATION CONSIDERATIONS - - Here we speculate on likely or desirable details for an origin server - that implements state management. - -6.1 Set-Cookie Content - - An origin server's content should probably be divided into disjoint - application areas, some of which require the use of state - information. The application areas can be distinguished by their - request URLs. The Set-Cookie header can incorporate information - about the application areas by setting the Path attribute for each - one. - - The session information can obviously be clear or encoded text that - describes state. However, if it grows too large, it can become - unwieldy. Therefore, an implementor might choose for the session - information to be a key to a server-side resource. Of course, using - - - -Kristol & Montulli Standards Track [Page 14] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - a database creates some problems that this state management - specification was meant to avoid, namely: - - 1. keeping real state on the server side; - - 2. how and when to garbage-collect the database entry, in case the - user agent terminates the session by, for example, exiting. 
- -6.2 Stateless Pages - - Caching benefits the scalability of WWW. Therefore it is important - to reduce the number of documents that have state embedded in them - inherently. For example, if a shopping-basket-style application - always displays a user's current basket contents on each page, those - pages cannot be cached, because each user's basket's contents would - be different. On the other hand, if each page contains just a link - that allows the user to "Look at My Shopping Basket", the page can be - cached. - -6.3 Implementation Limits - - Practical user agent implementations have limits on the number and - size of cookies that they can store. In general, user agents' cookie - support should have no fixed limits. They should strive to store as - many frequently-used cookies as possible. Furthermore, general-use - user agents should provide each of the following minimum capabilities - individually, although not necessarily simultaneously: - - * at least 300 cookies - - * at least 4096 bytes per cookie (as measured by the size of the - characters that comprise the cookie non-terminal in the syntax - description of the Set-Cookie header) - - * at least 20 cookies per unique host or domain name - - User agents created for specific purposes or for limited-capacity - devices should provide at least 20 cookies of 4096 bytes, to ensure - that the user can interact with a session-based origin server. - - The information in a Set-Cookie response header must be retained in - its entirety. If for some reason there is inadequate space to store - the cookie, it must be discarded, not truncated. - - Applications should use as few and as small cookies as possible, and - they should cope gracefully with the loss of a cookie. 
- - - - - -Kristol & Montulli Standards Track [Page 15] - -RFC 2109 HTTP State Management Mechanism February 1997 - - -6.3.1 Denial of Service Attacks - - User agents may choose to set an upper bound on the number of cookies - to be stored from a given host or domain name or on the size of the - cookie information. Otherwise a malicious server could attempt to - flood a user agent with many cookies, or large cookies, on successive - responses, which would force out cookies the user agent had received - from other servers. However, the minima specified above should still - be supported. - -7. PRIVACY - -7.1 User Agent Control - - An origin server could create a Set-Cookie header to track the path - of a user through the server. Users may object to this behavior as - an intrusive accumulation of information, even if their identity is - not evident. (Identity might become evident if a user subsequently - fills out a form that contains identifying information.) This state - management specification therefore requires that a user agent give - the user control over such a possible intrusion, although the - interface through which the user is given this control is left - unspecified. However, the control mechanisms provided shall at least - allow the user - - * to completely disable the sending and saving of cookies. - - * to determine whether a stateful session is in progress. - - * to control the saving of a cookie on the basis of the cookie's - Domain attribute. - - Such control could be provided by, for example, mechanisms - - * to notify the user when the user agent is about to send a cookie - to the origin server, offering the option not to begin a session. - - * to display a visual indication that a stateful session is in - progress. - - * to let the user decide which cookies, if any, should be saved - when the user concludes a window or user agent session. - - * to let the user examine the contents of a cookie at any time. 
- - A user agent usually begins execution with no remembered state - information. It should be possible to configure a user agent never - to send Cookie headers, in which case it can never sustain state with - - - -Kristol & Montulli Standards Track [Page 16] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - an origin server. (The user agent would then behave like one that is - unaware of how to handle Set-Cookie response headers.) - - When the user agent terminates execution, it should let the user - discard all state information. Alternatively, the user agent may ask - the user whether state information should be retained; the default - should be "no". If the user chooses to retain state information, it - would be restored the next time the user agent runs. - - NOTE: User agents should probably be cautious about using files to - store cookies long-term. If a user runs more than one instance of - the user agent, the cookies could be commingled or otherwise messed - up. - -7.2 Protocol Design - - The restrictions on the value of the Domain attribute, and the rules - concerning unverifiable transactions, are meant to reduce the ways - that cookies can "leak" to the "wrong" site. The intent is to - restrict cookies to one, or a closely related set of hosts. - Therefore a request-host is limited as to what values it can set for - Domain. We consider it acceptable for hosts host1.foo.com and - host2.foo.com to share cookies, but not a.com and b.com. - - Similarly, a server can only set a Path for cookies that are related - to the request-URI. - -8. SECURITY CONSIDERATIONS - -8.1 Clear Text - - The information in the Set-Cookie and Cookie headers is unprotected. - Two consequences are: - - 1. Any sensitive information that is conveyed in them is exposed - to intruders. - - 2. A malicious intermediary could alter the headers as they travel - in either direction, with unpredictable results. 
- - These facts imply that information of a personal and/or financial - nature should only be sent over a secure channel. For less sensitive - information, or when the content of the header is a database key, an - origin server should be vigilant to prevent a bad Cookie value from - causing failures. - - - - - - -Kristol & Montulli Standards Track [Page 17] - -RFC 2109 HTTP State Management Mechanism February 1997 - - -8.2 Cookie Spoofing - - Proper application design can avoid spoofing attacks from related - domains. Consider: - - 1. User agent makes request to victim.cracker.edu, gets back - cookie session_id="1234" and sets the default domain - victim.cracker.edu. - - 2. User agent makes request to spoof.cracker.edu, gets back - cookie session-id="1111", with Domain=".cracker.edu". - - 3. User agent makes request to victim.cracker.edu again, and - passes - - Cookie: $Version="1"; - session_id="1234"; - session_id="1111"; $Domain=".cracker.edu" - - The server at victim.cracker.edu should detect that the second - cookie was not one it originated by noticing that the Domain - attribute is not for itself and ignore it. - -8.3 Unexpected Cookie Sharing - - A user agent should make every attempt to prevent the sharing of - session information between hosts that are in different domains. - Embedded or inlined objects may cause particularly severe privacy - problems if they can be used to share cookies between disparate - hosts. For example, a malicious server could embed cookie - information for host a.com in a URI for a CGI on host b.com. User - agent implementors are strongly encouraged to prevent this sort of - exchange whenever possible. - -9. OTHER, SIMILAR, PROPOSALS - - Three other proposals have been made to accomplish similar goals. - This specification is an amalgam of Kristol's State-Info proposal and - Netscape's Cookie proposal. 
- - Brian Behlendorf proposed a Session-ID header that would be user- - agent-initiated and could be used by an origin server to track - "clicktrails". It would not carry any origin-server-defined state, - however. Phillip Hallam-Baker has proposed another client-defined - session ID mechanism for similar purposes. - - - - - - -Kristol & Montulli Standards Track [Page 18] - -RFC 2109 HTTP State Management Mechanism February 1997 - - - While both session IDs and cookies can provide a way to sustain - stateful sessions, their intended purpose is different, and, - consequently, the privacy requirements for them are different. A - user initiates session IDs to allow servers to track progress through - them, or to distinguish multiple users on a shared machine. Cookies - are server-initiated, so the cookie mechanism described here gives - users control over something that would otherwise take place without - the users' awareness. Furthermore, cookies convey rich, server- - selected information, whereas session IDs comprise user-selected, - simple information. - -10. HISTORICAL - -10.1 Compatibility With Netscape's Implementation - - HTTP/1.0 clients and servers may use Set-Cookie and Cookie headers - that reflect Netscape's original cookie proposal. These notes cover - inter-operation between "old" and "new" cookies. - -10.1.1 Extended Cookie Header - - This proposal adds attribute-value pairs to the Cookie request header - in a compatible way. An "old" client that receives a "new" cookie - will ignore attributes it does not understand; it returns what it - does understand to the origin server. A "new" client always sends - cookies in the new form. - - An "old" server that receives a "new" cookie will see what it thinks - are many cookies with names that begin with a $, and it will ignore - them. (The "old" server expects these cookies to be separated by - semi-colon, not comma.) 
A "new" server can detect cookies that have - passed through an "old" client, because they lack a $Version - attribute. - -10.1.2 Expires and Max-Age - - Netscape's original proposal defined an Expires header that took a - date value in a fixed-length variant format in place of Max-Age: - - Wdy, DD-Mon-YY HH:MM:SS GMT - - Note that the Expires date format contains embedded spaces, and that - "old" cookies did not have quotes around values. Clients that - implement to this specification should be aware of "old" cookies and - Expires. - - - - - - -Kristol & Montulli Standards Track [Page 19] - -RFC 2109 HTTP State Management Mechanism February 1997 - - -10.1.3 Punctuation - - In Netscape's original proposal, the values in attribute-value pairs - did not accept "-quoted strings. Origin servers should be cautious - about sending values that require quotes unless they know the - receiving user agent understands them (i.e., "new" cookies). A - ("new") user agent should only use quotes around values in Cookie - headers when the cookie's version(s) is (are) all compliant with this - specification or later. - - In Netscape's original proposal, no whitespace was permitted around - the = that separates attribute-value pairs. Therefore such - whitespace should be used with caution in new implementations. - -10.2 Caching and HTTP/1.0 - - Some caches, such as those conforming to HTTP/1.0, will inevitably - cache the Set-Cookie header, because there was no mechanism to - suppress caching of headers prior to HTTP/1.1. This caching can lead - to security problems. Documents transmitted by an origin server - along with Set-Cookie headers will usually either be uncachable, or - will be "pre-expired". As long as caches obey instructions not to - cache documents (following Expires: or Pragma: - no-cache (HTTP/1.0), or Cache-control: no-cache (HTTP/1.1)) - uncachable documents present no problem. However, pre-expired - documents may be stored in caches. 
They require validation (a - conditional GET) on each new request, but some cache operators loosen - the rules for their caches, and sometimes serve expired documents - without first validating them. This combination of factors can lead - to cookies meant for one user later being sent to another user. The - Set-Cookie header is stored in the cache, and, although the document - is stale (expired), the cache returns the document in response to - later requests, including cached headers. - -11. ACKNOWLEDGEMENTS - - This document really represents the collective efforts of the - following people, in addition to the authors: Roy Fielding, Marc - Hedlund, Ted Hardie, Koen Holtman, Shel Kaphan, Rohit Khare. - - - - - - - - - - - - -Kristol & Montulli Standards Track [Page 20] - -RFC 2109 HTTP State Management Mechanism February 1997 - - -12. AUTHORS' ADDRESSES - - David M. Kristol - Bell Laboratories, Lucent Technologies - 600 Mountain Ave. Room 2A-227 - Murray Hill, NJ 07974 - - Phone: (908) 582-2250 - Fax: (908) 582-5809 - EMail: dmk@bell-labs.com - - - Lou Montulli - Netscape Communications Corp. - 501 E. Middlefield Rd. - Mountain View, CA 94043 - - Phone: (415) 528-2600 - EMail: montulli@netscape.com - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Kristol & Montulli Standards Track [Page 21] - diff --git a/docs/specs/rfc2145.txt b/docs/specs/rfc2145.txt deleted file mode 100644 index b6db4d5..0000000 --- a/docs/specs/rfc2145.txt +++ /dev/null @@ -1,395 +0,0 @@ - - - - - - -Network Working Group J. C. Mogul -Request for Comments: 2145 DEC -Category: Informational R. Fielding - UC Irvine - J. Gettys - DEC - H. Frystyk - MIT/LCS - May 1997 - - Use and Interpretation of - HTTP Version Numbers - -Status of this Memo - - This memo provides information for the Internet community. This memo - does not specify an Internet standard of any kind. Distribution of - this memo is unlimited. - - Distribution of this document is unlimited. 
Please send comments to - the HTTP working group at . Discussions - of the working group are archived at - . General discussions - about HTTP and the applications which use HTTP should take place on - the mailing list. - -Abstract - - HTTP request and response messages include an HTTP protocol version - number. Some confusion exists concerning the proper use and - interpretation of HTTP version numbers, and concerning - interoperability of HTTP implementations of different protocol - versions. This document is an attempt to clarify the situation. It - is not a modification of the intended meaning of the existing - HTTP/1.0 and HTTP/1.1 documents, but it does describe the intention - of the authors of those documents, and can be considered definitive - when there is any ambiguity in those documents concerning HTTP - version numbers, for all versions of HTTP. - - - - - - - - - - - - - -Mogul, et. al. Informational [Page 1] - -RFC 2145 HTTP Version Numbers May 1997 - - -TABLE OF CONTENTS - - 1 Introduction. . . . . . . . . . . . . . . . . . . . . . . . . . 2 - 1.1 Robustness Principle . . . . . . . . . . . . . . . . . . 3 - 2 HTTP version numbers. . . . . . . . . . . . . . . . . . . . . . 3 - 2.1 Proxy behavior. . . . . . . . . . . . . . . . . . . . . . . . 4 - 2.2 Compatibility between minor versions of the same major - version. . . . . . . . . . . . . . . . . . . . . . . . 4 - 2.3 Which version number to send in a message. . . . . . . . 5 - 3 Security Considerations . . . . . . . . . . . . . . . . . . . . 6 - 4 References. . . . . . . . . . . . . . . . . . . . . . . . . . . 6 - 5 Authors' addresses. . . . . . . . . . . . . . . . . . . . . . . 6 - -1 Introduction - - HTTP request and response messages include an HTTP protocol version - number. According to section 3.1 of the HTTP/1.1 specification [2], - - HTTP uses a "." numbering scheme to indicate - versions of the protocol. 
The protocol versioning policy is - intended to allow the sender to indicate the format of a message - and its capacity for understanding further HTTP communication, - rather than the features obtained via that communication. No - change is made to the version number for the addition of message - components which do not affect communication behavior or which - only add to extensible field values. The number is - incremented when the changes made to the protocol add features - which do not change the general message parsing algorithm, but - which may add to the message semantics and imply additional - capabilities of the sender. The number is incremented when - the format of a message within the protocol is changed. - - The same language appears in the description of HTTP/1.0 [1]. - - Many readers of these documents have expressed some confusion about - the intended meaning of this policy. Also, some people who wrote - HTTP implementations before RFC1945 [1] was issued were not aware of - the intentions behind the introduction of version numbers in - HTTP/1.0. This has led to debate and inconsistency regarding the use - and interpretation of HTTP version numbers, and has led to - interoperability problems in certain cases. - - - - - - - - - - -Mogul, et. al. Informational [Page 2] - -RFC 2145 HTTP Version Numbers May 1997 - - - This document is an attempt to clarify the situation. It is not a - modification of the intended meaning of the existing HTTP/1.0 and - HTTP/1.1 documents, but it does describe the intention of the authors - of those documents. In any case where either of those two documents - is ambiguous regarding the use and interpretation of HTTP version - numbers, this document should be considered the definitive as to the - intentions of the designers of HTTP. - - The specification described in this document is not part of the - specification of any individual version of HTTP, such as HTTP/1.0 or - HTTP/1.1. 
Rather, this document describes the use of HTTP version - numbers in any version of HTTP (except for HTTP/0.9, which did not - include version numbers). - - No vendor or other provider of an HTTP implementation should claim - any compliance with any IETF HTTP specification unless the - implementation conditionally complies with the rules in this - document. - -1.1 Robustness Principle - - RFC791 [4] defines the "robustness principle" in section 3.2: - - an implementation must be conservative in its sending - behavior, and liberal in its receiving behavior. - - This principle applies to HTTP, as well. It is the fundamental basis - for interpreting any part of the HTTP specification that might still - be ambiguous. In particular, implementations of HTTP SHOULD NOT - reject messages or generate errors unnecessarily. - -2 HTTP version numbers - - We start by restating the language quoted above from section 3.1 of - the HTTP/1.1 specification [2]: - - It is, and has always been, the explicit intent of the - HTTP specification that the interpretation of an HTTP message - header does not change between minor versions of the same major - version. - - It is, and has always been, the explicit intent of the - HTTP specification that an implementation receiving a message - header that it does not understand MUST ignore that header. (The - word "ignore" has a special meaning for proxies; see section 2.1 - below.) - - - - - -Mogul, et. al. Informational [Page 3] - -RFC 2145 HTTP Version Numbers May 1997 - - - To make this as clear as possible: The major version sent in a - message MAY indicate the interpretation of other header fields. The - minor version sent in a message MUST NOT indicate the interpretation - of other header fields. This reflects the principle that the minor - version labels the capability of the sender, not the interpretation - of the message. 
- - Note: In a future version of HTTP, we may introduce a mechanism - that explicitly requires a receiving implementation to reject a - message if it does not understand certain headers. For example, - this might be implemented by means of a header that lists a set of - other message headers that must be understood by the recipient. - Any implementation claiming at least conditional compliance with - this future version of HTTP would have to implement this - mechanism. However, no implementation claiming compliance with a - lower HTTP version (in particular, HTTP/1.1) will have to - implement this mechanism. - - This future change may be required to support the Protocol - Extension Protocol (PEP) [3]. - - One consequence of these rules is that an HTTP/1.1 message sent to an - HTTP/1.0 recipient (or a recipient whose version is unknown) MUST be - constructed so that it remains a valid HTTP/1.0 message when all - headers not defined in the HTTP/1.0 specification [1] are removed. - -2.1 Proxy behavior - - A proxy MUST forward an unknown header, unless it is protected by a - Connection header. A proxy implementing an HTTP version >= 1.1 MUST - NOT forward unknown headers that are protected by a Connection - header, as described in section 14.10 of the HTTP/1.1 specification - [2]. - - We remind the reader that that HTTP version numbers are hop-by-hop - components of HTTP messages, and are not end-to-end. That is, an - HTTP proxy never "forwards" an HTTP version number in either a - request or response. - -2.2 Compatibility between minor versions of the same major version - - An implementation of HTTP/x.b sending a message to a recipient whose - version is known to be HTTP/x.a, a < b, MAY send a header that is not - defined in the specification for HTTP/x.a. For example, an HTTP/1.1 - server may send a "Cache-control" header to an HTTP/1.0 client; this - may be useful if the immediate recipient is an HTTP/1.0 proxy, but - the ultimate recipient is an HTTP/1.1 client. 
- - - - -Mogul, et. al. Informational [Page 4] - -RFC 2145 HTTP Version Numbers May 1997 - - - An implementation of HTTP/x.b sending a message to a recipient whose - version is known to be HTTP/x.a, a < b, MUST NOT depend on the - recipient understanding a header not defined in the specification for - HTTP/x.a. For example, HTTP/1.0 clients cannot be expected to - understand chunked encodings, and so an HTTP/1.1 server must never - send "Transfer-Encoding: chunked" in response to an HTTP/1.0 request. - -2.3 Which version number to send in a message - - The most strenuous debate over the use of HTTP version numbers has - centered on the problem of implementations that do not follow the - robustness principle, and which fail to produce useful results when - they receive a message with an HTTP minor version higher than the - minor version they implement. We consider these implementations - buggy, but we recognize that the robustness principle also implies - that message senders should make concessions to buggy implementations - when this is truly necessary for interoperation. - - An HTTP client SHOULD send a request version equal to the highest - version for which the client is at least conditionally compliant, and - whose major version is no higher than the highest version supported - by the server, if this is known. An HTTP client MUST NOT send a - version for which it is not at least conditionally compliant. - - An HTTP client MAY send a lower request version, if it is known that - the server incorrectly implements the HTTP specification, but only - after the client has determined that the server is actually buggy. - - An HTTP server SHOULD send a response version equal to the highest - version for which the server is at least conditionally compliant, and - whose major version is less than or equal to the one received in the - request. An HTTP server MUST NOT send a version for which it is not - at least conditionally compliant. 
A server MAY send a 505 (HTTP - Version Not Supported) response if cannot send a response using the - major version used in the client's request. - - An HTTP server MAY send a lower response version, if it is known or - suspected that the client incorrectly implements the HTTP - specification, but this should not be the default, and this SHOULD - NOT be done if the request version is HTTP/1.1 or greater. - - - - - - - - - - - -Mogul, et. al. Informational [Page 5] - -RFC 2145 HTTP Version Numbers May 1997 - - -3 Security Considerations - - None, except to the extent that security mechanisms introduced in one - version of HTTP might depend on the proper interpretation of HTTP - version numbers in older implementations. - -4 References - - 1. Berners-Lee, T., R. Fielding, and H. Frystyk. Hypertext - Transfer Protocol -- HTTP/1.0. RFC 1945, HTTP Working Group, May, - 1996. - - 2. Fielding, Roy T., Jim Gettys, Jeffrey C. Mogul, Henrik Frystyk - Nielsen, and Tim Berners-Lee. Hypertext Transfer Protocol -- - HTTP/1.1. RFC 2068, HTTP Working Group, January, 1997. - - 3. Khare, Rohit. HTTP/1.2 Extension Protocol (PEP). HTTP Working - Group, Work in Progress. - - 4. Postel, Jon. Internet Protocol. RFC 791, NIC, September, 1981. - -5 Authors' addresses - - Jeffrey C. Mogul - Western Research Laboratory - Digital Equipment Corporation - 250 University Avenue - Palo Alto, California, 94305, USA - Email: mogul@wrl.dec.com - - Roy T. Fielding - Department of Information and Computer Science - University of California - Irvine, CA 92717-3425, USA - Fax: +1 (714) 824-4056 - Email: fielding@ics.uci.edu - - Jim Gettys - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - Fax: +1 (617) 258 8682 - Email: jg@w3.org - - - - - - - - -Mogul, et. al. 
Informational [Page 6] - -RFC 2145 HTTP Version Numbers May 1997 - - - Henrik Frystyk Nielsen - W3 Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - Fax: +1 (617) 258 8682 - Email: frystyk@w3.org - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Mogul, et. al. Informational [Page 7] - diff --git a/docs/specs/rfc2324.txt b/docs/specs/rfc2324.txt deleted file mode 100644 index a85921a..0000000 --- a/docs/specs/rfc2324.txt +++ /dev/null @@ -1,563 +0,0 @@ - - - - - - -Network Working Group L. Masinter -Request for Comments: 2324 1 April 1998 -Category: Informational - - - Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0) - -Status of this Memo - - This memo provides information for the Internet community. It does - not specify an Internet standard of any kind. Distribution of this - memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (1998). All Rights Reserved. - -Abstract - - This document describes HTCPCP, a protocol for controlling, - monitoring, and diagnosing coffee pots. - -1. Rationale and Scope - - There is coffee all over the world. Increasingly, in a world in which - computing is ubiquitous, the computists want to make coffee. Coffee - brewing is an art, but the distributed intelligence of the web- - connected world transcends art. Thus, there is a strong, dark, rich - requirement for a protocol designed espressoly for the brewing of - coffee. Coffee is brewed using coffee pots. Networked coffee pots - require a control protocol if they are to be controlled. - - Increasingly, home and consumer devices are being connected to the - Internet. Early networking experiments demonstrated vending devices - connected to the Internet for status monitoring [COKE]. One of the - first remotely _operated_ machine to be hooked up to the Internet, - the Internet Toaster, (controlled via SNMP) was debuted in 1990 - [RFC2235]. 
- - The demand for ubiquitous appliance connectivity that is causing the - consumption of the IPv4 address space. Consumers want remote control - of devices such as coffee pots so that they may wake up to freshly - brewed coffee, or cause coffee to be prepared at a precise time after - the completion of dinner preparations. - - - - - - - -Masinter Informational [Page 1] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - - This document specifies a Hyper Text Coffee Pot Control Protocol - (HTCPCP), which permits the full request and responses necessary to - control all devices capable of making the popular caffeinated hot - beverages. - - HTTP 1.1 ([RFC2068]) permits the transfer of web objects from origin - servers to clients. The web is world-wide. HTCPCP is based on HTTP. - This is because HTTP is everywhere. It could not be so pervasive - without being good. Therefore, HTTP is good. If you want good coffee, - HTCPCP needs to be good. To make HTCPCP good, it is good to base - HTCPCP on HTTP. - - Future versions of this protocol may include extensions for espresso - machines and similar devices. - -2. HTCPCP Protocol - - The HTCPCP protocol is built on top of HTTP, with the addition of a - few new methods, header fields and return codes. All HTCPCP servers - should be referred to with the "coffee:" URI scheme (Section 4). - -2.1 HTCPCP Added Methods - -2.1.1 The BREW method, and the use of POST - - Commands to control a coffee pot are sent from client to coffee - server using either the BREW or POST method, and a message body with - Content-Type set to "application/coffee-pot-command". - - A coffee pot server MUST accept both the BREW and POST method - equivalently. However, the use of POST for causing actions to happen - is deprecated. - - Coffee pots heat water using electronic mechanisms, so there is no - fire. Thus, no firewalls are necessary, and firewall control policy - is irrelevant. 
However, POST may be a trademark for coffee, and so - the BREW method has been added. The BREW method may be used with - other HTTP-based protocols (e.g., the Hyper Text Brewery Control - Protocol). - -2.1.2 GET method - - In HTTP, the GET method is used to mean "retrieve whatever - information (in the form of an entity) identified by the Request- - URI." If the Request-URI refers to a data-producing process, it is - the produced data which shall be returned as the entity in the - response and not the source text of the process, unless that text - happens to be the output of the process. - - - -Masinter Informational [Page 2] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - - In HTCPCP, the resources associated with a coffee pot are physical, - and not information resources. The "data" for most coffee URIs - contain no caffeine. - -2.1.3 PROPFIND method - - If a cup of coffee is data, metadata about the brewed resource is - discovered using the PROPFIND method [WEBDAV]. - -2.1.4 WHEN method - - When coffee is poured, and milk is offered, it is necessary for the - holder of the recipient of milk to say "when" at the time when - sufficient milk has been introduced into the coffee. For this - purpose, the "WHEN" method has been added to HTCPCP. Enough? Say - WHEN. - -2.2 Coffee Pot Header fields - - HTCPCP recommends several HTTP header fields and defines some new - ones. - -2.2.1 Recommended header fields - -2.2.1.1 The "safe" response header field. - - [SAFE] defines a HTTP response header field, "Safe", which can be - used to indicate that repeating a HTTP request is safe. The inclusion - of a "Safe: Yes" header field allows a client to repeat a previous - request if the result of the request might be repeated. - - The actual safety of devices for brewing coffee varies widely, and - may depend, in fact, on conditions in the client rather than just in - the server. 
Thus, this protocol includes an extension to the "Safe" - response header: - - Safe = "Safe" ":" safe-nature - safe-nature = "yes" | "no" | conditionally-safe - conditionally-safe = "if-" safe-condition - safe-condition = "user-awake" | token - - indication will allow user agents to handle retries of some safe - requests, in particular safe POST requests, in a more user-friendly - way. - - - - - - - -Masinter Informational [Page 3] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - -2.2.2 New header fields - -2.2.2.1 The Accept-Additions header field - - In HTTP, the "Accept" request-header field is used to specify media - types which are acceptable for the response. However, in HTCPCP, the - response may result in additional actions on the part of the - automated pot. For this reason, HTCPCP adds a new header field, - "Accept-Additions": - - - Accept-Additions = "Accept-Additions" ":" - #( addition-range [ accept-params ] ) - - addition-type = ( "*" - | milk-type - | syrup-type - | sweetener-type - | spice-type - | alcohol-type - ) *( ";" parameter ) - milk-type = ( "Cream" | "Half-and-half" | "Whole-milk" - | "Part-Skim" | "Skim" | "Non-Dairy" ) - syrup-type = ( "Vanilla" | "Almond" | "Raspberry" - | "Chocolate" ) - alcohol-type = ( "Whisky" | "Rum" | "Kahlua" | "Aquavit" ) - -2.2.3 Omitted Header Fields - - No options were given for decaffeinated coffee. What's the point? - -2.3 HTCPCP return codes - - Normal HTTP return codes are used to indicate difficulties of the - HTCPCP server. This section identifies special interpretations and - new return codes. - -2.3.1 406 Not Acceptable - - This return code is normally interpreted as "The resource identified - by the request is only capable of generating response entities which - have content characteristics not acceptable according to the accept - headers sent in the request. In HTCPCP, this response code MAY be - returned if the operator of the coffee pot cannot comply with the - Accept-Addition request. 
Unless the request was a HEAD request, the - response SHOULD include an entity containing a list of available - coffee additions. - - - - -Masinter Informational [Page 4] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - - In practice, most automated coffee pots cannot currently provide - additions. - -2.3.2 418 I'm a teapot - - Any attempt to brew coffee with a teapot should result in the error - code "418 I'm a teapot". The resulting entity body MAY be short and - stout. - -3. The "coffee" URI scheme - - Because coffee is international, there are international coffee URI - schemes. All coffee URL schemes are written with URL encoding of the - UTF-8 encoding of the characters that spell the word for "coffee" in - any of 29 languages, following the conventions for - internationalization in URIs [URLI18N]. - -coffee-url = coffee-scheme ":" [ "//" host ] - ["/" pot-designator ] ["?" additions-list ] - -coffee-scheme = ( "koffie" ; Afrikaans, Dutch - | "q%C3%A6hv%C3%A6" ; Azerbaijani - | "%D9%82%D9%87%D9%88%D8%A9" ; Arabic - | "akeita" ; Basque - | "koffee" ; Bengali - | "kahva" ; Bosnian - | "kafe" ; Bulgarian, Czech - | "caf%C3%E8" ; Catalan, French, Galician - | "%E5%92%96%E5%95%A1" ; Chinese - | "kava" ; Croatian - | "k%C3%A1va ; Czech - | "kaffe" ; Danish, Norwegian, Swedish - | "coffee" ; English - | "kafo" ; Esperanto - | "kohv" ; Estonian - | "kahvi" ; Finnish - | "%4Baffee" ; German - | "%CE%BA%CE%B1%CF%86%CE%AD" ; Greek - | "%E0%A4%95%E0%A5%8C%E0%A4%AB%E0%A5%80" ; Hindi - | "%E3%82%B3%E3%83%BC%E3%83%92%E3%83%BC" ; Japanese - | "%EC%BB%A4%ED%94%BC" ; Korean - | "%D0%BA%D0%BE%D1%84%D0%B5" ; Russian - | "%E0%B8%81%E0%B8%B2%E0%B9%81%E0%B8%9F" ; Thai - ) - - pot-designator = "pot-" integer ; for machines with multiple pots - additions-list = #( addition ) - - - - -Masinter Informational [Page 5] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - - All alternative coffee-scheme forms are equivalent. 
However, the use - of coffee-scheme in various languages MAY be interpreted as an - indication of the kind of coffee produced by the coffee pot. Note - that while URL scheme names are case-independent, capitalization is - important for German and thus the initial "K" must be encoded. - -4. The "message/coffeepot" media type - - The entity body of a POST or BREW request MUST be of Content-Type - "message/coffeepot". Since most of the information for controlling - the coffee pot is conveyed by the additional headers, the content of - "message/coffeepot" contains only a coffee-message-body: - - coffee-message-body = "start" | "stop" - -5. Operational constraints - - This section lays out some of the operational issues with deployment - of HTCPCP ubiquitously. - -5.1 Timing Considerations - - A robust quality of service is required between the coffee pot user - and the coffee pot service. Coffee pots SHOULD use the Network Time - Protocol [NTP] to synchronize their clocks to a globally accurate - time standard. - - Telerobotics has been an expensive technology. However, with the - advent of the Cambridge Coffee Pot [CAM], the use of the web (rather - than SNMP) for remote system monitoring and management has been - proven. Additional coffee pot maintenance tasks might be - accomplished by remote robotics. - - Web data is normally static. Therefore to save data transmission and - time, Web browser programs store each Web page retrieved by a user on - the user's computer. Thus, if the user wants to return to that page, - it is now stored locally and does not need to be requested again from - the server. An image used for robot control or for monitoring a - changing scene is dynamic. A fresh version needs to be retrieved from - the server each time it is accessed. - -5.2 Crossing firewalls - - In most organizations HTTP traffic crosses firewalls fairly easily. - Modern coffee pots do not use fire. 
However, a "firewall" is useful - for protection of any source from any manner of heat, and not just - fire. Every home computer network SHOULD be protected by a firewall - from sources of heat. However, remote control of coffee pots is - - - -Masinter Informational [Page 6] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - - important from outside the home. Thus, it is important that HTCPCP - cross firewalls easily. - - By basing HTCPCP on HTTP and using port 80, it will get all of HTTP's - firewall-crossing virtues. Of course, the home firewalls will require - reconfiguration or new versions in order to accommodate HTCPCP- - specific methods, headers and trailers, but such upgrades will be - easily accommodated. Most home network system administrators drink - coffee, and are willing to accommodate the needs of tunnelling - HTCPCP. - -6. System management considerations - - Coffee pot monitoring using HTTP protocols has been an early - application of the web. In the earliest instance, coffee pot - monitoring was an early (and appropriate) use of ATM networks [CAM]. - - The traditional technique [CAM] was to attach a frame-grabber to a - video camera, and feed the images to a web server. This was an - appropriate application of ATM networks. In this coffee pot - installation, the Trojan Room of Cambridge University laboratories - was used to give a web interface to monitor a common coffee pot. of - us involved in related research and, being poor, impoverished - academics, we only had one coffee filter machine between us, which - lived in the corridor just outside the Trojan Room. However, being - highly dedicated and hard-working academics, we got through a lot of - coffee, and when a fresh pot was brewed, it often didn't last long. - - This service was created as the first application to use a new RPC - mechanism designed in the Cambridge Computer Laboratory - MSRPC2. It - runs over MSNL (Multi-Service Network Layer) - a network layer - protocol designed for ATM networks. 
- - Coffee pots on the Internet may be managed using the Coffee Pot MIB - [CPMIB]. - -7. Security Considerations - - Anyone who gets in between me and my morning coffee should be - insecure. - - Unmoderated access to unprotected coffee pots from Internet users - might lead to several kinds of "denial of coffee service" attacks. - The improper use of filtration devices might admit trojan grounds. - Filtration is not a good virus protection method. - - - - - - -Masinter Informational [Page 7] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - - Putting coffee grounds into Internet plumbing may result in clogged - plumbing, which would entail the services of an Internet Plumber - [PLUMB], who would, in turn, require an Internet Plumber's Helper. - - Access authentication will be discussed in a separate memo. - -8. Acknowledgements - - Many thanks to the many contributors to this standard, including Roy - Fielding, Mark Day, Keith Moore, Carl Uno-Manros, Michael Slavitch, - and Martin Duerst. The inspiration of the Prancing Pony, the CMU - Coke Machine, the Cambridge Coffee Pot, the Internet Toaster, and - other computer controlled remote devices have led to this valuable - creation. - -9. References - - [RFC2068] Fielding, R., Gettys, J., Mogul, J., Frystyk, H., and T. - Berners-Lee, "Hypertext Transfer Protocol -- HTTP/1.1", RFC 2068, - January 1997. - - [RFC2186] Wessels, D., and K. Claffy, "Internet Cache Protocol (ICP), - version 2," RFC 2186, September 1997 - - [CPMIB] Slavitch, M., "Definitions of Managed Objects for Drip-Type - Heated Beverage Hardware Devices using SMIv2", RFC 2325, 1 April - 1998. - - [HTSVMP] Q. Stafford-Fraser, "Hyper Text Sandwich Van Monitoring - Protocol, Version 3.2". In preparation. - - [RFC2295] Holtman, K., and A. Mutz, "Transparent Content Negotiation - in HTTP", RFC 2295, March 1998. - - [SAFE] K. Holtman. "The Safe Response Header Field", September 1997. - - [CAM] "The Trojan Room Coffee Machine", D. Gordon and M. 
Johnson, - University of Cambridge Computer Lab, - - - [CBIO] "The Trojan Room Coffee Pot, a (non-technical) biography", Q. - Stafford-Fraser, University of Cambridge Computer Lab, - . - - [RFC2235] Zakon, R., "Hobbes' Internet Timeline", FYI 32, RFC 2230, - November 1997. See also - - - - - -Masinter Informational [Page 8] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - - [NTP] Mills, D., "Network Time Protocol (Version 3) Specification, - Implementation and Analysis", RFC 1305, March 1992. - - [URLI18N] Masinter, L., "Using UTF8 for non-ASCII Characters in - Extended URIs" Work in Progress. - - [PLUMB] B. Metcalfe, "Internet Plumber of the Year: Jim Gettys", - Infoworld, February 2, 1998. - - [COKE] D. Nichols, "Coke machine history", C. Everhart, "Interesting - uses of networking", . - -10. Author's Address - - Larry Masinter - Xerox Palo Alto Research Center - 3333 Coyote Hill Road - Palo Alto, CA 94304 - - EMail: masinter@parc.xerox.com - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Masinter Informational [Page 9] - -RFC 2324 HTCPCP/1.0 1 April 1998 - - -11. Full Copyright Statement - - Copyright (C) The Internet Society (1998). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. 
However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - - - - - - - - - - - - - - - - - - - - - - - - -Masinter Informational [Page 10] - diff --git a/docs/specs/rfc2388.txt b/docs/specs/rfc2388.txt deleted file mode 100644 index ffb9b6c..0000000 --- a/docs/specs/rfc2388.txt +++ /dev/null @@ -1,507 +0,0 @@ - - - - - - -Network Working Group L. Masinter -Request for Comments: 2388 Xerox Corporation -Category: Standards Track August 1998 - - - Returning Values from Forms: multipart/form-data - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (1998). All Rights Reserved. - -1. 
Abstract - - This specification defines an Internet Media Type, multipart/form- - data, which can be used by a wide variety of applications and - transported by a wide variety of protocols as a way of returning a - set of values as the result of a user filling out a form. - -2. Introduction - - In many applications, it is possible for a user to be presented with - a form. The user will fill out the form, including information that - is typed, generated by user input, or included from files that the - user has selected. When the form is filled out, the data from the - form is sent from the user to the receiving application. - - The definition of MultiPart/Form-Data is derived from one of those - applications, originally set out in [RFC1867] and subsequently - incorporated into [HTML40], where forms are expressed in HTML, and in - which the form values are sent via HTTP or electronic mail. This - representation is widely implemented in numerous web browsers and web - servers. - - However, multipart/form-data can be used for forms that are presented - using representations other than HTML (spreadsheets, Portable - Document Format, etc), and for transport using other means than - electronic mail or HTTP. This document defines the representation of - form values independently of the application for which it is used. - - - - - -Masinter Standards Track [Page 1] - -RFC 2388 multipart/form-data August 1998 - - -3. Definition of multipart/form-data - - The media-type multipart/form-data follows the rules of all multipart - MIME data streams as outlined in [RFC 2046]. In forms, there are a - series of fields to be supplied by the user who fills out the form. - Each field has a name. Within a given form, the names are unique. - - "multipart/form-data" contains a series of parts. 
Each part is - expected to contain a content-disposition header [RFC 2183] where the - disposition type is "form-data", and where the disposition contains - an (additional) parameter of "name", where the value of that - parameter is the original field name in the form. For example, a part - might contain a header: - - Content-Disposition: form-data; name="user" - - with the value corresponding to the entry of the "user" field. - - Field names originally in non-ASCII character sets may be encoded - within the value of the "name" parameter using the standard method - described in RFC 2047. - - As with all multipart MIME types, each part has an optional - "Content-Type", which defaults to text/plain. If the contents of a - file are returned via filling out a form, then the file input is - identified as the appropriate media type, if known, or - "application/octet-stream". If multiple files are to be returned as - the result of a single form entry, they should be represented as a - "multipart/mixed" part embedded within the "multipart/form-data". - - Each part may be encoded and the "content-transfer-encoding" header - supplied if the value of that part does not conform to the default - encoding. - -4. Use of multipart/form-data - -4.1 Boundary - - As with other multipart types, a boundary is selected that does not - occur in any of the data. Each field of the form is sent, in the - order defined by the sending appliction and form, as a part of the - multipart stream. Each part identifies the INPUT name within the - original form. Each part should be labelled with an appropriate - content-type if the media type is known (e.g., inferred from the file - extension or operating system typing information) or as - "application/octet-stream". 
- - - - - -Masinter Standards Track [Page 2] - -RFC 2388 multipart/form-data August 1998 - - -4.2 Sets of files - - If the value of a form field is a set of files rather than a single - file, that value can be transferred together using the - "multipart/mixed" format. - -4.3 Encoding - - While the HTTP protocol can transport arbitrary binary data, the - default for mail transport is the 7BIT encoding. The value supplied - for a part may need to be encoded and the "content-transfer-encoding" - header supplied if the value does not conform to the default - encoding. [See section 5 of RFC 2046 for more details.] - -4.4 Other attributes - - Forms may request file inputs from the user; the form software may - include the file name and other file attributes, as specified in [RFC - 2184]. - - The original local file name may be supplied as well, either as a - "filename" parameter either of the "content-disposition: form-data" - header or, in the case of multiple files, in a "content-disposition: - file" header of the subpart. The sending application MAY supply a - file name; if the file name of the sender's operating system is not - in US-ASCII, the file name might be approximated, or encoded using - the method of RFC 2231. - - This is a convenience for those cases where the files supplied by the - form might contain references to each other, e.g., a TeX file and its - .sty auxiliary style description. - -4.5 Charset of text in form data - - Each part of a multipart/form-data is supposed to have a content- - type. In the case where a field element is text, the charset - parameter for the text indicates the character encoding used. 
- - For example, a form with a text field in which a user typed 'Joe owes - 100' where is the Euro symbol might have form data returned - as: - - --AaB03x - content-disposition: form-data; name="field1" - content-type: text/plain;charset=windows-1250 - content-transfer-encoding: quoted-printable - - - - - -Masinter Standards Track [Page 3] - -RFC 2388 multipart/form-data August 1998 - - - Joe owes =80100. - --AaB03x - -5. Operability considerations - -5.1 Compression, encryption - - Some of the data in forms may be compressed or encrypted, using other - MIME mechanisms. This is a function of the application that is - generating the form-data. - -5.2 Other data encodings rather than multipart - - Various people have suggested using new mime top-level type - "aggregate", e.g., aggregate/mixed or a content-transfer-encoding of - "packet" to express indeterminate-length binary data, rather than - relying on the multipart-style boundaries. While this would be - useful, the "multipart" mechanisms are well established, simple to - implement on both the sending client and receiving server, and as - efficient as other methods of dealing with multiple combinations of - binary data. - - The multipart/form-data encoding has a high overhead and performance - impact if there are many fields with short values. However, in - practice, for the forms in use, for example, in HTML, the average - overhead is not significant. - -5.3 Remote files with third-party transfer - - In some scenarios, the user operating the form software might want to - specify a URL for remote data rather than a local file. In this case, - is there a way to allow the browser to send to the client a pointer - to the external data rather than the entire contents? This capability - could be implemented, for example, by having the client send to the - server data of type "message/external-body" with "access-type" set - to, say, "uri", and the URL of the remote data in the body of the - message. 
- -5.4 Non-ASCII field names - - Note that MIME headers are generally required to consist only of 7- - bit data in the US-ASCII character set. Hence field names should be - encoded according to the method in RFC 2047 if they contain - characters outside of that set. - - - - - - - -Masinter Standards Track [Page 4] - -RFC 2388 multipart/form-data August 1998 - - -5.5 Ordered fields and duplicated field names - - The relationship of the ordering of fields within a form and the - ordering of returned values within "multipart/form-data" is not - defined by this specification, nor is the handling of the case where - a form has multiple fields with the same name. While HTML-based forms - may send back results in the order received, and intermediaries - should not reorder the results, there are some systems which might - not define a natural order for form fields. - -5.6 Interoperability with web applications - - Many web applications use the "application/x-url-encoded" method for - returning data from forms. This format is quite compact, e.g.: - - name=Xavier+Xantico&verdict=Yes&colour=Blue&happy=sad&Utf%F6r=Send - - however, there is no opportunity to label the enclosed data with - content type, apply a charset, or use other encoding mechanisms. - - Many form-interpreting programs (primarly web browsers) now implement - and generate multipart/form-data, but an existing application might - need to optionally support both the application/x-url-encoded format - as well. - -5.7 Correlating form data with the original form - - This specification provides no specific mechanism by which - multipart/form-data can be associated with the form that caused it to - be transmitted. This separation is intentional; many different forms - might be used for transmitting the same data. In practice, - applications may supply a specific form processing resource (in HTML, - the ACTION attribute in a FORM tag) for each different form. 
- Alternatively, data about the form might be encoded in a "hidden - field" (a field which is part of the form but which has a fixed value - to be transmitted back to the form-data processor.) - -6. Security Considerations - - The data format described in this document introduces no new security - considerations outside of those introduced by the protocols that use - it and of the component elements. It is important when interpreting - content-disposition to not overwrite files in the recipients address - space inadvertently. - - User applications that request form information from users must be - careful not to cause a user to send information to the requestor or a - third party unwillingly or unwittingly. For example, a form might - - - -Masinter Standards Track [Page 5] - -RFC 2388 multipart/form-data August 1998 - - - request 'spam' information to be sent to an unintended third party, - or private information to be sent to someone that the user might not - actually intend. While this is primarily an issue for the - representation and interpretation of forms themselves, rather than - the data representation of the result of form transmission, the - transportation of private information must be done in a way that does - not expose it to unwanted prying. - - With the introduction of form-data that can reasonably send back the - content of files from user's file space, the possibility that a user - might be sent an automated script that fills out a form and then - sends the user's local file to another address arises. Thus, - additional caution is required when executing automated scripting - where form-data might include user's files. - -7. Author's Address - - Larry Masinter - Xerox Palo Alto Research Center - 3333 Coyote Hill Road - Palo Alto, CA 94304 - - Fax: +1 650 812 4333 - EMail: masinter@parc.xerox.com - - - - - - - - - - - - - - - - - - - - - - - - - - - -Masinter Standards Track [Page 6] - -RFC 2388 multipart/form-data August 1998 - - -Appendix A. 
Media type registration for multipart/form-data - - Media Type name: - multipart - - Media subtype name: - form-data - - Required parameters: - none - - Optional parameters: - none - - Encoding considerations: - No additional considerations other than as for other multipart - types. - - Security Considerations - Applications which receive forms and process them must be careful - not to supply data back to the requesting form processing site that - was not intended to be sent by the recipient. This is a - consideration for any application that generates a multipart/form- - data. - - The multipart/form-data type introduces no new security - considerations for recipients beyond what might occur with any of - the enclosed parts. - - - - - - - - - - - - - - - - - - - - - - - -Masinter Standards Track [Page 7] - -RFC 2388 multipart/form-data August 1998 - - -References - - [RFC 2046] Freed, N., and N. Borenstein, "Multipurpose Internet Mail - Extensions (MIME) Part Two: Media Types", RFC 2046, - November 1996. - - [RFC 2047] Moore, K., "MIME (Multipurpose Internet Mail Extensions) - Part Three: Message Header Extensions for Non-ASCII Text", - RFC 2047, November 1996. - - [RFC 2231] Freed, N., and K. Moore, "MIME Parameter Value and Encoded - Word Extensions: Character Sets, Languages, and - Continuations", RFC 2231, November 1997. - - [RFC 1806] Troost, R., and S. Dorner, "Communicating Presentation - Information in Internet Messages: The Content-Disposition - Header", RFC 1806, June 1995. - - [RFC 1867] Nebel, E., and L. Masinter, "Form-based File Upload in - HTML", RFC 1867, November 1995. - - [RFC 2183] Troost, R., Dorner, S., and K. Moore, "Communicating - Presentation Information in Internet Messages: The - Content-Disposition Header Field", RFC 2183, August 1997. - - [RFC 2184] Freed, N., and K. Moore, "MIME Parameter Value and Encoded - Word Extensions: Character Sets, Languages, and - Continuations", RFC 2184, August 1997. - - [HTML40] D. Raggett, A. Le Hors, I. 
Jacobs. "HTML 4.0 - Specification", World Wide Web Consortium Technical Report - "REC-html40", December, 1997. - - - - - - - - - - - - - - - - - - -Masinter Standards Track [Page 8] - -RFC 2388 multipart/form-data August 1998 - - -Full Copyright Statement - - Copyright (C) The Internet Society (1998). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - - - - - - - - - - - - - - - - - - - - - - - - -Masinter Standards Track [Page 9] - diff --git a/docs/specs/rfc2518.txt b/docs/specs/rfc2518.txt deleted file mode 100644 index 81d4038..0000000 --- a/docs/specs/rfc2518.txt +++ /dev/null @@ -1,5267 +0,0 @@ - - - - - - -Network Working Group Y. 
Goland -Request for Comments: 2518 Microsoft -Category: Standards Track E. Whitehead - UC Irvine - A. Faizi - Netscape - S. Carter - Novell - D. Jensen - Novell - February 1999 - - - HTTP Extensions for Distributed Authoring -- WEBDAV - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (1999). All Rights Reserved. - -Abstract - - This document specifies a set of methods, headers, and content-types - ancillary to HTTP/1.1 for the management of resource properties, - creation and management of resource collections, namespace - manipulation, and resource locking (collision avoidance). - -Table of Contents - - ABSTRACT............................................................1 - 1 INTRODUCTION .....................................................5 - 2 NOTATIONAL CONVENTIONS ...........................................7 - 3 TERMINOLOGY ......................................................7 - 4 DATA MODEL FOR RESOURCE PROPERTIES ...............................8 - 4.1 The Resource Property Model ...................................8 - 4.2 Existing Metadata Proposals ...................................8 - 4.3 Properties and HTTP Headers ...................................9 - 4.4 Property Values ...............................................9 - 4.5 Property Names ...............................................10 - 4.6 Media Independent Links ......................................10 - 5 COLLECTIONS OF WEB RESOURCES ....................................11 - - - -Goland, et al. 
Standards Track [Page 1] - -RFC 2518 WEBDAV February 1999 - - - 5.1 HTTP URL Namespace Model .....................................11 - 5.2 Collection Resources .........................................11 - 5.3 Creation and Retrieval of Collection Resources ...............12 - 5.4 Source Resources and Output Resources ........................13 - 6 LOCKING .........................................................14 - 6.1 Exclusive Vs. Shared Locks ...................................14 - 6.2 Required Support .............................................16 - 6.3 Lock Tokens ..................................................16 - 6.4 opaquelocktoken Lock Token URI Scheme ........................16 - 6.4.1 Node Field Generation Without the IEEE 802 Address ........17 - 6.5 Lock Capability Discovery ....................................19 - 6.6 Active Lock Discovery ........................................19 - 6.7 Usage Considerations .........................................19 - 7 WRITE LOCK ......................................................20 - 7.1 Methods Restricted by Write Locks ............................20 - 7.2 Write Locks and Lock Tokens ..................................20 - 7.3 Write Locks and Properties ...................................20 - 7.4 Write Locks and Null Resources ...............................21 - 7.5 Write Locks and Collections ..................................21 - 7.6 Write Locks and the If Request Header ........................22 - 7.6.1 Example - Write Lock ......................................22 - 7.7 Write Locks and COPY/MOVE ....................................23 - 7.8 Refreshing Write Locks .......................................23 - 8 HTTP METHODS FOR DISTRIBUTED AUTHORING ..........................23 - 8.1 PROPFIND .....................................................24 - 8.1.1 Example - Retrieving Named Properties .....................25 - 8.1.2 Example - Using allprop to Retrieve All Properties ........26 - 8.1.3 Example - 
Using propname to Retrieve all Property Names ...29 - 8.2 PROPPATCH ....................................................31 - 8.2.1 Status Codes for use with 207 (Multi-Status) ..............31 - 8.2.2 Example - PROPPATCH .......................................32 - 8.3 MKCOL Method .................................................33 - 8.3.1 Request ...................................................33 - 8.3.2 Status Codes ..............................................33 - 8.3.3 Example - MKCOL ...........................................34 - 8.4 GET, HEAD for Collections ....................................34 - 8.5 POST for Collections .........................................35 - 8.6 DELETE .......................................................35 - 8.6.1 DELETE for Non-Collection Resources .......................35 - 8.6.2 DELETE for Collections ....................................36 - 8.7 PUT ..........................................................36 - 8.7.1 PUT for Non-Collection Resources ..........................36 - 8.7.2 PUT for Collections .......................................37 - 8.8 COPY Method ..................................................37 - 8.8.1 COPY for HTTP/1.1 resources ...............................37 - 8.8.2 COPY for Properties .......................................38 - 8.8.3 COPY for Collections ......................................38 - 8.8.4 COPY and the Overwrite Header .............................39 - - - -Goland, et al. 
Standards Track [Page 2] - -RFC 2518 WEBDAV February 1999 - - - 8.8.5 Status Codes ..............................................39 - 8.8.6 Example - COPY with Overwrite .............................40 - 8.8.7 Example - COPY with No Overwrite ..........................40 - 8.8.8 Example - COPY of a Collection ............................41 - 8.9 MOVE Method ..................................................42 - 8.9.1 MOVE for Properties .......................................42 - 8.9.2 MOVE for Collections ......................................42 - 8.9.3 MOVE and the Overwrite Header .............................43 - 8.9.4 Status Codes ..............................................43 - 8.9.5 Example - MOVE of a Non-Collection ........................44 - 8.9.6 Example - MOVE of a Collection ............................44 - 8.10 LOCK Method ..................................................45 - 8.10.1 Operation .................................................46 - 8.10.2 The Effect of Locks on Properties and Collections .........46 - 8.10.3 Locking Replicated Resources ..............................46 - 8.10.4 Depth and Locking .........................................46 - 8.10.5 Interaction with other Methods ............................47 - 8.10.6 Lock Compatibility Table ..................................47 - 8.10.7 Status Codes ..............................................48 - 8.10.8 Example - Simple Lock Request .............................48 - 8.10.9 Example - Refreshing a Write Lock .........................49 - 8.10.10 Example - Multi-Resource Lock Request ....................50 - 8.11 UNLOCK Method ................................................51 - 8.11.1 Example - UNLOCK ..........................................52 - 9 HTTP HEADERS FOR DISTRIBUTED AUTHORING ..........................52 - 9.1 DAV Header ...................................................52 - 9.2 Depth Header .................................................52 - 9.3 Destination Header 
...........................................54 - 9.4 If Header ....................................................54 - 9.4.1 No-tag-list Production ....................................55 - 9.4.2 Tagged-list Production ....................................55 - 9.4.3 not Production ............................................56 - 9.4.4 Matching Function .........................................56 - 9.4.5 If Header and Non-DAV Compliant Proxies ...................57 - 9.5 Lock-Token Header ............................................57 - 9.6 Overwrite Header .............................................57 - 9.7 Status-URI Response Header ...................................57 - 9.8 Timeout Request Header .......................................58 - 10 STATUS CODE EXTENSIONS TO HTTP/1.1 ............................59 - 10.1 102 Processing ...............................................59 - 10.2 207 Multi-Status .............................................59 - 10.3 422 Unprocessable Entity .....................................60 - 10.4 423 Locked ...................................................60 - 10.5 424 Failed Dependency ........................................60 - 10.6 507 Insufficient Storage .....................................60 - 11 MULTI-STATUS RESPONSE .........................................60 - 12 XML ELEMENT DEFINITIONS .......................................61 - 12.1 activelock XML Element .......................................61 - - - -Goland, et al. 
Standards Track [Page 3] - -RFC 2518 WEBDAV February 1999 - - - 12.1.1 depth XML Element .........................................61 - 12.1.2 locktoken XML Element .....................................61 - 12.1.3 timeout XML Element .......................................61 - 12.2 collection XML Element .......................................62 - 12.3 href XML Element .............................................62 - 12.4 link XML Element .............................................62 - 12.4.1 dst XML Element ...........................................62 - 12.4.2 src XML Element ...........................................62 - 12.5 lockentry XML Element ........................................63 - 12.6 lockinfo XML Element .........................................63 - 12.7 lockscope XML Element ........................................63 - 12.7.1 exclusive XML Element .....................................63 - 12.7.2 shared XML Element ........................................63 - 12.8 locktype XML Element .........................................64 - 12.8.1 write XML Element .........................................64 - 12.9 multistatus XML Element ......................................64 - 12.9.1 response XML Element ......................................64 - 12.9.2 responsedescription XML Element ...........................65 - 12.10 owner XML Element ...........................................65 - 12.11 prop XML element ............................................66 - 12.12 propertybehavior XML element ................................66 - 12.12.1 keepalive XML element ....................................66 - 12.12.2 omit XML element .........................................67 - 12.13 propertyupdate XML element ..................................67 - 12.13.1 remove XML element .......................................67 - 12.13.2 set XML element ..........................................67 - 12.14 propfind XML Element ........................................68 - 
12.14.1 allprop XML Element ......................................68 - 12.14.2 propname XML Element .....................................68 - 13 DAV PROPERTIES ................................................68 - 13.1 creationdate Property ........................................69 - 13.2 displayname Property .........................................69 - 13.3 getcontentlanguage Property ..................................69 - 13.4 getcontentlength Property ....................................69 - 13.5 getcontenttype Property ......................................70 - 13.6 getetag Property .............................................70 - 13.7 getlastmodified Property .....................................70 - 13.8 lockdiscovery Property .......................................71 - 13.8.1 Example - Retrieving the lockdiscovery Property ...........71 - 13.9 resourcetype Property ........................................72 - 13.10 source Property .............................................72 - 13.10.1 Example - A source Property ..............................72 - 13.11 supportedlock Property ......................................73 - 13.11.1 Example - Retrieving the supportedlock Property ..........73 - 14 INSTRUCTIONS FOR PROCESSING XML IN DAV ........................74 - 15 DAV COMPLIANCE CLASSES ........................................75 - 15.1 Class 1 ......................................................75 - 15.2 Class 2 ......................................................75 - - - -Goland, et al. 
Standards Track [Page 4] - -RFC 2518 WEBDAV February 1999 - - - 16 INTERNATIONALIZATION CONSIDERATIONS ...........................76 - 17 SECURITY CONSIDERATIONS .......................................77 - 17.1 Authentication of Clients ....................................77 - 17.2 Denial of Service ............................................78 - 17.3 Security through Obscurity ...................................78 - 17.4 Privacy Issues Connected to Locks ............................78 - 17.5 Privacy Issues Connected to Properties .......................79 - 17.6 Reduction of Security due to Source Link .....................79 - 17.7 Implications of XML External Entities ........................79 - 17.8 Risks Connected with Lock Tokens .............................80 - 18 IANA CONSIDERATIONS ...........................................80 - 19 INTELLECTUAL PROPERTY .........................................81 - 20 ACKNOWLEDGEMENTS ..............................................82 - 21 REFERENCES ....................................................82 - 21.1 Normative References .........................................82 - 21.2 Informational References .....................................83 - 22 AUTHORS' ADDRESSES ............................................84 - 23 APPENDICES ....................................................86 - 23.1 Appendix 1 - WebDAV Document Type Definition .................86 - 23.2 Appendix 2 - ISO 8601 Date and Time Profile ..................88 - 23.3 Appendix 3 - Notes on Processing XML Elements ................89 - 23.3.1 Notes on Empty XML Elements ...............................89 - 23.3.2 Notes on Illegal XML Processing ...........................89 - 23.4 Appendix 4 -- XML Namespaces for WebDAV ......................92 - 23.4.1 Introduction ..............................................92 - 23.4.2 Meaning of Qualified Names ................................92 - 24 FULL COPYRIGHT STATEMENT ......................................94 - - - 
-1 Introduction - - This document describes an extension to the HTTP/1.1 protocol that - allows clients to perform remote web content authoring operations. - This extension provides a coherent set of methods, headers, request - entity body formats, and response entity body formats that provide - operations for: - - Properties: The ability to create, remove, and query information - about Web pages, such as their authors, creation dates, etc. Also, - the ability to link pages of any media type to related pages. - - Collections: The ability to create sets of documents and to retrieve - a hierarchical membership listing (like a directory listing in a file - system). - - - - - - -Goland, et al. Standards Track [Page 5] - -RFC 2518 WEBDAV February 1999 - - - Locking: The ability to keep more than one person from working on a - document at the same time. This prevents the "lost update problem," - in which modifications are lost as first one author then another - writes changes without merging the other author's changes. - - Namespace Operations: The ability to instruct the server to copy and - move Web resources. - - Requirements and rationale for these operations are described in a - companion document, "Requirements for a Distributed Authoring and - Versioning Protocol for the World Wide Web" [RFC2291]. - - The sections below provide a detailed introduction to resource - properties (section 4), collections of resources (section 5), and - locking operations (section 6). These sections introduce the - abstractions manipulated by the WebDAV-specific HTTP methods - described in section 8, "HTTP Methods for Distributed Authoring". - - In HTTP/1.1, method parameter information was exclusively encoded in - HTTP headers. Unlike HTTP/1.1, WebDAV encodes method parameter - information either in an Extensible Markup Language (XML) [REC-XML] - request entity body, or in an HTTP header. 
The use of XML to encode - method parameters was motivated by the ability to add extra XML - elements to existing structures, providing extensibility; and by - XML's ability to encode information in ISO 10646 character sets, - providing internationalization support. As a rule of thumb, - parameters are encoded in XML entity bodies when they have unbounded - length, or when they may be shown to a human user and hence require - encoding in an ISO 10646 character set. Otherwise, parameters are - encoded within HTTP headers. Section 9 describes the new HTTP - headers used with WebDAV methods. - - In addition to encoding method parameters, XML is used in WebDAV to - encode the responses from methods, providing the extensibility and - internationalization advantages of XML for method output, as well as - input. - - XML elements used in this specification are defined in section 12. - - The XML namespace extension (Appendix 4) is also used in this - specification in order to allow for new XML elements to be added - without fear of colliding with other element names. - - While the status codes provided by HTTP/1.1 are sufficient to - describe most error conditions encountered by WebDAV methods, there - are some errors that do not fall neatly into the existing categories. - New status codes developed for the WebDAV methods are defined in - section 10. Since some WebDAV methods may operate over many - - - -Goland, et al. Standards Track [Page 6] - -RFC 2518 WEBDAV February 1999 - - - resources, the Multi-Status response has been introduced to return - status information for multiple resources. The Multi-Status response - is described in section 11. - - WebDAV employs the property mechanism to store information about the - current state of the resource. For example, when a lock is taken out - on a resource, a lock information property describes the current - state of the lock. Section 13 defines the properties used within the - WebDAV specification. 
- - Finishing off the specification are sections on what it means to be - compliant with this specification (section 15), on - internationalization support (section 16), and on security (section - 17). - -2 Notational Conventions - - Since this document describes a set of extensions to the HTTP/1.1 - protocol, the augmented BNF used herein to describe protocol elements - is exactly the same as described in section 2.1 of [RFC2068]. Since - this augmented BNF uses the basic production rules provided in - section 2.2 of [RFC2068], these rules apply to this document as well. - - The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", - "SHOULD", SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this - document are to be interpreted as described in RFC 2119 [RFC2119]. - -3 Terminology - - URI/URL - A Uniform Resource Identifier and Uniform Resource Locator, - respectively. These terms (and the distinction between them) are - defined in [RFC2396]. - - Collection - A resource that contains a set of URIs, termed member - URIs, which identify member resources and meets the requirements in - section 5 of this specification. - - Member URI - A URI which is a member of the set of URIs contained by - a collection. - - Internal Member URI - A Member URI that is immediately relative to - the URI of the collection (the definition of immediately relative is - given in section 5.2). - - Property - A name/value pair that contains descriptive information - about a resource. - - - - - -Goland, et al. Standards Track [Page 7] - -RFC 2518 WEBDAV February 1999 - - - Live Property - A property whose semantics and syntax are enforced by - the server. For example, the live "getcontentlength" property has - its value, the length of the entity returned by a GET request, - automatically calculated by the server. - - Dead Property - A property whose semantics and syntax are not - enforced by the server. 
The server only records the value of a dead - property; the client is responsible for maintaining the consistency - of the syntax and semantics of a dead property. - - Null Resource - A resource which responds with a 404 (Not Found) to - any HTTP/1.1 or DAV method except for PUT, MKCOL, OPTIONS and LOCK. - A NULL resource MUST NOT appear as a member of its parent collection. - -4 Data Model for Resource Properties - -4.1 The Resource Property Model - - Properties are pieces of data that describe the state of a resource. - Properties are data about data. - - Properties are used in distributed authoring environments to provide - for efficient discovery and management of resources. For example, a - 'subject' property might allow for the indexing of all resources by - their subject, and an 'author' property might allow for the discovery - of what authors have written which documents. - - The DAV property model consists of name/value pairs. The name of a - property identifies the property's syntax and semantics, and provides - an address by which to refer to its syntax and semantics. - - There are two categories of properties: "live" and "dead". A live - property has its syntax and semantics enforced by the server. Live - properties include cases where a) the value of a property is read- - only, maintained by the server, and b) the value of the property is - maintained by the client, but the server performs syntax checking on - submitted values. All instances of a given live property MUST comply - with the definition associated with that property name. A dead - property has its syntax and semantics enforced by the client; the - server merely records the value of the property verbatim. - -4.2 Existing Metadata Proposals - - Properties have long played an essential role in the maintenance of - large document repositories, and many current proposals contain some - notion of a property, or discuss web metadata more generally. 
These - include PICS [REC-PICS], PICS-NG, XML, Web Collections, and several - proposals on representing relationships within HTML. Work on PICS-NG - - - -Goland, et al. Standards Track [Page 8] - -RFC 2518 WEBDAV February 1999 - - - and Web Collections has been subsumed by the Resource Description - Framework (RDF) metadata activity of the World Wide Web Consortium. - RDF consists of a network-based data model and an XML representation - of that model. - - Some proposals come from a digital library perspective. These - include the Dublin Core [RFC2413] metadata set and the Warwick - Framework [WF], a container architecture for different metadata - schemas. The literature includes many examples of metadata, - including MARC [USMARC], a bibliographic metadata format, and a - technical report bibliographic format employed by the Dienst system - [RFC1807]. Additionally, the proceedings from the first IEEE Metadata - conference describe many community-specific metadata sets. - - Participants of the 1996 Metadata II Workshop in Warwick, UK [WF], - noted that "new metadata sets will develop as the networked - infrastructure matures" and "different communities will propose, - design, and be responsible for different types of metadata." These - observations can be corroborated by noting that many community- - specific sets of metadata already exist, and there is significant - motivation for the development of new forms of metadata as many - communities increasingly make their data available in digital form, - requiring a metadata format to assist data location and cataloging. - -4.3 Properties and HTTP Headers - - Properties already exist, in a limited sense, in HTTP message - headers. However, in distributed authoring environments a relatively - large number of properties are needed to describe the state of a - resource, and setting/returning them all through HTTP headers is - inefficient. 
Thus a mechanism is needed which allows a principal to - identify a set of properties in which the principal is interested and - to set or retrieve just those properties. - -4.4 Property Values - - The value of a property when expressed in XML MUST be well formed. - - XML has been chosen because it is a flexible, self-describing, - structured data format that supports rich schema definitions, and - because of its support for multiple character sets. XML's self- - describing nature allows any property's value to be extended by - adding new elements. Older clients will not break when they - encounter extensions because they will still have the data specified - in the original schema and will ignore elements they do not - understand. XML's support for multiple character sets allows any - human-readable property to be encoded and read in a character set - familiar to the user. XML's support for multiple human languages, - - - -Goland, et al. Standards Track [Page 9] - -RFC 2518 WEBDAV February 1999 - - - using the "xml:lang" attribute, handles cases where the same - character set is employed by multiple human languages. - -4.5 Property Names - - A property name is a universally unique identifier that is associated - with a schema that provides information about the syntax and - semantics of the property. - - Because a property's name is universally unique, clients can depend - upon consistent behavior for a particular property across multiple - resources, on the same and across different servers, so long as that - property is "live" on the resources in question, and the - implementation of the live property is faithful to its definition. - - The XML namespace mechanism, which is based on URIs [RFC2396], is - used to name properties because it prevents namespace collisions and - provides for varying degrees of administrative control. - - The property namespace is flat; that is, no hierarchy of properties - is explicitly recognized. 
Thus, if a property A and a property A/B - exist on a resource, there is no recognition of any relationship - between the two properties. It is expected that a separate - specification will eventually be produced which will address issues - relating to hierarchical properties. - - Finally, it is not possible to define the same property twice on a - single resource, as this would cause a collision in the resource's - property namespace. - -4.6 Media Independent Links - - Although HTML resources support links to other resources, the Web - needs more general support for links between resources of any media - type (media types are also known as MIME types, or content types). - WebDAV provides such links. A WebDAV link is a special type of - property value, formally defined in section 12.4, that allows typed - connections to be established between resources of any media type. - The property value consists of source and destination Uniform - Resource Identifiers (URIs); the property name identifies the link - type. - - - - - - - - - - -Goland, et al. Standards Track [Page 10] - -RFC 2518 WEBDAV February 1999 - - -5 Collections of Web Resources - - This section provides a description of a new type of Web resource, - the collection, and discusses its interactions with the HTTP URL - namespace. The purpose of a collection resource is to model - collection-like objects (e.g., file system directories) within a - server's namespace. - - All DAV compliant resources MUST support the HTTP URL namespace model - specified herein. - -5.1 HTTP URL Namespace Model - - The HTTP URL namespace is a hierarchical namespace where the - hierarchy is delimited with the "/" character. - - An HTTP URL namespace is said to be consistent if it meets the - following conditions: for every URL in the HTTP hierarchy there - exists a collection that contains that URL as an internal member. - The root, or top-level collection of the namespace under - consideration is exempt from the previous rule. 
- - Neither HTTP/1.1 nor WebDAV require that the entire HTTP URL - namespace be consistent. However, certain WebDAV methods are - prohibited from producing results that cause namespace - inconsistencies. - - Although implicit in [RFC2068] and [RFC2396], any resource, including - collection resources, MAY be identified by more than one URI. For - example, a resource could be identified by multiple HTTP URLs. - -5.2 Collection Resources - - A collection is a resource whose state consists of at least a list of - internal member URIs and a set of properties, but which may have - additional state such as entity bodies returned by GET. An internal - member URI MUST be immediately relative to a base URI of the - collection. That is, the internal member URI is equal to a - containing collection's URI plus an additional segment for non- - collection resources, or additional segment plus trailing slash "/" - for collection resources, where segment is defined in section 3.3 of - [RFC2396]. - - Any given internal member URI MUST only belong to the collection - once, i.e., it is illegal to have multiple instances of the same URI - in a collection. Properties defined on collections behave exactly as - do properties on non-collection resources. - - - - -Goland, et al. Standards Track [Page 11] - -RFC 2518 WEBDAV February 1999 - - - For all WebDAV compliant resources A and B, identified by URIs U and - V, for which U is immediately relative to V, B MUST be a collection - that has U as an internal member URI. So, if the resource with URL - http://foo.com/bar/blah is WebDAV compliant and if the resource with - URL http://foo.com/bar/ is WebDAV compliant then the resource with - URL http://foo.com/bar/ must be a collection and must contain URL - http://foo.com/bar/blah as an internal member. - - Collection resources MAY list the URLs of non-WebDAV compliant - children in the HTTP URL namespace hierarchy as internal members but - are not required to do so. 
For example, if the resource with URL - http://foo.com/bar/blah is not WebDAV compliant and the URL - http://foo.com/bar/ identifies a collection then URL - http://foo.com/bar/blah may or may not be an internal member of the - collection with URL http://foo.com/bar/. - - If a WebDAV compliant resource has no WebDAV compliant children in - the HTTP URL namespace hierarchy then the WebDAV compliant resource - is not required to be a collection. - - There is a standing convention that when a collection is referred to - by its name without a trailing slash, the trailing slash is - automatically appended. Due to this, a resource may accept a URI - without a trailing "/" to point to a collection. In this case it - SHOULD return a content-location header in the response pointing to - the URI ending with the "/". For example, if a client invokes a - method on http://foo.bar/blah (no trailing slash), the resource - http://foo.bar/blah/ (trailing slash) may respond as if the operation - were invoked on it, and should return a content-location header with - http://foo.bar/blah/ in it. In general clients SHOULD use the "/" - form of collection names. - - A resource MAY be a collection but not be WebDAV compliant. That is, - the resource may comply with all the rules set out in this - specification regarding how a collection is to behave without - necessarily supporting all methods that a WebDAV compliant resource - is required to support. In such a case the resource may return the - DAV:resourcetype property with the value DAV:collection but MUST NOT - return a DAV header containing the value "1" on an OPTIONS response. - -5.3 Creation and Retrieval of Collection Resources - - This document specifies the MKCOL method to create new collection - resources, rather than using the existing HTTP/1.1 PUT or POST - method, for the following reasons: - - - - - - -Goland, et al. 
Standards Track [Page 12] - -RFC 2518 WEBDAV February 1999 - - - In HTTP/1.1, the PUT method is defined to store the request body at - the location specified by the Request-URI. While a description - format for a collection can readily be constructed for use with PUT, - the implications of sending such a description to the server are - undesirable. For example, if a description of a collection that - omitted some existing resources were PUT to a server, this might be - interpreted as a command to remove those members. This would extend - PUT to perform DELETE functionality, which is undesirable since it - changes the semantics of PUT, and makes it difficult to control - DELETE functionality with an access control scheme based on methods. - - While the POST method is sufficiently open-ended that a "create a - collection" POST command could be constructed, this is undesirable - because it would be difficult to separate access control for - collection creation from other uses of POST. - - The exact definition of the behavior of GET and PUT on collections is - defined later in this document. - -5.4 Source Resources and Output Resources - - For many resources, the entity returned by a GET method exactly - matches the persistent state of the resource, for example, a GIF file - stored on a disk. For this simple case, the URI at which a resource - is accessed is identical to the URI at which the source (the - persistent state) of the resource is accessed. This is also the case - for HTML source files that are not processed by the server prior to - transmission. - - However, the server can sometimes process HTML resources before they - are transmitted as a return entity body. For example, a server- - side-include directive within an HTML file might instruct a server to - replace the directive with another value, such as the current date. - In this case, what is returned by GET (HTML plus date) differs from - the persistent state of the resource (HTML plus directive). 
- Typically there is no way to access the HTML resource containing the - unprocessed directive. - - Sometimes the entity returned by GET is the output of a data- - producing process that is described by one or more source resources - (that may not even have a location in the URI namespace). A single - data-producing process may dynamically generate the state of a - potentially large number of output resources. An example of this is - a CGI script that describes a "finger" gateway process that maps part - of the namespace of a server into finger requests, such as - http://www.foo.bar.org/finger_gateway/user@host. - - - - - -Goland, et al. Standards Track [Page 13] - -RFC 2518 WEBDAV February 1999 - - - In the absence of distributed authoring capabilities, it is - acceptable to have no mapping of source resource(s) to the URI - namespace. In fact, preventing access to the source resource(s) has - desirable security benefits. However, if remote editing of the - source resource(s) is desired, the source resource(s) should be given - a location in the URI namespace. This source location should not be - one of the locations at which the generated output is retrievable, - since in general it is impossible for the server to differentiate - requests for source resources from requests for process output - resources. There is often a many-to-many relationship between source - resources and output resources. - - On WebDAV compliant servers the URI of the source resource(s) may be - stored in a link on the output resource with type DAV:source (see - section 13.10 for a description of the source link property). - Storing the source URIs in links on the output resources places the - burden of discovering the source on the authoring client. Note that - the value of a source link is not guaranteed to point to the correct - source. Source links may break or incorrect values may be entered. - Also note that not all servers will allow the client to set the - source link value. 
For example a server which generates source links - on the fly for its CGI files will most likely not allow a client to - set the source link value. - -6 Locking - - The ability to lock a resource provides a mechanism for serializing - access to that resource. Using a lock, an authoring client can - provide a reasonable guarantee that another principal will not modify - a resource while it is being edited. In this way, a client can - prevent the "lost update" problem. - - This specification allows locks to vary over two client-specified - parameters, the number of principals involved (exclusive vs. shared) - and the type of access to be granted. This document defines locking - for only one access type, write. However, the syntax is extensible, - and permits the eventual specification of locking for other access - types. - -6.1 Exclusive Vs. Shared Locks - - The most basic form of lock is an exclusive lock. This is a lock - where the access right in question is only granted to a single - principal. The need for this arbitration results from a desire to - avoid having to merge results. - - - - - - -Goland, et al. Standards Track [Page 14] - -RFC 2518 WEBDAV February 1999 - - - However, there are times when the goal of a lock is not to exclude - others from exercising an access right but rather to provide a - mechanism for principals to indicate that they intend to exercise - their access rights. Shared locks are provided for this case. A - shared lock allows multiple principals to receive a lock. Hence any - principal with appropriate access can get the lock. - - With shared locks there are two trust sets that affect a resource. - The first trust set is created by access permissions. Principals who - are trusted, for example, may have permission to write to the - resource. 
Among those who have access permission to write to the - resource, the set of principals who have taken out a shared lock also - must trust each other, creating a (typically) smaller trust set - within the access permission write set. - - Starting with every possible principal on the Internet, in most - situations the vast majority of these principals will not have write - access to a given resource. Of the small number who do have write - access, some principals may decide to guarantee their edits are free - from overwrite conflicts by using exclusive write locks. Others may - decide they trust their collaborators will not overwrite their work - (the potential set of collaborators being the set of principals who - have write permission) and use a shared lock, which informs their - collaborators that a principal may be working on the resource. - - The WebDAV extensions to HTTP do not need to provide all of the - communications paths necessary for principals to coordinate their - activities. When using shared locks, principals may use any out of - band communication channel to coordinate their work (e.g., face-to- - face interaction, written notes, post-it notes on the screen, - telephone conversation, Email, etc.) The intent of a shared lock is - to let collaborators know who else may be working on a resource. - - Shared locks are included because experience from web distributed - authoring systems has indicated that exclusive locks are often too - rigid. An exclusive lock is used to enforce a particular editing - process: take out an exclusive lock, read the resource, perform - edits, write the resource, release the lock. This editing process - has the problem that locks are not always properly released, for - example when a program crashes, or when a lock owner leaves without - unlocking a resource. 
While both timeouts and administrative action - can be used to remove an offending lock, neither mechanism may be - available when needed; the timeout may be long or the administrator - may not be available. - - - - - - - -Goland, et al. Standards Track [Page 15] - -RFC 2518 WEBDAV February 1999 - - -6.2 Required Support - - A WebDAV compliant server is not required to support locking in any - form. If the server does support locking it may choose to support - any combination of exclusive and shared locks for any access types. - - The reason for this flexibility is that locking policy strikes to the - very heart of the resource management and versioning systems employed - by various storage repositories. These repositories require control - over what sort of locking will be made available. For example, some - repositories only support shared write locks while others only - provide support for exclusive write locks while yet others use no - locking at all. As each system is sufficiently different to merit - exclusion of certain locking features, this specification leaves - locking as the sole axis of negotiation within WebDAV. - -6.3 Lock Tokens - - A lock token is a type of state token, represented as a URI, which - identifies a particular lock. A lock token is returned by every - successful LOCK operation in the lockdiscovery property in the - response body, and can also be found through lock discovery on a - resource. - - Lock token URIs MUST be unique across all resources for all time. - This uniqueness constraint allows lock tokens to be submitted across - resources and servers without fear of confusion. - - This specification provides a lock token URI scheme called - opaquelocktoken that meets the uniqueness requirements. However - resources are free to return any URI scheme so long as it meets the - uniqueness requirements. - - Having a lock token provides no special access rights. Anyone can - find out anyone else's lock token by performing lock discovery. 
- Locks MUST be enforced based upon whatever authentication mechanism - is used by the server, not based on the secrecy of the token values. - -6.4 opaquelocktoken Lock Token URI Scheme - - The opaquelocktoken URI scheme is designed to be unique across all - resources for all time. Due to this uniqueness quality, a client may - submit an opaque lock token in an If header on a resource other than - the one that returned it. - - All resources MUST recognize the opaquelocktoken scheme and, at - minimum, recognize that the lock token does not refer to an - outstanding lock on the resource. - - - -Goland, et al. Standards Track [Page 16] - -RFC 2518 WEBDAV February 1999 - - - In order to guarantee uniqueness across all resources for all time - the opaquelocktoken requires the use of the Universal Unique - Identifier (UUID) mechanism, as described in [ISO-11578]. - - Opaquelocktoken generators, however, have a choice of how they create - these tokens. They can either generate a new UUID for every lock - token they create or they can create a single UUID and then add - extension characters. If the second method is selected then the - program generating the extensions MUST guarantee that the same - extension will never be used twice with the associated UUID. - - OpaqueLockToken-URI = "opaquelocktoken:" UUID [Extension] ; The UUID - production is the string representation of a UUID, as defined in - [ISO-11578]. Note that white space (LWS) is not allowed between - elements of this production. - - Extension = path ; path is defined in section 3.2.1 of RFC 2068 - [RFC2068] - -6.4.1 Node Field Generation Without the IEEE 802 Address - - UUIDs, as defined in [ISO-11578], contain a "node" field that - contains one of the IEEE 802 addresses for the server machine. As - noted in section 17.8, there are several security risks associated - with exposing a machine's IEEE 802 address. 
This section provides an - alternate mechanism for generating the "node" field of a UUID which - does not employ an IEEE 802 address. WebDAV servers MAY use this - algorithm for creating the node field when generating UUIDs. The - text in this section is originally from an Internet-Draft by Paul - Leach and Rich Salz, who are noted here to properly attribute their - work. - - The ideal solution is to obtain a 47 bit cryptographic quality random - number, and use it as the low 47 bits of the node ID, with the most - significant bit of the first octet of the node ID set to 1. This bit - is the unicast/multicast bit, which will never be set in IEEE 802 - addresses obtained from network cards; hence, there can never be a - conflict between UUIDs generated by machines with and without network - cards. - - If a system does not have a primitive to generate cryptographic - quality random numbers, then in most systems there are usually a - fairly large number of sources of randomness available from which one - can be generated. Such sources are system specific, but often - include: - - - - - - -Goland, et al. Standards Track [Page 17] - -RFC 2518 WEBDAV February 1999 - - - - the percent of memory in use - - the size of main memory in bytes - - the amount of free main memory in bytes - - the size of the paging or swap file in bytes - - free bytes of paging or swap file - - the total size of user virtual address space in bytes - - the total available user address space bytes - - the size of boot disk drive in bytes - - the free disk space on boot drive in bytes - - the current time - - the amount of time since the system booted - - the individual sizes of files in various system directories - - the creation, last read, and modification times of files in - various system directories - - the utilization factors of various system resources (heap, etc.) 
- - current mouse cursor position - - current caret position - - current number of running processes, threads - - handles or IDs of the desktop window and the active window - - the value of stack pointer of the caller - - the process and thread ID of caller - - various processor architecture specific performance counters - (instructions executed, cache misses, TLB misses) - - (Note that it is precisely the above kinds of sources of randomness - that are used to seed cryptographic quality random number generators - on systems without special hardware for their construction.) - - In addition, items such as the computer's name and the name of the - operating system, while not strictly speaking random, will help - differentiate the results from those obtained by other systems. - - The exact algorithm to generate a node ID using these data is system - specific, because both the data available and the functions to obtain - them are often very system specific. However, assuming that one can - concatenate all the values from the randomness sources into a buffer, - and that a cryptographic hash function such as MD5 is available, then - any 6 bytes of the MD5 hash of the buffer, with the multicast bit - (the high bit of the first byte) set will be an appropriately random - node ID. - - Other hash functions, such as SHA-1, can also be used. The only - requirement is that the result be suitably random _ in the sense that - the outputs from a set uniformly distributed inputs are themselves - uniformly distributed, and that a single bit change in the input can - be expected to cause half of the output bits to change. - - - - - -Goland, et al. Standards Track [Page 18] - -RFC 2518 WEBDAV February 1999 - - -6.5 Lock Capability Discovery - - Since server lock support is optional, a client trying to lock a - resource on a server can either try the lock and hope for the best, - or perform some form of discovery to determine what lock capabilities - the server supports. 
This is known as lock capability discovery. - Lock capability discovery differs from discovery of supported access - control types, since there may be access control types without - corresponding lock types. A client can determine what lock types the - server supports by retrieving the supportedlock property. - - Any DAV compliant resource that supports the LOCK method MUST support - the supportedlock property. - -6.6 Active Lock Discovery - - If another principal locks a resource that a principal wishes to - access, it is useful for the second principal to be able to find out - who the first principal is. For this purpose the lockdiscovery - property is provided. This property lists all outstanding locks, - describes their type, and where available, provides their lock token. - - Any DAV compliant resource that supports the LOCK method MUST support - the lockdiscovery property. - -6.7 Usage Considerations - - Although the locking mechanisms specified here provide some help in - preventing lost updates, they cannot guarantee that updates will - never be lost. Consider the following scenario: - - Two clients A and B are interested in editing the resource ' - index.html'. Client A is an HTTP client rather than a WebDAV client, - and so does not know how to perform locking. - Client A doesn't lock the document, but does a GET and begins - editing. - Client B does LOCK, performs a GET and begins editing. - Client B finishes editing, performs a PUT, then an UNLOCK. - Client A performs a PUT, overwriting and losing all of B's changes. - - There are several reasons why the WebDAV protocol itself cannot - prevent this situation. First, it cannot force all clients to use - locking because it must be compatible with HTTP clients that do not - comprehend locking. Second, it cannot require servers to support - locking because of the variety of repository implementations, some of - which rely on reservations and merging rather than on locking. 
- Finally, being stateless, it cannot enforce a sequence of operations - like LOCK / GET / PUT / UNLOCK. - - - -Goland, et al. Standards Track [Page 19] - -RFC 2518 WEBDAV February 1999 - - - WebDAV servers that support locking can reduce the likelihood that - clients will accidentally overwrite each other's changes by requiring - clients to lock resources before modifying them. Such servers would - effectively prevent HTTP 1.0 and HTTP 1.1 clients from modifying - resources. - - WebDAV clients can be good citizens by using a lock / retrieve / - write /unlock sequence of operations (at least by default) whenever - they interact with a WebDAV server that supports locking. - - HTTP 1.1 clients can be good citizens, avoiding overwriting other - clients' changes, by using entity tags in If-Match headers with any - requests that would modify resources. - - Information managers may attempt to prevent overwrites by - implementing client-side procedures requiring locking before - modifying WebDAV resources. - -7 Write Lock - - This section describes the semantics specific to the write lock type. - The write lock is a specific instance of a lock type, and is the only - lock type described in this specification. - -7.1 Methods Restricted by Write Locks - - A write lock MUST prevent a principal without the lock from - successfully executing a PUT, POST, PROPPATCH, LOCK, UNLOCK, MOVE, - DELETE, or MKCOL on the locked resource. All other current methods, - GET in particular, function independently of the lock. - - Note, however, that as new methods are created it will be necessary - to specify how they interact with a write lock. - -7.2 Write Locks and Lock Tokens - - A successful request for an exclusive or shared write lock MUST - result in the generation of a unique lock token associated with the - requesting principal. Thus if five principals have a shared write - lock on the same resource there will be five lock tokens, one for - each principal. 
- -7.3 Write Locks and Properties - - While those without a write lock may not alter a property on a - resource it is still possible for the values of live properties to - change, even while locked, due to the requirements of their schemas. - - - - -Goland, et al. Standards Track [Page 20] - -RFC 2518 WEBDAV February 1999 - - - Only dead properties and live properties defined to respect locks are - guaranteed not to change while write locked. - -7.4 Write Locks and Null Resources - - It is possible to assert a write lock on a null resource in order to - lock the name. - - A write locked null resource, referred to as a lock-null resource, - MUST respond with a 404 (Not Found) or 405 (Method Not Allowed) to - any HTTP/1.1 or DAV methods except for PUT, MKCOL, OPTIONS, PROPFIND, - LOCK, and UNLOCK. A lock-null resource MUST appear as a member of - its parent collection. Additionally the lock-null resource MUST have - defined on it all mandatory DAV properties. Most of these - properties, such as all the get* properties, will have no value as a - lock-null resource does not support the GET method. Lock-Null - resources MUST have defined values for lockdiscovery and - supportedlock properties. - - Until a method such as PUT or MKCOL is successfully executed on the - lock-null resource the resource MUST stay in the lock-null state. - However, once a PUT or MKCOL is successfully executed on a lock-null - resource the resource ceases to be in the lock-null state. - - If the resource is unlocked, for any reason, without a PUT, MKCOL, or - similar method having been successfully executed upon it then the - resource MUST return to the null state. - -7.5 Write Locks and Collections - - A write lock on a collection, whether created by a "Depth: 0" or - "Depth: infinity" lock request, prevents the addition or removal of - member URIs of the collection by non-lock owners. 
As a consequence, - when a principal issues a PUT or POST request to create a new - resource under a URI which needs to be an internal member of a write - locked collection to maintain HTTP namespace consistency, or issues a - DELETE to remove a resource which has a URI which is an existing - internal member URI of a write locked collection, this request MUST - fail if the principal does not have a write lock on the collection. - - However, if a write lock request is issued to a collection containing - member URIs identifying resources that are currently locked in a - manner which conflicts with the write lock, the request MUST fail - with a 423 (Locked) status code. - - If a lock owner causes the URI of a resource to be added as an - internal member URI of a locked collection then the new resource MUST - be automatically added to the lock. This is the only mechanism that - - - -Goland, et al. Standards Track [Page 21] - -RFC 2518 WEBDAV February 1999 - - - allows a resource to be added to a write lock. Thus, for example, if - the collection /a/b/ is write locked and the resource /c is moved to - /a/b/c then resource /a/b/c will be added to the write lock. - -7.6 Write Locks and the If Request Header - - If a user agent is not required to have knowledge about a lock when - requesting an operation on a locked resource, the following scenario - might occur. Program A, run by User A, takes out a write lock on a - resource. Program B, also run by User A, has no knowledge of the - lock taken out by Program A, yet performs a PUT to the locked - resource. In this scenario, the PUT succeeds because locks are - associated with a principal, not a program, and thus program B, - because it is acting with principal A's credential, is allowed to - perform the PUT. However, had program B known about the lock, it - would not have overwritten the resource, preferring instead to - present a dialog box describing the conflict to the user. 
Due to - this scenario, a mechanism is needed to prevent different programs - from accidentally ignoring locks taken out by other programs with the - same authorization. - - In order to prevent these collisions a lock token MUST be submitted - by an authorized principal in the If header for all locked resources - that a method may interact with or the method MUST fail. For - example, if a resource is to be moved and both the source and - destination are locked then two lock tokens must be submitted, one - for the source and the other for the destination. - -7.6.1 Example - Write Lock - - >>Request - - COPY /~fielding/index.html HTTP/1.1 - Host: www.ics.uci.edu - Destination: http://www.ics.uci.edu/users/f/fielding/index.html - If: - () - - >>Response - - HTTP/1.1 204 No Content - - In this example, even though both the source and destination are - locked, only one lock token must be submitted, for the lock on the - destination. This is because the source resource is not modified by - a COPY, and hence unaffected by the write lock. In this example, user - agent authentication has previously occurred via a mechanism outside - the scope of the HTTP protocol, in the underlying transport layer. - - - -Goland, et al. Standards Track [Page 22] - -RFC 2518 WEBDAV February 1999 - - -7.7 Write Locks and COPY/MOVE - - A COPY method invocation MUST NOT duplicate any write locks active on - the source. However, as previously noted, if the COPY copies the - resource into a collection that is locked with "Depth: infinity", - then the resource will be added to the lock. - - A successful MOVE request on a write locked resource MUST NOT move - the write lock with the resource. However, the resource is subject to - being added to an existing lock at the destination, as specified in - section 7.5. For example, if the MOVE makes the resource a child of a - collection that is locked with "Depth: infinity", then the resource - will be added to that collection's lock. 
Additionally, if a resource - locked with "Depth: infinity" is moved to a destination that is - within the scope of the same lock (e.g., within the namespace tree - covered by the lock), the moved resource will again be a added to the - lock. In both these examples, as specified in section 7.6, an If - header must be submitted containing a lock token for both the source - and destination. - -7.8 Refreshing Write Locks - - A client MUST NOT submit the same write lock request twice. Note - that a client is always aware it is resubmitting the same lock - request because it must include the lock token in the If header in - order to make the request for a resource that is already locked. - - However, a client may submit a LOCK method with an If header but - without a body. This form of LOCK MUST only be used to "refresh" a - lock. Meaning, at minimum, that any timers associated with the lock - MUST be re-set. - - A server may return a Timeout header with a lock refresh that is - different than the Timeout header returned when the lock was - originally requested. Additionally clients may submit Timeout - headers of arbitrary value with their lock refresh requests. - Servers, as always, may ignore Timeout headers submitted by the - client. - - If an error is received in response to a refresh LOCK request the - client SHOULD assume that the lock was not refreshed. - -8 HTTP Methods for Distributed Authoring - - The following new HTTP methods use XML as a request and response - format. All DAV compliant clients and resources MUST use XML parsers - that are compliant with [REC-XML]. All XML used in either requests - or responses MUST be, at minimum, well formed. If a server receives - - - -Goland, et al. Standards Track [Page 23] - -RFC 2518 WEBDAV February 1999 - - - ill-formed XML in a request it MUST reject the entire request with a - 400 (Bad Request). 
If a client receives ill-formed XML in a response - then it MUST NOT assume anything about the outcome of the executed - method and SHOULD treat the server as malfunctioning. - -8.1 PROPFIND - - The PROPFIND method retrieves properties defined on the resource - identified by the Request-URI, if the resource does not have any - internal members, or on the resource identified by the Request-URI - and potentially its member resources, if the resource is a collection - that has internal member URIs. All DAV compliant resources MUST - support the PROPFIND method and the propfind XML element (section - 12.14) along with all XML elements defined for use with that element. - - A client may submit a Depth header with a value of "0", "1", or - "infinity" with a PROPFIND on a collection resource with internal - member URIs. DAV compliant servers MUST support the "0", "1" and - "infinity" behaviors. By default, the PROPFIND method without a Depth - header MUST act as if a "Depth: infinity" header was included. - - A client may submit a propfind XML element in the body of the request - method describing what information is being requested. It is - possible to request particular property values, all property values, - or a list of the names of the resource's properties. A client may - choose not to submit a request body. An empty PROPFIND request body - MUST be treated as a request for the names and values of all - properties. - - All servers MUST support returning a response of content type - text/xml or application/xml that contains a multistatus XML element - that describes the results of the attempts to retrieve the various - properties. - - If there is an error retrieving a property then a proper error result - MUST be included in the response. A request to retrieve the value of - a property which does not exist is an error and MUST be noted, if the - response uses a multistatus XML element, with a response XML element - which contains a 404 (Not Found) status value. 
- - Consequently, the multistatus XML element for a collection resource - with member URIs MUST include a response XML element for each member - URI of the collection, to whatever depth was requested. Each response - XML element MUST contain an href XML element that gives the URI of - the resource on which the properties in the prop XML element are - defined. Results for a PROPFIND on a collection resource with - internal member URIs are returned as a flat list whose order of - entries is not significant. - - - -Goland, et al. Standards Track [Page 24] - -RFC 2518 WEBDAV February 1999 - - - In the case of allprop and propname, if a principal does not have the - right to know whether a particular property exists then the property - should be silently excluded from the response. - - The results of this method SHOULD NOT be cached. - -8.1.1 Example - Retrieving Named Properties - - >>Request - - PROPFIND /file HTTP/1.1 - Host: www.foo.bar - Content-type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - - - - - - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/file - - - - Box type A - - - J.J. Johnson - - - HTTP/1.1 200 OK - - - - - - -Goland, et al. Standards Track [Page 25] - -RFC 2518 WEBDAV February 1999 - - - HTTP/1.1 403 Forbidden - The user does not have access to - the DingALing property. - - - - There has been an access violation error. - - - - In this example, PROPFIND is executed on a non-collection resource - http://www.foo.bar/file. The propfind XML element specifies the name - of four properties whose values are being requested. In this case - only two properties were returned, since the principal issuing the - request did not have sufficient access rights to see the third and - fourth properties. 
- -8.1.2 Example - Using allprop to Retrieve All Properties - - >>Request - - PROPFIND /container/ HTTP/1.1 - Host: www.foo.bar - Depth: 1 - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/container/ - - - - Box type A - - - - - -Goland, et al. Standards Track [Page 26] - -RFC 2518 WEBDAV February 1999 - - - Hadrian - - - 1997-12-01T17:42:21-08:00 - - - Example collection - - - - - - - - - - - - - - HTTP/1.1 200 OK - - - - http://www.foo.bar/container/front.html - - - - Box type B - - - 1997-12-01T18:27:21-08:00 - - - Example HTML resource - - - 4525 - - - text/html - - - zzyzx - - - Monday, 12-Jan-98 09:25:56 GMT - - - - -Goland, et al. Standards Track [Page 27] - -RFC 2518 WEBDAV February 1999 - - - - - - - - - - - - - - - HTTP/1.1 200 OK - - - - - In this example, PROPFIND was invoked on the resource - http://www.foo.bar/container/ with a Depth header of 1, meaning the - request applies to the resource and its children, and a propfind XML - element containing the allprop XML element, meaning the request - should return the name and value of all properties defined on each - resource. - - The resource http://www.foo.bar/container/ has six properties defined - on it: - - http://www.foo.bar/boxschema/bigbox, - http://www.foo.bar/boxschema/author, DAV:creationdate, - DAV:displayname, DAV:resourcetype, and DAV:supportedlock. - - The last four properties are WebDAV-specific, defined in section 13. - Since GET is not supported on this resource, the get* properties - (e.g., getcontentlength) are not defined on this resource. 
The DAV- - specific properties assert that "container" was created on December - 1, 1997, at 5:42:21PM, in a time zone 8 hours west of GMT - (creationdate), has a name of "Example collection" (displayname), a - collection resource type (resourcetype), and supports exclusive write - and shared write locks (supportedlock). - - The resource http://www.foo.bar/container/front.html has nine - properties defined on it: - - http://www.foo.bar/boxschema/bigbox (another instance of the "bigbox" - property type), DAV:creationdate, DAV:displayname, - DAV:getcontentlength, DAV:getcontenttype, DAV:getetag, - DAV:getlastmodified, DAV:resourcetype, and DAV:supportedlock. - - - - -Goland, et al. Standards Track [Page 28] - -RFC 2518 WEBDAV February 1999 - - - The DAV-specific properties assert that "front.html" was created on - December 1, 1997, at 6:27:21PM, in a time zone 8 hours west of GMT - (creationdate), has a name of "Example HTML resource" (displayname), - a content length of 4525 bytes (getcontentlength), a MIME type of - "text/html" (getcontenttype), an entity tag of "zzyzx" (getetag), was - last modified on Monday, January 12, 1998, at 09:25:56 GMT - (getlastmodified), has an empty resource type, meaning that it is not - a collection (resourcetype), and supports both exclusive write and - shared write locks (supportedlock). - -8.1.3 Example - Using propname to Retrieve all Property Names - - >>Request - - PROPFIND /container/ HTTP/1.1 - Host: www.foo.bar - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/container/ - - - - - - - - - - HTTP/1.1 200 OK - - - - http://www.foo.bar/container/front.html - - - -Goland, et al. 
Standards Track [Page 29] - -RFC 2518 WEBDAV February 1999 - - - - - - - - - - - - - - - HTTP/1.1 200 OK - - - - - - In this example, PROPFIND is invoked on the collection resource - http://www.foo.bar/container/, with a propfind XML element containing - the propname XML element, meaning the name of all properties should - be returned. Since no Depth header is present, it assumes its - default value of "infinity", meaning the name of the properties on - the collection and all its progeny should be returned. - - Consistent with the previous example, resource - http://www.foo.bar/container/ has six properties defined on it, - http://www.foo.bar/boxschema/bigbox, - http://www.foo.bar/boxschema/author, DAV:creationdate, - DAV:displayname, DAV:resourcetype, and DAV:supportedlock. - - The resource http://www.foo.bar/container/index.html, a member of the - "container" collection, has nine properties defined on it, - http://www.foo.bar/boxschema/bigbox, DAV:creationdate, - DAV:displayname, DAV:getcontentlength, DAV:getcontenttype, - DAV:getetag, DAV:getlastmodified, DAV:resourcetype, and - DAV:supportedlock. - - This example also demonstrates the use of XML namespace scoping, and - the default namespace. Since the "xmlns" attribute does not contain - an explicit "shorthand name" (prefix) letter, the namespace applies - by default to all enclosed elements. Hence, all elements which do - not explicitly state the namespace to which they belong are members - of the "DAV:" namespace schema. - - - - - - - -Goland, et al. Standards Track [Page 30] - -RFC 2518 WEBDAV February 1999 - - -8.2 PROPPATCH - - The PROPPATCH method processes instructions specified in the request - body to set and/or remove properties defined on the resource - identified by the Request-URI. - - All DAV compliant resources MUST support the PROPPATCH method and - MUST process instructions that are specified using the - propertyupdate, set, and remove XML elements of the DAV schema. 
- Execution of the directives in this method is, of course, subject to - access control constraints. DAV compliant resources SHOULD support - the setting of arbitrary dead properties. - - The request message body of a PROPPATCH method MUST contain the - propertyupdate XML element. Instruction processing MUST occur in the - order instructions are received (i.e., from top to bottom). - Instructions MUST either all be executed or none executed. Thus if - any error occurs during processing all executed instructions MUST be - undone and a proper error result returned. Instruction processing - details can be found in the definition of the set and remove - instructions in section 12.13. - -8.2.1 Status Codes for use with 207 (Multi-Status) - - The following are examples of response codes one would expect to be - used in a 207 (Multi-Status) response for this method. Note, - however, that unless explicitly prohibited any 2/3/4/5xx series - response code may be used in a 207 (Multi-Status) response. - - 200 (OK) - The command succeeded. As there can be a mixture of sets - and removes in a body, a 201 (Created) seems inappropriate. - - 403 (Forbidden) - The client, for reasons the server chooses not to - specify, cannot alter one of the properties. - - 409 (Conflict) - The client has provided a value whose semantics are - not appropriate for the property. This includes trying to set read- - only properties. - - 423 (Locked) - The specified resource is locked and the client either - is not a lock owner or the lock type requires a lock token to be - submitted and the client did not submit it. - - 507 (Insufficient Storage) - The server did not have sufficient space - to record the property. - - - - - - -Goland, et al. 
Standards Track [Page 31] - -RFC 2518 WEBDAV February 1999 - - -8.2.2 Example - PROPPATCH - - >>Request - - PROPPATCH /bar.html HTTP/1.1 - Host: www.foo.com - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - - - Jim Whitehead - Roy Fielding - - - - - - - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.com/bar.html - - - HTTP/1.1 424 Failed Dependency - - - - HTTP/1.1 409 Conflict - - Copyright Owner can not be deleted or - altered. - - - - - -Goland, et al. Standards Track [Page 32] - -RFC 2518 WEBDAV February 1999 - - - In this example, the client requests the server to set the value of - the http://www.w3.com/standards/z39.50/Authors property, and to - remove the property http://www.w3.com/standards/z39.50/Copyright- - Owner. Since the Copyright-Owner property could not be removed, no - property modifications occur. The 424 (Failed Dependency) status - code for the Authors property indicates this action would have - succeeded if it were not for the conflict with removing the - Copyright-Owner property. - -8.3 MKCOL Method - - The MKCOL method is used to create a new collection. All DAV - compliant resources MUST support the MKCOL method. - -8.3.1 Request - - MKCOL creates a new collection resource at the location specified by - the Request-URI. If the resource identified by the Request-URI is - non-null then the MKCOL MUST fail. During MKCOL processing, a server - MUST make the Request-URI a member of its parent collection, unless - the Request-URI is "/". If no such ancestor exists, the method MUST - fail. When the MKCOL operation creates a new collection resource, - all ancestors MUST already exist, or the method MUST fail with a 409 - (Conflict) status code. For example, if a request to create - collection /a/b/c/d/ is made, and neither /a/b/ nor /a/b/c/ exists, - the request must fail. 
- - When MKCOL is invoked without a request body, the newly created - collection SHOULD have no members. - - A MKCOL request message may contain a message body. The behavior of - a MKCOL request when the body is present is limited to creating - collections, members of a collection, bodies of members and - properties on the collections or members. If the server receives a - MKCOL request entity type it does not support or understand it MUST - respond with a 415 (Unsupported Media Type) status code. The exact - behavior of MKCOL for various request media types is undefined in - this document, and will be specified in separate documents. - -8.3.2 Status Codes - - Responses from a MKCOL request MUST NOT be cached as MKCOL has non- - idempotent semantics. - - 201 (Created) - The collection or structured resource was created in - its entirety. - - - - - -Goland, et al. Standards Track [Page 33] - -RFC 2518 WEBDAV February 1999 - - - 403 (Forbidden) - This indicates at least one of two conditions: 1) - the server does not allow the creation of collections at the given - location in its namespace, or 2) the parent collection of the - Request-URI exists but cannot accept members. - - 405 (Method Not Allowed) - MKCOL can only be executed on a - deleted/non-existent resource. - - 409 (Conflict) - A collection cannot be made at the Request-URI until - one or more intermediate collections have been created. - - 415 (Unsupported Media Type)- The server does not support the request - type of the body. - - 507 (Insufficient Storage) - The resource does not have sufficient - space to record the state of the resource after the execution of this - method. - -8.3.3 Example - MKCOL - - This example creates a collection called /webdisc/xfiles/ on the - server www.server.org. 
- - >>Request - - MKCOL /webdisc/xfiles/ HTTP/1.1 - Host: www.server.org - - >>Response - - HTTP/1.1 201 Created - -8.4 GET, HEAD for Collections - - The semantics of GET are unchanged when applied to a collection, - since GET is defined as, "retrieve whatever information (in the form - of an entity) is identified by the Request-URI" [RFC2068]. GET when - applied to a collection may return the contents of an "index.html" - resource, a human-readable view of the contents of the collection, or - something else altogether. Hence it is possible that the result of a - GET on a collection will bear no correlation to the membership of the - collection. - - Similarly, since the definition of HEAD is a GET without a response - message body, the semantics of HEAD are unmodified when applied to - collection resources. - - - - - -Goland, et al. Standards Track [Page 34] - -RFC 2518 WEBDAV February 1999 - - -8.5 POST for Collections - - Since by definition the actual function performed by POST is - determined by the server and often depends on the particular - resource, the behavior of POST when applied to collections cannot be - meaningfully modified because it is largely undefined. Thus the - semantics of POST are unmodified when applied to a collection. - -8.6 DELETE - - 8.6.1 DELETE for Non-Collection Resources - - If the DELETE method is issued to a non-collection resource whose - URIs are an internal member of one or more collections, then during - DELETE processing a server MUST remove any URI for the resource - identified by the Request-URI from collections which contain it as a - member. - -8.6.2 DELETE for Collections - - The DELETE method on a collection MUST act as if a "Depth: infinity" - header was used on it. A client MUST NOT submit a Depth header with - a DELETE on a collection with any value but infinity. - - DELETE instructs that the collection specified in the Request-URI and - all resources identified by its internal member URIs are to be - deleted. 
- - If any resource identified by a member URI cannot be deleted then all - of the member's ancestors MUST NOT be deleted, so as to maintain - namespace consistency. - - Any headers included with DELETE MUST be applied in processing every - resource to be deleted. - - When the DELETE method has completed processing it MUST result in a - consistent namespace. - - If an error occurs with a resource other than the resource identified - in the Request-URI then the response MUST be a 207 (Multi-Status). - 424 (Failed Dependency) errors SHOULD NOT be in the 207 (Multi- - Status). They can be safely left out because the client will know - that the ancestors of a resource could not be deleted when the client - receives an error for the ancestor's progeny. Additionally 204 (No - Content) errors SHOULD NOT be returned in the 207 (Multi-Status). - The reason for this prohibition is that 204 (No Content) is the - default success code. - - - - -Goland, et al. Standards Track [Page 35] - -RFC 2518 WEBDAV February 1999 - - -8.6.2.1 Example - DELETE - - >>Request - - DELETE /container/ HTTP/1.1 - Host: www.foo.bar - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/container/resource3 - HTTP/1.1 423 Locked - - - - In this example the attempt to delete - http://www.foo.bar/container/resource3 failed because it is locked, - and no lock token was submitted with the request. Consequently, the - attempt to delete http://www.foo.bar/container/ also failed. Thus the - client knows that the attempt to delete http://www.foo.bar/container/ - must have also failed since the parent can not be deleted unless its - child has also been deleted. Even though a Depth header has not been - included, a depth of infinity is assumed because the method is on a - collection. 
- -8.7 PUT - -8.7.1 PUT for Non-Collection Resources - - A PUT performed on an existing resource replaces the GET response - entity of the resource. Properties defined on the resource may be - recomputed during PUT processing but are not otherwise affected. For - example, if a server recognizes the content type of the request body, - it may be able to automatically extract information that could be - profitably exposed as properties. - - A PUT that would result in the creation of a resource without an - appropriately scoped parent collection MUST fail with a 409 - (Conflict). - - - - - - -Goland, et al. Standards Track [Page 36] - -RFC 2518 WEBDAV February 1999 - - -8.7.2 PUT for Collections - - As defined in the HTTP/1.1 specification [RFC2068], the "PUT method - requests that the enclosed entity be stored under the supplied - Request-URI." Since submission of an entity representing a - collection would implicitly encode creation and deletion of - resources, this specification intentionally does not define a - transmission format for creating a collection using PUT. Instead, - the MKCOL method is defined to create collections. - - When the PUT operation creates a new non-collection resource all - ancestors MUST already exist. If all ancestors do not exist, the - method MUST fail with a 409 (Conflict) status code. For example, if - resource /a/b/c/d.html is to be created and /a/b/c/ does not exist, - then the request must fail. - -8.8 COPY Method - - The COPY method creates a duplicate of the source resource, - identified by the Request-URI, in the destination resource, - identified by the URI in the Destination header. The Destination - header MUST be present. The exact behavior of the COPY method - depends on the type of the source resource. - - All WebDAV compliant resources MUST support the COPY method. - However, support for the COPY method does not guarantee the ability - to copy a resource. 
For example, separate programs may control - resources on the same server. As a result, it may not be possible to - copy a resource to a location that appears to be on the same server. - -8.8.1 COPY for HTTP/1.1 resources - - When the source resource is not a collection the result of the COPY - method is the creation of a new resource at the destination whose - state and behavior match that of the source resource as closely as - possible. After a successful COPY invocation, all properties on the - source resource MUST be duplicated on the destination resource, - subject to modifying headers and XML elements, following the - definition for copying properties. Since the environment at the - destination may be different than at the source due to factors - outside the scope of control of the server, such as the absence of - resources required for correct operation, it may not be possible to - completely duplicate the behavior of the resource at the destination. - Subsequent alterations to the destination resource will not modify - the source resource. Subsequent alterations to the source resource - will not modify the destination resource. - - - - - -Goland, et al. Standards Track [Page 37] - -RFC 2518 WEBDAV February 1999 - - -8.8.2. COPY for Properties - - The following section defines how properties on a resource are - handled during a COPY operation. - - Live properties SHOULD be duplicated as identically behaving live - properties at the destination resource. If a property cannot be - copied live, then its value MUST be duplicated, octet-for-octet, in - an identically named, dead property on the destination resource - subject to the effects of the propertybehavior XML element. - - The propertybehavior XML element can specify that properties are - copied on best effort, that all live properties must be successfully - copied or the method must fail, or that a specified list of live - properties must be successfully copied or the method must fail. 
The - propertybehavior XML element is defined in section 12.12. - -8.8.3 COPY for Collections - - The COPY method on a collection without a Depth header MUST act as if - a Depth header with value "infinity" was included. A client may - submit a Depth header on a COPY on a collection with a value of "0" - or "infinity". DAV compliant servers MUST support the "0" and - "infinity" Depth header behaviors. - - A COPY of depth infinity instructs that the collection resource - identified by the Request-URI is to be copied to the location - identified by the URI in the Destination header, and all its internal - member resources are to be copied to a location relative to it, - recursively through all levels of the collection hierarchy. - - A COPY of "Depth: 0" only instructs that the collection and its - properties but not resources identified by its internal member URIs, - are to be copied. - - Any headers included with a COPY MUST be applied in processing every - resource to be copied with the exception of the Destination header. - - The Destination header only specifies the destination URI for the - Request-URI. When applied to members of the collection identified by - the Request-URI the value of Destination is to be modified to reflect - the current location in the hierarchy. So, if the Request- URI is - /a/ with Host header value http://fun.com/ and the Destination is - http://fun.com/b/ then when http://fun.com/a/c/d is processed it must - use a Destination of http://fun.com/b/c/d. - - - - - - -Goland, et al. Standards Track [Page 38] - -RFC 2518 WEBDAV February 1999 - - - When the COPY method has completed processing it MUST have created a - consistent namespace at the destination (see section 5.1 for the - definition of namespace consistency). 
However, if an error occurs - while copying an internal collection, the server MUST NOT copy any - resources identified by members of this collection (i.e., the server - must skip this subtree), as this would create an inconsistent - namespace. After detecting an error, the COPY operation SHOULD try to - finish as much of the original copy operation as possible (i.e., the - server should still attempt to copy other subtrees and their members, - that are not descendents of an error-causing collection). So, for - example, if an infinite depth copy operation is performed on - collection /a/, which contains collections /a/b/ and /a/c/, and an - error occurs copying /a/b/, an attempt should still be made to copy - /a/c/. Similarly, after encountering an error copying a non- - collection resource as part of an infinite depth copy, the server - SHOULD try to finish as much of the original copy operation as - possible. - - If an error in executing the COPY method occurs with a resource other - than the resource identified in the Request-URI then the response - MUST be a 207 (Multi-Status). - - The 424 (Failed Dependency) status code SHOULD NOT be returned in the - 207 (Multi-Status) response from a COPY method. These responses can - be safely omitted because the client will know that the progeny of a - resource could not be copied when the client receives an error for - the parent. Additionally 201 (Created)/204 (No Content) status codes - SHOULD NOT be returned as values in 207 (Multi-Status) responses from - COPY methods. They, too, can be safely omitted because they are the - default success codes. - -8.8.4 COPY and the Overwrite Header - - If a resource exists at the destination and the Overwrite header is - "T" then prior to performing the copy the server MUST perform a - DELETE with "Depth: infinity" on the destination resource. If the - Overwrite header is set to "F" then the operation will fail. 
- -8.8.5 Status Codes - - 201 (Created) - The source resource was successfully copied. The - copy operation resulted in the creation of a new resource. - - 204 (No Content) - The source resource was successfully copied to a - pre-existing destination resource. - - 403 (Forbidden) _ The source and destination URIs are the same. - - - - -Goland, et al. Standards Track [Page 39] - -RFC 2518 WEBDAV February 1999 - - - 409 (Conflict) _ A resource cannot be created at the destination - until one or more intermediate collections have been created. - - 412 (Precondition Failed) - The server was unable to maintain the - liveness of the properties listed in the propertybehavior XML element - or the Overwrite header is "F" and the state of the destination - resource is non-null. - - 423 (Locked) - The destination resource was locked. - - 502 (Bad Gateway) - This may occur when the destination is on another - server and the destination server refuses to accept the resource. - - 507 (Insufficient Storage) - The destination resource does not have - sufficient space to record the state of the resource after the - execution of this method. - -8.8.6 Example - COPY with Overwrite - - This example shows resource - http://www.ics.uci.edu/~fielding/index.html being copied to the - location http://www.ics.uci.edu/users/f/fielding/index.html. The 204 - (No Content) status code indicates the existing resource at the - destination was overwritten. - - >>Request - - COPY /~fielding/index.html HTTP/1.1 - Host: www.ics.uci.edu - Destination: http://www.ics.uci.edu/users/f/fielding/index.html - - >>Response - - HTTP/1.1 204 No Content - -8.8.7 Example - COPY with No Overwrite - - The following example shows the same copy operation being performed, - but with the Overwrite header set to "F." A response of 412 - (Precondition Failed) is returned because the destination resource - has a non-null state. 
- - >>Request - - COPY /~fielding/index.html HTTP/1.1 - Host: www.ics.uci.edu - Destination: http://www.ics.uci.edu/users/f/fielding/index.html - Overwrite: F - - - -Goland, et al. Standards Track [Page 40] - -RFC 2518 WEBDAV February 1999 - - - >>Response - - HTTP/1.1 412 Precondition Failed - -8.8.8 Example - COPY of a Collection - - >>Request - - COPY /container/ HTTP/1.1 - Host: www.foo.bar - Destination: http://www.foo.bar/othercontainer/ - Depth: infinity - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - * - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/othercontainer/R2/ - HTTP/1.1 412 Precondition Failed - - - - The Depth header is unnecessary as the default behavior of COPY on a - collection is to act as if a "Depth: infinity" header had been - submitted. In this example most of the resources, along with the - collection, were copied successfully. However the collection R2 - failed, most likely due to a problem with maintaining the liveness of - properties (this is specified by the propertybehavior XML element). - Because there was an error copying R2, none of R2's members were - copied. However no errors were listed for those members due to the - error minimization rules given in section 8.8.3. - - - - - - - - -Goland, et al. Standards Track [Page 41] - -RFC 2518 WEBDAV February 1999 - - -8.9 MOVE Method - - The MOVE operation on a non-collection resource is the logical - equivalent of a copy (COPY), followed by consistency maintenance - processing, followed by a delete of the source, where all three - actions are performed atomically. The consistency maintenance step - allows the server to perform updates caused by the move, such as - updating all URIs other than the Request-URI which identify the - source resource, to point to the new destination resource. 
- Consequently, the Destination header MUST be present on all MOVE - methods and MUST follow all COPY requirements for the COPY part of - the MOVE method. All DAV compliant resources MUST support the MOVE - method. However, support for the MOVE method does not guarantee the - ability to move a resource to a particular destination. - - For example, separate programs may actually control different sets of - resources on the same server. Therefore, it may not be possible to - move a resource within a namespace that appears to belong to the same - server. - - If a resource exists at the destination, the destination resource - will be DELETEd as a side-effect of the MOVE operation, subject to - the restrictions of the Overwrite header. - -8.9.1 MOVE for Properties - - The behavior of properties on a MOVE, including the effects of the - propertybehavior XML element, MUST be the same as specified in - section 8.8.2. - -8.9.2 MOVE for Collections - - A MOVE with "Depth: infinity" instructs that the collection - identified by the Request-URI be moved to the URI specified in the - Destination header, and all resources identified by its internal - member URIs are to be moved to locations relative to it, recursively - through all levels of the collection hierarchy. - - The MOVE method on a collection MUST act as if a "Depth: infinity" - header was used on it. A client MUST NOT submit a Depth header on a - MOVE on a collection with any value but "infinity". - - Any headers included with MOVE MUST be applied in processing every - resource to be moved with the exception of the Destination header. - - The behavior of the Destination header is the same as given for COPY - on collections. - - - - -Goland, et al. Standards Track [Page 42] - -RFC 2518 WEBDAV February 1999 - - - When the MOVE method has completed processing it MUST have created a - consistent namespace at both the source and destination (see section - 5.1 for the definition of namespace consistency). 
However, if an - error occurs while moving an internal collection, the server MUST NOT - move any resources identified by members of the failed collection - (i.e., the server must skip the error-causing subtree), as this would - create an inconsistent namespace. In this case, after detecting the - error, the move operation SHOULD try to finish as much of the - original move as possible (i.e., the server should still attempt to - move other subtrees and the resources identified by their members, - that are not descendents of an error-causing collection). So, for - example, if an infinite depth move is performed on collection /a/, - which contains collections /a/b/ and /a/c/, and an error occurs - moving /a/b/, an attempt should still be made to try moving /a/c/. - Similarly, after encountering an error moving a non-collection - resource as part of an infinite depth move, the server SHOULD try to - finish as much of the original move operation as possible. - - If an error occurs with a resource other than the resource identified - in the Request-URI then the response MUST be a 207 (Multi-Status). - - The 424 (Failed Dependency) status code SHOULD NOT be returned in the - 207 (Multi-Status) response from a MOVE method. These errors can be - safely omitted because the client will know that the progeny of a - resource could not be moved when the client receives an error for the - parent. Additionally 201 (Created)/204 (No Content) responses SHOULD - NOT be returned as values in 207 (Multi-Status) responses from a - MOVE. These responses can be safely omitted because they are the - default success codes. - -8.9.3 MOVE and the Overwrite Header - - If a resource exists at the destination and the Overwrite header is - "T" then prior to performing the move the server MUST perform a - DELETE with "Depth: infinity" on the destination resource. If the - Overwrite header is set to "F" then the operation will fail. 
- -8.9.4 Status Codes - - 201 (Created) - The source resource was successfully moved, and a new - resource was created at the destination. - - 204 (No Content) - The source resource was successfully moved to a - pre-existing destination resource. - - 403 (Forbidden) _ The source and destination URIs are the same. - - - - - -Goland, et al. Standards Track [Page 43] - -RFC 2518 WEBDAV February 1999 - - - 409 (Conflict) _ A resource cannot be created at the destination - until one or more intermediate collections have been created. - - 412 (Precondition Failed) - The server was unable to maintain the - liveness of the properties listed in the propertybehavior XML element - or the Overwrite header is "F" and the state of the destination - resource is non-null. - - 423 (Locked) - The source or the destination resource was locked. - - 502 (Bad Gateway) - This may occur when the destination is on another - server and the destination server refuses to accept the resource. - -8.9.5 Example - MOVE of a Non-Collection - - This example shows resource - http://www.ics.uci.edu/~fielding/index.html being moved to the - location http://www.ics.uci.edu/users/f/fielding/index.html. The - contents of the destination resource would have been overwritten if - the destination resource had been non-null. In this case, since - there was nothing at the destination resource, the response code is - 201 (Created). - - >>Request - - MOVE /~fielding/index.html HTTP/1.1 - Host: www.ics.uci.edu - Destination: http://www.ics.uci.edu/users/f/fielding/index.html - - >>Response - - HTTP/1.1 201 Created - Location: http://www.ics.uci.edu/users/f/fielding/index.html - - -8.9.6 Example - MOVE of a Collection - - >>Request - - MOVE /container/ HTTP/1.1 - Host: www.foo.bar - Destination: http://www.foo.bar/othercontainer/ - Overwrite: F - If: () - () - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - -Goland, et al. 
Standards Track [Page 44] - -RFC 2518 WEBDAV February 1999 - - - - - * - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/othercontainer/C2/ - HTTP/1.1 423 Locked - - - - In this example the client has submitted a number of lock tokens with - the request. A lock token will need to be submitted for every - resource, both source and destination, anywhere in the scope of the - method, that is locked. In this case the proper lock token was not - submitted for the destination http://www.foo.bar/othercontainer/C2/. - This means that the resource /container/C2/ could not be moved. - Because there was an error copying /container/C2/, none of - /container/C2's members were copied. However no errors were listed - for those members due to the error minimization rules given in - section 8.8.3. User agent authentication has previously occurred via - a mechanism outside the scope of the HTTP protocol, in an underlying - transport layer. - -8.10 LOCK Method - - The following sections describe the LOCK method, which is used to - take out a lock of any access type. These sections on the LOCK - method describe only those semantics that are specific to the LOCK - method and are independent of the access type of the lock being - requested. - - Any resource which supports the LOCK method MUST, at minimum, support - the XML request and response formats defined herein. - - - - - - - - - -Goland, et al. Standards Track [Page 45] - -RFC 2518 WEBDAV February 1999 - - -8.10.1 Operation - - A LOCK method invocation creates the lock specified by the lockinfo - XML element on the Request-URI. Lock method requests SHOULD have a - XML request body which contains an owner XML element for this lock - request, unless this is a refresh request. The LOCK request may have - a Timeout header. 
- - Clients MUST assume that locks may arbitrarily disappear at any time, - regardless of the value given in the Timeout header. The Timeout - header only indicates the behavior of the server if "extraordinary" - circumstances do not occur. For example, an administrator may remove - a lock at any time or the system may crash in such a way that it - loses the record of the lock's existence. The response MUST contain - the value of the lockdiscovery property in a prop XML element. - - In order to indicate the lock token associated with a newly created - lock, a Lock-Token response header MUST be included in the response - for every successful LOCK request for a new lock. Note that the - Lock-Token header would not be returned in the response for a - successful refresh LOCK request because a new lock was not created. - -8.10.2 The Effect of Locks on Properties and Collections - - The scope of a lock is the entire state of the resource, including - its body and associated properties. As a result, a lock on a - resource MUST also lock the resource's properties. - - For collections, a lock also affects the ability to add or remove - members. The nature of the effect depends upon the type of access - control involved. - -8.10.3 Locking Replicated Resources - - A resource may be made available through more than one URI. However - locks apply to resources, not URIs. Therefore a LOCK request on a - resource MUST NOT succeed if can not be honored by all the URIs - through which the resource is addressable. - -8.10.4 Depth and Locking - - The Depth header may be used with the LOCK method. Values other than - 0 or infinity MUST NOT be used with the Depth header on a LOCK - method. All resources that support the LOCK method MUST support the - Depth header. - - A Depth header of value 0 means to just lock the resource specified - by the Request-URI. - - - -Goland, et al. 
Standards Track [Page 46] - -RFC 2518 WEBDAV February 1999 - - - If the Depth header is set to infinity then the resource specified in - the Request-URI along with all its internal members, all the way down - the hierarchy, are to be locked. A successful result MUST return a - single lock token which represents all the resources that have been - locked. If an UNLOCK is successfully executed on this token, all - associated resources are unlocked. If the lock cannot be granted to - all resources, a 409 (Conflict) status code MUST be returned with a - response entity body containing a multistatus XML element describing - which resource(s) prevented the lock from being granted. Hence, - partial success is not an option. Either the entire hierarchy is - locked or no resources are locked. - - If no Depth header is submitted on a LOCK request then the request - MUST act as if a "Depth:infinity" had been submitted. - -8.10.5 Interaction with other Methods - - The interaction of a LOCK with various methods is dependent upon the - lock type. However, independent of lock type, a successful DELETE of - a resource MUST cause all of its locks to be removed. - -8.10.6 Lock Compatibility Table - - The table below describes the behavior that occurs when a lock - request is made on a resource. - - Current lock state/ | Shared Lock | Exclusive - Lock request | | Lock - =====================+=================+============== - None | True | True - ---------------------+-----------------+-------------- - Shared Lock | True | False - ---------------------+-----------------+-------------- - Exclusive Lock | False | False* - ------------------------------------------------------ - - Legend: True = lock may be granted. False = lock MUST NOT be - granted. *=It is illegal for a principal to request the same lock - twice. - - The current lock state of a resource is given in the leftmost column, - and lock requests are listed in the first row. 
The intersection of a - row and column gives the result of a lock request. For example, if a - shared lock is held on a resource, and an exclusive lock is - requested, the table entry is "false", indicating the lock must not - be granted. - - - - - -Goland, et al. Standards Track [Page 47] - -RFC 2518 WEBDAV February 1999 - - -8.10.7 Status Codes - - 200 (OK) - The lock request succeeded and the value of the - lockdiscovery property is included in the body. - - 412 (Precondition Failed) - The included lock token was not - enforceable on this resource or the server could not satisfy the - request in the lockinfo XML element. - - 423 (Locked) - The resource is locked, so the method has been - rejected. - -8.10.8 Example - Simple Lock Request - - >>Request - - LOCK /workspace/webdav/proposal.doc HTTP/1.1 - Host: webdav.sb.aol.com - Timeout: Infinite, Second-4100000000 - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - Authorization: Digest username="ejw", - realm="ejw@webdav.sb.aol.com", nonce="...", - uri="/workspace/webdav/proposal.doc", - response="...", opaque="..." - - - - - - - http://www.ics.uci.edu/~ejw/contact.html - - - - >>Response - - HTTP/1.1 200 OK - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - - - - Infinity - - - -Goland, et al. Standards Track [Page 48] - -RFC 2518 WEBDAV February 1999 - - - - - http://www.ics.uci.edu/~ejw/contact.html - - - Second-604800 - - - opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4 - - - - - - - This example shows the successful creation of an exclusive write lock - on resource http://webdav.sb.aol.com/workspace/webdav/proposal.doc. - The resource http://www.ics.uci.edu/~ejw/contact.html contains - contact information for the owner of the lock. The server has an - activity-based timeout policy in place on this resource, which causes - the lock to automatically be removed after 1 week (604800 seconds). 
- Note that the nonce, response, and opaque fields have not been - calculated in the Authorization request header. - -8.10.9 Example - Refreshing a Write Lock - - >>Request - - LOCK /workspace/webdav/proposal.doc HTTP/1.1 - Host: webdav.sb.aol.com - Timeout: Infinite, Second-4100000000 - If: () - Authorization: Digest username="ejw", - realm="ejw@webdav.sb.aol.com", nonce="...", - uri="/workspace/webdav/proposal.doc", - response="...", opaque="..." - - >>Response - - HTTP/1.1 200 OK - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - - - - - -Goland, et al. Standards Track [Page 49] - -RFC 2518 WEBDAV February 1999 - - - - Infinity - - - http://www.ics.uci.edu/~ejw/contact.html - - - Second-604800 - - - opaquelocktoken:e71d4fae-5dec-22d6-fea5-00a0c91e6be4 - - - - - - - This request would refresh the lock, resetting any time outs. Notice - that the client asked for an infinite time out but the server choose - to ignore the request. In this example, the nonce, response, and - opaque fields have not been calculated in the Authorization request - header. - -8.10.10 Example - Multi-Resource Lock Request - - >>Request - - LOCK /webdav/ HTTP/1.1 - Host: webdav.sb.aol.com - Timeout: Infinite, Second-4100000000 - Depth: infinity - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - Authorization: Digest username="ejw", - realm="ejw@webdav.sb.aol.com", nonce="...", - uri="/workspace/webdav/proposal.doc", - response="...", opaque="..." - - - - - - - http://www.ics.uci.edu/~ejw/contact.html - - - - >>Response - - - -Goland, et al. Standards Track [Page 50] - -RFC 2518 WEBDAV February 1999 - - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://webdav.sb.aol.com/webdav/secret - HTTP/1.1 403 Forbidden - - - http://webdav.sb.aol.com/webdav/ - - - HTTP/1.1 424 Failed Dependency - - - - - This example shows a request for an exclusive write lock on a - collection and all its children. 
In this request, the client has - specified that it desires an infinite length lock, if available, - otherwise a timeout of 4.1 billion seconds, if available. The request - entity body contains the contact information for the principal taking - out the lock, in this case a web page URL. - - The error is a 403 (Forbidden) response on the resource - http://webdav.sb.aol.com/webdav/secret. Because this resource could - not be locked, none of the resources were locked. Note also that the - lockdiscovery property for the Request-URI has been included as - required. In this example the lockdiscovery property is empty which - means that there are no outstanding locks on the resource. - - In this example, the nonce, response, and opaque fields have not been - calculated in the Authorization request header. - -8.11 UNLOCK Method - - The UNLOCK method removes the lock identified by the lock token in - the Lock-Token request header from the Request-URI, and all other - resources included in the lock. If all resources which have been - locked under the submitted lock token can not be unlocked then the - UNLOCK request MUST fail. - - Any DAV compliant resource which supports the LOCK method MUST - support the UNLOCK method. - - - - - -Goland, et al. Standards Track [Page 51] - -RFC 2518 WEBDAV February 1999 - - -8.11.1 Example - UNLOCK - - >>Request - - UNLOCK /workspace/webdav/info.doc HTTP/1.1 - Host: webdav.sb.aol.com - Lock-Token: - Authorization: Digest username="ejw", - realm="ejw@webdav.sb.aol.com", nonce="...", - uri="/workspace/webdav/proposal.doc", - response="...", opaque="..." - - >>Response - - HTTP/1.1 204 No Content - - In this example, the lock identified by the lock token - "opaquelocktoken:a515cfa4-5da4-22e1-f5b5-00a0451e6bf7" is - successfully removed from the resource - http://webdav.sb.aol.com/workspace/webdav/info.doc. If this lock - included more than just one resource, the lock is removed from all - resources included in the lock. 
The 204 (No Content) status code is - used instead of 200 (OK) because there is no response entity body. - - In this example, the nonce, response, and opaque fields have not been - calculated in the Authorization request header. - -9 HTTP Headers for Distributed Authoring - -9.1 DAV Header - - DAV = "DAV" ":" "1" ["," "2"] ["," 1#extend] - - This header indicates that the resource supports the DAV schema and - protocol as specified. All DAV compliant resources MUST return the - DAV header on all OPTIONS responses. - - The value is a list of all compliance classes that the resource - supports. Note that above a comma has already been added to the 2. - This is because a resource can not be level 2 compliant unless it is - also level 1 compliant. Please refer to section 15 for more details. - In general, however, support for one compliance class does not entail - support for any other. - -9.2 Depth Header - - Depth = "Depth" ":" ("0" | "1" | "infinity") - - - - -Goland, et al. Standards Track [Page 52] - -RFC 2518 WEBDAV February 1999 - - - The Depth header is used with methods executed on resources which - could potentially have internal members to indicate whether the - method is to be applied only to the resource ("Depth: 0"), to the - resource and its immediate children, ("Depth: 1"), or the resource - and all its progeny ("Depth: infinity"). - - The Depth header is only supported if a method's definition - explicitly provides for such support. - - The following rules are the default behavior for any method that - supports the Depth header. A method may override these defaults by - defining different behavior in its definition. - - Methods which support the Depth header may choose not to support all - of the header's values and may define, on a case by case basis, the - behavior of the method if a Depth header is not present. 
For example, - the MOVE method only supports "Depth: infinity" and if a Depth header - is not present will act as if a "Depth: infinity" header had been - applied. - - Clients MUST NOT rely upon methods executing on members of their - hierarchies in any particular order or on the execution being atomic - unless the particular method explicitly provides such guarantees. - - Upon execution, a method with a Depth header will perform as much of - its assigned task as possible and then return a response specifying - what it was able to accomplish and what it failed to do. - - So, for example, an attempt to COPY a hierarchy may result in some of - the members being copied and some not. - - Any headers on a method that has a defined interaction with the Depth - header MUST be applied to all resources in the scope of the method - except where alternative behavior is explicitly defined. For example, - an If-Match header will have its value applied against every resource - in the method's scope and will cause the method to fail if the header - fails to match. - - If a resource, source or destination, within the scope of the method - with a Depth header is locked in such a way as to prevent the - successful execution of the method, then the lock token for that - resource MUST be submitted with the request in the If request header. - - The Depth header only specifies the behavior of the method with - regards to internal children. If a resource does not have internal - children then the Depth header MUST be ignored. - - - - - -Goland, et al. Standards Track [Page 53] - -RFC 2518 WEBDAV February 1999 - - - Please note, however, that it is always an error to submit a value - for the Depth header that is not allowed by the method's definition. - Thus submitting a "Depth: 1" on a COPY, even if the resource does not - have internal members, will result in a 400 (Bad Request). 
The method - should fail not because the resource doesn't have internal members, - but because of the illegal value in the header. - -9.3 Destination Header - - Destination = "Destination" ":" absoluteURI - - The Destination header specifies the URI which identifies a - destination resource for methods such as COPY and MOVE, which take - two URIs as parameters. Note that the absoluteURI production is - defined in [RFC2396]. - -9.4 If Header - - If = "If" ":" ( 1*No-tag-list | 1*Tagged-list) - No-tag-list = List - Tagged-list = Resource 1*List - Resource = Coded-URL - List = "(" 1*(["Not"](State-token | "[" entity-tag "]")) ")" - State-token = Coded-URL - Coded-URL = "<" absoluteURI ">" - - The If header is intended to have similar functionality to the If- - Match header defined in section 14.25 of [RFC2068]. However the If - header is intended for use with any URI which represents state - information, referred to as a state token, about a resource as well - as ETags. A typical example of a state token is a lock token, and - lock tokens are the only state tokens defined in this specification. - - All DAV compliant resources MUST honor the If header. - - The If header's purpose is to describe a series of state lists. If - the state of the resource to which the header is applied does not - match any of the specified state lists then the request MUST fail - with a 412 (Precondition Failed). If one of the described state - lists matches the state of the resource then the request may succeed. - - Note that the absoluteURI production is defined in [RFC2396]. - - - - - - - - - -Goland, et al. Standards Track [Page 54] - -RFC 2518 WEBDAV February 1999 - - -9.4.1 No-tag-list Production - - The No-tag-list production describes a series of state tokens and - ETags. If multiple No-tag-list productions are used then one only - needs to match the state of the resource for the method to be allowed - to continue. 
- - If a method, due to the presence of a Depth or Destination header, is - applied to multiple resources then the No-tag-list production MUST be - applied to each resource the method is applied to. - -9.4.1.1 Example - No-tag-list If Header - - If: ( ["I am an ETag"]) (["I am another - ETag"]) - - The previous header would require that any resources within the scope - of the method must either be locked with the specified lock token and - in the state identified by the "I am an ETag" ETag or in the state - identified by the second ETag "I am another ETag". To put the matter - more plainly one can think of the previous If header as being in the - form (or (and ["I am an ETag"]) (and - ["I am another ETag"])). - -9.4.2 Tagged-list Production - - The tagged-list production scopes a list production. That is, it - specifies that the lists following the resource specification only - apply to the specified resource. The scope of the resource - production begins with the list production immediately following the - resource production and ends with the next resource production, if - any. - - When the If header is applied to a particular resource, the Tagged- - list productions MUST be searched to determine if any of the listed - resources match the operand resource(s) for the current method. If - none of the resource productions match the current resource then the - header MUST be ignored. If one of the resource productions does - match the name of the resource under consideration then the list - productions following the resource production MUST be applied to the - resource in the manner specified in the previous section. - - The same URI MUST NOT appear more than once in a resource production - in an If header. - - - - - - - -Goland, et al. 
Standards Track [Page 55] - -RFC 2518 WEBDAV February 1999 - - -9.4.2.1 Example - Tagged List If header - - COPY /resource1 HTTP/1.1 - Host: www.foo.bar - Destination: http://www.foo.bar/resource2 - If: ( - [W/"A weak ETag"]) (["strong ETag"]) - (["another strong ETag"]) - - In this example http://www.foo.bar/resource1 is being copied to - http://www.foo.bar/resource2. When the method is first applied to - http://www.foo.bar/resource1, resource1 must be in the state - specified by "( [W/"A weak ETag"]) - (["strong ETag"])", that is, it either must be locked with a lock - token of "locktoken:a-write-lock-token" and have a weak entity tag - W/"A weak ETag" or it must have a strong entity tag "strong ETag". - - That is the only success condition since the resource - http://www.bar.bar/random never has the method applied to it (the - only other resource listed in the If header) and - http://www.foo.bar/resource2 is not listed in the If header. - -9.4.3 not Production - - Every state token or ETag is either current, and hence describes the - state of a resource, or is not current, and does not describe the - state of a resource. The boolean operation of matching a state token - or ETag to the current state of a resource thus resolves to a true or - false value. The not production is used to reverse that value. The - scope of the not production is the state-token or entity-tag - immediately following it. - - If: (Not ) - - When submitted with a request, this If header requires that all - operand resources must not be locked with locktoken:write1 and must - be locked with locktoken:write2. - -9.4.4 Matching Function - - When performing If header processing, the definition of a matching - state token or entity tag is as follows. - - Matching entity tag: Where the entity tag matches an entity tag - associated with that resource. - - Matching state token: Where there is an exact match between the state - token in the If header and any state token on the resource. 
- - - -Goland, et al. Standards Track [Page 56] - -RFC 2518 WEBDAV February 1999 - - -9.4.5 If Header and Non-DAV Compliant Proxies - - Non-DAV compliant proxies will not honor the If header, since they - will not understand the If header, and HTTP requires non-understood - headers to be ignored. When communicating with HTTP/1.1 proxies, the - "Cache-Control: no-cache" request header MUST be used so as to - prevent the proxy from improperly trying to service the request from - its cache. When dealing with HTTP/1.0 proxies the "Pragma: no-cache" - request header MUST be used for the same reason. - -9.5 Lock-Token Header - - Lock-Token = "Lock-Token" ":" Coded-URL - - The Lock-Token request header is used with the UNLOCK method to - identify the lock to be removed. The lock token in the Lock-Token - request header MUST identify a lock that contains the resource - identified by Request-URI as a member. - - The Lock-Token response header is used with the LOCK method to - indicate the lock token created as a result of a successful LOCK - request to create a new lock. - -9.6 Overwrite Header - - Overwrite = "Overwrite" ":" ("T" | "F") - - The Overwrite header specifies whether the server should overwrite - the state of a non-null destination resource during a COPY or MOVE. - A value of "F" states that the server must not perform the COPY or - MOVE operation if the state of the destination resource is non-null. - If the overwrite header is not included in a COPY or MOVE request - then the resource MUST treat the request as if it has an overwrite - header of value "T". While the Overwrite header appears to duplicate - the functionality of the If-Match: * header of HTTP/1.1, If-Match - applies only to the Request-URI, and not to the Destination of a COPY - or MOVE. - - If a COPY or MOVE is not performed due to the value of the Overwrite - header, the method MUST fail with a 412 (Precondition Failed) status - code. 
- - All DAV compliant resources MUST support the Overwrite header. - -9.7 Status-URI Response Header - - The Status-URI response header may be used with the 102 (Processing) - status code to inform the client as to the status of a method. - - - -Goland, et al. Standards Track [Page 57] - -RFC 2518 WEBDAV February 1999 - - - Status-URI = "Status-URI" ":" *(Status-Code Coded-URL) ; Status-Code - is defined in 6.1.1 of [RFC2068] - - The URIs listed in the header are source resources which have been - affected by the outstanding method. The status code indicates the - resolution of the method on the identified resource. So, for - example, if a MOVE method on a collection is outstanding and a 102 - (Processing) response with a Status-URI response header is returned, - the included URIs will indicate resources that have had move - attempted on them and what the result was. - -9.8 Timeout Request Header - - TimeOut = "Timeout" ":" 1#TimeType - TimeType = ("Second-" DAVTimeOutVal | "Infinite" | Other) - DAVTimeOutVal = 1*digit - Other = "Extend" field-value ; See section 4.2 of [RFC2068] - - Clients may include Timeout headers in their LOCK requests. However, - the server is not required to honor or even consider these requests. - Clients MUST NOT submit a Timeout request header with any method - other than a LOCK method. - - A Timeout request header MUST contain at least one TimeType and may - contain multiple TimeType entries. The purpose of listing multiple - TimeType entries is to indicate multiple different values and value - types that are acceptable to the client. The client lists the - TimeType entries in order of preference. - - Timeout response values MUST use a Second value, Infinite, or a - TimeType the client has indicated familiarity with. The server may - assume a client is familiar with any TimeType submitted in a Timeout - header. 
- - The "Second" TimeType specifies the number of seconds that will - elapse between granting of the lock at the server, and the automatic - removal of the lock. The timeout value for TimeType "Second" MUST - NOT be greater than 2^32-1. - - The timeout counter SHOULD be restarted any time an owner of the lock - sends a method to any member of the lock, including unsupported - methods, or methods which are unsuccessful. However the lock MUST be - refreshed if a refresh LOCK method is successfully received. - - If the timeout expires then the lock may be lost. Specifically, if - the server wishes to harvest the lock upon time-out, the server - SHOULD act as if an UNLOCK method was executed by the server on the - resource using the lock token of the timed-out lock, performed with - - - -Goland, et al. Standards Track [Page 58] - -RFC 2518 WEBDAV February 1999 - - - its override authority. Thus logs should be updated with the - disposition of the lock, notifications should be sent, etc., just as - they would be for an UNLOCK request. - - Servers are advised to pay close attention to the values submitted by - clients, as they will be indicative of the type of activity the - client intends to perform. For example, an applet running in a - browser may need to lock a resource, but because of the instability - of the environment within which the applet is running, the applet may - be turned off without warning. As a result, the applet is likely to - ask for a relatively small timeout value so that if the applet dies, - the lock can be quickly harvested. However, a document management - system is likely to ask for an extremely long timeout because its - user may be planning on going off-line. - - A client MUST NOT assume that just because the time-out has expired - the lock has been lost. - -10 Status Code Extensions to HTTP/1.1 - - The following status codes are added to those defined in HTTP/1.1 - [RFC2068]. 
- -10.1 102 Processing - - The 102 (Processing) status code is an interim response used to - inform the client that the server has accepted the complete request, - but has not yet completed it. This status code SHOULD only be sent - when the server has a reasonable expectation that the request will - take significant time to complete. As guidance, if a method is taking - longer than 20 seconds (a reasonable, but arbitrary value) to process - the server SHOULD return a 102 (Processing) response. The server MUST - send a final response after the request has been completed. - - Methods can potentially take a long period of time to process, - especially methods that support the Depth header. In such cases the - client may time-out the connection while waiting for a response. To - prevent this the server may return a 102 (Processing) status code to - indicate to the client that the server is still processing the - method. - -10.2 207 Multi-Status - - The 207 (Multi-Status) status code provides status for multiple - independent operations (see section 11 for more information). - - - - - - -Goland, et al. Standards Track [Page 59] - -RFC 2518 WEBDAV February 1999 - - -10.3 422 Unprocessable Entity - - The 422 (Unprocessable Entity) status code means the server - understands the content type of the request entity (hence a - 415(Unsupported Media Type) status code is inappropriate), and the - syntax of the request entity is correct (thus a 400 (Bad Request) - status code is inappropriate) but was unable to process the contained - instructions. For example, this error condition may occur if an XML - request body contains well-formed (i.e., syntactically correct), but - semantically erroneous XML instructions. - -10.4 423 Locked - - The 423 (Locked) status code means the source or destination resource - of a method is locked. 
- -10.5 424 Failed Dependency - - The 424 (Failed Dependency) status code means that the method could - not be performed on the resource because the requested action - depended on another action and that action failed. For example, if a - command in a PROPPATCH method fails then, at minimum, the rest of the - commands will also fail with 424 (Failed Dependency). - -10.6 507 Insufficient Storage - - The 507 (Insufficient Storage) status code means the method could not - be performed on the resource because the server is unable to store - the representation needed to successfully complete the request. This - condition is considered to be temporary. If the request which - received this status code was the result of a user action, the - request MUST NOT be repeated until it is requested by a separate user - action. - -11 Multi-Status Response - - The default 207 (Multi-Status) response body is a text/xml or - application/xml HTTP entity that contains a single XML element called - multistatus, which contains a set of XML elements called response - which contain 200, 300, 400, and 500 series status codes generated - during the method invocation. 100 series status codes SHOULD NOT be - recorded in a response XML element. - - - - - - - - - -Goland, et al. Standards Track [Page 60] - -RFC 2518 WEBDAV February 1999 - - -12 XML Element Definitions - - In the section below, the final line of each section gives the - element type declaration using the format defined in [REC-XML]. The - "Value" field, where present, specifies further restrictions on the - allowable contents of the XML element using BNF (i.e., to further - restrict the values of a PCDATA element). - -12.1 activelock XML Element - - Name: activelock - Namespace: DAV: - Purpose: Describes a lock on a resource. - - - -12.1.1 depth XML Element - - Name: depth - Namespace: DAV: - Purpose: The value of the Depth header. 
- Value: "0" | "1" | "infinity" - - - -12.1.2 locktoken XML Element - - Name: locktoken - Namespace: DAV: - Purpose: The lock token associated with a lock. - Description: The href contains one or more opaque lock token URIs - which all refer to the same lock (i.e., the OpaqueLockToken-URI - production in section 6.4). - - - -12.1.3 timeout XML Element - - Name: timeout - Namespace: DAV: - Purpose: The timeout associated with a lock - Value: TimeType ;Defined in section 9.8 - - - - - - - - -Goland, et al. Standards Track [Page 61] - -RFC 2518 WEBDAV February 1999 - - -12.2 collection XML Element - - Name: collection - Namespace: DAV: - Purpose: Identifies the associated resource as a collection. The - resourcetype property of a collection resource MUST have this value. - - - -12.3 href XML Element - - Name: href - Namespace: DAV: - Purpose: Identifies the content of the element as a URI. - Value: URI ; See section 3.2.1 of [RFC2068] - - - -12.4 link XML Element - - Name: link - Namespace: DAV: - Purpose: Identifies the property as a link and contains the source - and destination of that link. - Description: The link XML element is used to provide the sources and - destinations of a link. The name of the property containing the link - XML element provides the type of the link. Link is a multi-valued - element, so multiple links may be used together to indicate multiple - links with the same type. The values in the href XML elements inside - the src and dst XML elements of the link XML element MUST NOT be - rejected if they point to resources which do not exist. - - - -12.4.1 dst XML Element - - Name: dst - Namespace: DAV: - Purpose: Indicates the destination of a link - Value: URI - - - -12.4.2 src XML Element - - Name: src - Namespace: DAV: - Purpose: Indicates the source of a link. - - - -Goland, et al. 
Standards Track [Page 62] - -RFC 2518 WEBDAV February 1999 - - - Value: URI - - - -12.5 lockentry XML Element - - Name: lockentry - Namespace: DAV: - Purpose: Defines the types of locks that can be used with the - resource. - - - -12.6 lockinfo XML Element - - Name: lockinfo - Namespace: DAV: - Purpose: The lockinfo XML element is used with a LOCK method to - specify the type of lock the client wishes to have created. - - - -12.7 lockscope XML Element - - Name: lockscope - Namespace: DAV: - Purpose: Specifies whether a lock is an exclusive lock, or a - shared lock. - - - -12.7.1 exclusive XML Element - - Name: exclusive - Namespace: DAV: - Purpose: Specifies an exclusive lock - - - -12.7.2 shared XML Element - - Name: shared - Namespace: DAV: - Purpose: Specifies a shared lock - - - - - - - -Goland, et al. Standards Track [Page 63] - -RFC 2518 WEBDAV February 1999 - - -12.8 locktype XML Element - - Name: locktype - Namespace: DAV: - Purpose: Specifies the access type of a lock. At present, this - specification only defines one lock type, the write lock. - - - -12.8.1 write XML Element - - Name: write - Namespace: DAV: - Purpose: Specifies a write lock. - - - -12.9 multistatus XML Element - - Name: multistatus - Namespace: DAV: - Purpose: Contains multiple response messages. - Description: The responsedescription at the top level is used to - provide a general message describing the overarching nature of the - response. If this value is available an application may use it - instead of presenting the individual response descriptions contained - within the responses. - - - -12.9.1 response XML Element - - Name: response - Namespace: DAV: - Purpose: Holds a single response describing the effect of a - method on resource and/or its properties. - Description: A particular href MUST NOT appear more than once as the - child of a response XML element under a multistatus XML element. 
- This requirement is necessary in order to keep processing costs for a - response to linear time. Essentially, this prevents having to search - in order to group together all the responses by href. There are, - however, no requirements regarding ordering based on href values. - - - - - - - - -Goland, et al. Standards Track [Page 64] - -RFC 2518 WEBDAV February 1999 - - -12.9.1.1 propstat XML Element - - Name: propstat - Namespace: DAV: - Purpose: Groups together a prop and status element that is - associated with a particular href element. - Description: The propstat XML element MUST contain one prop XML - element and one status XML element. The contents of the prop XML - element MUST only list the names of properties to which the result in - the status element applies. - - - -12.9.1.2 status XML Element - - Name: status - Namespace: DAV: - Purpose: Holds a single HTTP status-line - Value: status-line ;status-line defined in [RFC2068] - - - -12.9.2 responsedescription XML Element - - Name: responsedescription - Namespace: DAV: - Purpose: Contains a message that can be displayed to the user - explaining the nature of the response. - Description: This XML element provides information suitable to be - presented to a user. - - - -12.10 owner XML Element - - Name: owner - Namespace: DAV: - Purpose: Provides information about the principal taking out a - lock. - Description: The owner XML element provides information sufficient - for either directly contacting a principal (such as a telephone - number or Email URI), or for discovering the principal (such as the - URL of a homepage) who owns a lock. - - - - - - - - -Goland, et al. Standards Track [Page 65] - -RFC 2518 WEBDAV February 1999 - - -12.11 prop XML element - - Name: prop - Namespace: DAV: - Purpose: Contains properties related to a resource. - Description: The prop XML element is a generic container for - properties defined on resources. 
All elements inside a prop XML - element MUST define properties related to the resource. No other - elements may be used inside of a prop element. - - - -12.12 propertybehavior XML element - - Name: propertybehavior Namespace: DAV: Purpose: Specifies - how properties are handled during a COPY or MOVE. - Description: The propertybehavior XML element specifies how - properties are handled during a COPY or MOVE. If this XML element is - not included in the request body then the server is expected to act - as defined by the default property handling behavior of the - associated method. All WebDAV compliant resources MUST support the - propertybehavior XML element. - - - -12.12.1 keepalive XML element - - Name: keepalive - Namespace: DAV: - Purpose: Specifies requirements for the copying/moving of live - properties. - Description: If a list of URIs is included as the value of keepalive - then the named properties MUST be "live" after they are copied - (moved) to the destination resource of a COPY (or MOVE). If the - value "*" is given for the keepalive XML element, this designates - that all live properties on the source resource MUST be live on the - destination. If the requirements specified by the keepalive element - can not be honored then the method MUST fail with a 412 (Precondition - Failed). All DAV compliant resources MUST support the keepalive XML - element for use with the COPY and MOVE methods. - Value: "*" ; #PCDATA value can only be "*" - - - - - - - - - - -Goland, et al. Standards Track [Page 66] - -RFC 2518 WEBDAV February 1999 - - -12.12.2 omit XML element - - Name: omit - Namespace: DAV: - Purpose: The omit XML element instructs the server that it should - use best effort to copy properties but a failure to copy a property - MUST NOT cause the method to fail. Description: The default behavior - for a COPY or MOVE is to copy/move all properties or fail the method. 
- In certain circumstances, such as when a server copies a resource - over another protocol such as FTP, it may not be possible to - copy/move the properties associated with the resource. Thus any - attempt to copy/move over FTP would always have to fail because - properties could not be moved over, even as dead properties. All DAV - compliant resources MUST support the omit XML element on COPY/MOVE - methods. - - - -12.13 propertyupdate XML element - - Name: propertyupdate - Namespace: DAV: - Purpose: Contains a request to alter the properties on a - resource. - Description: This XML element is a container for the information - required to modify the properties on the resource. This XML element - is multi-valued. - - - -12.13.1 remove XML element - - Name: remove - Namespace: DAV: - Purpose: Lists the DAV properties to be removed from a resource. - Description: Remove instructs that the properties specified in prop - should be removed. Specifying the removal of a property that does - not exist is not an error. All the XML elements in a prop XML - element inside of a remove XML element MUST be empty, as only the - names of properties to be removed are required. - - - -12.13.2 set XML element - - Name: set - Namespace: DAV: - Purpose: Lists the DAV property values to be set for a resource. - - - -Goland, et al. Standards Track [Page 67] - -RFC 2518 WEBDAV February 1999 - - - Description: The set XML element MUST contain only a prop XML - element. The elements contained by the prop XML element inside the - set XML element MUST specify the name and value of properties that - are set on the resource identified by Request-URI. If a property - already exists then its value is replaced. Language tagging - information in the property's value (in the "xml:lang" attribute, if - present) MUST be persistently stored along with the property, and - MUST be subsequently retrievable using PROPFIND. 
- - - -12.14 propfind XML Element - - Name: propfind - Namespace: DAV: - Purpose: Specifies the properties to be returned from a PROPFIND - method. Two special elements are specified for use with propfind, - allprop and propname. If prop is used inside propfind it MUST only - contain property names, not values. - - - -12.14.1 allprop XML Element - - Name: allprop Namespace: DAV: Purpose: The allprop XML - element specifies that all property names and values on the resource - are to be returned. - - - -12.14.2 propname XML Element - - Name: propname Namespace: DAV: Purpose: The propname XML - element specifies that only a list of property names on the resource - is to be returned. - - - -13 DAV Properties - - For DAV properties, the name of the property is also the same as the - name of the XML element that contains its value. In the section - below, the final line of each section gives the element type - declaration using the format defined in [REC-XML]. The "Value" field, - where present, specifies further restrictions on the allowable - contents of the XML element using BNF (i.e., to further restrict the - values of a PCDATA element). - - - - -Goland, et al. Standards Track [Page 68] - -RFC 2518 WEBDAV February 1999 - - -13.1 creationdate Property - - Name: creationdate - Namespace: DAV: - Purpose: Records the time and date the resource was created. - Value: date-time ; See Appendix 2 - Description: The creationdate property should be defined on all DAV - compliant resources. If present, it contains a timestamp of the - moment when the resource was created (i.e., the moment it had non- - null state). - - - -13.2 displayname Property - - Name: displayname - Namespace: DAV: - Purpose: Provides a name for the resource that is suitable for - presentation to a user. - Description: The displayname property should be defined on all DAV - compliant resources. If present, the property contains a description - of the resource that is suitable for presentation to a user. 
- - - -13.3 getcontentlanguage Property - - Name: getcontentlanguage - Namespace: DAV: - Purpose: Contains the Content-Language header returned by a GET - without accept headers - Description: The getcontentlanguage property MUST be defined on any - DAV compliant resource that returns the Content-Language header on a - GET. - Value: language-tag ;language-tag is defined in section 14.13 - of [RFC2068] - - - -13.4 getcontentlength Property - - Name: getcontentlength - Namespace: DAV: - Purpose: Contains the Content-Length header returned by a GET - without accept headers. - Description: The getcontentlength property MUST be defined on any - DAV compliant resource that returns the Content-Length header in - response to a GET. - - - -Goland, et al. Standards Track [Page 69] - -RFC 2518 WEBDAV February 1999 - - - Value: content-length ; see section 14.14 of [RFC2068] - - - -13.5 getcontenttype Property - - Name: getcontenttype - Namespace: DAV: - Purpose: Contains the Content-Type header returned by a GET - without accept headers. - Description: This getcontenttype property MUST be defined on any DAV - compliant resource that returns the Content-Type header in response - to a GET. - Value: media-type ; defined in section 3.7 of [RFC2068] - - - -13.6 getetag Property - - Name: getetag - Namespace: DAV: - Purpose: Contains the ETag header returned by a GET without - accept headers. - Description: The getetag property MUST be defined on any DAV - compliant resource that returns the Etag header. - Value: entity-tag ; defined in section 3.11 of [RFC2068] - - - -13.7 getlastmodified Property - - Name: getlastmodified - Namespace: DAV: - Purpose: Contains the Last-Modified header returned by a GET - method without accept headers. - Description: Note that the last-modified date on a resource may - reflect changes in any part of the state of the resource, not - necessarily just a change to the response to the GET method. 
For - example, a change in a property may cause the last-modified date to - change. The getlastmodified property MUST be defined on any DAV - compliant resource that returns the Last-Modified header in response - to a GET. - Value: HTTP-date ; defined in section 3.3.1 of [RFC2068] - - - - - - - - -Goland, et al. Standards Track [Page 70] - -RFC 2518 WEBDAV February 1999 - - -13.8 lockdiscovery Property - - Name: lockdiscovery - Namespace: DAV: - Purpose: Describes the active locks on a resource - Description: The lockdiscovery property returns a listing of who has - a lock, what type of lock he has, the timeout type and the time - remaining on the timeout, and the associated lock token. The server - is free to withhold any or all of this information if the requesting - principal does not have sufficient access rights to see the requested - data. - - - -13.8.1 Example - Retrieving the lockdiscovery Property - - >>Request - - PROPFIND /container/ HTTP/1.1 - Host: www.foo.bar - Content-Length: xxxx - Content-Type: text/xml; charset="utf-8" - - - - - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/container/ - - - - - - - 0 - Jane Smith - Infinite - - - - -Goland, et al. Standards Track [Page 71] - -RFC 2518 WEBDAV February 1999 - - - - opaquelocktoken:f81de2ad-7f3d-a1b2-4f3c-00a0c91a9d76 - - - - - - HTTP/1.1 200 OK - - - - - This resource has a single exclusive write lock on it, with an - infinite timeout. - -13.9 resourcetype Property - - Name: resourcetype - Namespace: DAV: - Purpose: Specifies the nature of the resource. - Description: The resourcetype property MUST be defined on all DAV - compliant resources. The default value is empty. - - - -13.10 source Property - - Name: source - Namespace: DAV: - Purpose: The destination of the source link identifies the - resource that contains the unprocessed source of the link's source. 
- Description: The source of the link (src) is typically the URI of the - output resource on which the link is defined, and there is typically - only one destination (dst) of the link, which is the URI where the - unprocessed source of the resource may be accessed. When more than - one link destination exists, this specification asserts no policy on - ordering. - - - -13.10.1 Example - A source Property - - - - - - Source - http://foo.bar/program - - - -Goland, et al. Standards Track [Page 72] - -RFC 2518 WEBDAV February 1999 - - - http://foo.bar/src/main.c - - - Library - http://foo.bar/program - http://foo.bar/src/main.lib - - - Makefile - http://foo.bar/program - http://foo.bar/src/makefile - - - - - In this example the resource http://foo.bar/program has a source - property that contains three links. Each link contains three - elements, two of which, src and dst, are part of the DAV schema - defined in this document, and one which is defined by the schema - http://www.foocorp.com/project/ (Source, Library, and Makefile). A - client which only implements the elements in the DAV spec will not - understand the foocorp elements and will ignore them, thus seeing the - expected source and destination links. An enhanced client may know - about the foocorp elements and be able to present the user with - additional information about the links. This example demonstrates - the power of XML markup, allowing element values to be enhanced - without breaking older clients. - -13.11 supportedlock Property - - Name: supportedlock - Namespace: DAV: - Purpose: To provide a listing of the lock capabilities supported - by the resource. - Description: The supportedlock property of a resource returns a - listing of the combinations of scope and access types which may be - specified in a lock request on the resource. Note that the actual - contents are themselves controlled by access controls so a server is - not required to provide information the client is not authorized to - see. 
- - - -13.11.1 Example - Retrieving the supportedlock Property - - >>Request - - PROPFIND /container/ HTTP/1.1 - - - -Goland, et al. Standards Track [Page 73] - -RFC 2518 WEBDAV February 1999 - - - Host: www.foo.bar - Content-Length: xxxx - Content-Type: text/xml; charset="utf-8" - - - - - - - >>Response - - HTTP/1.1 207 Multi-Status - Content-Type: text/xml; charset="utf-8" - Content-Length: xxxx - - - - - http://www.foo.bar/container/ - - - - - - - - - - - - - - HTTP/1.1 200 OK - - - - -14 Instructions for Processing XML in DAV - - All DAV compliant resources MUST ignore any unknown XML element and - all its children encountered while processing a DAV method that uses - XML as its command language. - - This restriction also applies to the processing, by clients, of DAV - property values where unknown XML elements SHOULD be ignored unless - the property's schema declares otherwise. - - - - - -Goland, et al. Standards Track [Page 74] - -RFC 2518 WEBDAV February 1999 - - - This restriction does not apply to setting dead DAV properties on the - server where the server MUST record unknown XML elements. - - Additionally, this restriction does not apply to the use of XML where - XML happens to be the content type of the entity body, for example, - when used as the body of a PUT. - - Since XML can be transported as text/xml or application/xml, a DAV - server MUST accept DAV method requests with XML parameters - transported as either text/xml or application/xml, and DAV client - MUST accept XML responses using either text/xml or application/xml. - -15 DAV Compliance Classes - - A DAV compliant resource can choose from two classes of compliance. - A client can discover the compliance classes of a resource by - executing OPTIONS on the resource, and examining the "DAV" header - which is returned. - - Since this document describes extensions to the HTTP/1.1 protocol, - minimally all DAV compliant resources, clients, and proxies MUST be - compliant with [RFC2068]. 
- - Compliance classes are not necessarily sequential. A resource that is - class 2 compliant must also be class 1 compliant; but if additional - compliance classes are defined later, a resource that is class 1, 2, - and 4 compliant might not be class 3 compliant. Also note that - identifiers other than numbers may be used as compliance class - identifiers. - -15.1 Class 1 - - A class 1 compliant resource MUST meet all "MUST" requirements in all - sections of this document. - - Class 1 compliant resources MUST return, at minimum, the value "1" in - the DAV header on all responses to the OPTIONS method. - -15.2 Class 2 - - A class 2 compliant resource MUST meet all class 1 requirements and - support the LOCK method, the supportedlock property, the - lockdiscovery property, the Time-Out response header and the Lock- - Token request header. A class "2" compliant resource SHOULD also - support the Time-Out request header and the owner XML element. - - Class 2 compliant resources MUST return, at minimum, the values "1" - and "2" in the DAV header on all responses to the OPTIONS method. - - - -Goland, et al. Standards Track [Page 75] - -RFC 2518 WEBDAV February 1999 - - -16 Internationalization Considerations - - In the realm of internationalization, this specification complies - with the IETF Character Set Policy [RFC2277]. In this specification, - human-readable fields can be found either in the value of a property, - or in an error message returned in a response entity body. In both - cases, the human-readable content is encoded using XML, which has - explicit provisions for character set tagging and encoding, and - requires that XML processors read XML elements encoded, at minimum, - using the UTF-8 [UTF-8] encoding of the ISO 10646 multilingual plane. 
- XML examples in this specification demonstrate use of the charset - parameter of the Content-Type header, as defined in [RFC2376], as - well as the XML "encoding" attribute, which together provide charset - identification information for MIME and XML processors. - - XML also provides a language tagging capability for specifying the - language of the contents of a particular XML element. XML uses - either IANA registered language tags (see [RFC1766]) or ISO 639 - language tags [ISO-639] in the "xml:lang" attribute of an XML element - to identify the language of its content and attributes. - - WebDAV applications MUST support the character set tagging, character - set encoding, and the language tagging functionality of the XML - specification. Implementors of WebDAV applications are strongly - encouraged to read "XML Media Types" [RFC2376] for instruction on - which MIME media type to use for XML transport, and on use of the - charset parameter of the Content-Type header. - - Names used within this specification fall into three categories: - names of protocol elements such as methods and headers, names of XML - elements, and names of properties. Naming of protocol elements - follows the precedent of HTTP, using English names encoded in USASCII - for methods and headers. Since these protocol elements are not - visible to users, and are in fact simply long token identifiers, they - do not need to support encoding in multiple character sets. - Similarly, though the names of XML elements used in this - specification are English names encoded in UTF-8, these names are not - visible to the user, and hence do not need to support multiple - character set encodings. - - The name of a property defined on a resource is a URI. 
Although some - applications (e.g., a generic property viewer) will display property - URIs directly to their users, it is expected that the typical - application will use a fixed set of properties, and will provide a - mapping from the property name URI to a human-readable field when - displaying the property name to a user. It is only in the case where - - - - - -Goland, et al. Standards Track [Page 76] - -RFC 2518 WEBDAV February 1999 - - - the set of properties is not known ahead of time that an application - need display a property name URI to a user. We recommend that - applications provide human-readable property names wherever feasible. - - For error reporting, we follow the convention of HTTP/1.1 status - codes, including with each status code a short, English description - of the code (e.g., 423 (Locked)). While the possibility exists that - a poorly crafted user agent would display this message to a user, - internationalized applications will ignore this message, and display - an appropriate message in the user's language and character set. - - Since interoperation of clients and servers does not require locale - information, this specification does not specify any mechanism for - transmission of this information. - -17 Security Considerations - - This section is provided to detail issues concerning security - implications of which WebDAV applications need to be aware. - - All of the security considerations of HTTP/1.1 (discussed in - [RFC2068]) and XML (discussed in [RFC2376]) also apply to WebDAV. In - addition, the security risks inherent in remote authoring require - stronger authentication technology, introduce several new privacy - concerns, and may increase the hazards from poor server design. - These issues are detailed below. - -17.1 Authentication of Clients - - Due to their emphasis on authoring, WebDAV servers need to use - authentication technology to protect not just access to a network - resource, but the integrity of the resource as well. 
Furthermore, - the introduction of locking functionality requires support for - authentication. - - A password sent in the clear over an insecure channel is an - inadequate means for protecting the accessibility and integrity of a - resource as the password may be intercepted. Since Basic - authentication for HTTP/1.1 performs essentially clear text - transmission of a password, Basic authentication MUST NOT be used to - authenticate a WebDAV client to a server unless the connection is - secure. Furthermore, a WebDAV server MUST NOT send Basic - authentication credentials in a WWW-Authenticate header unless the - connection is secure. Examples of secure connections include a - Transport Layer Security (TLS) connection employing a strong cipher - suite with mutual authentication of client and server, or a - connection over a network which is physically secure, for example, an - isolated network in a building with restricted access. - - - -Goland, et al. Standards Track [Page 77] - -RFC 2518 WEBDAV February 1999 - - - WebDAV applications MUST support the Digest authentication scheme - [RFC2069]. Since Digest authentication verifies that both parties to - a communication know a shared secret, a password, without having to - send that secret in the clear, Digest authentication avoids the - security problems inherent in Basic authentication while providing a - level of authentication which is useful in a wide range of scenarios. - -17.2 Denial of Service - - Denial of service attacks are of special concern to WebDAV servers. - WebDAV plus HTTP enables denial of service attacks on every part of a - system's resources. - - The underlying storage can be attacked by PUTting extremely large - files. - - Asking for recursive operations on large collections can attack - processing time. - - Making multiple pipelined requests on multiple connections can attack - network connections. 
- - WebDAV servers need to be aware of the possibility of a denial of - service attack at all levels. - -17.3 Security through Obscurity - - WebDAV provides, through the PROPFIND method, a mechanism for listing - the member resources of a collection. This greatly diminishes the - effectiveness of security or privacy techniques that rely only on the - difficulty of discovering the names of network resources. Users of - WebDAV servers are encouraged to use access control techniques to - prevent unwanted access to resources, rather than depending on the - relative obscurity of their resource names. - -17.4 Privacy Issues Connected to Locks - - When submitting a lock request a user agent may also submit an owner - XML field giving contact information for the person taking out the - lock (for those cases where a person, rather than a robot, is taking - out the lock). This contact information is stored in a lockdiscovery - property on the resource, and can be used by other collaborators to - begin negotiation over access to the resource. However, in many - cases this contact information can be very private, and should not be - widely disseminated. Servers SHOULD limit read access to the - lockdiscovery property as appropriate. Furthermore, user agents - - - - - -Goland, et al. Standards Track [Page 78] - -RFC 2518 WEBDAV February 1999 - - - SHOULD provide control over whether contact information is sent at - all, and if contact information is sent, control over exactly what - information is sent. - -17.5 Privacy Issues Connected to Properties - - Since property values are typically used to hold information such as - the author of a document, there is the possibility that privacy - concerns could arise stemming from widespread access to a resource's - property data. 
To reduce the risk of inadvertent release of private - information via properties, servers are encouraged to develop access - control mechanisms that separate read access to the resource body and - read access to the resource's properties. This allows a user to - control the dissemination of their property data without overly - restricting access to the resource's contents. - -17.6 Reduction of Security due to Source Link - - HTTP/1.1 warns against providing read access to script code because - it may contain sensitive information. Yet WebDAV, via its source - link facility, can potentially provide a URI for script resources so - they may be authored. For HTTP/1.1, a server could reasonably - prevent access to source resources due to the predominance of read- - only access. WebDAV, with its emphasis on authoring, encourages read - and write access to source resources, and provides the source link - facility to identify the source. This reduces the security benefits - of eliminating access to source resources. Users and administrators - of WebDAV servers should be very cautious when allowing remote - authoring of scripts, limiting read and write access to the source - resources to authorized principals. - -17.7 Implications of XML External Entities - - XML supports a facility known as "external entities", defined in - section 4.2.2 of [REC-XML], which instruct an XML processor to - retrieve and perform an inline include of XML located at a particular - URI. An external XML entity can be used to append or modify the - document type declaration (DTD) associated with an XML document. An - external XML entity can also be used to include XML within the - content of an XML document. For non-validating XML, such as the XML - used in this specification, including an external XML entity is not - required by [REC-XML]. However, [REC-XML] does state that an XML - processor may, at its discretion, include the external XML entity. 
- - External XML entities have no inherent trustworthiness and are - subject to all the attacks that are endemic to any HTTP GET request. - Furthermore, it is possible for an external XML entity to modify the - DTD, and hence affect the final form of an XML document, in the worst - - - -Goland, et al. Standards Track [Page 79] - -RFC 2518 WEBDAV February 1999 - - - case significantly modifying its semantics, or exposing the XML - processor to the security risks discussed in [RFC2376]. Therefore, - implementers must be aware that external XML entities should be - treated as untrustworthy. - - There is also the scalability risk that would accompany a widely - deployed application which made use of external XML entities. In - this situation, it is possible that there would be significant - numbers of requests for one external XML entity, potentially - overloading any server which fields requests for the resource - containing the external XML entity. - -17.8 Risks Connected with Lock Tokens - - This specification, in section 6.4, requires the use of Universal - Unique Identifiers (UUIDs) for lock tokens, in order to guarantee - their uniqueness across space and time. UUIDs, as defined in [ISO- - 11578], contain a "node" field which "consists of the IEEE address, - usually the host address. For systems with multiple IEEE 802 nodes, - any available node address can be used." Since a WebDAV server will - issue many locks over its lifetime, the implication is that it will - also be publicly exposing its IEEE 802 address. - - There are several risks associated with exposure of IEEE 802 - addresses. Using the IEEE 802 address: - - * It is possible to track the movement of hardware from subnet to - subnet. - - * It may be possible to identify the manufacturer of the hardware - running a WebDAV server. - - * It may be possible to determine the number of each type of computer - running WebDAV. 
- - Section 6.4.1 of this specification details an alternate mechanism - for generating the "node" field of a UUID without using an IEEE 802 - address, which alleviates the risks associated with exposure of IEEE - 802 addresses by using an alternate source of uniqueness. - -18 IANA Considerations - - This document defines two namespaces, the namespace of property - names, and the namespace of WebDAV-specific XML elements used within - property values. - - - - - - -Goland, et al. Standards Track [Page 80] - -RFC 2518 WEBDAV February 1999 - - - URIs are used for both names, for several reasons. Assignment of a - URI does not require a request to a central naming authority, and - hence allow WebDAV property names and XML elements to be quickly - defined by any WebDAV user or application. URIs also provide a - unique address space, ensuring that the distributed users of WebDAV - will not have collisions among the property names and XML elements - they create. - - This specification defines a distinguished set of property names and - XML elements that are understood by all WebDAV applications. The - property names and XML elements in this specification are all derived - from the base URI DAV: by adding a suffix to this URI, for example, - DAV:creationdate for the "creationdate" property. - - This specification also defines a URI scheme for the encoding of lock - tokens, the opaquelocktoken URI scheme described in section 6.4. - - To ensure correct interoperation based on this specification, IANA - must reserve the URI namespaces starting with "DAV:" and with - "opaquelocktoken:" for use by this specification, its revisions, and - related WebDAV specifications. - -19 Intellectual Property - - The following notice is copied from RFC 2026 [RFC2026], section 10.4, - and describes the position of the IETF concerning intellectual - property claims made against this document. 
- - The IETF takes no position regarding the validity or scope of any - intellectual property or other rights that might be claimed to - pertain to the implementation or use other technology described in - this document or the extent to which any license under such rights - might or might not be available; neither does it represent that it - has made any effort to identify any such rights. Information on the - IETF's procedures with respect to rights in standards-track and - standards-related documentation can be found in BCP-11. Copies of - claims of rights made available for publication and any assurances of - licenses to be made available, or the result of an attempt made to - obtain a general license or permission for the use of such - proprietary rights by implementors or users of this specification can - be obtained from the IETF Secretariat. - - The IETF invites any interested party to bring to its attention any - copyrights, patents or patent applications, or other proprietary - rights which may cover technology that may be required to practice - this standard. Please address the information to the IETF Executive - Director. - - - - -Goland, et al. Standards Track [Page 81] - -RFC 2518 WEBDAV February 1999 - - -20 Acknowledgements - - A specification such as this thrives on piercing critical review and - withers from apathetic neglect. The authors gratefully acknowledge - the contributions of the following people, whose insights were so - valuable at every stage of our work. 
- - Terry Allen, Harald Alvestrand, Jim Amsden, Becky Anderson, Alan - Babich, Sanford Barr, Dylan Barrell, Bernard Chester, Tim Berners- - Lee, Dan Connolly, Jim Cunningham, Ron Daniel, Jr., Jim Davis, Keith - Dawson, Mark Day, Brian Deen, Martin Duerst, David Durand, Lee - Farrell, Chuck Fay, Wesley Felter, Roy Fielding, Mark Fisher, Alan - Freier, George Florentine, Jim Gettys, Phill Hallam-Baker, Dennis - Hamilton, Steve Henning, Mead Himelstein, Alex Hopmann, Andre van der - Hoek, Ben Laurie, Paul Leach, Ora Lassila, Karen MacArthur, Steven - Martin, Larry Masinter, Michael Mealling, Keith Moore, Thomas Narten, - Henrik Nielsen, Kenji Ota, Bob Parker, Glenn Peterson, Jon Radoff, - Saveen Reddy, Henry Sanders, Christopher Seiwald, Judith Slein, Mike - Spreitzer, Einar Stefferud, Greg Stein, Ralph Swick, Kenji Takahashi, - Richard N. Taylor, Robert Thau, John Turner, Sankar Virdhagriswaran, - Fabio Vitali, Gregory Woodhouse, and Lauren Wood. - - Two from this list deserve special mention. The contributions by - Larry Masinter have been invaluable, both in helping the formation of - the working group and in patiently coaching the authors along the - way. In so many ways he has set high standards we have toiled to - meet. The contributions of Judith Slein in clarifying the - requirements, and in patiently reviewing draft after draft, both - improved this specification and expanded our minds on document - management. - - We would also like to thank John Turner for developing the XML DTD. - -21 References - -21.1 Normative References - - [RFC1766] Alvestrand, H., "Tags for the Identification of - Languages", RFC 1766, March 1995. - - [RFC2277] Alvestrand, H., "IETF Policy on Character Sets and - Languages", BCP 18, RFC 2277, January 1998. - - [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate - Requirement Levels", BCP 14, RFC 2119, March 1997. - - - - - - -Goland, et al. 
Standards Track [Page 82] - -RFC 2518 WEBDAV February 1999 - - - [RFC2396] Berners-Lee, T., Fielding, R. and L. Masinter, - "Uniform Resource Identifiers (URI): Generic Syntax", - RFC 2396, August 1998. - - [REC-XML] T. Bray, J. Paoli, C. M. Sperberg-McQueen, - "Extensible Markup Language (XML)." World Wide Web - Consortium Recommendation REC-xml-19980210. - http://www.w3.org/TR/1998/REC-xml-19980210. - - [REC-XML-NAMES] T. Bray, D. Hollander, A. Layman, "Namespaces in - XML". World Wide Web Consortium Recommendation REC- - xml-names-19990114. http://www.w3.org/TR/1999/REC- - xml-names-19990114/ - - [RFC2069] Franks, J., Hallam-Baker, P., Hostetler, J., Leach, - P, Luotonen, A., Sink, E. and L. Stewart, "An - Extension to HTTP : Digest Access Authentication", - RFC 2069, January 1997. - - [RFC2068] Fielding, R., Gettys, J., Mogul, J., Frystyk, H. and - T. Berners-Lee, "Hypertext Transfer Protocol -- - HTTP/1.1", RFC 2068, January 1997. - - [ISO-639] ISO (International Organization for Standardization). - ISO 639:1988. "Code for the representation of names - of languages." - - [ISO-8601] ISO (International Organization for Standardization). - ISO 8601:1988. "Data elements and interchange formats - - Information interchange - Representation of dates - and times." - - [ISO-11578] ISO (International Organization for Standardization). - ISO/IEC 11578:1996. "Information technology - Open - Systems Interconnection - Remote Procedure Call - (RPC)" - - [RFC2141] Moats, R., "URN Syntax", RFC 2141, May 1997. - - [UTF-8] Yergeau, F., "UTF-8, a transformation format of - Unicode and ISO 10646", RFC 2279, January 1998. - -21.2 Informational References - - [RFC2026] Bradner, S., "The Internet Standards Process - Revision - 3", BCP 9, RFC 2026, October 1996. - - - - - -Goland, et al. Standards Track [Page 83] - -RFC 2518 WEBDAV February 1999 - - - [RFC1807] Lasher, R. and D. Cohen, "A Format for Bibliographic - Records", RFC 1807, June 1995. - - [WF] C. 
Lagoze, "The Warwick Framework: A Container - Architecture for Diverse Sets of Metadata", D-Lib - Magazine, July/August 1996. - http://www.dlib.org/dlib/july96/lagoze/07lagoze.html - - [USMARC] Network Development and MARC Standards, Office, ed. 1994. - "USMARC Format for Bibliographic Data", 1994. Washington, - DC: Cataloging Distribution Service, Library of Congress. - - [REC-PICS] J. Miller, T. Krauskopf, P. Resnick, W. Treese, "PICS - Label Distribution Label Syntax and Communication - Protocols" Version 1.1, World Wide Web Consortium - Recommendation REC-PICS-labels-961031. - http://www.w3.org/pub/WWW/TR/REC-PICS-labels-961031.html. - - [RFC2291] Slein, J., Vitali, F., Whitehead, E. and D. Durand, - "Requirements for Distributed Authoring and Versioning - Protocol for the World Wide Web", RFC 2291, February 1998. - - [RFC2413] Weibel, S., Kunze, J., Lagoze, C. and M. Wolf, "Dublin - Core Metadata for Resource Discovery", RFC 2413, September - 1998. - - [RFC2376] Whitehead, E. and M. Murata, "XML Media Types", RFC 2376, - July 1998. - -22 Authors' Addresses - - Y. Y. Goland - Microsoft Corporation - One Microsoft Way - Redmond, WA 98052-6399 - - EMail: yarong@microsoft.com - - - E. J. Whitehead, Jr. - Dept. Of Information and Computer Science - University of California, Irvine - Irvine, CA 92697-3425 - - EMail: ejw@ics.uci.edu - - - - - - -Goland, et al. Standards Track [Page 84] - -RFC 2518 WEBDAV February 1999 - - - A. Faizi - Netscape - 685 East Middlefield Road - Mountain View, CA 94043 - - EMail: asad@netscape.com - - - S. R. Carter - Novell - 1555 N. Technology Way - M/S ORM F111 - Orem, UT 84097-2399 - - EMail: srcarter@novell.com - - - D. Jensen - Novell - 1555 N. Technology Way - M/S ORM F111 - Orem, UT 84097-2399 - - EMail: dcjensen@novell.com - - - - - - - - - - - - - - - - - - - - - - - - - - - -Goland, et al. 
Standards Track [Page 85] - -RFC 2518 WEBDAV February 1999 - - -23 Appendices - -23.1 Appendix 1 - WebDAV Document Type Definition - - This section provides a document type definition, following the rules - in [REC-XML], for the XML elements used in the protocol stream and in - the values of properties. It collects the element definitions given - in sections 12 and 13. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Goland, et al. Standards Track [Page 86] - -RFC 2518 WEBDAV February 1999 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ]> - - - - - - - - - - - - - - - - - - - - - -Goland, et al. Standards Track [Page 87] - -RFC 2518 WEBDAV February 1999 - - -23.2 Appendix 2 - ISO 8601 Date and Time Profile - - The creationdate property specifies the use of the ISO 8601 date - format [ISO-8601]. This section defines a profile of the ISO 8601 - date format for use with this specification. This profile is quoted - from an Internet-Draft by Chris Newman, and is mentioned here to - properly attribute his work. - - date-time = full-date "T" full-time - - full-date = date-fullyear "-" date-month "-" date-mday - full-time = partial-time time-offset - - date-fullyear = 4DIGIT - date-month = 2DIGIT ; 01-12 - date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on - month/year - time-hour = 2DIGIT ; 00-23 - time-minute = 2DIGIT ; 00-59 - time-second = 2DIGIT ; 00-59, 00-60 based on leap second rules - time-secfrac = "." 1*DIGIT - time-numoffset = ("+" / "-") time-hour ":" time-minute - time-offset = "Z" / time-numoffset - - partial-time = time-hour ":" time-minute ":" time-second - [time-secfrac] - - Numeric offsets are calculated as local time minus UTC (Coordinated - Universal Time). So the equivalent time in UTC can be determined by - subtracting the offset from the local time. For example, 18:50:00- - 04:00 is the same time as 22:58:00Z. 
- - If the time in UTC is known, but the offset to local time is unknown, - this can be represented with an offset of "-00:00". This differs - from an offset of "Z" which implies that UTC is the preferred - reference point for the specified time. - - - - - - - - - - - - - - - -Goland, et al. Standards Track [Page 88] - -RFC 2518 WEBDAV February 1999 - - -23.3 Appendix 3 - Notes on Processing XML Elements - -23.3.1 Notes on Empty XML Elements - - XML supports two mechanisms for indicating that an XML element does - not have any content. The first is to declare an XML element of the - form . The second is to declare an XML element of the form - . The two XML elements are semantically identical. - - It is a violation of the XML specification to use the form if - the associated DTD declares the element to be EMPTY (e.g., ). If such a statement is included, then the empty element - format, must be used. If the element is not declared to be - EMPTY, then either form or may be used for empty - elements. - - 23.3.2 Notes on Illegal XML Processing - - XML is a flexible data format that makes it easy to submit data that - appears legal but in fact is not. The philosophy of "Be flexible in - what you accept and strict in what you send" still applies, but it - must not be applied inappropriately. XML is extremely flexible in - dealing with issues of white space, element ordering, inserting new - elements, etc. This flexibility does not require extension, - especially not in the area of the meaning of elements. - - There is no kindness in accepting illegal combinations of XML - elements. At best it will cause an unwanted result and at worst it - can cause real damage. - -23.3.2.1 Example - XML Syntax Error - - The following request body for a PROPFIND method is illegal. - - - - - - - - The definition of the propfind element only allows for the allprop or - the propname element, not both. Thus the above is an error and must - be responded to with a 400 (Bad Request). 
- - - - - - - - -Goland, et al. Standards Track [Page 89] - -RFC 2518 WEBDAV February 1999 - - - Imagine, however, that a server wanted to be "kind" and decided to - pick the allprop element as the true element and respond to it. A - client running over a bandwidth limited line who intended to execute - a propname would be in for a big surprise if the server treated the - command as an allprop. - - Additionally, if a server were lenient and decided to reply to this - request, the results would vary randomly from server to server, with - some servers executing the allprop directive, and others executing - the propname directive. This reduces interoperability rather than - increasing it. - -23.3.2.2 Example - Unknown XML Element - - The previous example was illegal because it contained two elements - that were explicitly banned from appearing together in the propfind - element. However, XML is an extensible language, so one can imagine - new elements being defined for use with propfind. Below is the - request body of a PROPFIND and, like the previous example, must be - rejected with a 400 (Bad Request) by a server that does not - understand the expired-props element. - - - - - - - To understand why a 400 (Bad Request) is returned let us look at the - request body as the server unfamiliar with expired-props sees it. - - - - - - As the server does not understand the expired-props element, - according to the WebDAV-specific XML processing rules specified in - section 14, it must ignore it. Thus the server sees an empty - propfind, which by the definition of the propfind element is illegal. - - Please note that had the extension been additive it would not - necessarily have resulted in a 400 (Bad Request). For example, - imagine the following request body for a PROPFIND: - - - - - - -Goland, et al. Standards Track [Page 90] - -RFC 2518 WEBDAV February 1999 - - - - *boss* - - - The previous example contains the fictitious element leave-out. 
Its - purpose is to prevent the return of any property whose name matches - the submitted pattern. If the previous example were submitted to a - server unfamiliar with leave-out, the only result would be that the - leave-out element would be ignored and a propname would be executed. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Goland, et al. Standards Track [Page 91] - -RFC 2518 WEBDAV February 1999 - - -23.4 Appendix 4 -- XML Namespaces for WebDAV - -23.4.1 Introduction - - All DAV compliant systems MUST support the XML namespace extensions - as specified in [REC-XML-NAMES]. - -23.4.2 Meaning of Qualified Names - - [Note to the reader: This section does not appear in [REC-XML-NAMES], - but is necessary to avoid ambiguity for WebDAV XML processors.] - - WebDAV compliant XML processors MUST interpret a qualified name as a - URI constructed by appending the LocalPart to the namespace name URI. - - Example - - - - Johnny Updraft - - - - - In this example, the qualified element name "del:glider" is - interpreted as the URL "http://www.del.jensen.org/glider". - - - - Johnny Updraft - - - - - Even though this example is syntactically different from the previous - example, it is semantically identical. Each instance of the - namespace name "bar" is replaced with "http://www.del.jensen.org/" - and then appended to the local name for each element tag. The - resulting tag names in this example are exactly the same as for the - previous example. - - - - Johnny Updraft - - - - - - - -Goland, et al. Standards Track [Page 92] - -RFC 2518 WEBDAV February 1999 - - - This example is semantically identical to the two previous ones. - Each instance of the namespace name "foo" is replaced with - "http://www.del.jensen.org/glide" which is then appended to the local - name for each element tag, the resulting tag names are identical to - those in the previous examples. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Goland, et al. Standards Track [Page 93] - -RFC 2518 WEBDAV February 1999 - - -24. Full Copyright Statement - - Copyright (C) The Internet Society (1999). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - - - - - - - - - - - - - - - - - - - - - - - - -Goland, et al. 
Standards Track [Page 94] - diff --git a/docs/specs/rfc2616.txt b/docs/specs/rfc2616.txt deleted file mode 100644 index 32f6f69..0000000 --- a/docs/specs/rfc2616.txt +++ /dev/null @@ -1,9934 +0,0 @@ - -[[ Text in double brackets is from the unofficial errata at ]] -[[ http://skrb.org/ietf/http_errata.html ]] - - -Network Working Group R. Fielding -Request for Comments: 2616 UC Irvine -Obsoletes: 2068 J. Gettys -Category: Standards Track Compaq/W3C - J. Mogul - Compaq - H. Frystyk - W3C/MIT - L. Masinter - Xerox - P. Leach - Microsoft - T. Berners-Lee - W3C/MIT - June 1999 - - - Hypertext Transfer Protocol -- HTTP/1.1 - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (1999). All Rights Reserved. - -Abstract - - The Hypertext Transfer Protocol (HTTP) is an application-level - protocol for distributed, collaborative, hypermedia information - systems. It is a generic, stateless, protocol which can be used for - many tasks beyond its use for hypertext, such as name servers and - distributed object management systems, through extension of its - request methods, error codes and headers [47]. A feature of HTTP is - the typing and negotiation of data representation, allowing systems - to be built independently of the data being transferred. - - HTTP has been in use by the World-Wide Web global information - initiative since 1990. This specification defines the protocol - referred to as "HTTP/1.1", and is an update to RFC 2068 [33]. - - - - - - -Fielding, et al. 
Standards Track [Page 1] - -RFC 2616 HTTP/1.1 June 1999 - - -Table of Contents - - 1 Introduction ...................................................7 - 1.1 Purpose......................................................7 - 1.2 Requirements .................................................8 - 1.3 Terminology ..................................................8 - 1.4 Overall Operation ...........................................12 - 2 Notational Conventions and Generic Grammar ....................14 - 2.1 Augmented BNF ...............................................14 - 2.2 Basic Rules .................................................15 - 3 Protocol Parameters ...........................................17 - 3.1 HTTP Version ................................................17 - 3.2 Uniform Resource Identifiers ................................18 - 3.2.1 General Syntax ...........................................19 - 3.2.2 http URL .................................................19 - 3.2.3 URI Comparison ...........................................20 - 3.3 Date/Time Formats ...........................................20 - 3.3.1 Full Date ................................................20 - 3.3.2 Delta Seconds ............................................21 - 3.4 Character Sets ..............................................21 - 3.4.1 Missing Charset ..........................................22 - 3.5 Content Codings .............................................23 - 3.6 Transfer Codings ............................................24 - 3.6.1 Chunked Transfer Coding ..................................25 - 3.7 Media Types .................................................26 - 3.7.1 Canonicalization and Text Defaults .......................27 - 3.7.2 Multipart Types ..........................................27 - 3.8 Product Tokens ..............................................28 - 3.9 Quality Values ..............................................29 - 3.10 Language Tags 
...............................................29 - 3.11 Entity Tags .................................................30 - 3.12 Range Units .................................................30 - 4 HTTP Message ..................................................31 - 4.1 Message Types ...............................................31 - 4.2 Message Headers .............................................31 - 4.3 Message Body ................................................32 - 4.4 Message Length ..............................................33 - 4.5 General Header Fields .......................................34 - 5 Request .......................................................35 - 5.1 Request-Line ................................................35 - 5.1.1 Method ...................................................36 - 5.1.2 Request-URI ..............................................36 - 5.2 The Resource Identified by a Request ........................38 - 5.3 Request Header Fields .......................................38 - 6 Response ......................................................39 - 6.1 Status-Line .................................................39 - 6.1.1 Status Code and Reason Phrase ............................39 - 6.2 Response Header Fields ......................................41 - - - -Fielding, et al. 
Standards Track [Page 2] - -RFC 2616 HTTP/1.1 June 1999 - - - 7 Entity ........................................................42 - 7.1 Entity Header Fields ........................................42 - 7.2 Entity Body .................................................43 - 7.2.1 Type .....................................................43 - 7.2.2 Entity Length ............................................43 - 8 Connections ...................................................44 - 8.1 Persistent Connections ......................................44 - 8.1.1 Purpose ..................................................44 - 8.1.2 Overall Operation ........................................45 - 8.1.3 Proxy Servers ............................................46 - 8.1.4 Practical Considerations .................................46 - 8.2 Message Transmission Requirements ...........................47 - 8.2.1 Persistent Connections and Flow Control ..................47 - 8.2.2 Monitoring Connections for Error Status Messages .........48 - 8.2.3 Use of the 100 (Continue) Status .........................48 - 8.2.4 Client Behavior if Server Prematurely Closes Connection ..50 - 9 Method Definitions ............................................51 - 9.1 Safe and Idempotent Methods .................................51 - 9.1.1 Safe Methods .............................................51 - 9.1.2 Idempotent Methods .......................................51 - 9.2 OPTIONS .....................................................52 - 9.3 GET .........................................................53 - 9.4 HEAD ........................................................54 - 9.5 POST ........................................................54 - 9.6 PUT .........................................................55 - 9.7 DELETE ......................................................56 - 9.8 TRACE .......................................................56 - 9.9 CONNECT 
.....................................................57 - 10 Status Code Definitions ......................................57 - 10.1 Informational 1xx ...........................................57 - 10.1.1 100 Continue .............................................58 - 10.1.2 101 Switching Protocols ..................................58 - 10.2 Successful 2xx ..............................................58 - 10.2.1 200 OK ...................................................58 - 10.2.2 201 Created ..............................................59 - 10.2.3 202 Accepted .............................................59 - 10.2.4 203 Non-Authoritative Information ........................59 - 10.2.5 204 No Content ...........................................60 - 10.2.6 205 Reset Content ........................................60 - 10.2.7 206 Partial Content ......................................60 - 10.3 Redirection 3xx .............................................61 - 10.3.1 300 Multiple Choices .....................................61 - 10.3.2 301 Moved Permanently ....................................62 - 10.3.3 302 Found ................................................62 - 10.3.4 303 See Other ............................................63 - 10.3.5 304 Not Modified .........................................63 - 10.3.6 305 Use Proxy ............................................64 - 10.3.7 306 (Unused) .............................................64 - - - -Fielding, et al. 
Standards Track [Page 3] - -RFC 2616 HTTP/1.1 June 1999 - - - 10.3.8 307 Temporary Redirect ...................................65 - 10.4 Client Error 4xx ............................................65 - 10.4.1 400 Bad Request .........................................65 - 10.4.2 401 Unauthorized ........................................66 - 10.4.3 402 Payment Required ....................................66 - 10.4.4 403 Forbidden ...........................................66 - 10.4.5 404 Not Found ...........................................66 - 10.4.6 405 Method Not Allowed ..................................66 - 10.4.7 406 Not Acceptable ......................................67 - 10.4.8 407 Proxy Authentication Required .......................67 - 10.4.9 408 Request Timeout .....................................67 - 10.4.10 409 Conflict ............................................67 - 10.4.11 410 Gone ................................................68 - 10.4.12 411 Length Required .....................................68 - 10.4.13 412 Precondition Failed .................................68 - 10.4.14 413 Request Entity Too Large ............................69 - 10.4.15 414 Request-URI Too Long ................................69 - 10.4.16 415 Unsupported Media Type ..............................69 - 10.4.17 416 Requested Range Not Satisfiable .....................69 - 10.4.18 417 Expectation Failed ..................................70 - 10.5 Server Error 5xx ............................................70 - 10.5.1 500 Internal Server Error ................................70 - 10.5.2 501 Not Implemented ......................................70 - 10.5.3 502 Bad Gateway ..........................................70 - 10.5.4 503 Service Unavailable ..................................70 - 10.5.5 504 Gateway Timeout ......................................71 - 10.5.6 505 HTTP Version Not Supported ...........................71 - 11 Access Authentication 
........................................71 - 12 Content Negotiation ..........................................71 - 12.1 Server-driven Negotiation ...................................72 - 12.2 Agent-driven Negotiation ....................................73 - 12.3 Transparent Negotiation .....................................74 - 13 Caching in HTTP ..............................................74 - 13.1.1 Cache Correctness ........................................75 - 13.1.2 Warnings .................................................76 - 13.1.3 Cache-control Mechanisms .................................77 - 13.1.4 Explicit User Agent Warnings .............................78 - 13.1.5 Exceptions to the Rules and Warnings .....................78 - 13.1.6 Client-controlled Behavior ...............................79 - 13.2 Expiration Model ............................................79 - 13.2.1 Server-Specified Expiration ..............................79 - 13.2.2 Heuristic Expiration .....................................80 - 13.2.3 Age Calculations .........................................80 - 13.2.4 Expiration Calculations ..................................83 - 13.2.5 Disambiguating Expiration Values .........................84 - 13.2.6 Disambiguating Multiple Responses ........................84 - 13.3 Validation Model ............................................85 - 13.3.1 Last-Modified Dates ......................................86 - - - -Fielding, et al. 
Standards Track [Page 4] - -RFC 2616 HTTP/1.1 June 1999 - - - 13.3.2 Entity Tag Cache Validators ..............................86 - 13.3.3 Weak and Strong Validators ...............................86 - 13.3.4 Rules for When to Use Entity Tags and Last-Modified Dates.89 - 13.3.5 Non-validating Conditionals ..............................90 - 13.4 Response Cacheability .......................................91 - 13.5 Constructing Responses From Caches ..........................92 - 13.5.1 End-to-end and Hop-by-hop Headers ........................92 - 13.5.2 Non-modifiable Headers ...................................92 - 13.5.3 Combining Headers ........................................94 - 13.5.4 Combining Byte Ranges ....................................95 - 13.6 Caching Negotiated Responses ................................95 - 13.7 Shared and Non-Shared Caches ................................96 - 13.8 Errors or Incomplete Response Cache Behavior ................97 - 13.9 Side Effects of GET and HEAD ................................97 - 13.10 Invalidation After Updates or Deletions ...................97 - 13.11 Write-Through Mandatory ...................................98 - 13.12 Cache Replacement .........................................99 - 13.13 History Lists .............................................99 - 14 Header Field Definitions ....................................100 - 14.1 Accept .....................................................100 - 14.2 Accept-Charset .............................................102 - 14.3 Accept-Encoding ............................................102 - 14.4 Accept-Language ............................................104 - 14.5 Accept-Ranges ..............................................105 - 14.6 Age ........................................................106 - 14.7 Allow ......................................................106 - 14.8 Authorization ..............................................107 - 14.9 Cache-Control 
..............................................108 - 14.9.1 What is Cacheable .......................................109 - 14.9.2 What May be Stored by Caches ............................110 - 14.9.3 Modifications of the Basic Expiration Mechanism .........111 - 14.9.4 Cache Revalidation and Reload Controls ..................113 - 14.9.5 No-Transform Directive ..................................115 - 14.9.6 Cache Control Extensions ................................116 - 14.10 Connection ...............................................117 - 14.11 Content-Encoding .........................................118 - 14.12 Content-Language .........................................118 - 14.13 Content-Length ...........................................119 - 14.14 Content-Location .........................................120 - 14.15 Content-MD5 ..............................................121 - 14.16 Content-Range ............................................122 - 14.17 Content-Type .............................................124 - 14.18 Date .....................................................124 - 14.18.1 Clockless Origin Server Operation ......................125 - 14.19 ETag .....................................................126 - 14.20 Expect ...................................................126 - 14.21 Expires ..................................................127 - 14.22 From .....................................................128 - - - -Fielding, et al. 
Standards Track [Page 5] - -RFC 2616 HTTP/1.1 June 1999 - - - 14.23 Host .....................................................128 - 14.24 If-Match .................................................129 - 14.25 If-Modified-Since ........................................130 - 14.26 If-None-Match ............................................132 - 14.27 If-Range .................................................133 - 14.28 If-Unmodified-Since ......................................134 - 14.29 Last-Modified ............................................134 - 14.30 Location .................................................135 - 14.31 Max-Forwards .............................................136 - 14.32 Pragma ...................................................136 - 14.33 Proxy-Authenticate .......................................137 - 14.34 Proxy-Authorization ......................................137 - 14.35 Range ....................................................138 - 14.35.1 Byte Ranges ...........................................138 - 14.35.2 Range Retrieval Requests ..............................139 - 14.36 Referer ..................................................140 - 14.37 Retry-After ..............................................141 - 14.38 Server ...................................................141 - 14.39 TE .......................................................142 - 14.40 Trailer ..................................................143 - 14.41 Transfer-Encoding..........................................143 - 14.42 Upgrade ..................................................144 - 14.43 User-Agent ...............................................145 - 14.44 Vary .....................................................145 - 14.45 Via ......................................................146 - 14.46 Warning ..................................................148 - 14.47 WWW-Authenticate .........................................150 - 15 Security Considerations 
.......................................150 - 15.1 Personal Information....................................151 - 15.1.1 Abuse of Server Log Information .........................151 - 15.1.2 Transfer of Sensitive Information .......................151 - 15.1.3 Encoding Sensitive Information in URI's .................152 - 15.1.4 Privacy Issues Connected to Accept Headers ..............152 - 15.2 Attacks Based On File and Path Names .......................153 - 15.3 DNS Spoofing ...............................................154 - 15.4 Location Headers and Spoofing ..............................154 - 15.5 Content-Disposition Issues .................................154 - 15.6 Authentication Credentials and Idle Clients ................155 - 15.7 Proxies and Caching ........................................155 - 15.7.1 Denial of Service Attacks on Proxies....................156 - 16 Acknowledgments .............................................156 - 17 References ..................................................158 - 18 Authors' Addresses ..........................................162 - 19 Appendices ..................................................164 - 19.1 Internet Media Type message/http and application/http ......164 - 19.2 Internet Media Type multipart/byteranges ...................165 - 19.3 Tolerant Applications ......................................166 - 19.4 Differences Between HTTP Entities and RFC 2045 Entities ....167 - - - -Fielding, et al. 
Standards Track [Page 6] - -RFC 2616 HTTP/1.1 June 1999 - - - 19.4.1 MIME-Version ............................................167 - 19.4.2 Conversion to Canonical Form ............................167 - 19.4.3 Conversion of Date Formats ..............................168 - 19.4.4 Introduction of Content-Encoding ........................168 - 19.4.5 No Content-Transfer-Encoding ............................168 - 19.4.6 Introduction of Transfer-Encoding .......................169 - 19.4.7 MHTML and Line Length Limitations .......................169 - 19.5 Additional Features ........................................169 - 19.5.1 Content-Disposition .....................................170 - 19.6 Compatibility with Previous Versions .......................170 - 19.6.1 Changes from HTTP/1.0 ...................................171 - 19.6.2 Compatibility with HTTP/1.0 Persistent Connections ......172 - 19.6.3 Changes from RFC 2068 ...................................172 - 20 Index .......................................................175 - 21 Full Copyright Statement ....................................176 - -1 Introduction - -1.1 Purpose - - The Hypertext Transfer Protocol (HTTP) is an application-level - protocol for distributed, collaborative, hypermedia information - systems. HTTP has been in use by the World-Wide Web global - information initiative since 1990. The first version of HTTP, - referred to as HTTP/0.9, was a simple protocol for raw data transfer - across the Internet. HTTP/1.0, as defined by RFC 1945 [6], improved - the protocol by allowing messages to be in the format of MIME-like - messages, containing metainformation about the data transferred and - modifiers on the request/response semantics. However, HTTP/1.0 does - not sufficiently take into consideration the effects of hierarchical - proxies, caching, the need for persistent connections, or virtual - hosts. 
In addition, the proliferation of incompletely-implemented - applications calling themselves "HTTP/1.0" has necessitated a - protocol version change in order for two communicating applications - to determine each other's true capabilities. - - This specification defines the protocol referred to as "HTTP/1.1". - This protocol includes more stringent requirements than HTTP/1.0 in - order to ensure reliable implementation of its features. - - Practical information systems require more functionality than simple - retrieval, including search, front-end update, and annotation. HTTP - allows an open-ended set of methods and headers that indicate the - purpose of a request [47]. It builds on the discipline of reference - provided by the Uniform Resource Identifier (URI) [3], as a location - (URL) [4] or name (URN) [20], for indicating the resource to which a - - - - - -Fielding, et al. Standards Track [Page 7] - -RFC 2616 HTTP/1.1 June 1999 - - - method is to be applied. Messages are passed in a format similar to - that used by Internet mail [9] as defined by the Multipurpose - Internet Mail Extensions (MIME) [7]. - - HTTP is also used as a generic protocol for communication between - user agents and proxies/gateways to other Internet systems, including - those supported by the SMTP [16], NNTP [13], FTP [18], Gopher [2], - and WAIS [10] protocols. In this way, HTTP allows basic hypermedia - access to resources available from diverse applications. - -1.2 Requirements - - The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", - "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this - document are to be interpreted as described in RFC 2119 [34]. - - An implementation is not compliant if it fails to satisfy one or more - of the MUST or REQUIRED level requirements for the protocols it - implements. 
An implementation that satisfies all the MUST or REQUIRED - level and all the SHOULD level requirements for its protocols is said - to be "unconditionally compliant"; one that satisfies all the MUST - level requirements but not all the SHOULD level requirements for its - protocols is said to be "conditionally compliant." - -1.3 Terminology - - This specification uses a number of terms to refer to the roles - played by participants in, and objects of, the HTTP communication. - - connection - A transport layer virtual circuit established between two programs - for the purpose of communication. - - message - The basic unit of HTTP communication, consisting of a structured - sequence of octets matching the syntax defined in section 4 and - transmitted via the connection. - - request - An HTTP request message, as defined in section 5. - - response - An HTTP response message, as defined in section 6. - - - - - - - - -Fielding, et al. Standards Track [Page 8] - -RFC 2616 HTTP/1.1 June 1999 - - - resource - A network data object or service that can be identified by a URI, - as defined in section 3.2. Resources may be available in multiple - representations (e.g. multiple languages, data formats, size, and - resolutions) or vary in other ways. - - entity - The information transferred as the payload of a request or - response. An entity consists of metainformation in the form of - entity-header fields and content in the form of an entity-body, as - described in section 7. - - representation - An entity included with a response that is subject to content - negotiation, as described in section 12. There may exist multiple - representations associated with a particular response status. - - content negotiation - The mechanism for selecting the appropriate representation when - servicing a request, as described in section 12. The - representation of entities in any response can be negotiated - (including error responses). 
- - variant - A resource may have one, or more than one, representation(s) - associated with it at any given instant. Each of these - representations is termed a `variant'. Use of the term `variant' - does not necessarily imply that the resource is subject to content - negotiation. - - client - A program that establishes connections for the purpose of sending - requests. - - user agent - The client which initiates a request. These are often browsers, - editors, spiders (web-traversing robots), or other end user tools. - - server - An application program that accepts connections in order to - service requests by sending back responses. Any given program may - be capable of being both a client and a server; our use of these - terms refers only to the role being performed by the program for a - particular connection, rather than to the program's capabilities - in general. Likewise, any server may act as an origin server, - proxy, gateway, or tunnel, switching behavior based on the nature - of each request. - - - - -Fielding, et al. Standards Track [Page 9] - -RFC 2616 HTTP/1.1 June 1999 - - - origin server - The server on which a given resource resides or is to be created. - - proxy - An intermediary program which acts as both a server and a client - for the purpose of making requests on behalf of other clients. - Requests are serviced internally or by passing them on, with - possible translation, to other servers. A proxy MUST implement - both the client and server requirements of this specification. A - "transparent proxy" is a proxy that does not modify the request or - response beyond what is required for proxy authentication and - identification. A "non-transparent proxy" is a proxy that modifies - the request or response in order to provide some added service to - the user agent, such as group annotation services, media type - transformation, protocol reduction, or anonymity filtering. 
Except - where either transparent or non-transparent behavior is explicitly - stated, the HTTP proxy requirements apply to both types of - proxies. - - gateway - A server which acts as an intermediary for some other server. - Unlike a proxy, a gateway receives requests as if it were the - origin server for the requested resource; the requesting client - may not be aware that it is communicating with a gateway. - - tunnel - An intermediary program which is acting as a blind relay between - two connections. Once active, a tunnel is not considered a party - to the HTTP communication, though the tunnel may have been - initiated by an HTTP request. The tunnel ceases to exist when both - ends of the relayed connections are closed. - - cache - A program's local store of response messages and the subsystem - that controls its message storage, retrieval, and deletion. A - cache stores cacheable responses in order to reduce the response - time and network bandwidth consumption on future, equivalent - requests. Any client or server may include a cache, though a cache - cannot be used by a server that is acting as a tunnel. - - cacheable - A response is cacheable if a cache is allowed to store a copy of - the response message for use in answering subsequent requests. The - rules for determining the cacheability of HTTP responses are - defined in section 13. Even if a resource is cacheable, there may - be additional constraints on whether a cache can use the cached - copy for a particular request. - - - - -Fielding, et al. Standards Track [Page 10] - -RFC 2616 HTTP/1.1 June 1999 - - - first-hand - A response is first-hand if it comes directly and without - unnecessary delay from the origin server, perhaps via one or more - proxies. A response is also first-hand if its validity has just - been checked directly with the origin server. 
- - explicit expiration time - The time at which the origin server intends that an entity should - no longer be returned by a cache without further validation. - - heuristic expiration time - An expiration time assigned by a cache when no explicit expiration - time is available. - - age - The age of a response is the time since it was sent by, or - successfully validated with, the origin server. - - freshness lifetime - The length of time between the generation of a response and its - expiration time. - - fresh - A response is fresh if its age has not yet exceeded its freshness - lifetime. - - stale - A response is stale if its age has passed its freshness lifetime. - - semantically transparent - A cache behaves in a "semantically transparent" manner, with - respect to a particular response, when its use affects neither the - requesting client nor the origin server, except to improve - performance. When a cache is semantically transparent, the client - receives exactly the same response (except for hop-by-hop headers) - that it would have received had its request been handled directly - by the origin server. - - validator - A protocol element (e.g., an entity tag or a Last-Modified time) - that is used to find out whether a cache entry is an equivalent - copy of an entity. - - upstream/downstream - Upstream and downstream describe the flow of a message: all - messages flow from upstream to downstream. - - - - - -Fielding, et al. Standards Track [Page 11] - -RFC 2616 HTTP/1.1 June 1999 - - - inbound/outbound - Inbound and outbound refer to the request and response paths for - messages: "inbound" means "traveling toward the origin server", - and "outbound" means "traveling toward the user agent" - -1.4 Overall Operation - - The HTTP protocol is a request/response protocol. 
A client sends a - request to the server in the form of a request method, URI, and - protocol version, followed by a MIME-like message containing request - modifiers, client information, and possible body content over a - connection with a server. The server responds with a status line, - including the message's protocol version and a success or error code, - followed by a MIME-like message containing server information, entity - metainformation, and possible entity-body content. The relationship - between HTTP and MIME is described in appendix 19.4. - - Most HTTP communication is initiated by a user agent and consists of - a request to be applied to a resource on some origin server. In the - simplest case, this may be accomplished via a single connection (v) - between the user agent (UA) and the origin server (O). - - request chain ------------------------> - UA -------------------v------------------- O - <----------------------- response chain - - A more complicated situation occurs when one or more intermediaries - are present in the request/response chain. There are three common - forms of intermediary: proxy, gateway, and tunnel. A proxy is a - forwarding agent, receiving requests for a URI in its absolute form, - rewriting all or part of the message, and forwarding the reformatted - request toward the server identified by the URI. A gateway is a - receiving agent, acting as a layer above some other server(s) and, if - necessary, translating the requests to the underlying server's - protocol. A tunnel acts as a relay point between two connections - without changing the messages; tunnels are used when the - communication needs to pass through an intermediary (such as a - firewall) even when the intermediary cannot understand the contents - of the messages. 
- - request chain --------------------------------------> - UA -----v----- A -----v----- B -----v----- C -----v----- O - <------------------------------------- response chain - - The figure above shows three intermediaries (A, B, and C) between the - user agent and origin server. A request or response message that - travels the whole chain will pass through four separate connections. - This distinction is important because some HTTP communication options - - - -Fielding, et al. Standards Track [Page 12] - -RFC 2616 HTTP/1.1 June 1999 - - - may apply only to the connection with the nearest, non-tunnel - neighbor, only to the end-points of the chain, or to all connections - along the chain. Although the diagram is linear, each participant may - be engaged in multiple, simultaneous communications. For example, B - may be receiving requests from many clients other than A, and/or - forwarding requests to servers other than C, at the same time that it - is handling A's request. - - Any party to the communication which is not acting as a tunnel may - employ an internal cache for handling requests. The effect of a cache - is that the request/response chain is shortened if one of the - participants along the chain has a cached response applicable to that - request. The following illustrates the resulting chain if B has a - cached copy of an earlier response from O (via C) for a request which - has not been cached by UA or A. - - request chain ----------> - UA -----v----- A -----v----- B - - - - - - C - - - - - - O - <--------- response chain - - Not all responses are usefully cacheable, and some requests may - contain modifiers which place special requirements on cache behavior. - HTTP requirements for cache behavior and cacheable responses are - defined in section 13. - - In fact, there are a wide variety of architectures and configurations - of caches and proxies currently being experimented with or deployed - across the World Wide Web. 
These systems include national hierarchies - of proxy caches to save transoceanic bandwidth, systems that - broadcast or multicast cache entries, organizations that distribute - subsets of cached data via CD-ROM, and so on. HTTP systems are used - in corporate intranets over high-bandwidth links, and for access via - PDAs with low-power radio links and intermittent connectivity. The - goal of HTTP/1.1 is to support the wide diversity of configurations - already deployed while introducing protocol constructs that meet the - needs of those who build web applications that require high - reliability and, failing that, at least reliable indications of - failure. - - HTTP communication usually takes place over TCP/IP connections. The - default port is TCP 80 [19], but other ports can be used. This does - not preclude HTTP from being implemented on top of any other protocol - on the Internet, or on other networks. HTTP only presumes a reliable - transport; any protocol that provides such guarantees can be used; - the mapping of the HTTP/1.1 request and response structures onto the - transport data units of the protocol in question is outside the scope - of this specification. - - - - -Fielding, et al. Standards Track [Page 13] - -RFC 2616 HTTP/1.1 June 1999 - - - In HTTP/1.0, most implementations used a new connection for each - request/response exchange. In HTTP/1.1, a connection may be used for - one or more request/response exchanges, although connections may be - closed for a variety of reasons (see section 8.1). - -2 Notational Conventions and Generic Grammar - -2.1 Augmented BNF - - All of the mechanisms specified in this document are described in - both prose and an augmented Backus-Naur Form (BNF) similar to that - used by RFC 822 [9]. Implementors will need to be familiar with the - notation in order to understand this specification. 
The augmented BNF - includes the following constructs: - - name = definition - The name of a rule is simply the name itself (without any - enclosing "<" and ">") and is separated from its definition by the - equal "=" character. White space is only significant in that - indentation of continuation lines is used to indicate a rule - definition that spans more than one line. Certain basic rules are - in uppercase, such as SP, LWS, HT, CRLF, DIGIT, ALPHA, etc. Angle - brackets are used within definitions whenever their presence will - facilitate discerning the use of rule names. - - "literal" - Quotation marks surround literal text. Unless stated otherwise, - the text is case-insensitive. - - rule1 | rule2 - Elements separated by a bar ("|") are alternatives, e.g., "yes | - no" will accept yes or no. - - (rule1 rule2) - Elements enclosed in parentheses are treated as a single element. - Thus, "(elem (foo | bar) elem)" allows the token sequences "elem - foo elem" and "elem bar elem". - - *rule - The character "*" preceding an element indicates repetition. The - full form is "*element" indicating at least and at most - occurrences of element. Default values are 0 and infinity so - that "*(element)" allows any number, including zero; "1*element" - requires at least one; and "1*2element" allows one or two. - - [rule] - Square brackets enclose optional elements; "[foo bar]" is - equivalent to "*1(foo bar)". - - - -Fielding, et al. Standards Track [Page 14] - -RFC 2616 HTTP/1.1 June 1999 - - - N rule - Specific repetition: "(element)" is equivalent to - "*(element)"; that is, exactly occurrences of (element). - Thus 2DIGIT is a 2-digit number, and 3ALPHA is a string of three - alphabetic characters. - - #rule - A construct "#" is defined, similar to "*", for defining lists of - elements. The full form is "#element" indicating at least - and at most elements, each separated by one or more commas - (",") and OPTIONAL linear white space (LWS). 
This makes the usual - form of lists very easy; a rule such as - ( *LWS element *( *LWS "," *LWS element )) - can be shown as - 1#element - Wherever this construct is used, null elements are allowed, but do - not contribute to the count of elements present. That is, - "(element), , (element) " is permitted, but counts as only two - elements. Therefore, where at least one element is required, at - least one non-null element MUST be present. Default values are 0 - and infinity so that "#element" allows any number, including zero; - "1#element" requires at least one; and "1#2element" allows one or - two. - - ; comment - A semi-colon, set off some distance to the right of rule text, - starts a comment that continues to the end of line. This is a - simple way of including useful notes in parallel with the - specifications. - - implied *LWS - The grammar described by this specification is word-based. Except - where noted otherwise, linear white space (LWS) can be included - between any two adjacent words (token or quoted-string), and - between adjacent words and separators, without changing the - interpretation of a field. At least one delimiter (LWS and/or - - separators) MUST exist between any two tokens (for the definition - of "token" below), since they would otherwise be interpreted as a - single token. - -2.2 Basic Rules - - The following rules are used throughout this specification to - describe basic parsing constructs. The US-ASCII coded character set - is defined by ANSI X3.4-1986 [21]. - - - - - -Fielding, et al. Standards Track [Page 15] - -RFC 2616 HTTP/1.1 June 1999 - - - OCTET = - CHAR = - UPALPHA = - LOALPHA = - ALPHA = UPALPHA | LOALPHA - DIGIT = - CTL = - CR = - LF = - SP = - HT = - <"> = - - HTTP/1.1 defines the sequence CR LF as the end-of-line marker for all - protocol elements except the entity-body (see appendix 19.3 for - tolerant applications). 
The end-of-line marker within an entity-body - is defined by its associated media type, as described in section 3.7. - - CRLF = CR LF - - HTTP/1.1 header field values can be folded onto multiple lines if the - continuation line begins with a space or horizontal tab. All linear - white space, including folding, has the same semantics as SP. A - recipient MAY replace any linear white space with a single SP before - interpreting the field value or forwarding the message downstream. - - LWS = [CRLF] 1*( SP | HT ) - - The TEXT rule is only used for descriptive field contents and values - that are not intended to be interpreted by the message parser. Words - of *TEXT MAY contain characters from character sets other than ISO- - 8859-1 [22] only when encoded according to the rules of RFC 2047 - [14]. - - TEXT = - - A CRLF is allowed in the definition of TEXT only as part of a header - field continuation. It is expected that the folding LWS will be - replaced with a single SP before interpretation of the TEXT value. - - Hexadecimal numeric characters are used in several protocol elements. - - HEX = "A" | "B" | "C" | "D" | "E" | "F" - | "a" | "b" | "c" | "d" | "e" | "f" | DIGIT - - - - - -Fielding, et al. Standards Track [Page 16] - -RFC 2616 HTTP/1.1 June 1999 - - - Many HTTP/1.1 header field values consist of words separated by LWS - or special characters. These special characters MUST be in a quoted - string to be used within a parameter value (as defined in section - 3.6). - - token = 1* - separators = "(" | ")" | "<" | ">" | "@" - | "," | ";" | ":" | "\" | <"> - | "/" | "[" | "]" | "?" | "=" - | "{" | "}" | SP | HT - - Comments can be included in some HTTP header fields by surrounding - the comment text with parentheses. Comments are only allowed in - fields containing "comment" as part of their field value definition. - In all other fields, parentheses are considered part of the field - value. 
- - comment = "(" *( ctext | quoted-pair | comment ) ")" - ctext = - - A string of text is parsed as a single word if it is quoted using - double-quote marks. - - quoted-string = ( <"> *(qdtext | quoted-pair ) <"> ) - qdtext = > - - The backslash character ("\") MAY be used as a single-character - quoting mechanism only within quoted-string and comment constructs. - - quoted-pair = "\" CHAR - -3 Protocol Parameters - -3.1 HTTP Version - - HTTP uses a "." numbering scheme to indicate versions - of the protocol. The protocol versioning policy is intended to allow - the sender to indicate the format of a message and its capacity for - understanding further HTTP communication, rather than the features - obtained via that communication. No change is made to the version - number for the addition of message components which do not affect - communication behavior or which only add to extensible field values. - The number is incremented when the changes made to the - protocol add features which do not change the general message parsing - algorithm, but which may add to the message semantics and imply - additional capabilities of the sender. The number is - incremented when the format of a message within the protocol is - changed. See RFC 2145 [36] for a fuller explanation. - - - -Fielding, et al. Standards Track [Page 17] - -RFC 2616 HTTP/1.1 June 1999 - - - The version of an HTTP message is indicated by an HTTP-Version field - in the first line of the message. [[HTTP-Version is case-sensitive.]] - - HTTP-Version = "HTTP" "/" 1*DIGIT "." 1*DIGIT - - Note that the major and minor numbers MUST be treated as separate - integers and that each MAY be incremented higher than a single digit. - Thus, HTTP/2.4 is a lower version than HTTP/2.13, which in turn is - lower than HTTP/12.3. Leading zeros MUST be ignored by recipients and - MUST NOT be sent. 
- - An application that sends a request or response message that includes - HTTP-Version of "HTTP/1.1" MUST be at least conditionally compliant - with this specification. Applications that are at least conditionally - compliant with this specification SHOULD use an HTTP-Version of - "HTTP/1.1" in their messages, and MUST do so for any message that is - not compatible with HTTP/1.0. For more details on when to send - specific HTTP-Version values, see RFC 2145 [36]. - - The HTTP version of an application is the highest HTTP version for - which the application is at least conditionally compliant. - - Proxy and gateway applications need to be careful when forwarding - messages in protocol versions different from that of the application. - Since the protocol version indicates the protocol capability of the - sender, a proxy/gateway MUST NOT send a message with a version - indicator which is greater than its actual version. If a higher - version request is received, the proxy/gateway MUST either downgrade - the request version, or respond with an error, or switch to tunnel - behavior. - - Due to interoperability problems with HTTP/1.0 proxies discovered - since the publication of RFC 2068[33], caching proxies MUST, gateways - MAY, and tunnels MUST NOT upgrade the request to the highest version - they support. The proxy/gateway's response to that request MUST be in - the same major version as the request. - - Note: Converting between versions of HTTP may involve modification - of header fields required or forbidden by the versions involved. - -3.2 Uniform Resource Identifiers - - URIs have been known by many names: WWW addresses, Universal Document - Identifiers, Universal Resource Identifiers [3], and finally the - combination of Uniform Resource Locators (URL) [4] and Names (URN) - [20]. As far as HTTP is concerned, Uniform Resource Identifiers are - simply formatted strings which identify--via name, location, or any - other characteristic--a resource. 
- - - -Fielding, et al. Standards Track [Page 18] - -RFC 2616 HTTP/1.1 June 1999 - - -3.2.1 General Syntax - - URIs in HTTP can be represented in absolute form or relative to some - known base URI [11], depending upon the context of their use. The two - forms are differentiated by the fact that absolute URIs always begin - with a scheme name followed by a colon. For definitive information on - URL syntax and semantics, see "Uniform Resource Identifiers (URI): - Generic Syntax and Semantics," RFC 2396 [42] (which replaces RFCs - 1738 [4] and RFC 1808 [11]). This specification adopts the - definitions of "URI-reference", "absoluteURI", "relativeURI", "port", - "host","abs_path", "rel_path", and "authority" from that - specification. - - The HTTP protocol does not place any a priori limit on the length of - a URI. Servers MUST be able to handle the URI of any resource they - serve, and SHOULD be able to handle URIs of unbounded length if they - provide GET-based forms that could generate such URIs. A server - SHOULD return 414 (Request-URI Too Long) status if a URI is longer - than the server can handle (see section 10.4.15). - - Note: Servers ought to be cautious about depending on URI lengths - above 255 bytes, because some older client or proxy - implementations might not properly support these lengths. - -3.2.2 http URL - - The "http" scheme is used to locate network resources via the HTTP - protocol. This section defines the scheme-specific syntax and - semantics for http URLs. - - http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]] - - If the port is empty or not given, port 80 is assumed. The semantics - are that the identified resource is located at the server listening - for TCP connections on that port of that host, and the Request-URI - for the resource is abs_path (section 5.1.2). The use of IP addresses - in URLs SHOULD be avoided whenever possible (see RFC 1900 [24]). 
If - the abs_path is not present in the URL, it MUST be given as "/" when - used as a Request-URI for a resource (section 5.1.2). If a proxy - receives a host name which is not a fully qualified domain name, it - MAY add its domain to the host name it received. If a proxy receives - a fully qualified domain name, the proxy MUST NOT change the host - name. - - - - - - - - -Fielding, et al. Standards Track [Page 19] - -RFC 2616 HTTP/1.1 June 1999 - - -3.2.3 URI Comparison - - When comparing two URIs to decide if they match or not, a client - SHOULD use a case-sensitive octet-by-octet comparison of the entire - URIs, with these exceptions: - - - A port that is empty or not given is equivalent to the default - port for that URI-reference; - - - Comparisons of host names MUST be case-insensitive; - - - Comparisons of scheme names MUST be case-insensitive; - - - An empty abs_path is equivalent to an abs_path of "/". - - Characters other than those in the "reserved" and "unsafe" sets (see - RFC 2396 [42]) are equivalent to their ""%" HEX HEX" encoding. - [[ Ignore reference to "unsafe" set. ]] - - For example, the following three URIs are equivalent: - - http://abc.com:80/~smith/home.html - http://ABC.com/%7Esmith/home.html - http://ABC.com:/%7esmith/home.html - -3.3 Date/Time Formats - -3.3.1 Full Date - - HTTP applications have historically allowed three different formats - for the representation of date/time stamps: - - Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 - Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 - Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - - The first format is preferred as an Internet standard and represents - a fixed-length subset of that defined by RFC 1123 [8] (an update to - RFC 822 [9]). The second format is in common use, but is based on the - obsolete RFC 850 [12] date format and lacks a four-digit year. 
- HTTP/1.1 clients and servers that parse the date value MUST accept - all three formats (for compatibility with HTTP/1.0), though they MUST - only generate the RFC 1123 format for representing HTTP-date values - in header fields. See section 19.3 for further information. - - Note: Recipients of date values are encouraged to be robust in - accepting date values that may have been sent by non-HTTP - applications, as is sometimes the case when retrieving or posting - messages via proxies/gateways to SMTP or NNTP. - - - -Fielding, et al. Standards Track [Page 20] - -RFC 2616 HTTP/1.1 June 1999 - - - All HTTP date/time stamps MUST be represented in Greenwich Mean Time - (GMT), without exception. For the purposes of HTTP, GMT is exactly - equal to UTC (Coordinated Universal Time). This is indicated in the - first two formats by the inclusion of "GMT" as the three-letter - abbreviation for time zone, and MUST be assumed when reading the - asctime format. HTTP-date is case sensitive and MUST NOT include - additional LWS beyond that specifically included as SP in the - grammar. - - HTTP-date = rfc1123-date | rfc850-date | asctime-date - rfc1123-date = wkday "," SP date1 SP time SP "GMT" - rfc850-date = weekday "," SP date2 SP time SP "GMT" - asctime-date = wkday SP date3 SP time SP 4DIGIT - date1 = 2DIGIT SP month SP 4DIGIT - ; day month year (e.g., 02 Jun 1982) - date2 = 2DIGIT "-" month "-" 2DIGIT - ; day-month-year (e.g., 02-Jun-82) - date3 = month SP ( 2DIGIT | ( SP 1DIGIT )) - ; month day (e.g., Jun 2) - time = 2DIGIT ":" 2DIGIT ":" 2DIGIT - ; 00:00:00 - 23:59:59 - wkday = "Mon" | "Tue" | "Wed" - | "Thu" | "Fri" | "Sat" | "Sun" - weekday = "Monday" | "Tuesday" | "Wednesday" - | "Thursday" | "Friday" | "Saturday" | "Sunday" - month = "Jan" | "Feb" | "Mar" | "Apr" - | "May" | "Jun" | "Jul" | "Aug" - | "Sep" | "Oct" | "Nov" | "Dec" - - Note: HTTP requirements for the date/time stamp format apply only - to their usage within the protocol stream. 
Clients and servers are - not required to use these formats for user presentation, request - logging, etc. - -3.3.2 Delta Seconds - - Some HTTP header fields allow a time value to be specified as an - integer number of seconds, represented in decimal, after the time - that the message was received. - - delta-seconds = 1*DIGIT - -3.4 Character Sets - - HTTP uses the same definition of the term "character set" as that - described for MIME: - - - - - -Fielding, et al. Standards Track [Page 21] - -RFC 2616 HTTP/1.1 June 1999 - - - The term "character set" is used in this document to refer to a - method used with one or more tables to convert a sequence of octets - into a sequence of characters. Note that unconditional conversion in - the other direction is not required, in that not all characters may - be available in a given character set and a character set may provide - more than one sequence of octets to represent a particular character. - This definition is intended to allow various kinds of character - encoding, from simple single-table mappings such as US-ASCII to - complex table switching methods such as those that use ISO-2022's - techniques. However, the definition associated with a MIME character - set name MUST fully specify the mapping to be performed from octets - to characters. In particular, use of external profiling information - to determine the exact mapping is not permitted. - - Note: This use of the term "character set" is more commonly - referred to as a "character encoding." However, since HTTP and - MIME share the same registry, it is important that the terminology - also be shared. - - HTTP character sets are identified by case-insensitive tokens. The - complete set of tokens is defined by the IANA Character Set registry - [19]. 
- - charset = token - -[[ HTTP uses charset in two contexts: within an Accept-Charset request ]] -[[ header (in which the charset value is an unquoted token) and as the ]] -[[ value of a parameter in a Content-type header (within a request or ]] -[[ response), in which case the parameter value of the charset ]] -[[ parameter may be quoted. ]] - - Although HTTP allows an arbitrary token to be used as a charset - value, any token that has a predefined value within the IANA - Character Set registry [19] MUST represent the character set defined - by that registry. Applications SHOULD limit their use of character - sets to those defined by the IANA registry. - - Implementors should be aware of IETF character set requirements [38] - [41]. - -3.4.1 Missing Charset - - Some HTTP/1.0 software has interpreted a Content-Type header without - charset parameter incorrectly to mean "recipient should guess." - Senders wishing to defeat this behavior MAY include a charset - parameter even when the charset is ISO-8859-1 and SHOULD do so when - it is known that it will not confuse the recipient. - - Unfortunately, some older HTTP/1.0 clients did not deal properly with - an explicit charset parameter. HTTP/1.1 recipients MUST respect the - charset label provided by the sender; and those user agents that have - a provision to "guess" a charset MUST use the charset from the - - - - - -Fielding, et al. Standards Track [Page 22] - -RFC 2616 HTTP/1.1 June 1999 - - - content-type field if they support that charset, rather than the - recipient's preference, when initially displaying a document. See - section 3.7.1. - -3.5 Content Codings - - Content coding values indicate an encoding transformation that has - been or can be applied to an entity. Content codings are primarily - used to allow a document to be compressed or otherwise usefully - transformed without losing the identity of its underlying media type - and without loss of information. 
Frequently, the entity is stored in - coded form, transmitted directly, and only decoded by the recipient. - - content-coding = token - - All content-coding values are case-insensitive. HTTP/1.1 uses - content-coding values in the Accept-Encoding (section 14.3) and - Content-Encoding (section 14.11) header fields. Although the value - describes the content-coding, what is more important is that it - indicates what decoding mechanism will be required to remove the - encoding. - - The Internet Assigned Numbers Authority (IANA) acts as a registry for - content-coding value tokens. Initially, the registry contains the - following tokens: - - gzip An encoding format produced by the file compression program - "gzip" (GNU zip) as described in RFC 1952 [25]. This format is a - Lempel-Ziv coding (LZ77) with a 32 bit CRC. - - compress - The encoding format produced by the common UNIX file compression - program "compress". This format is an adaptive Lempel-Ziv-Welch - coding (LZW). - - Use of program names for the identification of encoding formats - is not desirable and is discouraged for future encodings. Their - use here is representative of historical practice, not good - design. For compatibility with previous implementations of HTTP, - applications SHOULD consider "x-gzip" and "x-compress" to be - equivalent to "gzip" and "compress" respectively. - - deflate - The "zlib" format defined in RFC 1950 [31] in combination with - the "deflate" compression mechanism described in RFC 1951 [29]. - - - - - - -Fielding, et al. Standards Track [Page 23] - -RFC 2616 HTTP/1.1 June 1999 - - - identity - The default (identity) encoding; the use of no transformation - whatsoever. This content-coding is used only in the Accept- - Encoding header, and SHOULD NOT be used in the Content-Encoding - header. 
- - New content-coding value tokens SHOULD be registered; to allow - interoperability between clients and servers, specifications of the - content coding algorithms needed to implement a new value SHOULD be - publicly available and adequate for independent implementation, and - conform to the purpose of content coding defined in this section. - -3.6 Transfer Codings - - Transfer-coding values are used to indicate an encoding - transformation that has been, can be, or may need to be applied to an - entity-body in order to ensure "safe transport" through the network. - This differs from a content coding in that the transfer-coding is a - property of the message, not of the original entity. - - transfer-coding = "chunked" | transfer-extension - transfer-extension = token *( ";" parameter ) - - Parameters are in the form of attribute/value pairs. - - parameter = attribute "=" value - attribute = token - value = token | quoted-string - - All transfer-coding values are case-insensitive. HTTP/1.1 uses - transfer-coding values in the TE header field (section 14.39) and in - the Transfer-Encoding header field (section 14.41). - - Whenever a transfer-coding is applied to a message-body, the set of - transfer-codings MUST include "chunked", unless the message is - terminated by closing the connection. When the "chunked" transfer- - coding is used, it MUST be the last transfer-coding applied to the - message-body. The "chunked" transfer-coding MUST NOT be applied more - than once to a message-body. These rules allow the recipient to - determine the transfer-length of the message (section 4.4). - - Transfer-codings are analogous to the Content-Transfer-Encoding - values of MIME [7], which were designed to enable safe transport of - binary data over a 7-bit transport service. However, safe transport - has a different focus for an 8bit-clean transfer protocol. 
In HTTP, - the only unsafe characteristic of message-bodies is the difficulty in - determining the exact body length (section 7.2.2), or the desire to - encrypt data over a shared transport. - - - -Fielding, et al. Standards Track [Page 24] - -RFC 2616 HTTP/1.1 June 1999 - - - The Internet Assigned Numbers Authority (IANA) acts as a registry for - transfer-coding value tokens. Initially, the registry contains the - following tokens: "chunked" (section 3.6.1), "identity" (section - 3.6.2), "gzip" (section 3.5), "compress" (section 3.5), and "deflate" - (section 3.5). - - [[ Remove reference to "identity" token ]] - - New transfer-coding value tokens SHOULD be registered in the same way - as new content-coding value tokens (section 3.5). - - A server which receives an entity-body with a transfer-coding it does - not understand SHOULD return 501 (Unimplemented), and close the - connection. A server MUST NOT send transfer-codings to an HTTP/1.0 - client. - -3.6.1 Chunked Transfer Coding - - The chunked encoding modifies the body of a message in order to - transfer it as a series of chunks, each with its own size indicator, - followed by an OPTIONAL trailer containing entity-header fields. This - allows dynamically produced content to be transferred along with the - information necessary for the recipient to verify that it has - received the full message. - - Chunked-Body = *chunk - last-chunk - trailer - CRLF - - chunk = chunk-size [ chunk-extension ] CRLF - chunk-data CRLF - chunk-size = 1*HEX - last-chunk = 1*("0") [ chunk-extension ] CRLF - - chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] ) - chunk-ext-name = token - chunk-ext-val = token | quoted-string - chunk-data = chunk-size(OCTET) - trailer = *(entity-header CRLF) - - The chunk-size field is a string of hex digits indicating the size of - the chunk. The chunked encoding is ended by any chunk whose size is - zero, followed by the trailer, which is terminated by an empty line. 
- - [[ "the size of the chunk" means "the size of the chunk-data in ]] - [[ octets" ]] - - The trailer allows the sender to include additional HTTP header - fields at the end of the message. The Trailer header field can be - used to indicate which header fields are included in a trailer (see - section 14.40). - - - - -Fielding, et al. Standards Track [Page 25] - -RFC 2616 HTTP/1.1 June 1999 - - - A server using chunked transfer-coding in a response MUST NOT use the - trailer for any header fields unless at least one of the following is - true: - - a)the request included a TE header field that indicates "trailers" is - acceptable in the transfer-coding of the response, as described in - section 14.39; or, - - b)the server is the origin server for the response, the trailer - fields consist entirely of optional metadata, and the recipient - could use the message (in a manner acceptable to the origin server) - without receiving this metadata. In other words, the origin server - is willing to accept the possibility that the trailer fields might - be silently discarded along the path to the client. - - This requirement prevents an interoperability failure when the - message is being received by an HTTP/1.1 (or later) proxy and - forwarded to an HTTP/1.0 recipient. It avoids a situation where - compliance with the protocol would have necessitated a possibly - infinite buffer on the proxy. - - An example process for decoding a Chunked-Body is presented in - appendix 19.4.6. - - All HTTP/1.1 applications MUST be able to receive and decode the - "chunked" transfer-coding, and MUST ignore chunk-extension extensions - they do not understand. - -3.7 Media Types - - HTTP uses Internet Media Types [17] in the Content-Type (section - 14.17) and Accept (section 14.1) header fields in order to provide - open and extensible data typing and type negotiation. 
- - media-type = type "/" subtype *( ";" parameter ) - type = token - subtype = token - - Parameters MAY follow the type/subtype in the form of attribute/value - pairs (as defined in section 3.6). - - The type, subtype, and parameter attribute names are case- - insensitive. Parameter values might or might not be case-sensitive, - depending on the semantics of the parameter name. Linear white space - (LWS) MUST NOT be used between the type and subtype, nor between an - attribute and its value. The presence or absence of a parameter might - be significant to the processing of a media-type, depending on its - definition within the media type registry. - - - -Fielding, et al. Standards Track [Page 26] - -RFC 2616 HTTP/1.1 June 1999 - - - Note that some older HTTP applications do not recognize media type - parameters. When sending data to older HTTP applications, - implementations SHOULD only use media type parameters when they are - required by that type/subtype definition. - - Media-type values are registered with the Internet Assigned Number - Authority (IANA [19]). The media type registration process is - outlined in RFC 1590 [17]. Use of non-registered media types is - discouraged. - - [[ "RFC 1590" should be "RFC 2048" ]] - -3.7.1 Canonicalization and Text Defaults - - Internet media types are registered with a canonical form. An - entity-body transferred via HTTP messages MUST be represented in the - appropriate canonical form prior to its transmission except for - "text" types, as defined in the next paragraph. - - When in canonical form, media subtypes of the "text" type use CRLF as - the text line break. HTTP relaxes this requirement and allows the - transport of text media with plain CR or LF alone representing a line - break when it is done consistently for an entire entity-body. HTTP - applications MUST accept CRLF, bare CR, and bare LF as being - representative of a line break in text media received via HTTP. 
In - addition, if the text is represented in a character set that does not - use octets 13 and 10 for CR and LF respectively, as is the case for - some multi-byte character sets, HTTP allows the use of whatever octet - sequences are defined by that character set to represent the - equivalent of CR and LF for line breaks. This flexibility regarding - line breaks applies only to text media in the entity-body; a bare CR - or LF MUST NOT be substituted for CRLF within any of the HTTP control - structures (such as header fields and multipart boundaries). - - If an entity-body is encoded with a content-coding, the underlying - data MUST be in a form defined above prior to being encoded. - - The "charset" parameter is used with some media types to define the - character set (section 3.4) of the data. When no explicit charset - parameter is provided by the sender, media subtypes of the "text" - type are defined to have a default charset value of "ISO-8859-1" when - received via HTTP. Data in character sets other than "ISO-8859-1" or - its subsets MUST be labeled with an appropriate charset value. See - section 3.4.1 for compatibility problems. - -3.7.2 Multipart Types - - MIME provides for a number of "multipart" types -- encapsulations of - one or more entities within a single message-body. All multipart - types share a common syntax, as defined in section 5.1.1 of RFC 2046 - - - -Fielding, et al. Standards Track [Page 27] - -RFC 2616 HTTP/1.1 June 1999 - - - [40], and MUST include a boundary parameter as part of the media type - value. The message body is itself a protocol element and MUST - therefore use only CRLF to represent line breaks between body-parts. - Unlike in RFC 2046, the epilogue of any multipart message MUST be - empty; HTTP applications MUST NOT transmit the epilogue (even if the - original multipart contains an epilogue). 
These restrictions exist in - order to preserve the self-delimiting nature of a multipart message- - body, wherein the "end" of the message-body is indicated by the - ending multipart boundary. - - In general, HTTP treats a multipart message-body no differently than - any other media type: strictly as payload. The one exception is the - "multipart/byteranges" type (appendix 19.2) when it appears in a 206 - (Partial Content) response, which will be interpreted by some HTTP - caching mechanisms as described in sections 13.5.4 and 14.16. In all - other cases, an HTTP user agent SHOULD follow the same or similar - behavior as a MIME user agent would upon receipt of a multipart type. - The MIME header fields within each body-part of a multipart message- - body do not have any significance to HTTP beyond that defined by - their MIME semantics. - - In general, an HTTP user agent SHOULD follow the same or similar - behavior as a MIME user agent would upon receipt of a multipart type. - If an application receives an unrecognized multipart subtype, the - application MUST treat it as being equivalent to "multipart/mixed". - - Note: The "multipart/form-data" type has been specifically defined - for carrying form data suitable for processing via the POST - request method, as described in RFC 1867 [15]. - -3.8 Product Tokens - - Product tokens are used to allow communicating applications to - identify themselves by software name and version. Most fields using - product tokens also allow sub-products which form a significant part - of the application to be listed, separated by white space. By - convention, the products are listed in order of their significance - for identifying the application. - - product = token ["/" product-version] - product-version = token - - Examples: - - User-Agent: CERN-LineMode/2.15 libwww/2.17b3 - Server: Apache/0.8.4 - - - - - -Fielding, et al. 
Standards Track [Page 28] - -RFC 2616 HTTP/1.1 June 1999 - - - Product tokens SHOULD be short and to the point. They MUST NOT be - used for advertising or other non-essential information. Although any - token character MAY appear in a product-version, this token SHOULD - only be used for a version identifier (i.e., successive versions of - the same product SHOULD only differ in the product-version portion of - the product value). - -3.9 Quality Values - - HTTP content negotiation (section 12) uses short "floating point" - numbers to indicate the relative importance ("weight") of various - negotiable parameters. A weight is normalized to a real number in - the range 0 through 1, where 0 is the minimum and 1 the maximum - value. If a parameter has a quality value of 0, then content with - this parameter is `not acceptable' for the client. HTTP/1.1 - applications MUST NOT generate more than three digits after the - decimal point. User configuration of these values SHOULD also be - limited in this fashion. - - qvalue = ( "0" [ "." 0*3DIGIT ] ) - | ( "1" [ "." 0*3("0") ] ) - - "Quality values" is a misnomer, since these values merely represent - relative degradation in desired quality. - -3.10 Language Tags - - A language tag identifies a natural language spoken, written, or - otherwise conveyed by human beings for communication of information - to other human beings. Computer languages are explicitly excluded. - HTTP uses language tags within the Accept-Language and Content- - Language fields. - - The syntax and registry of HTTP language tags is the same as that - defined by RFC 1766 [1]. In summary, a language tag is composed of 1 - or more parts: A primary language tag and a possibly empty series of - subtags: - - language-tag = primary-tag *( "-" subtag ) - primary-tag = 1*8ALPHA - subtag = 1*8ALPHA - - [[ Updated by RFC 3066: subtags may now contain digits ]] - - White space is not allowed within the tag and all tags are case- - insensitive. 
The name space of language tags is administered by the - IANA. Example tags include: - - en, en-US, en-cockney, i-cherokee, x-pig-latin - - - - -Fielding, et al. Standards Track [Page 29] - -RFC 2616 HTTP/1.1 June 1999 - - - where any two-letter primary-tag is an ISO-639 language abbreviation - and any two-letter initial subtag is an ISO-3166 country code. (The - last three tags above are not registered tags; all but the last are - examples of tags which could be registered in future.) - -3.11 Entity Tags - - Entity tags are used for comparing two or more entities from the same - requested resource. HTTP/1.1 uses entity tags in the ETag (section - 14.19), If-Match (section 14.24), If-None-Match (section 14.26), and - If-Range (section 14.27) header fields. The definition of how they - are used and compared as cache validators is in section 13.3.3. An - entity tag consists of an opaque quoted string, possibly prefixed by - a weakness indicator. - - entity-tag = [ weak ] opaque-tag - weak = "W/" - opaque-tag = quoted-string - - A "strong entity tag" MAY be shared by two entities of a resource - only if they are equivalent by octet equality. - - A "weak entity tag," indicated by the "W/" prefix, MAY be shared by - two entities of a resource only if the entities are equivalent and - could be substituted for each other with no significant change in - semantics. A weak entity tag can only be used for weak comparison. - - An entity tag MUST be unique across all versions of all entities - associated with a particular resource. A given entity tag value MAY - be used for entities obtained by requests on different URIs. The use - of the same entity tag value in conjunction with entities obtained by - requests on different URIs does not imply the equivalence of those - entities. - -3.12 Range Units - - HTTP/1.1 allows a client to request that only part (a range of) the - response entity be included within the response. 
HTTP/1.1 uses range - units in the Range (section 14.35) and Content-Range (section 14.16) - header fields. An entity can be broken down into subranges according - to various structural units. - - range-unit = bytes-unit | other-range-unit - bytes-unit = "bytes" - other-range-unit = token - - The only range unit defined by HTTP/1.1 is "bytes". HTTP/1.1 - implementations MAY ignore ranges specified using other units. - - - -Fielding, et al. Standards Track [Page 30] - -RFC 2616 HTTP/1.1 June 1999 - - - HTTP/1.1 has been designed to allow implementations of applications - that do not depend on knowledge of ranges. - -4 HTTP Message - -4.1 Message Types - - HTTP messages consist of requests from client to server and responses - from server to client. - - HTTP-message = Request | Response ; HTTP/1.1 messages - - Request (section 5) and Response (section 6) messages use the generic - message format of RFC 822 [9] for transferring entities (the payload - of the message). Both types of message consist of a start-line, zero - or more header fields (also known as "headers"), an empty line (i.e., - a line with nothing preceding the CRLF) indicating the end of the - header fields, and possibly a message-body. - - generic-message = start-line - *(message-header CRLF) - CRLF - [ message-body ] - start-line = Request-Line | Status-Line - - In the interest of robustness, servers SHOULD ignore any empty - line(s) received where a Request-Line is expected. In other words, if - the server is reading the protocol stream at the beginning of a - message and receives a CRLF first, it should ignore the CRLF. - - Certain buggy HTTP/1.0 client implementations generate extra CRLF's - after a POST request. To restate what is explicitly forbidden by the - BNF, an HTTP/1.1 client MUST NOT preface or follow a request with an - extra CRLF. 
- -4.2 Message Headers - - HTTP header fields, which include general-header (section 4.5), - request-header (section 5.3), response-header (section 6.2), and - entity-header (section 7.1) fields, follow the same generic format as - that given in Section 3.1 of RFC 822 [9]. Each header field consists - of a name followed by a colon (":") and the field value. Field names - are case-insensitive. The field value MAY be preceded by any amount - of LWS, though a single SP is preferred. Header fields can be - extended over multiple lines by preceding each extra line with at - least one SP or HT. Applications ought to follow "common form", where - one is known or indicated, when generating HTTP constructs, since - there might exist some implementations that fail to accept anything - - - -Fielding, et al. Standards Track [Page 31] - -RFC 2616 HTTP/1.1 June 1999 - - - beyond the common forms. - - message-header = field-name ":" [ field-value ] - field-name = token - field-value = *( field-content | LWS ) - field-content = - - The field-content does not include any leading or trailing LWS: - linear white space occurring before the first non-whitespace - character of the field-value or after the last non-whitespace - character of the field-value. Such leading or trailing LWS MAY be - removed without changing the semantics of the field value. Any LWS - that occurs between field-content MAY be replaced with a single SP - before interpreting the field value or forwarding the message - downstream. - - The order in which header fields with differing field names are - received is not significant. However, it is "good practice" to send - general-header fields first, followed by request-header or response- - header fields, and ending with the entity-header fields. - - Multiple message-header fields with the same field-name MAY be - present in a message if and only if the entire field-value for that - header field is defined as a comma-separated list [i.e., #(values)]. 
- It MUST be possible to combine the multiple header fields into one - "field-name: field-value" pair, without changing the semantics of the - message, by appending each subsequent field-value to the first, each - separated by a comma. The order in which header fields with the same - field-name are received is therefore significant to the - interpretation of the combined field value, and thus a proxy MUST NOT - change the order of these field values when a message is forwarded. - -4.3 Message Body - - The message-body (if any) of an HTTP message is used to carry the - entity-body associated with the request or response. The message-body - differs from the entity-body only when a transfer-coding has been - applied, as indicated by the Transfer-Encoding header field (section - 14.41). - - message-body = entity-body - | - - Transfer-Encoding MUST be used to indicate any transfer-codings - applied by an application to ensure safe and proper transfer of the - message. Transfer-Encoding is a property of the message, not of the - - - -Fielding, et al. Standards Track [Page 32] - -RFC 2616 HTTP/1.1 June 1999 - - - entity, and thus MAY be added or removed by any application along the - request/response chain. (However, section 3.6 places restrictions on - when certain transfer-codings may be used.) - - The rules for when a message-body is allowed in a message differ for - requests and responses. - - The presence of a message-body in a request is signaled by the - inclusion of a Content-Length or Transfer-Encoding header field in - the request's message-headers. A message-body MUST NOT be included in - a request if the specification of the request method (section 5.1.1) - does not allow sending an entity-body in requests. A server SHOULD - read and forward a message-body on any request; if the request method - does not include defined semantics for an entity-body, then the - message-body SHOULD be ignored when handling the request. 
- - For response messages, whether or not a message-body is included with - a message is dependent on both the request method and the response - status code (section 6.1.1). All responses to the HEAD request method - MUST NOT include a message-body, even though the presence of entity- - header fields might lead one to believe they do. All 1xx - (informational), 204 (no content), and 304 (not modified) responses - MUST NOT include a message-body. All other responses do include a - message-body, although it MAY be of zero length. - -4.4 Message Length - - The transfer-length of a message is the length of the message-body as - it appears in the message; that is, after any transfer-codings have - been applied. When a message-body is included with a message, the - transfer-length of that body is determined by one of the following - (in order of precedence): - - 1.Any response message which "MUST NOT" include a message-body (such - as the 1xx, 204, and 304 responses and any response to a HEAD - request) is always terminated by the first empty line after the - header fields, regardless of the entity-header fields present in - the message. - - 2.If a Transfer-Encoding header field (section 14.41) is present and - has any value other than "identity", then the transfer-length is - defined by use of the "chunked" transfer-coding (section 3.6), - unless the message is terminated by closing the connection. - - [[ Remove 'and has any value other than "identity"' ]] - - 3.If a Content-Length header field (section 14.13) is present, its - decimal value in OCTETs represents both the entity-length and the - transfer-length. The Content-Length header field MUST NOT be sent - if these two lengths are different (i.e., if a Transfer-Encoding - - - -Fielding, et al. Standards Track [Page 33] - -RFC 2616 HTTP/1.1 June 1999 - - - header field is present). If a message is received with both a - Transfer-Encoding header field and a Content-Length header field, - the latter MUST be ignored. 
- - 4.If the message uses the media type "multipart/byteranges", and the - transfer-length is not otherwise specified, then this self- - delimiting media type defines the transfer-length. This media type - MUST NOT be used unless the sender knows that the recipient can parse - it; the presence in a request of a Range header with multiple byte- - range specifiers from a 1.1 client implies that the client can parse - multipart/byteranges responses. - - A range header might be forwarded by a 1.0 proxy that does not - understand multipart/byteranges; in this case the server MUST - delimit the message using methods defined in items 1,3 or 5 of - this section. - - 5.By the server closing the connection. (Closing the connection - cannot be used to indicate the end of a request body, since that - would leave no possibility for the server to send back a response.) - - For compatibility with HTTP/1.0 applications, HTTP/1.1 requests - containing a message-body MUST include a valid Content-Length header - field unless the server is known to be HTTP/1.1 compliant. If a - request contains a message-body and a Content-Length is not given, - the server SHOULD respond with 400 (bad request) if it cannot - determine the length of the message, or with 411 (length required) if - it wishes to insist on receiving a valid Content-Length. - - All HTTP/1.1 applications that receive entities MUST accept the - "chunked" transfer-coding (section 3.6), thus allowing this mechanism - to be used for messages when the message length cannot be determined - in advance. - - Messages MUST NOT include both a Content-Length header field and a - non-identity transfer-coding. If the message does include a non- - identity transfer-coding, the Content-Length MUST be ignored. - - [[ Remove "non-identity" both times ]] - - When a Content-Length is given in a message where a message-body is - allowed, its field value MUST exactly match the number of OCTETs in - the message-body. 
HTTP/1.1 user agents MUST notify the user when an - invalid length is received and detected. - -4.5 General Header Fields - - There are a few header fields which have general applicability for - both request and response messages, but which do not apply to the - entity being transferred. These header fields apply only to the - - - -Fielding, et al. Standards Track [Page 34] - -RFC 2616 HTTP/1.1 June 1999 - - - message being transmitted. - - general-header = Cache-Control ; Section 14.9 - | Connection ; Section 14.10 - | Date ; Section 14.18 - | Pragma ; Section 14.32 - | Trailer ; Section 14.40 - | Transfer-Encoding ; Section 14.41 - | Upgrade ; Section 14.42 - | Via ; Section 14.45 - | Warning ; Section 14.46 - - General-header field names can be extended reliably only in - combination with a change in the protocol version. However, new or - experimental header fields may be given the semantics of general - header fields if all parties in the communication recognize them to - be general-header fields. Unrecognized header fields are treated as - entity-header fields. - -5 Request - - A request message from a client to a server includes, within the - first line of that message, the method to be applied to the resource, - the identifier of the resource, and the protocol version in use. - - Request = Request-Line ; Section 5.1 - *(( general-header ; Section 4.5 - | request-header ; Section 5.3 - | entity-header ) CRLF) ; Section 7.1 - CRLF - [ message-body ] ; Section 4.3 - -5.1 Request-Line - - The Request-Line begins with a method token, followed by the - Request-URI and the protocol version, and ending with CRLF. The - elements are separated by SP characters. No CR or LF is allowed - except in the final CRLF sequence. - - Request-Line = Method SP Request-URI SP HTTP-Version CRLF - - - - - - - - - - - -Fielding, et al. 
Standards Track [Page 35] - -RFC 2616 HTTP/1.1 June 1999 - - -5.1.1 Method - - The Method token indicates the method to be performed on the - resource identified by the Request-URI. The method is case-sensitive. - - Method = "OPTIONS" ; Section 9.2 - | "GET" ; Section 9.3 - | "HEAD" ; Section 9.4 - | "POST" ; Section 9.5 - | "PUT" ; Section 9.6 - | "DELETE" ; Section 9.7 - | "TRACE" ; Section 9.8 - | "CONNECT" ; Section 9.9 - | extension-method - extension-method = token - - The list of methods allowed by a resource can be specified in an - Allow header field (section 14.7). The return code of the response - always notifies the client whether a method is currently allowed on a - resource, since the set of allowed methods can change dynamically. An - origin server SHOULD return the status code 405 (Method Not Allowed) - if the method is known by the origin server but not allowed for the - requested resource, and 501 (Not Implemented) if the method is - unrecognized or not implemented by the origin server. The methods GET - and HEAD MUST be supported by all general-purpose servers. All other - methods are OPTIONAL; however, if the above methods are implemented, - they MUST be implemented with the same semantics as those specified - in section 9. - -5.1.2 Request-URI - - The Request-URI is a Uniform Resource Identifier (section 3.2) and - identifies the resource upon which to apply the request. - - Request-URI = "*" | absoluteURI | abs_path | authority - [[ Request-URI = "*" | absoluteURI | abs_path [ "?" query ] | authority ]] - - The four options for Request-URI are dependent on the nature of the - request. The asterisk "*" means that the request does not apply to a - particular resource, but to the server itself, and is only allowed - when the method used does not necessarily apply to a resource. One - example would be - - OPTIONS * HTTP/1.1 - - The absoluteURI form is REQUIRED when the request is being made to a - proxy. 
The proxy is requested to forward the request or service it - from a valid cache, and return the response. Note that the proxy MAY - forward the request on to another proxy or directly to the server - - - -Fielding, et al. Standards Track [Page 36] - -RFC 2616 HTTP/1.1 June 1999 - - - specified by the absoluteURI. In order to avoid request loops, a - proxy MUST be able to recognize all of its server names, including - any aliases, local variations, and the numeric IP address. An example - Request-Line would be: - - GET http://www.w3.org/pub/WWW/TheProject.html HTTP/1.1 - - To allow for transition to absoluteURIs in all requests in future - versions of HTTP, all HTTP/1.1 servers MUST accept the absoluteURI - form in requests, even though HTTP/1.1 clients will only generate - them in requests to proxies. - - The authority form is only used by the CONNECT method (section 9.9). - - The most common form of Request-URI is that used to identify a - resource on an origin server or gateway. In this case the absolute - path of the URI MUST be transmitted (see section 3.2.1, abs_path) as - the Request-URI, and the network location of the URI (authority) MUST - be transmitted in a Host header field. For example, a client wishing - to retrieve the resource above directly from the origin server would - create a TCP connection to port 80 of the host "www.w3.org" and send - the lines: - - GET /pub/WWW/TheProject.html HTTP/1.1 - Host: www.w3.org - - followed by the remainder of the Request. Note that the absolute path - cannot be empty; if none is present in the original URI, it MUST be - given as "/" (the server root). - - The Request-URI is transmitted in the format specified in section - 3.2.1. If the Request-URI is encoded using the "% HEX HEX" encoding - [42], the origin server MUST decode the Request-URI in order to - properly interpret the request. Servers SHOULD respond to invalid - Request-URIs with an appropriate status code. 
- - A transparent proxy MUST NOT rewrite the "abs_path" part of the - received Request-URI when forwarding it to the next inbound server, - except as noted above to replace a null abs_path with "/". - - Note: The "no rewrite" rule prevents the proxy from changing the - meaning of the request when the origin server is improperly using - a non-reserved URI character for a reserved purpose. Implementors - should be aware that some pre-HTTP/1.1 proxies have been known to - rewrite the Request-URI. - - - - - - -Fielding, et al. Standards Track [Page 37] - -RFC 2616 HTTP/1.1 June 1999 - - -5.2 The Resource Identified by a Request - - The exact resource identified by an Internet request is determined by - examining both the Request-URI and the Host header field. - - An origin server that does not allow resources to differ by the - requested host MAY ignore the Host header field value when - determining the resource identified by an HTTP/1.1 request. (But see - section 19.6.1.1 for other requirements on Host support in HTTP/1.1.) - - An origin server that does differentiate resources based on the host - requested (sometimes referred to as virtual hosts or vanity host - names) MUST use the following rules for determining the requested - resource on an HTTP/1.1 request: - - 1. If Request-URI is an absoluteURI, the host is part of the - Request-URI. Any Host header field value in the request MUST be - ignored. - - 2. If the Request-URI is not an absoluteURI, and the request includes - a Host header field, the host is determined by the Host header - field value. - - 3. If the host as determined by rule 1 or 2 is not a valid host on - the server, the response MUST be a 400 (Bad Request) error message. - - Recipients of an HTTP/1.0 request that lacks a Host header field MAY - attempt to use heuristics (e.g., examination of the URI path for - something unique to a particular host) in order to determine what - exact resource is being requested. 
- -5.3 Request Header Fields - - The request-header fields allow the client to pass additional - information about the request, and about the client itself, to the - server. These fields act as request modifiers, with semantics - equivalent to the parameters on a programming language method - invocation. - - request-header = Accept ; Section 14.1 - | Accept-Charset ; Section 14.2 - | Accept-Encoding ; Section 14.3 - | Accept-Language ; Section 14.4 - | Authorization ; Section 14.8 - | Expect ; Section 14.20 - | From ; Section 14.22 - | Host ; Section 14.23 - | If-Match ; Section 14.24 - - - -Fielding, et al. Standards Track [Page 38] - -RFC 2616 HTTP/1.1 June 1999 - - - | If-Modified-Since ; Section 14.25 - | If-None-Match ; Section 14.26 - | If-Range ; Section 14.27 - | If-Unmodified-Since ; Section 14.28 - | Max-Forwards ; Section 14.31 - | Proxy-Authorization ; Section 14.34 - | Range ; Section 14.35 - | Referer ; Section 14.36 - | TE ; Section 14.39 - | User-Agent ; Section 14.43 - - Request-header field names can be extended reliably only in - combination with a change in the protocol version. However, new or - experimental header fields MAY be given the semantics of request- - header fields if all parties in the communication recognize them to - be request-header fields. Unrecognized header fields are treated as - entity-header fields. - -6 Response - - After receiving and interpreting a request message, a server responds - with an HTTP response message. - - Response = Status-Line ; Section 6.1 - *(( general-header ; Section 4.5 - | response-header ; Section 6.2 - | entity-header ) CRLF) ; Section 7.1 - CRLF - [ message-body ] ; Section 7.2 - -6.1 Status-Line - - The first line of a Response message is the Status-Line, consisting - of the protocol version followed by a numeric status code and its - associated textual phrase, with each element separated by SP - characters. No CR or LF is allowed except in the final CRLF sequence. 
- - Status-Line = HTTP-Version SP Status-Code SP Reason-Phrase CRLF - -6.1.1 Status Code and Reason Phrase - - The Status-Code element is a 3-digit integer result code of the - attempt to understand and satisfy the request. These codes are fully - defined in section 10. The Reason-Phrase is intended to give a short - textual description of the Status-Code. The Status-Code is intended - for use by automata and the Reason-Phrase is intended for the human - user. The client is not required to examine or display the Reason- - Phrase. - - - -Fielding, et al. Standards Track [Page 39] - -RFC 2616 HTTP/1.1 June 1999 - - - The first digit of the Status-Code defines the class of response. The - last two digits do not have any categorization role. There are 5 - values for the first digit: - - - 1xx: Informational - Request received, continuing process - - - 2xx: Success - The action was successfully received, - understood, and accepted - - - 3xx: Redirection - Further action must be taken in order to - complete the request - - - 4xx: Client Error - The request contains bad syntax or cannot - be fulfilled - - - 5xx: Server Error - The server failed to fulfill an apparently - valid request - - The individual values of the numeric status codes defined for - HTTP/1.1, and an example set of corresponding Reason-Phrase's, are - presented below. The reason phrases listed here are only - recommendations -- they MAY be replaced by local equivalents without - affecting the protocol. 
- - Status-Code = - "100" ; Section 10.1.1: Continue - | "101" ; Section 10.1.2: Switching Protocols - | "200" ; Section 10.2.1: OK - | "201" ; Section 10.2.2: Created - | "202" ; Section 10.2.3: Accepted - | "203" ; Section 10.2.4: Non-Authoritative Information - | "204" ; Section 10.2.5: No Content - | "205" ; Section 10.2.6: Reset Content - | "206" ; Section 10.2.7: Partial Content - | "300" ; Section 10.3.1: Multiple Choices - | "301" ; Section 10.3.2: Moved Permanently - | "302" ; Section 10.3.3: Found - | "303" ; Section 10.3.4: See Other - | "304" ; Section 10.3.5: Not Modified - | "305" ; Section 10.3.6: Use Proxy - | "307" ; Section 10.3.8: Temporary Redirect - | "400" ; Section 10.4.1: Bad Request - | "401" ; Section 10.4.2: Unauthorized - | "402" ; Section 10.4.3: Payment Required - | "403" ; Section 10.4.4: Forbidden - | "404" ; Section 10.4.5: Not Found - | "405" ; Section 10.4.6: Method Not Allowed - | "406" ; Section 10.4.7: Not Acceptable - - - -Fielding, et al. Standards Track [Page 40] - -RFC 2616 HTTP/1.1 June 1999 - - - | "407" ; Section 10.4.8: Proxy Authentication Required - | "408" ; Section 10.4.9: Request Time-out - | "409" ; Section 10.4.10: Conflict - | "410" ; Section 10.4.11: Gone - | "411" ; Section 10.4.12: Length Required - | "412" ; Section 10.4.13: Precondition Failed - | "413" ; Section 10.4.14: Request Entity Too Large - | "414" ; Section 10.4.15: Request-URI Too Large - | "415" ; Section 10.4.16: Unsupported Media Type - | "416" ; Section 10.4.17: Requested range not satisfiable - | "417" ; Section 10.4.18: Expectation Failed - | "500" ; Section 10.5.1: Internal Server Error - | "501" ; Section 10.5.2: Not Implemented - | "502" ; Section 10.5.3: Bad Gateway - | "503" ; Section 10.5.4: Service Unavailable - | "504" ; Section 10.5.5: Gateway Time-out - | "505" ; Section 10.5.6: HTTP Version not supported - | extension-code - - extension-code = 3DIGIT - Reason-Phrase = *<TEXT, excluding CR, LF> - - HTTP status codes are extensible. 
HTTP applications are not required - to understand the meaning of all registered status codes, though such - understanding is obviously desirable. However, applications MUST - understand the class of any status code, as indicated by the first - digit, and treat any unrecognized response as being equivalent to the - x00 status code of that class, with the exception that an - unrecognized response MUST NOT be cached. For example, if an - unrecognized status code of 431 is received by the client, it can - safely assume that there was something wrong with its request and - treat the response as if it had received a 400 status code. In such - cases, user agents SHOULD present to the user the entity returned - with the response, since that entity is likely to include human- - readable information which will explain the unusual status. - -6.2 Response Header Fields - - The response-header fields allow the server to pass additional - information about the response which cannot be placed in the Status- - Line. These header fields give information about the server and about - further access to the resource identified by the Request-URI. - - response-header = Accept-Ranges ; Section 14.5 - | Age ; Section 14.6 - | ETag ; Section 14.19 - | Location ; Section 14.30 - | Proxy-Authenticate ; Section 14.33 - - - -Fielding, et al. Standards Track [Page 41] - -RFC 2616 HTTP/1.1 June 1999 - - - | Retry-After ; Section 14.37 - | Server ; Section 14.38 - | Vary ; Section 14.44 - | WWW-Authenticate ; Section 14.47 - - Response-header field names can be extended reliably only in - combination with a change in the protocol version. However, new or - experimental header fields MAY be given the semantics of response- - header fields if all parties in the communication recognize them to - be response-header fields. Unrecognized header fields are treated as - entity-header fields. 
- -7 Entity - - Request and Response messages MAY transfer an entity if not otherwise - restricted by the request method or response status code. An entity - consists of entity-header fields and an entity-body, although some - responses will only include the entity-headers. - - In this section, both sender and recipient refer to either the client - or the server, depending on who sends and who receives the entity. - -7.1 Entity Header Fields - - Entity-header fields define metainformation about the entity-body or, - if no body is present, about the resource identified by the request. - Some of this metainformation is OPTIONAL; some might be REQUIRED by - portions of this specification. - - entity-header = Allow ; Section 14.7 - | Content-Encoding ; Section 14.11 - | Content-Language ; Section 14.12 - | Content-Length ; Section 14.13 - | Content-Location ; Section 14.14 - | Content-MD5 ; Section 14.15 - | Content-Range ; Section 14.16 - | Content-Type ; Section 14.17 - | Expires ; Section 14.21 - | Last-Modified ; Section 14.29 - | extension-header - - extension-header = message-header - - The extension-header mechanism allows additional entity-header fields - to be defined without changing the protocol, but these fields cannot - be assumed to be recognizable by the recipient. Unrecognized header - fields SHOULD be ignored by the recipient and MUST be forwarded by - transparent proxies. - - - -Fielding, et al. Standards Track [Page 42] - -RFC 2616 HTTP/1.1 June 1999 - - -7.2 Entity Body - - The entity-body (if any) sent with an HTTP request or response is in - a format and encoding defined by the entity-header fields. - - entity-body = *OCTET - - An entity-body is only present in a message when a message-body is - present, as described in section 4.3. The entity-body is obtained - from the message-body by decoding any Transfer-Encoding that might - have been applied to ensure safe and proper transfer of the message. 
- -7.2.1 Type - - When an entity-body is included with a message, the data type of that - body is determined via the header fields Content-Type and Content- - Encoding. These define a two-layer, ordered encoding model: - - entity-body := Content-Encoding( Content-Type( data ) ) - - Content-Type specifies the media type of the underlying data. - Content-Encoding may be used to indicate any additional content - codings applied to the data, usually for the purpose of data - compression, that are a property of the requested resource. There is - no default encoding. - - Any HTTP/1.1 message containing an entity-body SHOULD include a - Content-Type header field defining the media type of that body. If - and only if the media type is not given by a Content-Type field, the - recipient MAY attempt to guess the media type via inspection of its - content and/or the name extension(s) of the URI used to identify the - resource. If the media type remains unknown, the recipient SHOULD - treat it as type "application/octet-stream". - -7.2.2 Entity Length - - The entity-length of a message is the length of the message-body - before any transfer-codings have been applied. Section 4.4 defines - how the transfer-length of a message-body is determined. - - - - - - - - - - - - -Fielding, et al. Standards Track [Page 43] - -RFC 2616 HTTP/1.1 June 1999 - - -8 Connections - -8.1 Persistent Connections - -8.1.1 Purpose - - Prior to persistent connections, a separate TCP connection was - established to fetch each URL, increasing the load on HTTP servers - and causing congestion on the Internet. The use of inline images and - other associated data often require a client to make multiple - requests of the same server in a short amount of time. Analysis of - these performance problems and results from a prototype - implementation are available [26] [30]. Implementation experience and - measurements of actual HTTP/1.1 (RFC 2068) implementations show good - results [39]. 
Alternatives have also been explored, for example, - T/TCP [27]. - - Persistent HTTP connections have a number of advantages: - - - By opening and closing fewer TCP connections, CPU time is saved - in routers and hosts (clients, servers, proxies, gateways, - tunnels, or caches), and memory used for TCP protocol control - blocks can be saved in hosts. - - - HTTP requests and responses can be pipelined on a connection. - Pipelining allows a client to make multiple requests without - waiting for each response, allowing a single TCP connection to - be used much more efficiently, with much lower elapsed time. - - - Network congestion is reduced by reducing the number of packets - caused by TCP opens, and by allowing TCP sufficient time to - determine the congestion state of the network. - - - Latency on subsequent requests is reduced since there is no time - spent in TCP's connection opening handshake. - - - HTTP can evolve more gracefully, since errors can be reported - without the penalty of closing the TCP connection. Clients using - future versions of HTTP might optimistically try a new feature, - but if communicating with an older server, retry with old - semantics after an error is reported. - - HTTP implementations SHOULD implement persistent connections. - - - - - - - - -Fielding, et al. Standards Track [Page 44] - -RFC 2616 HTTP/1.1 June 1999 - - -8.1.2 Overall Operation - - A significant difference between HTTP/1.1 and earlier versions of - HTTP is that persistent connections are the default behavior of any - HTTP connection. That is, unless otherwise indicated, the client - SHOULD assume that the server will maintain a persistent connection, - even after error responses from the server. - - Persistent connections provide a mechanism by which a client and a - server can signal the close of a TCP connection. This signaling takes - place using the Connection header field (section 14.10). 
Once a close - has been signaled, the client MUST NOT send any more requests on that - connection. - -8.1.2.1 Negotiation - - An HTTP/1.1 server MAY assume that a HTTP/1.1 client intends to - maintain a persistent connection unless a Connection header including - the connection-token "close" was sent in the request. If the server - chooses to close the connection immediately after sending the - response, it SHOULD send a Connection header including the - connection-token close. - - An HTTP/1.1 client MAY expect a connection to remain open, but would - decide to keep it open based on whether the response from a server - contains a Connection header with the connection-token close. In case - the client does not want to maintain a connection for more than that - request, it SHOULD send a Connection header including the - connection-token close. - - If either the client or the server sends the close token in the - Connection header, that request becomes the last one for the - connection. - - Clients and servers SHOULD NOT assume that a persistent connection is - maintained for HTTP versions less than 1.1 unless it is explicitly - signaled. See section 19.6.2 for more information on backward - compatibility with HTTP/1.0 clients. - - In order to remain persistent, all messages on the connection MUST - have a self-defined message length (i.e., one not defined by closure - of the connection), as described in section 4.4. - - - - - - - - - -Fielding, et al. Standards Track [Page 45] - -RFC 2616 HTTP/1.1 June 1999 - - -8.1.2.2 Pipelining - - A client that supports persistent connections MAY "pipeline" its - requests (i.e., send multiple requests without waiting for each - response). A server MUST send its responses to those requests in the - same order that the requests were received. - - Clients which assume persistent connections and pipeline immediately - after connection establishment SHOULD be prepared to retry their - connection if the first pipelined attempt fails. 
If a client does - such a retry, it MUST NOT pipeline before it knows the connection is - persistent. Clients MUST also be prepared to resend their requests if - the server closes the connection before sending all of the - corresponding responses. - - Clients SHOULD NOT pipeline requests using non-idempotent methods or - non-idempotent sequences of methods (see section 9.1.2). Otherwise, a - premature termination of the transport connection could lead to - indeterminate results. A client wishing to send a non-idempotent - request SHOULD wait to send that request until it has received the - response status for the previous request. - -8.1.3 Proxy Servers - - It is especially important that proxies correctly implement the - properties of the Connection header field as specified in section - 14.10. - - The proxy server MUST signal persistent connections separately with - its clients and the origin servers (or other proxy servers) that it - connects to. Each persistent connection applies to only one transport - link. - - A proxy server MUST NOT establish a HTTP/1.1 persistent connection - with an HTTP/1.0 client (but see RFC 2068 [33] for information and - discussion of the problems with the Keep-Alive header implemented by - many HTTP/1.0 clients). - -8.1.4 Practical Considerations - - Servers will usually have some time-out value beyond which they will - no longer maintain an inactive connection. Proxy servers might make - this a higher value since it is likely that the client will be making - more connections through the same server. The use of persistent - connections places no requirements on the length (or existence) of - this time-out for either the client or the server. - - - - - -Fielding, et al. Standards Track [Page 46] - -RFC 2616 HTTP/1.1 June 1999 - - - When a client or server wishes to time-out it SHOULD issue a graceful - close on the transport connection. 
Clients and servers SHOULD both - constantly watch for the other side of the transport close, and - respond to it as appropriate. If a client or server does not detect - the other side's close promptly it could cause unnecessary resource - drain on the network. - - A client, server, or proxy MAY close the transport connection at any - time. For example, a client might have started to send a new request - at the same time that the server has decided to close the "idle" - connection. From the server's point of view, the connection is being - closed while it was idle, but from the client's point of view, a - request is in progress. - - This means that clients, servers, and proxies MUST be able to recover - from asynchronous close events. Client software SHOULD reopen the - transport connection and retransmit the aborted sequence of requests - without user interaction so long as the request sequence is - idempotent (see section 9.1.2). Non-idempotent methods or sequences - MUST NOT be automatically retried, although user agents MAY offer a - human operator the choice of retrying the request(s). Confirmation by - user-agent software with semantic understanding of the application - MAY substitute for user confirmation. The automatic retry SHOULD NOT - be repeated if the second sequence of requests fails. - - Servers SHOULD always respond to at least one request per connection, - if at all possible. Servers SHOULD NOT close a connection in the - middle of transmitting a response, unless a network or client failure - is suspected. - - Clients that use persistent connections SHOULD limit the number of - simultaneous connections that they maintain to a given server. A - single-user client SHOULD NOT maintain more than 2 connections with - any server or proxy. A proxy SHOULD use up to 2*N connections to - another server or proxy, where N is the number of simultaneously - active users. These guidelines are intended to improve HTTP response - times and avoid congestion. 
- -8.2 Message Transmission Requirements - -8.2.1 Persistent Connections and Flow Control - - HTTP/1.1 servers SHOULD maintain persistent connections and use TCP's - flow control mechanisms to resolve temporary overloads, rather than - terminating connections with the expectation that clients will retry. - The latter technique can exacerbate network congestion. - - - - - -Fielding, et al. Standards Track [Page 47] - -RFC 2616 HTTP/1.1 June 1999 - - -8.2.2 Monitoring Connections for Error Status Messages - - An HTTP/1.1 (or later) client sending a message-body SHOULD monitor - the network connection for an error status while it is transmitting - the request. If the client sees an error status, it SHOULD - immediately cease transmitting the body. If the body is being sent - using a "chunked" encoding (section 3.6), a zero length chunk and - empty trailer MAY be used to prematurely mark the end of the message. - If the body was preceded by a Content-Length header, the client MUST - close the connection. - -8.2.3 Use of the 100 (Continue) Status - - The purpose of the 100 (Continue) status (see section 10.1.1) is to - allow a client that is sending a request message with a request body - to determine if the origin server is willing to accept the request - (based on the request headers) before the client sends the request - body. In some cases, it might either be inappropriate or highly - inefficient for the client to send the body if the server will reject - the message without looking at the body. - - Requirements for HTTP/1.1 clients: - - - If a client will wait for a 100 (Continue) response before - sending the request body, it MUST send an Expect request-header - field (section 14.20) with the "100-continue" expectation. - - - A client MUST NOT send an Expect request-header field (section - 14.20) with the "100-continue" expectation if it does not intend - to send a request body. 
- - Because of the presence of older implementations, the protocol allows - ambiguous situations in which a client may send "Expect: 100- - continue" without receiving either a 417 (Expectation Failed) status - or a 100 (Continue) status. Therefore, when a client sends this - header field to an origin server (possibly via a proxy) from which it - has never seen a 100 (Continue) status, the client SHOULD NOT wait - for an indefinite period before sending the request body. - - Requirements for HTTP/1.1 origin servers: - - - Upon receiving a request which includes an Expect request-header - field with the "100-continue" expectation, an origin server MUST - either respond with 100 (Continue) status and continue to read - from the input stream, or respond with a final status code. The - origin server MUST NOT wait for the request body before sending - the 100 (Continue) response. If it responds with a final status - code, it MAY close the transport connection or it MAY continue - - - -Fielding, et al. Standards Track [Page 48] - -RFC 2616 HTTP/1.1 June 1999 - - - to read and discard the rest of the request. It MUST NOT - perform the requested method if it returns a final status code. - - - An origin server SHOULD NOT send a 100 (Continue) response if - the request message does not include an Expect request-header - field with the "100-continue" expectation, and MUST NOT send a - 100 (Continue) response if such a request comes from an HTTP/1.0 - (or earlier) client. There is an exception to this rule: for - compatibility with RFC 2068, a server MAY send a 100 (Continue) - status in response to an HTTP/1.1 PUT or POST request that does - not include an Expect request-header field with the "100- - continue" expectation. This exception, the purpose of which is - to minimize any client processing delays associated with an - undeclared wait for 100 (Continue) status, applies only to - HTTP/1.1 requests, and not to requests with any other HTTP- - version value. 
- - - An origin server MAY omit a 100 (Continue) response if it has - already received some or all of the request body for the - corresponding request. - - - An origin server that sends a 100 (Continue) response MUST - ultimately send a final status code, once the request body is - received and processed, unless it terminates the transport - connection prematurely. - - - If an origin server receives a request that does not include an - Expect request-header field with the "100-continue" expectation, - the request includes a request body, and the server responds - with a final status code before reading the entire request body - from the transport connection, then the server SHOULD NOT close - the transport connection until it has read the entire request, - or until the client closes the connection. Otherwise, the client - might not reliably receive the response message. However, this - requirement is not be construed as preventing a server from - defending itself against denial-of-service attacks, or from - badly broken client implementations. - - Requirements for HTTP/1.1 proxies: - - - If a proxy receives a request that includes an Expect request- - header field with the "100-continue" expectation, and the proxy - either knows that the next-hop server complies with HTTP/1.1 or - higher, or does not know the HTTP version of the next-hop - server, it MUST forward the request, including the Expect header - field. - - - - - -Fielding, et al. Standards Track [Page 49] - -RFC 2616 HTTP/1.1 June 1999 - - - - If the proxy knows that the version of the next-hop server is - HTTP/1.0 or lower, it MUST NOT forward the request, and it MUST - respond with a 417 (Expectation Failed) status. - - - Proxies SHOULD maintain a cache recording the HTTP version - numbers received from recently-referenced next-hop servers. 
- - - A proxy MUST NOT forward a 100 (Continue) response if the - request message was received from an HTTP/1.0 (or earlier) - client and did not include an Expect request-header field with - the "100-continue" expectation. This requirement overrides the - general rule for forwarding of 1xx responses (see section 10.1). - -8.2.4 Client Behavior if Server Prematurely Closes Connection - - If an HTTP/1.1 client sends a request which includes a request body, - but which does not include an Expect request-header field with the - "100-continue" expectation, and if the client is not directly - connected to an HTTP/1.1 origin server, and if the client sees the - connection close before receiving any status from the server, the - client SHOULD retry the request. If the client does retry this - request, it MAY use the following "binary exponential backoff" - algorithm to be assured of obtaining a reliable response: - - 1. Initiate a new connection to the server - - 2. Transmit the request-headers - - 3. Initialize a variable R to the estimated round-trip time to the - server (e.g., based on the time it took to establish the - connection), or to a constant value of 5 seconds if the round- - trip time is not available. - - 4. Compute T = R * (2**N), where N is the number of previous - retries of this request. - - 5. Wait either for an error response from the server, or for T - seconds (whichever comes first) - - 6. If no error response is received, after T seconds transmit the - body of the request. - - 7. If client sees that the connection is closed prematurely, - repeat from step 1 until the request is accepted, an error - response is received, or the user becomes impatient and - terminates the retry process. - - - - - -Fielding, et al. Standards Track [Page 50] - -RFC 2616 HTTP/1.1 June 1999 - - - If at any point an error status is received, the client - - - SHOULD NOT continue and - - - SHOULD close the connection if it has not completed sending the - request message. 
- -9 Method Definitions - - The set of common methods for HTTP/1.1 is defined below. Although - this set can be expanded, additional methods cannot be assumed to - share the same semantics for separately extended clients and servers. - - The Host request-header field (section 14.23) MUST accompany all - HTTP/1.1 requests. - -9.1 Safe and Idempotent Methods - -9.1.1 Safe Methods - - Implementors should be aware that the software represents the user in - their interactions over the Internet, and should be careful to allow - the user to be aware of any actions they might take which may have an - unexpected significance to themselves or others. - - In particular, the convention has been established that the GET and - HEAD methods SHOULD NOT have the significance of taking an action - other than retrieval. These methods ought to be considered "safe". - This allows user agents to represent other methods, such as POST, PUT - and DELETE, in a special way, so that the user is made aware of the - fact that a possibly unsafe action is being requested. - - Naturally, it is not possible to ensure that the server does not - generate side-effects as a result of performing a GET request; in - fact, some dynamic resources consider that a feature. The important - distinction here is that the user did not request the side-effects, - so therefore cannot be held accountable for them. - -9.1.2 Idempotent Methods - - Methods can also have the property of "idempotence" in that (aside - from error or expiration issues) the side-effects of N > 0 identical - requests is the same as for a single request. The methods GET, HEAD, - PUT and DELETE share this property. Also, the methods OPTIONS and - TRACE SHOULD NOT have side effects, and so are inherently idempotent. - - - - - - -Fielding, et al. 
Standards Track [Page 51] - -RFC 2616 HTTP/1.1 June 1999 - - - However, it is possible that a sequence of several requests is non- - idempotent, even if all of the methods executed in that sequence are - idempotent. (A sequence is idempotent if a single execution of the - entire sequence always yields a result that is not changed by a - reexecution of all, or part, of that sequence.) For example, a - sequence is non-idempotent if its result depends on a value that is - later modified in the same sequence. - - A sequence that never has side effects is idempotent, by definition - (provided that no concurrent operations are being executed on the - same set of resources). - -9.2 OPTIONS - - The OPTIONS method represents a request for information about the - communication options available on the request/response chain - identified by the Request-URI. This method allows the client to - determine the options and/or requirements associated with a resource, - or the capabilities of a server, without implying a resource action - or initiating a resource retrieval. - - Responses to this method are not cacheable. - - If the OPTIONS request includes an entity-body (as indicated by the - presence of Content-Length or Transfer-Encoding), then the media type - MUST be indicated by a Content-Type field. Although this - specification does not define any use for such a body, future - extensions to HTTP might use the OPTIONS body to make more detailed - queries on the server. A server that does not support such an - extension MAY discard the request body. - - If the Request-URI is an asterisk ("*"), the OPTIONS request is - intended to apply to the server in general rather than to a specific - resource. Since a server's communication options typically depend on - the resource, the "*" request is only useful as a "ping" or "no-op" - type of method; it does nothing beyond allowing the client to test - the capabilities of the server. 
For example, this can be used to test - a proxy for HTTP/1.1 compliance (or lack thereof). - - If the Request-URI is not an asterisk, the OPTIONS request applies - only to the options that are available when communicating with that - resource. - - A 200 response SHOULD include any header fields that indicate - optional features implemented by the server and applicable to that - resource (e.g., Allow), possibly including extensions not defined by - this specification. The response body, if any, SHOULD also include - information about the communication options. The format for such a - - - -Fielding, et al. Standards Track [Page 52] - -RFC 2616 HTTP/1.1 June 1999 - - - body is not defined by this specification, but might be defined by - future extensions to HTTP. Content negotiation MAY be used to select - the appropriate response format. If no response body is included, the - response MUST include a Content-Length field with a field-value of - "0". - - The Max-Forwards request-header field MAY be used to target a - specific proxy in the request chain. When a proxy receives an OPTIONS - request on an absoluteURI for which request forwarding is permitted, - the proxy MUST check for a Max-Forwards field. If the Max-Forwards - field-value is zero ("0"), the proxy MUST NOT forward the message; - instead, the proxy SHOULD respond with its own communication options. - If the Max-Forwards field-value is an integer greater than zero, the - proxy MUST decrement the field-value when it forwards the request. If - no Max-Forwards field is present in the request, then the forwarded - request MUST NOT include a Max-Forwards field. - -9.3 GET - - The GET method means retrieve whatever information (in the form of an - entity) is identified by the Request-URI. 
If the Request-URI refers - to a data-producing process, it is the produced data which shall be - returned as the entity in the response and not the source text of the - process, unless that text happens to be the output of the process. - - The semantics of the GET method change to a "conditional GET" if the - request message includes an If-Modified-Since, If-Unmodified-Since, - If-Match, If-None-Match, or If-Range header field. A conditional GET - method requests that the entity be transferred only under the - circumstances described by the conditional header field(s). The - conditional GET method is intended to reduce unnecessary network - usage by allowing cached entities to be refreshed without requiring - multiple requests or transferring data already held by the client. - - The semantics of the GET method change to a "partial GET" if the - request message includes a Range header field. A partial GET requests - that only part of the entity be transferred, as described in section - 14.35. The partial GET method is intended to reduce unnecessary - network usage by allowing partially-retrieved entities to be - completed without transferring data already held by the client. - - The response to a GET request is cacheable if and only if it meets - the requirements for HTTP caching described in section 13. - - See section 15.1.3 for security considerations when used for forms. - - - - - - -Fielding, et al. Standards Track [Page 53] - -RFC 2616 HTTP/1.1 June 1999 - - -9.4 HEAD - - The HEAD method is identical to GET except that the server MUST NOT - return a message-body in the response. The metainformation contained - in the HTTP headers in response to a HEAD request SHOULD be identical - to the information sent in response to a GET request. This method can - be used for obtaining metainformation about the entity implied by the - request without transferring the entity-body itself. 
This method is - often used for testing hypertext links for validity, accessibility, - and recent modification. - - The response to a HEAD request MAY be cacheable in the sense that the - information contained in the response MAY be used to update a - previously cached entity from that resource. If the new field values - indicate that the cached entity differs from the current entity (as - would be indicated by a change in Content-Length, Content-MD5, ETag - or Last-Modified), then the cache MUST treat the cache entry as - stale. - -9.5 POST - - The POST method is used to request that the origin server accept the - entity enclosed in the request as a new subordinate of the resource - identified by the Request-URI in the Request-Line. POST is designed - to allow a uniform method to cover the following functions: - -[[ Should be: ]] -[[ The POST method is used to request that the origin server accept the ]] -[[ entity enclosed in the request as data to be processed by the resource ]] -[[ identified by the Request-URI in the Request-Line. POST is designed ]] -[[ to allow a uniform method to cover the following functions: ]] - - - Annotation of existing resources; - - - Posting a message to a bulletin board, newsgroup, mailing list, - or similar group of articles; - - - Providing a block of data, such as the result of submitting a - form, to a data-handling process; - - - Extending a database through an append operation. - - The actual function performed by the POST method is determined by the - server and is usually dependent on the Request-URI. The posted entity - is subordinate to that URI in the same way that a file is subordinate - to a directory containing it, a news article is subordinate to a - newsgroup to which it is posted, or a record is subordinate to a - database. - - [[ Remove second sentence ("The posted entity is subordinate") above ]] - - The action performed by the POST method might not result in a - resource that can be identified by a URI. 
In this case, either 200 - (OK) or 204 (No Content) is the appropriate response status, - depending on whether or not the response includes an entity that - describes the result. - - - -Fielding, et al. Standards Track [Page 54] - -RFC 2616 HTTP/1.1 June 1999 - - - If a resource has been created on the origin server, the response - SHOULD be 201 (Created) and contain an entity which describes the - status of the request and refers to the new resource, and a Location - header (see section 14.30). - - Responses to this method are not cacheable, unless the response - includes appropriate Cache-Control or Expires header fields. However, - the 303 (See Other) response can be used to direct the user agent to - retrieve a cacheable resource. - - POST requests MUST obey the message transmission requirements set out - in section 8.2. - - See section 15.1.3 for security considerations. - -9.6 PUT - - The PUT method requests that the enclosed entity be stored under the - supplied Request-URI. If the Request-URI refers to an already - existing resource, the enclosed entity SHOULD be considered as a - modified version of the one residing on the origin server. If the - Request-URI does not point to an existing resource, and that URI is - capable of being defined as a new resource by the requesting user - agent, the origin server can create the resource with that URI. If a - new resource is created, the origin server MUST inform the user agent - via the 201 (Created) response. If an existing resource is modified, - either the 200 (OK) or 204 (No Content) response codes SHOULD be sent - to indicate successful completion of the request. If the resource - could not be created or modified with the Request-URI, an appropriate - error response SHOULD be given that reflects the nature of the - problem. The recipient of the entity MUST NOT ignore any Content-* - (e.g. 
Content-Range) headers that it does not understand or implement - and MUST return a 501 (Not Implemented) response in such cases. - - If the request passes through a cache and the Request-URI identifies - one or more currently cached entities, those entries SHOULD be - treated as stale. Responses to this method are not cacheable. - - The fundamental difference between the POST and PUT requests is - reflected in the different meaning of the Request-URI. The URI in a - POST request identifies the resource that will handle the enclosed - entity. That resource might be a data-accepting process, a gateway to - some other protocol, or a separate entity that accepts annotations. - In contrast, the URI in a PUT request identifies the entity enclosed - with the request -- the user agent knows what URI is intended and the - server MUST NOT attempt to apply the request to some other resource. - If the server desires that the request be applied to a different URI, - - - - -Fielding, et al. Standards Track [Page 55] - -RFC 2616 HTTP/1.1 June 1999 - - - it MUST send a 301 (Moved Permanently) response; the user agent MAY - then make its own decision regarding whether or not to redirect the - request. - - A single resource MAY be identified by many different URIs. For - example, an article might have a URI for identifying "the current - version" which is separate from the URI identifying each particular - version. In this case, a PUT request on a general URI might result in - several other URIs being defined by the origin server. - - HTTP/1.1 does not define how a PUT method affects the state of an - origin server. - - PUT requests MUST obey the message transmission requirements set out - in section 8.2. - - Unless otherwise specified for a particular entity-header, the - entity-headers in the PUT request SHOULD be applied to the resource - created or modified by the PUT. 
- -9.7 DELETE - - The DELETE method requests that the origin server delete the resource - identified by the Request-URI. This method MAY be overridden by human - intervention (or other means) on the origin server. The client cannot - be guaranteed that the operation has been carried out, even if the - status code returned from the origin server indicates that the action - has been completed successfully. However, the server SHOULD NOT - indicate success unless, at the time the response is given, it - intends to delete the resource or move it to an inaccessible - location. - - A successful response SHOULD be 200 (OK) if the response includes an - entity describing the status, 202 (Accepted) if the action has not - yet been enacted, or 204 (No Content) if the action has been enacted - but the response does not include an entity. - - If the request passes through a cache and the Request-URI identifies - one or more currently cached entities, those entries SHOULD be - treated as stale. Responses to this method are not cacheable. - -9.8 TRACE - - The TRACE method is used to invoke a remote, application-layer loop- - back of the request message. The final recipient of the request - SHOULD reflect the message received back to the client as the - entity-body of a 200 (OK) response. The final recipient is either the - - - - -Fielding, et al. Standards Track [Page 56] - -RFC 2616 HTTP/1.1 June 1999 - - - origin server or the first proxy or gateway to receive a Max-Forwards - value of zero (0) in the request (see section 14.31). A TRACE request - MUST NOT include an entity. - - TRACE allows the client to see what is being received at the other - end of the request chain and use that data for testing or diagnostic - information. The value of the Via header field (section 14.45) is of - particular interest, since it acts as a trace of the request chain. 
- Use of the Max-Forwards header field allows the client to limit the - length of the request chain, which is useful for testing a chain of - proxies forwarding messages in an infinite loop. - - If the request is valid, the response SHOULD contain the entire - request message in the entity-body, with a Content-Type of - "message/http". Responses to this method MUST NOT be cached. - -9.9 CONNECT - - This specification reserves the method name CONNECT for use with a - proxy that can dynamically switch to being a tunnel (e.g. SSL - tunneling [44]). - -10 Status Code Definitions - - Each Status-Code is described below, including a description of which - method(s) it can follow and any metainformation required in the - response. - -10.1 Informational 1xx - - This class of status code indicates a provisional response, - consisting only of the Status-Line and optional headers, and is - terminated by an empty line. There are no required headers for this - class of status code. Since HTTP/1.0 did not define any 1xx status - codes, servers MUST NOT send a 1xx response to an HTTP/1.0 client - except under experimental conditions. - - A client MUST be prepared to accept one or more 1xx status responses - prior to a regular response, even if the client does not expect a 100 - (Continue) status message. Unexpected 1xx status responses MAY be - ignored by a user agent. - - Proxies MUST forward 1xx responses, unless the connection between the - proxy and its client has been closed, or unless the proxy itself - requested the generation of the 1xx response. (For example, if a - - - - - - -Fielding, et al. Standards Track [Page 57] - -RFC 2616 HTTP/1.1 June 1999 - - - proxy adds a "Expect: 100-continue" field when it forwards a request, - then it need not forward the corresponding 100 (Continue) - response(s).) - -10.1.1 100 Continue - - The client SHOULD continue with its request. 
This interim response is - used to inform the client that the initial part of the request has - been received and has not yet been rejected by the server. The client - SHOULD continue by sending the remainder of the request or, if the - request has already been completed, ignore this response. The server - MUST send a final response after the request has been completed. See - section 8.2.3 for detailed discussion of the use and handling of this - status code. - -10.1.2 101 Switching Protocols - - The server understands and is willing to comply with the client's - request, via the Upgrade message header field (section 14.42), for a - change in the application protocol being used on this connection. The - server will switch protocols to those defined by the response's - Upgrade header field immediately after the empty line which - terminates the 101 response. - - The protocol SHOULD be switched only when it is advantageous to do - so. For example, switching to a newer version of HTTP is advantageous - over older versions, and switching to a real-time, synchronous - protocol might be advantageous when delivering resources that use - such features. - -10.2 Successful 2xx - - This class of status code indicates that the client's request was - successfully received, understood, and accepted. - -10.2.1 200 OK - - The request has succeeded. The information returned with the response - is dependent on the method used in the request, for example: - - GET an entity corresponding to the requested resource is sent in - the response; - - HEAD the entity-header fields corresponding to the requested - resource are sent in the response without any message-body; - - POST an entity describing or containing the result of the action; - - - - -Fielding, et al. Standards Track [Page 58] - -RFC 2616 HTTP/1.1 June 1999 - - - TRACE an entity containing the request message as received by the - end server. 
- -10.2.2 201 Created - - The request has been fulfilled and resulted in a new resource being - created. The newly created resource can be referenced by the URI(s) - returned in the entity of the response, with the most specific URI - for the resource given by a Location header field. The response - SHOULD include an entity containing a list of resource - characteristics and location(s) from which the user or user agent can - choose the one most appropriate. The entity format is specified by - the media type given in the Content-Type header field. The origin - server MUST create the resource before returning the 201 status code. - If the action cannot be carried out immediately, the server SHOULD - respond with 202 (Accepted) response instead. - - A 201 response MAY contain an ETag response header field indicating - the current value of the entity tag for the requested variant just - created, see section 14.19. - -10.2.3 202 Accepted - - The request has been accepted for processing, but the processing has - not been completed. The request might or might not eventually be - acted upon, as it might be disallowed when processing actually takes - place. There is no facility for re-sending a status code from an - asynchronous operation such as this. - - The 202 response is intentionally non-committal. Its purpose is to - allow a server to accept a request for some other process (perhaps a - batch-oriented process that is only run once per day) without - requiring that the user agent's connection to the server persist - until the process is completed. The entity returned with this - response SHOULD include an indication of the request's current status - and either a pointer to a status monitor or some estimate of when the - user can expect the request to be fulfilled. 
- -10.2.4 203 Non-Authoritative Information - - The returned metainformation in the entity-header is not the - definitive set as available from the origin server, but is gathered - from a local or a third-party copy. The set presented MAY be a subset - or superset of the original version. For example, including local - annotation information about the resource might result in a superset - of the metainformation known by the origin server. Use of this - response code is not required and is only appropriate when the - response would otherwise be 200 (OK). - - - -Fielding, et al. Standards Track [Page 59] - -RFC 2616 HTTP/1.1 June 1999 - - -10.2.5 204 No Content - - The server has fulfilled the request but does not need to return an - entity-body, and might want to return updated metainformation. The - response MAY include new or updated metainformation in the form of - entity-headers, which if present SHOULD be associated with the - requested variant. - - If the client is a user agent, it SHOULD NOT change its document view - from that which caused the request to be sent. This response is - primarily intended to allow input for actions to take place without - causing a change to the user agent's active document view, although - any new or updated metainformation SHOULD be applied to the document - currently in the user agent's active view. - - The 204 response MUST NOT include a message-body, and thus is always - terminated by the first empty line after the header fields. - -10.2.6 205 Reset Content - - The server has fulfilled the request and the user agent SHOULD reset - the document view which caused the request to be sent. This response - is primarily intended to allow input for actions to take place via - user input, followed by a clearing of the form in which the input is - given so that the user can easily initiate another input action. The - response MUST NOT include an entity. 
- -10.2.7 206 Partial Content - - The server has fulfilled the partial GET request for the resource. - The request MUST have included a Range header field (section 14.35) - indicating the desired range, and MAY have included an If-Range - header field (section 14.27) to make the request conditional. - - The response MUST include the following header fields: - - - Either a Content-Range header field (section 14.16) indicating - the range included with this response, or a multipart/byteranges - Content-Type including Content-Range fields for each part. If a - Content-Length header field is present in the response, its - value MUST match the actual number of OCTETs transmitted in the - message-body. - - - Date - - - ETag and/or Content-Location, if the header would have been sent - in a 200 response to the same request - - - - -Fielding, et al. Standards Track [Page 60] - -RFC 2616 HTTP/1.1 June 1999 - - - - Expires, Cache-Control, and/or Vary, if the field-value might - differ from that sent in any previous response for the same - variant - - If the 206 response is the result of an If-Range request that used a - strong cache validator (see section 13.3.3), the response SHOULD NOT - include other entity-headers. If the response is the result of an - If-Range request that used a weak validator, the response MUST NOT - include other entity-headers; this prevents inconsistencies between - cached entity-bodies and updated headers. Otherwise, the response - MUST include all of the entity-headers that would have been returned - with a 200 (OK) response to the same request. - -[[ Should be: ]] -[[ If the 206 response is the result of an If-Range request, the ]] -[[ response SHOULD NOT include other entity-headers. Otherwise, the ]] -[[ response MUST include all of the entity-headers that would have ]] -[[ been returned with a 200 (OK) response to the same request. 
]] - - A cache MUST NOT combine a 206 response with other previously cached - content if the ETag or Last-Modified headers do not match exactly, - see 13.5.4. - - A cache that does not support the Range and Content-Range headers - MUST NOT cache 206 (Partial) responses. - -10.3 Redirection 3xx - - This class of status code indicates that further action needs to be - taken by the user agent in order to fulfill the request. The action - required MAY be carried out by the user agent without interaction - with the user if and only if the method used in the second request is - GET or HEAD. A client SHOULD detect infinite redirection loops, since - such loops generate network traffic for each redirection. - - Note: previous versions of this specification recommended a - maximum of five redirections. Content developers should be aware - that there might be clients that implement such a fixed - limitation. - -10.3.1 300 Multiple Choices - - The requested resource corresponds to any one of a set of - representations, each with its own specific location, and agent- - driven negotiation information (section 12) is being provided so that - the user (or user agent) can select a preferred representation and - redirect its request to that location. - - Unless it was a HEAD request, the response SHOULD include an entity - containing a list of resource characteristics and location(s) from - which the user or user agent can choose the one most appropriate. The - entity format is specified by the media type given in the Content- - Type header field. Depending upon the format and the capabilities of - - - - -Fielding, et al. Standards Track [Page 61] - -RFC 2616 HTTP/1.1 June 1999 - - - the user agent, selection of the most appropriate choice MAY be - performed automatically. However, this specification does not define - any standard for such automatic selection. 
- - If the server has a preferred choice of representation, it SHOULD - include the specific URI for that representation in the Location - field; user agents MAY use the Location field value for automatic - redirection. This response is cacheable unless indicated otherwise. - -10.3.2 301 Moved Permanently - - The requested resource has been assigned a new permanent URI and any - future references to this resource SHOULD use one of the returned - URIs. Clients with link editing capabilities ought to automatically - re-link references to the Request-URI to one or more of the new - references returned by the server, where possible. This response is - cacheable unless indicated otherwise. - - The new permanent URI SHOULD be given by the Location field in the - response. Unless the request method was HEAD, the entity of the - response SHOULD contain a short hypertext note with a hyperlink to - the new URI(s). - - If the 301 status code is received in response to a request other - than GET or HEAD, the user agent MUST NOT automatically redirect the - request unless it can be confirmed by the user, since this might - change the conditions under which the request was issued. - -[[ Should be: ]] -[[ If the 301 status code is received in response to a request method ]] -[[ that is known to be "safe", as defined in section 9.1.1, then the ]] -[[ request MAY be automatically redirected by the user agent without ]] -[[ confirmation. Otherwise, the user agent MUST NOT automatically ]] -[[ redirect the request unless it is confirmed by the user, since the ]] -[[ new URI might change the conditions under which the request was ]] -[[ issued. ]] - - Note: When automatically redirecting a POST request after - receiving a 301 status code, some existing HTTP/1.0 user agents - will erroneously change it into a GET request. - -10.3.3 302 Found - - The requested resource resides temporarily under a different URI. 
- Since the redirection might be altered on occasion, the client SHOULD - continue to use the Request-URI for future requests. This response - is only cacheable if indicated by a Cache-Control or Expires header - field. - - The temporary URI SHOULD be given by the Location field in the - response. Unless the request method was HEAD, the entity of the - response SHOULD contain a short hypertext note with a hyperlink to - the new URI(s). - - - - - - - -Fielding, et al. Standards Track [Page 62] - -RFC 2616 HTTP/1.1 June 1999 - - - If the 302 status code is received in response to a request other - than GET or HEAD, the user agent MUST NOT automatically redirect the - request unless it can be confirmed by the user, since this might - change the conditions under which the request was issued. - - [[ See errata to 10.3.3 ]] - - Note: RFC 1945 and RFC 2068 specify that the client is not allowed - to change the method on the redirected request. However, most - existing user agent implementations treat 302 as if it were a 303 - response, performing a GET on the Location field-value regardless - of the original request method. The status codes 303 and 307 have - been added for servers that wish to make unambiguously clear which - kind of reaction is expected of the client. - -10.3.4 303 See Other - - The response to the request can be found under a different URI and - SHOULD be retrieved using a GET method on that resource. This method - exists primarily to allow the output of a POST-activated script to - redirect the user agent to a selected resource. The new URI is not a - substitute reference for the originally requested resource. The 303 - response MUST NOT be cached, but the response to the second - (redirected) request might be cacheable. - - The different URI SHOULD be given by the Location field in the - response. Unless the request method was HEAD, the entity of the - response SHOULD contain a short hypertext note with a hyperlink to - the new URI(s). 
- - Note: Many pre-HTTP/1.1 user agents do not understand the 303 - status. When interoperability with such clients is a concern, the - 302 status code may be used instead, since most user agents react - to a 302 response as described here for 303. - -10.3.5 304 Not Modified - - If the client has performed a conditional GET request and access is - allowed, but the document has not been modified, the server SHOULD - respond with this status code. The 304 response MUST NOT contain a - message-body, and thus is always terminated by the first empty line - after the header fields. - - The response MUST include the following header fields: - - - Date, unless its omission is required by section 14.18.1 - - - - - - - -Fielding, et al. Standards Track [Page 63] - -RFC 2616 HTTP/1.1 June 1999 - - - If a clockless origin server obeys these rules, and proxies and - clients add their own Date to any response received without one (as - already specified by [RFC 2068], section 14.19), caches will operate - correctly. - - - ETag and/or Content-Location, if the header would have been sent - in a 200 response to the same request - - - Expires, Cache-Control, and/or Vary, if the field-value might - differ from that sent in any previous response for the same - variant - - If the conditional GET used a strong cache validator (see section - 13.3.3), the response SHOULD NOT include other entity-headers. - Otherwise (i.e., the conditional GET used a weak validator), the - response MUST NOT include other entity-headers; this prevents - inconsistencies between cached entity-bodies and updated headers. - - If a 304 response indicates an entity not currently cached, then the - cache MUST disregard the response and repeat the request without the - conditional. - - If a cache uses a received 304 response to update a cache entry, the - cache MUST update the entry to reflect any new field values given in - the response. 
- -10.3.6 305 Use Proxy - - The requested resource MUST be accessed through the proxy given by - the Location field. The Location field gives the URI of the proxy. - The recipient is expected to repeat this single request via the - proxy. 305 responses MUST only be generated by origin servers. - - Note: RFC 2068 was not clear that 305 was intended to redirect a - single request, and to be generated by origin servers only. Not - observing these limitations has significant security consequences. - -10.3.7 306 (Unused) - - The 306 status code was used in a previous version of the - specification, is no longer used, and the code is reserved. - - - - - - - - - - -Fielding, et al. Standards Track [Page 64] - -RFC 2616 HTTP/1.1 June 1999 - - -10.3.8 307 Temporary Redirect - - The requested resource resides temporarily under a different URI. - Since the redirection MAY be altered on occasion, the client SHOULD - continue to use the Request-URI for future requests. This response - is only cacheable if indicated by a Cache-Control or Expires header - field. - - The temporary URI SHOULD be given by the Location field in the - response. Unless the request method was HEAD, the entity of the - response SHOULD contain a short hypertext note with a hyperlink to - the new URI(s) , since many pre-HTTP/1.1 user agents do not - understand the 307 status. Therefore, the note SHOULD contain the - information necessary for a user to repeat the original request on - the new URI. - - If the 307 status code is received in response to a request other - than GET or HEAD, the user agent MUST NOT automatically redirect the - request unless it can be confirmed by the user, since this might - change the conditions under which the request was issued. - - [[ See errata to 10.3.3 ]] - -10.4 Client Error 4xx - - The 4xx class of status code is intended for cases in which the - client seems to have erred. 
Except when responding to a HEAD request, - the server SHOULD include an entity containing an explanation of the - error situation, and whether it is a temporary or permanent - condition. These status codes are applicable to any request method. - User agents SHOULD display any included entity to the user. - - If the client is sending data, a server implementation using TCP - SHOULD be careful to ensure that the client acknowledges receipt of - the packet(s) containing the response, before the server closes the - input connection. If the client continues sending data to the server - after the close, the server's TCP stack will send a reset packet to - the client, which may erase the client's unacknowledged input buffers - before they can be read and interpreted by the HTTP application. - -10.4.1 400 Bad Request - - The request could not be understood by the server due to malformed - syntax. The client SHOULD NOT repeat the request without - modifications. - - - - - - - - -Fielding, et al. Standards Track [Page 65] - -RFC 2616 HTTP/1.1 June 1999 - - -10.4.2 401 Unauthorized - - The request requires user authentication. The response MUST include a - WWW-Authenticate header field (section 14.47) containing a challenge - applicable to the requested resource. The client MAY repeat the - request with a suitable Authorization header field (section 14.8). If - the request already included Authorization credentials, then the 401 - response indicates that authorization has been refused for those - credentials. If the 401 response contains the same challenge as the - prior response, and the user agent has already attempted - authentication at least once, then the user SHOULD be presented the - entity that was given in the response, since that entity might - include relevant diagnostic information. HTTP access authentication - is explained in "HTTP Authentication: Basic and Digest Access - Authentication" [43]. 
- -10.4.3 402 Payment Required - - This code is reserved for future use. - -10.4.4 403 Forbidden - - The server understood the request, but is refusing to fulfill it. - Authorization will not help and the request SHOULD NOT be repeated. - If the request method was not HEAD and the server wishes to make - public why the request has not been fulfilled, it SHOULD describe the - reason for the refusal in the entity. If the server does not wish to - make this information available to the client, the status code 404 - (Not Found) can be used instead. - -10.4.5 404 Not Found - - The server has not found anything matching the Request-URI. No - indication is given of whether the condition is temporary or - permanent. The 410 (Gone) status code SHOULD be used if the server - knows, through some internally configurable mechanism, that an old - resource is permanently unavailable and has no forwarding address. - This status code is commonly used when the server does not wish to - reveal exactly why the request has been refused, or when no other - response is applicable. - -10.4.6 405 Method Not Allowed - - The method specified in the Request-Line is not allowed for the - resource identified by the Request-URI. The response MUST include an - Allow header containing a list of valid methods for the requested - resource. - - - - -Fielding, et al. Standards Track [Page 66] - -RFC 2616 HTTP/1.1 June 1999 - - -10.4.7 406 Not Acceptable - - The resource identified by the request is only capable of generating - response entities which have content characteristics not acceptable - according to the accept headers sent in the request. - - Unless it was a HEAD request, the response SHOULD include an entity - containing a list of available entity characteristics and location(s) - from which the user or user agent can choose the one most - appropriate. The entity format is specified by the media type given - in the Content-Type header field. 
Depending upon the format and the - capabilities of the user agent, selection of the most appropriate - choice MAY be performed automatically. However, this specification - does not define any standard for such automatic selection. - - Note: HTTP/1.1 servers are allowed to return responses which are - not acceptable according to the accept headers sent in the - request. In some cases, this may even be preferable to sending a - 406 response. User agents are encouraged to inspect the headers of - an incoming response to determine if it is acceptable. - - If the response could be unacceptable, a user agent SHOULD - temporarily stop receipt of more data and query the user for a - decision on further actions. - -10.4.8 407 Proxy Authentication Required - - This code is similar to 401 (Unauthorized), but indicates that the - client must first authenticate itself with the proxy. The proxy MUST - return a Proxy-Authenticate header field (section 14.33) containing a - challenge applicable to the proxy for the requested resource. The - client MAY repeat the request with a suitable Proxy-Authorization - header field (section 14.34). HTTP access authentication is explained - in "HTTP Authentication: Basic and Digest Access Authentication" - [43]. - -10.4.9 408 Request Timeout - - The client did not produce a request within the time that the server - was prepared to wait. The client MAY repeat the request without - modifications at any later time. - -10.4.10 409 Conflict - - The request could not be completed due to a conflict with the current - state of the resource. This code is only allowed in situations where - it is expected that the user might be able to resolve the conflict - and resubmit the request. The response body SHOULD include enough - - - -Fielding, et al. Standards Track [Page 67] - -RFC 2616 HTTP/1.1 June 1999 - - - information for the user to recognize the source of the conflict. 
- Ideally, the response entity would include enough information for the - user or user agent to fix the problem; however, that might not be - possible and is not required. - - Conflicts are most likely to occur in response to a PUT request. For - example, if versioning were being used and the entity being PUT - included changes to a resource which conflict with those made by an - earlier (third-party) request, the server might use the 409 response - to indicate that it can't complete the request. In this case, the - response entity would likely contain a list of the differences - between the two versions in a format defined by the response - Content-Type. - -10.4.11 410 Gone - - The requested resource is no longer available at the server and no - forwarding address is known. This condition is expected to be - considered permanent. Clients with link editing capabilities SHOULD - delete references to the Request-URI after user approval. If the - server does not know, or has no facility to determine, whether or not - the condition is permanent, the status code 404 (Not Found) SHOULD be - used instead. This response is cacheable unless indicated otherwise. - - The 410 response is primarily intended to assist the task of web - maintenance by notifying the recipient that the resource is - intentionally unavailable and that the server owners desire that - remote links to that resource be removed. Such an event is common for - limited-time, promotional services and for resources belonging to - individuals no longer working at the server's site. It is not - necessary to mark all permanently unavailable resources as "gone" or - to keep the mark for any length of time -- that is left to the - discretion of the server owner. - -10.4.12 411 Length Required - - The server refuses to accept the request without a defined Content- - Length. 
The client MAY repeat the request if it adds a valid - Content-Length header field containing the length of the message-body - in the request message. - -10.4.13 412 Precondition Failed - - The precondition given in one or more of the request-header fields - evaluated to false when it was tested on the server. This response - code allows the client to place preconditions on the current resource - metainformation (header field data) and thus prevent the requested - method from being applied to a resource other than the one intended. - - - -Fielding, et al. Standards Track [Page 68] - -RFC 2616 HTTP/1.1 June 1999 - - -10.4.14 413 Request Entity Too Large - - The server is refusing to process a request because the request - entity is larger than the server is willing or able to process. The - server MAY close the connection to prevent the client from continuing - the request. - - If the condition is temporary, the server SHOULD include a Retry- - After header field to indicate that it is temporary and after what - time the client MAY try again. - -10.4.15 414 Request-URI Too Long - - The server is refusing to service the request because the Request-URI - is longer than the server is willing to interpret. This rare - condition is only likely to occur when a client has improperly - converted a POST request to a GET request with long query - information, when the client has descended into a URI "black hole" of - redirection (e.g., a redirected URI prefix that points to a suffix of - itself), or when the server is under attack by a client attempting to - exploit security holes present in some servers using fixed-length - buffers for reading or manipulating the Request-URI. - -10.4.16 415 Unsupported Media Type - - The server is refusing to service the request because the entity of - the request is in a format not supported by the requested resource - for the requested method. 
- -10.4.17 416 Requested Range Not Satisfiable - - A server SHOULD return a response with this status code if a request - included a Range request-header field (section 14.35), and none of - the range-specifier values in this field overlap the current extent - of the selected resource, and the request did not include an If-Range - request-header field. (For byte-ranges, this means that the first- - byte-pos of all of the byte-range-spec values were greater than the - current length of the selected resource.) - - When this status code is returned for a byte-range request, the - response SHOULD include a Content-Range entity-header field - specifying the current length of the selected resource (see section - 14.16). This response MUST NOT use the multipart/byteranges content- - type. - - - - - - - -Fielding, et al. Standards Track [Page 69] - -RFC 2616 HTTP/1.1 June 1999 - - -10.4.18 417 Expectation Failed - - The expectation given in an Expect request-header field (see section - 14.20) could not be met by this server, or, if the server is a proxy, - the server has unambiguous evidence that the request could not be met - by the next-hop server. - -10.5 Server Error 5xx - - Response status codes beginning with the digit "5" indicate cases in - which the server is aware that it has erred or is incapable of - performing the request. Except when responding to a HEAD request, the - server SHOULD include an entity containing an explanation of the - error situation, and whether it is a temporary or permanent - condition. User agents SHOULD display any included entity to the - user. These response codes are applicable to any request method. - -10.5.1 500 Internal Server Error - - The server encountered an unexpected condition which prevented it - from fulfilling the request. - -10.5.2 501 Not Implemented - - The server does not support the functionality required to fulfill the - request. 
This is the appropriate response when the server does not - recognize the request method and is not capable of supporting it for - any resource. - -10.5.3 502 Bad Gateway - - The server, while acting as a gateway or proxy, received an invalid - response from the upstream server it accessed in attempting to - fulfill the request. - -10.5.4 503 Service Unavailable - - The server is currently unable to handle the request due to a - temporary overloading or maintenance of the server. The implication - is that this is a temporary condition which will be alleviated after - some delay. If known, the length of the delay MAY be indicated in a - Retry-After header. If no Retry-After is given, the client SHOULD - handle the response as it would for a 500 response. - - Note: The existence of the 503 status code does not imply that a - server must use it when becoming overloaded. Some servers may wish - to simply refuse the connection. - - - - -Fielding, et al. Standards Track [Page 70] - -RFC 2616 HTTP/1.1 June 1999 - - -10.5.5 504 Gateway Timeout - - The server, while acting as a gateway or proxy, did not receive a - timely response from the upstream server specified by the URI (e.g. - HTTP, FTP, LDAP) or some other auxiliary server (e.g. DNS) it needed - to access in attempting to complete the request. - - Note: Note to implementors: some deployed proxies are known to - return 400 or 500 when DNS lookups time out. - -10.5.6 505 HTTP Version Not Supported - - The server does not support, or refuses to support, the HTTP protocol - version that was used in the request message. The server is - indicating that it is unable or unwilling to complete the request - using the same major version as the client, as described in section - 3.1, other than with this error message. The response SHOULD contain - an entity describing why that version is not supported and what other - protocols are supported by that server. 
- -11 Access Authentication - - HTTP provides several OPTIONAL challenge-response authentication - mechanisms which can be used by a server to challenge a client - request and by a client to provide authentication information. The - general framework for access authentication, and the specification of - "basic" and "digest" authentication, are specified in "HTTP - Authentication: Basic and Digest Access Authentication" [43]. This - specification adopts the definitions of "challenge" and "credentials" - from that specification. - -12 Content Negotiation - - Most HTTP responses include an entity which contains information for - interpretation by a human user. Naturally, it is desirable to supply - the user with the "best available" entity corresponding to the - request. Unfortunately for servers and caches, not all users have the - same preferences for what is "best," and not all user agents are - equally capable of rendering all entity types. For that reason, HTTP - has provisions for several mechanisms for "content negotiation" -- - the process of selecting the best representation for a given response - when there are multiple representations available. - - Note: This is not called "format negotiation" because the - alternate representations may be of the same media type, but use - different capabilities of that type, be in different languages, - etc. - - - - -Fielding, et al. Standards Track [Page 71] - -RFC 2616 HTTP/1.1 June 1999 - - - Any response containing an entity-body MAY be subject to negotiation, - including error responses. - - There are two kinds of content negotiation which are possible in - HTTP: server-driven and agent-driven negotiation. These two kinds of - negotiation are orthogonal and thus may be used separately or in - combination. 
One method of combination, referred to as transparent - negotiation, occurs when a cache uses the agent-driven negotiation - information provided by the origin server in order to provide - server-driven negotiation for subsequent requests. - -12.1 Server-driven Negotiation - - If the selection of the best representation for a response is made by - an algorithm located at the server, it is called server-driven - negotiation. Selection is based on the available representations of - the response (the dimensions over which it can vary; e.g. language, - content-coding, etc.) and the contents of particular header fields in - the request message or on other information pertaining to the request - (such as the network address of the client). - - Server-driven negotiation is advantageous when the algorithm for - selecting from among the available representations is difficult to - describe to the user agent, or when the server desires to send its - "best guess" to the client along with the first response (hoping to - avoid the round-trip delay of a subsequent request if the "best - guess" is good enough for the user). In order to improve the server's - guess, the user agent MAY include request header fields (Accept, - Accept-Language, Accept-Encoding, etc.) which describe its - preferences for such a response. - - Server-driven negotiation has disadvantages: - - 1. It is impossible for the server to accurately determine what - might be "best" for any given user, since that would require - complete knowledge of both the capabilities of the user agent - and the intended use for the response (e.g., does the user want - to view it on screen or print it on paper?). - - 2. Having the user agent describe its capabilities in every - request can be both very inefficient (given that only a small - percentage of responses have multiple representations) and a - potential violation of the user's privacy. - - 3. 
It complicates the implementation of an origin server and the - algorithms for generating responses to a request. - - - - - -Fielding, et al. Standards Track [Page 72] - -RFC 2616 HTTP/1.1 June 1999 - - - 4. It may limit a public cache's ability to use the same response - for multiple user's requests. - - HTTP/1.1 includes the following request-header fields for enabling - server-driven negotiation through description of user agent - capabilities and user preferences: Accept (section 14.1), Accept- - Charset (section 14.2), Accept-Encoding (section 14.3), Accept- - Language (section 14.4), and User-Agent (section 14.43). However, an - origin server is not limited to these dimensions and MAY vary the - response based on any aspect of the request, including information - outside the request-header fields or within extension header fields - not defined by this specification. - - The Vary header field can be used to express the parameters the - server uses to select a representation that is subject to server- - driven negotiation. See section 13.6 for use of the Vary header field - by caches and section 14.44 for use of the Vary header field by - servers. - -12.2 Agent-driven Negotiation - - With agent-driven negotiation, selection of the best representation - for a response is performed by the user agent after receiving an - initial response from the origin server. Selection is based on a list - of the available representations of the response included within the - header fields or entity-body of the initial response, with each - representation identified by its own URI. Selection from among the - representations may be performed automatically (if the user agent is - capable of doing so) or manually by the user selecting from a - generated (possibly hypertext) menu. 
- - Agent-driven negotiation is advantageous when the response would vary - over commonly-used dimensions (such as type, language, or encoding), - when the origin server is unable to determine a user agent's - capabilities from examining the request, and generally when public - caches are used to distribute server load and reduce network usage. - - Agent-driven negotiation suffers from the disadvantage of needing a - second request to obtain the best alternate representation. This - second request is only efficient when caching is used. In addition, - this specification does not define any mechanism for supporting - automatic selection, though it also does not prevent any such - mechanism from being developed as an extension and used within - HTTP/1.1. - - - - - - - -Fielding, et al. Standards Track [Page 73] - -RFC 2616 HTTP/1.1 June 1999 - - - HTTP/1.1 defines the 300 (Multiple Choices) and 406 (Not Acceptable) - status codes for enabling agent-driven negotiation when the server is - unwilling or unable to provide a varying response using server-driven - negotiation. - -12.3 Transparent Negotiation - - Transparent negotiation is a combination of both server-driven and - agent-driven negotiation. When a cache is supplied with a form of the - list of available representations of the response (as in agent-driven - negotiation) and the dimensions of variance are completely understood - by the cache, then the cache becomes capable of performing server- - driven negotiation on behalf of the origin server for subsequent - requests on that resource. - - Transparent negotiation has the advantage of distributing the - negotiation work that would otherwise be required of the origin - server and also removing the second request delay of agent-driven - negotiation when the cache is able to correctly guess the right - response. 
- - This specification does not define any mechanism for transparent - negotiation, though it also does not prevent any such mechanism from - being developed as an extension that could be used within HTTP/1.1. - -13 Caching in HTTP - - HTTP is typically used for distributed information systems, where - performance can be improved by the use of response caches. The - HTTP/1.1 protocol includes a number of elements intended to make - caching work as well as possible. Because these elements are - inextricable from other aspects of the protocol, and because they - interact with each other, it is useful to describe the basic caching - design of HTTP separately from the detailed descriptions of methods, - headers, response codes, etc. - - Caching would be useless if it did not significantly improve - performance. The goal of caching in HTTP/1.1 is to eliminate the need - to send requests in many cases, and to eliminate the need to send - full responses in many other cases. The former reduces the number of - network round-trips required for many operations; we use an - "expiration" mechanism for this purpose (see section 13.2). The - latter reduces network bandwidth requirements; we use a "validation" - mechanism for this purpose (see section 13.3). - - Requirements for performance, availability, and disconnected - operation require us to be able to relax the goal of semantic - transparency. The HTTP/1.1 protocol allows origin servers, caches, - - - -Fielding, et al. Standards Track [Page 74] - -RFC 2616 HTTP/1.1 June 1999 - - - and clients to explicitly reduce transparency when necessary. 
- However, because non-transparent operation may confuse non-expert - users, and might be incompatible with certain server applications - (such as those for ordering merchandise), the protocol requires that - transparency be relaxed - - - only by an explicit protocol-level request when relaxed by - client or origin server - - - only with an explicit warning to the end user when relaxed by - cache or client - - Therefore, the HTTP/1.1 protocol provides these important elements: - - 1. Protocol features that provide full semantic transparency when - this is required by all parties. - - 2. Protocol features that allow an origin server or user agent to - explicitly request and control non-transparent operation. - - 3. Protocol features that allow a cache to attach warnings to - responses that do not preserve the requested approximation of - semantic transparency. - - A basic principle is that it must be possible for the clients to - detect any potential relaxation of semantic transparency. - - Note: The server, cache, or client implementor might be faced with - design decisions not explicitly discussed in this specification. - If a decision might affect semantic transparency, the implementor - ought to err on the side of maintaining transparency unless a - careful and complete analysis shows significant benefits in - breaking transparency. - -13.1.1 Cache Correctness - - A correct cache MUST respond to a request with the most up-to-date - response held by the cache that is appropriate to the request (see - sections 13.2.5, 13.2.6, and 13.12) which meets one of the following - conditions: - - 1. It has been checked for equivalence with what the origin server - would have returned by revalidating the response with the - origin server (section 13.3); - - - - - - - -Fielding, et al. Standards Track [Page 75] - -RFC 2616 HTTP/1.1 June 1999 - - - 2. It is "fresh enough" (see section 13.2). 
In the default case, - this means it meets the least restrictive freshness requirement - of the client, origin server, and cache (see section 14.9); if - the origin server so specifies, it is the freshness requirement - of the origin server alone. - - If a stored response is not "fresh enough" by the most - restrictive freshness requirement of both the client and the - origin server, in carefully considered circumstances the cache - MAY still return the response with the appropriate Warning - header (see section 13.1.5 and 14.46), unless such a response - is prohibited (e.g., by a "no-store" cache-directive, or by a - "no-cache" cache-request-directive; see section 14.9). - - 3. It is an appropriate 304 (Not Modified), 305 (Proxy Redirect), - or error (4xx or 5xx) response message. - - If the cache can not communicate with the origin server, then a - correct cache SHOULD respond as above if the response can be - correctly served from the cache; if not it MUST return an error or - warning indicating that there was a communication failure. - - If a cache receives a response (either an entire response, or a 304 - (Not Modified) response) that it would normally forward to the - requesting client, and the received response is no longer fresh, the - cache SHOULD forward it to the requesting client without adding a new - Warning (but without removing any existing Warning headers). A cache - SHOULD NOT attempt to revalidate a response simply because that - response became stale in transit; this might lead to an infinite - loop. A user agent that receives a stale response without a Warning - MAY display a warning indication to the user. - -13.1.2 Warnings - - Whenever a cache returns a response that is neither first-hand nor - "fresh enough" (in the sense of condition 2 in section 13.1.1), it - MUST attach a warning to that effect, using a Warning general-header. - The Warning header and the currently defined warnings are described - in section 14.46. 
The warning allows clients to take appropriate - action. - - Warnings MAY be used for other purposes, both cache-related and - otherwise. The use of a warning, rather than an error status code, - distinguish these responses from true failures. - - Warnings are assigned three digit warn-codes. The first digit - indicates whether the Warning MUST or MUST NOT be deleted from a - stored cache entry after a successful revalidation: - - - -Fielding, et al. Standards Track [Page 76] - -RFC 2616 HTTP/1.1 June 1999 - - - 1xx Warnings that describe the freshness or revalidation status of - the response, and so MUST be deleted after a successful - revalidation. 1XX warn-codes MAY be generated by a cache only when - validating a cached entry. It MUST NOT be generated by clients. - - 2xx Warnings that describe some aspect of the entity body or entity - headers that is not rectified by a revalidation (for example, a - lossy compression of the entity bodies) and which MUST NOT be - deleted after a successful revalidation. - - See section 14.46 for the definitions of the codes themselves. - - HTTP/1.0 caches will cache all Warnings in responses, without - deleting the ones in the first category. Warnings in responses that - are passed to HTTP/1.0 caches carry an extra warning-date field, - which prevents a future HTTP/1.1 recipient from believing an - erroneously cached Warning. - - Warnings also carry a warning text. The text MAY be in any - appropriate natural language (perhaps based on the client's Accept - headers), and include an OPTIONAL indication of what character set is - used. - - Multiple warnings MAY be attached to a response (either by the origin - server or by a cache), including multiple warnings with the same code - number. For example, a server might provide the same warning with - texts in both English and Basque. - - When multiple warnings are attached to a response, it might not be - practical or reasonable to display all of them to the user. 
This - version of HTTP does not specify strict priority rules for deciding - which warnings to display and in what order, but does suggest some - heuristics. - -13.1.3 Cache-control Mechanisms - - The basic cache mechanisms in HTTP/1.1 (server-specified expiration - times and validators) are implicit directives to caches. In some - cases, a server or client might need to provide explicit directives - to the HTTP caches. We use the Cache-Control header for this purpose. - - The Cache-Control header allows a client or server to transmit a - variety of directives in either requests or responses. These - directives typically override the default caching algorithms. As a - general rule, if there is any apparent conflict between header - values, the most restrictive interpretation is applied (that is, the - one that is most likely to preserve semantic transparency). However, - - - - -Fielding, et al. Standards Track [Page 77] - -RFC 2616 HTTP/1.1 June 1999 - - - in some cases, cache-control directives are explicitly specified as - weakening the approximation of semantic transparency (for example, - "max-stale" or "public"). - - The cache-control directives are described in detail in section 14.9. - -13.1.4 Explicit User Agent Warnings - - Many user agents make it possible for users to override the basic - caching mechanisms. For example, the user agent might allow the user - to specify that cached entities (even explicitly stale ones) are - never validated. Or the user agent might habitually add "Cache- - Control: max-stale=3600" to every request. The user agent SHOULD NOT - default to either non-transparent behavior, or behavior that results - in abnormally ineffective caching, but MAY be explicitly configured - to do so by an explicit action of the user. 
- - If the user has overridden the basic caching mechanisms, the user - agent SHOULD explicitly indicate to the user whenever this results in - the display of information that might not meet the server's - transparency requirements (in particular, if the displayed entity is - known to be stale). Since the protocol normally allows the user agent - to determine if responses are stale or not, this indication need only - be displayed when this actually happens. The indication need not be a - dialog box; it could be an icon (for example, a picture of a rotting - fish) or some other indicator. - - If the user has overridden the caching mechanisms in a way that would - abnormally reduce the effectiveness of caches, the user agent SHOULD - continually indicate this state to the user (for example, by a - display of a picture of currency in flames) so that the user does not - inadvertently consume excess resources or suffer from excessive - latency. - -13.1.5 Exceptions to the Rules and Warnings - - In some cases, the operator of a cache MAY choose to configure it to - return stale responses even when not requested by clients. This - decision ought not be made lightly, but may be necessary for reasons - of availability or performance, especially when the cache is poorly - connected to the origin server. Whenever a cache returns a stale - response, it MUST mark it as such (using a Warning header) enabling - the client software to alert the user that there might be a potential - problem. - - - - - - - -Fielding, et al. Standards Track [Page 78] - -RFC 2616 HTTP/1.1 June 1999 - - - It also allows the user agent to take steps to obtain a first-hand or - fresh response. For this reason, a cache SHOULD NOT return a stale - response if the client explicitly requests a first-hand or fresh one, - unless it is impossible to comply for technical or policy reasons. 
- -13.1.6 Client-controlled Behavior - - While the origin server (and to a lesser extent, intermediate caches, - by their contribution to the age of a response) are the primary - source of expiration information, in some cases the client might need - to control a cache's decision about whether to return a cached - response without validating it. Clients do this using several - directives of the Cache-Control header. - - A client's request MAY specify the maximum age it is willing to - accept of an unvalidated response; specifying a value of zero forces - the cache(s) to revalidate all responses. A client MAY also specify - the minimum time remaining before a response expires. Both of these - options increase constraints on the behavior of caches, and so cannot - further relax the cache's approximation of semantic transparency. - - A client MAY also specify that it will accept stale responses, up to - some maximum amount of staleness. This loosens the constraints on the - caches, and so might violate the origin server's specified - constraints on semantic transparency, but might be necessary to - support disconnected operation, or high availability in the face of - poor connectivity. - -13.2 Expiration Model - -13.2.1 Server-Specified Expiration - - HTTP caching works best when caches can entirely avoid making - requests to the origin server. The primary mechanism for avoiding - requests is for an origin server to provide an explicit expiration - time in the future, indicating that a response MAY be used to satisfy - subsequent requests. In other words, a cache can return a fresh - response without first contacting the server. - - Our expectation is that servers will assign future explicit - expiration times to responses in the belief that the entity is not - likely to change, in a semantically significant way, before the - expiration time is reached. This normally preserves semantic - transparency, as long as the server's expiration times are carefully - chosen. 
- - - - - - -Fielding, et al. Standards Track [Page 79] - -RFC 2616 HTTP/1.1 June 1999 - - The expiration mechanism applies only to responses taken from a cache - and not to first-hand responses forwarded immediately to the - requesting client. - - If an origin server wishes to force a semantically transparent cache - to validate every request, it MAY assign an explicit expiration time - in the past. This means that the response is always stale, and so the - cache SHOULD validate it before using it for subsequent requests. See - section 14.9.4 for a more restrictive way to force revalidation. - - If an origin server wishes to force any HTTP/1.1 cache, no matter how - it is configured, to validate every request, it SHOULD use the "must- - revalidate" cache-control directive (see section 14.9). - - Servers specify explicit expiration times using either the Expires - header, or the max-age directive of the Cache-Control header. - - An expiration time cannot be used to force a user agent to refresh - its display or reload a resource; its semantics apply only to caching - mechanisms, and such mechanisms need only check a resource's - expiration status when a new request for that resource is initiated. - See section 13.13 for an explanation of the difference between caches - and history mechanisms. - -13.2.2 Heuristic Expiration - - Since origin servers do not always provide explicit expiration times, - HTTP caches typically assign heuristic expiration times, employing - algorithms that use other header values (such as the Last-Modified - time) to estimate a plausible expiration time. The HTTP/1.1 - specification does not provide specific algorithms, but does impose - worst-case constraints on their results. Since heuristic expiration - times might compromise semantic transparency, they ought to be used - cautiously, and we encourage origin servers to provide explicit - expiration times as much as possible. 
- -13.2.3 Age Calculations - - In order to know if a cached entry is fresh, a cache needs to know if - its age exceeds its freshness lifetime. We discuss how to calculate - the latter in section 13.2.4; this section describes how to calculate - the age of a response or cache entry. - - In this discussion, we use the term "now" to mean "the current value - of the clock at the host performing the calculation." Hosts that use - HTTP, but especially hosts running origin servers and caches, SHOULD - use NTP [28] or some similar protocol to synchronize their clocks to - a globally accurate time standard. - - - -Fielding, et al. Standards Track [Page 80] - -RFC 2616 HTTP/1.1 June 1999 - - - HTTP/1.1 requires origin servers to send a Date header, if possible, - with every response, giving the time at which the response was - generated (see section 14.18). We use the term "date_value" to denote - the value of the Date header, in a form appropriate for arithmetic - operations. - - HTTP/1.1 uses the Age response-header to convey the estimated age of - the response message when obtained from a cache. The Age field value - is the cache's estimate of the amount of time since the response was - generated or revalidated by the origin server. - - In essence, the Age value is the sum of the time that the response - has been resident in each of the caches along the path from the - origin server, plus the amount of time it has been in transit along - network paths. - - We use the term "age_value" to denote the value of the Age header, in - a form appropriate for arithmetic operations. - - A response's age can be calculated in two entirely independent ways: - - 1. now minus date_value, if the local clock is reasonably well - synchronized to the origin server's clock. If the result is - negative, the result is replaced by zero. - - 2. age_value, if all of the caches along the response path - implement HTTP/1.1. 
- - Given that we have two independent ways to compute the age of a - response when it is received, we can combine these as - - corrected_received_age = max(now - date_value, age_value) - - and as long as we have either nearly synchronized clocks or all- - HTTP/1.1 paths, one gets a reliable (conservative) result. - - Because of network-imposed delays, some significant interval might - pass between the time that a server generates a response and the time - it is received at the next outbound cache or client. If uncorrected, - this delay could result in improperly low ages. - - Because the request that resulted in the returned Age value must have - been initiated prior to that Age value's generation, we can correct - for delays imposed by the network by recording the time at which the - request was initiated. Then, when an Age value is received, it MUST - be interpreted relative to the time the request was initiated, not - - - - - -Fielding, et al. Standards Track [Page 81] - -RFC 2616 HTTP/1.1 June 1999 - - - the time that the response was received. This algorithm results in - conservative behavior no matter how much delay is experienced. So, we - compute: - - corrected_initial_age = corrected_received_age - + (now - request_time) - - where "request_time" is the time (according to the local clock) when - the request that elicited this response was sent. - - Summary of age calculation algorithm, when a cache receives a - response: - - /* - * age_value - * is the value of Age: header received by the cache with - * this response. 
- * date_value - * is the value of the origin server's Date: header - * request_time - * is the (local) time when the cache made the request - * that resulted in this cached response - * response_time - * is the (local) time when the cache received the - * response - * now - * is the current (local) time - */ - - apparent_age = max(0, response_time - date_value); - corrected_received_age = max(apparent_age, age_value); - response_delay = response_time - request_time; - corrected_initial_age = corrected_received_age + response_delay; - resident_time = now - response_time; - current_age = corrected_initial_age + resident_time; - - The current_age of a cache entry is calculated by adding the amount - of time (in seconds) since the cache entry was last validated by the - origin server to the corrected_initial_age. When a response is - generated from a cache entry, the cache MUST include a single Age - header field in the response with a value equal to the cache entry's - current_age. - - The presence of an Age header field in a response implies that a - response is not first-hand. However, the converse is not true, since - the lack of an Age header field in a response does not imply that the - - - - - -Fielding, et al. Standards Track [Page 82] - -RFC 2616 HTTP/1.1 June 1999 - - - response is first-hand unless all caches along the request path are - compliant with HTTP/1.1 (i.e., older HTTP caches did not implement - the Age header field). - -13.2.4 Expiration Calculations - - In order to decide whether a response is fresh or stale, we need to - compare its freshness lifetime to its age. The age is calculated as - described in section 13.2.3; this section describes how to calculate - the freshness lifetime, and to determine if a response has expired. - In the discussion below, the values can be represented in any form - appropriate for arithmetic operations. - - We use the term "expires_value" to denote the value of the Expires - header. 
We use the term "max_age_value" to denote an appropriate - value of the number of seconds carried by the "max-age" directive of - the Cache-Control header in a response (see section 14.9.3). - - The max-age directive takes priority over Expires, so if max-age is - present in a response, the calculation is simply: - - freshness_lifetime = max_age_value - - Otherwise, if Expires is present in the response, the calculation is: - - freshness_lifetime = expires_value - date_value - - Note that neither of these calculations is vulnerable to clock skew, - since all of the information comes from the origin server. - - If none of Expires, Cache-Control: max-age, or Cache-Control: s- - maxage (see section 14.9.3) appears in the response, and the response - does not include other restrictions on caching, the cache MAY compute - a freshness lifetime using a heuristic. The cache MUST attach Warning - 113 to any response whose age is more than 24 hours if such warning - has not already been added. - - Also, if the response does have a Last-Modified time, the heuristic - expiration value SHOULD be no more than some fraction of the interval - since that time. A typical setting of this fraction might be 10%. - - The calculation to determine if a response has expired is quite - simple: - - response_is_fresh = (freshness_lifetime > current_age) - - - - - - -Fielding, et al. Standards Track [Page 83] - -RFC 2616 HTTP/1.1 June 1999 - - -13.2.5 Disambiguating Expiration Values - - Because expiration values are assigned optimistically, it is possible - for two caches to contain fresh values for the same resource that are - different. - - If a client performing a retrieval receives a non-first-hand response - for a request that was already fresh in its own cache, and the Date - header in its existing cache entry is newer than the Date on the new - response, then the client MAY ignore the response. 
If so, it MAY - retry the request with a "Cache-Control: max-age=0" directive (see - section 14.9), to force a check with the origin server. - - If a cache has two fresh responses for the same representation with - different validators, it MUST use the one with the more recent Date - header. This situation might arise because the cache is pooling - responses from other caches, or because a client has asked for a - reload or a revalidation of an apparently fresh cache entry. - -13.2.6 Disambiguating Multiple Responses - - Because a client might be receiving responses via multiple paths, so - that some responses flow through one set of caches and other - responses flow through a different set of caches, a client might - receive responses in an order different from that in which the origin - server sent them. We would like the client to use the most recently - generated response, even if older responses are still apparently - fresh. - - Neither the entity tag nor the expiration value can impose an - ordering on responses, since it is possible that a later response - intentionally carries an earlier expiration time. The Date values are - ordered to a granularity of one second. - - When a client tries to revalidate a cache entry, and the response it - receives contains a Date header that appears to be older than the one - for the existing entry, then the client SHOULD repeat the request - unconditionally, and include - - Cache-Control: max-age=0 - - to force any intermediate caches to validate their copies directly - with the origin server, or - - Cache-Control: no-cache - - to force any intermediate caches to obtain a new copy from the origin - server. - - - -Fielding, et al. Standards Track [Page 84] - -RFC 2616 HTTP/1.1 June 1999 - - - If the Date values are equal, then the client MAY use either response - (or MAY, if it is being extremely prudent, request a new response). 
- Servers MUST NOT depend on clients being able to choose - deterministically between responses generated during the same second, - if their expiration times overlap. - -13.3 Validation Model - - When a cache has a stale entry that it would like to use as a - response to a client's request, it first has to check with the origin - server (or possibly an intermediate cache with a fresh response) to - see if its cached entry is still usable. We call this "validating" - the cache entry. Since we do not want to have to pay the overhead of - retransmitting the full response if the cached entry is good, and we - do not want to pay the overhead of an extra round trip if the cached - entry is invalid, the HTTP/1.1 protocol supports the use of - conditional methods. - - The key protocol features for supporting conditional methods are - those concerned with "cache validators." When an origin server - generates a full response, it attaches some sort of validator to it, - which is kept with the cache entry. When a client (user agent or - proxy cache) makes a conditional request for a resource for which it - has a cache entry, it includes the associated validator in the - request. - - The server then checks that validator against the current validator - for the entity, and, if they match (see section 13.3.3), it responds - with a special status code (usually, 304 (Not Modified)) and no - entity-body. Otherwise, it returns a full response (including - entity-body). Thus, we avoid transmitting the full response if the - validator matches, and we avoid an extra round trip if it does not - match. - - In HTTP/1.1, a conditional request looks exactly the same as a normal - request for the same resource, except that it carries a special - header (which includes the validator) that implicitly turns the - method (usually, GET) into a conditional. - - The protocol includes both positive and negative senses of cache- - validating conditions. 
That is, it is possible to request either that - a method be performed if and only if a validator matches or if and - only if no validators match. - - - - - - - - -Fielding, et al. Standards Track [Page 85] - -RFC 2616 HTTP/1.1 June 1999 - - - Note: a response that lacks a validator may still be cached, and - served from cache until it expires, unless this is explicitly - prohibited by a cache-control directive. However, a cache cannot - do a conditional retrieval if it does not have a validator for the - entity, which means it will not be refreshable after it expires. - -13.3.1 Last-Modified Dates - - The Last-Modified entity-header field value is often used as a cache - validator. In simple terms, a cache entry is considered to be valid - if the entity has not been modified since the Last-Modified value. - -13.3.2 Entity Tag Cache Validators - - The ETag response-header field value, an entity tag, provides for an - "opaque" cache validator. This might allow more reliable validation - in situations where it is inconvenient to store modification dates, - where the one-second resolution of HTTP date values is not - sufficient, or where the origin server wishes to avoid certain - paradoxes that might arise from the use of modification dates. - - Entity Tags are described in section 3.11. The headers used with - entity tags are described in sections 14.19, 14.24, 14.26 and 14.44. - -13.3.3 Weak and Strong Validators - - Since both origin servers and caches will compare two validators to - decide if they represent the same or different entities, one normally - would expect that if the entity (the entity-body or any entity- - headers) changes in any way, then the associated validator would - change as well. If this is true, then we call this validator a - "strong validator." - - However, there might be cases when a server prefers to change the - validator only on semantically significant changes, and not when - insignificant aspects of the entity change. 
A validator that does not - always change when the resource changes is a "weak validator." - - Entity tags are normally "strong validators," but the protocol - provides a mechanism to tag an entity tag as "weak." One can think of - a strong validator as one that changes whenever the bits of an entity - changes, while a weak value changes whenever the meaning of an entity - changes. Alternatively, one can think of a strong validator as part - of an identifier for a specific entity, while a weak validator is - part of an identifier for a set of semantically equivalent entities. - - Note: One example of a strong validator is an integer that is - incremented in stable storage every time an entity is changed. - - - -Fielding, et al. Standards Track [Page 86] - -RFC 2616 HTTP/1.1 June 1999 - - - An entity's modification time, if represented with one-second - resolution, could be a weak validator, since it is possible that - the resource might be modified twice during a single second. - - Support for weak validators is optional. However, weak validators - allow for more efficient caching of equivalent objects; for - example, a hit counter on a site is probably good enough if it is - updated every few days or weeks, and any value during that period - is likely "good enough" to be equivalent. - - A "use" of a validator is either when a client generates a request - and includes the validator in a validating header field, or when a - server compares two validators. - - Strong validators are usable in any context. Weak validators are only - usable in contexts that do not depend on exact equality of an entity. - For example, either kind is usable for a conditional GET of a full - entity. However, only a strong validator is usable for a sub-range - retrieval, since otherwise the client might end up with an internally - inconsistent entity. - - Clients MAY issue simple (non-subrange) GET requests with either weak - validators or strong validators. 
Clients MUST NOT use weak validators - in other forms of request. - - The only function that the HTTP/1.1 protocol defines on validators is - comparison. There are two validator comparison functions, depending - on whether the comparison context allows the use of weak validators - or not: - - - The strong comparison function: in order to be considered equal, - both validators MUST be identical in every way, and both MUST - NOT be weak. - - - The weak comparison function: in order to be considered equal, - both validators MUST be identical in every way, but either or - both of them MAY be tagged as "weak" without affecting the - result. - - An entity tag is strong unless it is explicitly tagged as weak. - Section 3.11 gives the syntax for entity tags. - - A Last-Modified time, when used as a validator in a request, is - implicitly weak unless it is possible to deduce that it is strong, - using the following rules: - - - The validator is being compared by an origin server to the - actual current validator for the entity and, - - - -Fielding, et al. Standards Track [Page 87] - -RFC 2616 HTTP/1.1 June 1999 - - - - That origin server reliably knows that the associated entity did - not change twice during the second covered by the presented - validator. - - or - - - The validator is about to be used by a client in an If- - Modified-Since or If-Unmodified-Since header, because the client - has a cache entry for the associated entity, and - - - That cache entry includes a Date value, which gives the time - when the origin server sent the original response, and - - - The presented Last-Modified time is at least 60 seconds before - the Date value. 
- - or - - - The validator is being compared by an intermediate cache to the - validator stored in its cache entry for the entity, and - - - That cache entry includes a Date value, which gives the time - when the origin server sent the original response, and - - - The presented Last-Modified time is at least 60 seconds before - the Date value. - - This method relies on the fact that if two different responses were - sent by the origin server during the same second, but both had the - same Last-Modified time, then at least one of those responses would - have a Date value equal to its Last-Modified time. The arbitrary 60- - second limit guards against the possibility that the Date and Last- - Modified values are generated from different clocks, or at somewhat - different times during the preparation of the response. An - implementation MAY use a value larger than 60 seconds, if it is - believed that 60 seconds is too short. - - If a client wishes to perform a sub-range retrieval on a value for - which it has only a Last-Modified time and no opaque validator, it - MAY do this only if the Last-Modified time is strong in the sense - described here. - - A cache or origin server receiving a conditional request, other than - a full-body GET request, MUST use the strong comparison function to - evaluate the condition. - - These rules allow HTTP/1.1 caches and clients to safely perform sub- - range retrievals on values that have been obtained from HTTP/1.0 - - - -Fielding, et al. Standards Track [Page 88] - -RFC 2616 HTTP/1.1 June 1999 - - - servers. - -13.3.4 Rules for When to Use Entity Tags and Last-Modified Dates - - We adopt a set of rules and recommendations for origin servers, - clients, and caches regarding when various validator types ought to - be used, and for what purposes. - - HTTP/1.1 origin servers: - - - SHOULD send an entity tag validator unless it is not feasible to - generate one. 
- - - MAY send a weak entity tag instead of a strong entity tag, if - performance considerations support the use of weak entity tags, - or if it is unfeasible to send a strong entity tag. - - - SHOULD send a Last-Modified value if it is feasible to send one, - unless the risk of a breakdown in semantic transparency that - could result from using this date in an If-Modified-Since header - would lead to serious problems. - - In other words, the preferred behavior for an HTTP/1.1 origin server - is to send both a strong entity tag and a Last-Modified value. - - In order to be legal, a strong entity tag MUST change whenever the - associated entity value changes in any way. A weak entity tag SHOULD - change whenever the associated entity changes in a semantically - significant way. - - Note: in order to provide semantically transparent caching, an - origin server must avoid reusing a specific strong entity tag - value for two different entities, or reusing a specific weak - entity tag value for two semantically different entities. Cache - entries might persist for arbitrarily long periods, regardless of - expiration times, so it might be inappropriate to expect that a - cache will never again attempt to validate an entry using a - validator that it obtained at some point in the past. - - HTTP/1.1 clients: - - - If an entity tag has been provided by the origin server, MUST - use that entity tag in any cache-conditional request (using If- - Match or If-None-Match). - - - If only a Last-Modified value has been provided by the origin - server, SHOULD use that value in non-subrange cache-conditional - requests (using If-Modified-Since). - - - -Fielding, et al. Standards Track [Page 89] - -RFC 2616 HTTP/1.1 June 1999 - - - - If only a Last-Modified value has been provided by an HTTP/1.0 - origin server, MAY use that value in subrange cache-conditional - requests (using If-Unmodified-Since:). The user agent SHOULD - provide a way to disable this, in case of difficulty. 
- - - If both an entity tag and a Last-Modified value have been - provided by the origin server, SHOULD use both validators in - cache-conditional requests. This allows both HTTP/1.0 and - HTTP/1.1 caches to respond appropriately. - - An HTTP/1.1 origin server, upon receiving a conditional request that - includes both a Last-Modified date (e.g., in an If-Modified-Since or - If-Unmodified-Since header field) and one or more entity tags (e.g., - in an If-Match, If-None-Match, or If-Range header field) as cache - validators, MUST NOT return a response status of 304 (Not Modified) - unless doing so is consistent with all of the conditional header - fields in the request. - - An HTTP/1.1 caching proxy, upon receiving a conditional request that - includes both a Last-Modified date and one or more entity tags as - cache validators, MUST NOT return a locally cached response to the - client unless that cached response is consistent with all of the - conditional header fields in the request. - - Note: The general principle behind these rules is that HTTP/1.1 - servers and clients should transmit as much non-redundant - information as is available in their responses and requests. - HTTP/1.1 systems receiving this information will make the most - conservative assumptions about the validators they receive. - - HTTP/1.0 clients and caches will ignore entity tags. Generally, - last-modified values received or used by these systems will - support transparent and efficient caching, and so HTTP/1.1 origin - servers should provide Last-Modified values. In those rare cases - where the use of a Last-Modified value as a validator by an - HTTP/1.0 system could result in a serious problem, then HTTP/1.1 - origin servers should not provide one. 
- -13.3.5 Non-validating Conditionals - - The principle behind entity tags is that only the service author - knows the semantics of a resource well enough to select an - appropriate cache validation mechanism, and the specification of any - validator comparison function more complex than byte-equality would - open up a can of worms. Thus, comparisons of any other headers - (except Last-Modified, for compatibility with HTTP/1.0) are never - used for purposes of validating a cache entry. - - - - -Fielding, et al. Standards Track [Page 90] - -RFC 2616 HTTP/1.1 June 1999 - - -13.4 Response Cacheability - - Unless specifically constrained by a cache-control (section 14.9) - directive, a caching system MAY always store a successful response - (see section 13.8) as a cache entry, MAY return it without validation - if it is fresh, and MAY return it after successful validation. If - there is neither a cache validator nor an explicit expiration time - associated with a response, we do not expect it to be cached, but - certain caches MAY violate this expectation (for example, when little - or no network connectivity is available). A client can usually detect - that such a response was taken from a cache by comparing the Date - header to the current time. - - Note: some HTTP/1.0 caches are known to violate this expectation - without providing any Warning. - - However, in some cases it might be inappropriate for a cache to - retain an entity, or to return it in response to a subsequent - request. This might be because absolute semantic transparency is - deemed necessary by the service author, or because of security or - privacy considerations. Certain cache-control directives are - therefore provided so that the server can indicate that certain - resource entities, or portions thereof, are not to be cached - regardless of other considerations. 
- - Note that section 14.8 normally prevents a shared cache from saving - and returning a response to a previous request if that request - included an Authorization header. - - A response received with a status code of 200, 203, 206, 300, 301 or - 410 MAY be stored by a cache and used in reply to a subsequent - request, subject to the expiration mechanism, unless a cache-control - directive prohibits caching. However, a cache that does not support - the Range and Content-Range headers MUST NOT cache 206 (Partial - Content) responses. - - A response received with any other status code (e.g. status codes 302 - and 307) MUST NOT be returned in a reply to a subsequent request - unless there are cache-control directives or another header(s) that - explicitly allow it. For example, these include the following: an - Expires header (section 14.21); a "max-age", "s-maxage", "must- - revalidate", "proxy-revalidate", "public" or "private" cache-control - directive (section 14.9). - - - - - - - - -Fielding, et al. Standards Track [Page 91] - -RFC 2616 HTTP/1.1 June 1999 - - -13.5 Constructing Responses From Caches - - The purpose of an HTTP cache is to store information received in - response to requests for use in responding to future requests. In - many cases, a cache simply returns the appropriate parts of a - response to the requester. However, if the cache holds a cache entry - based on a previous response, it might have to combine parts of a new - response with what is held in the cache entry. - -13.5.1 End-to-end and Hop-by-hop Headers - - For the purpose of defining the behavior of caches and non-caching - proxies, we divide HTTP headers into two categories: - - - End-to-end headers, which are transmitted to the ultimate - recipient of a request or response. End-to-end headers in - responses MUST be stored as part of a cache entry and MUST be - transmitted in any response formed from a cache entry. 
- - - Hop-by-hop headers, which are meaningful only for a single - transport-level connection, and are not stored by caches or - forwarded by proxies. - - The following HTTP/1.1 headers are hop-by-hop headers: - - - Connection - - Keep-Alive - - Proxy-Authenticate - - Proxy-Authorization - - TE - - Trailers [[should be "Trailer"]] - - Transfer-Encoding - - Upgrade - - All other headers defined by HTTP/1.1 are end-to-end headers. - - Other hop-by-hop headers MUST be listed in a Connection header, - (section 14.10) to be introduced into HTTP/1.1 (or later). - -13.5.2 Non-modifiable Headers - - Some features of the HTTP/1.1 protocol, such as Digest - Authentication, depend on the value of certain end-to-end headers. A - transparent proxy SHOULD NOT modify an end-to-end header unless the - definition of that header requires or specifically allows that. - - - - - - -Fielding, et al. Standards Track [Page 92] - -RFC 2616 HTTP/1.1 June 1999 - - - A transparent proxy MUST NOT modify any of the following fields in a - request or response, and it MUST NOT add any of these fields if not - already present: - - - Content-Location - - - Content-MD5 - - - ETag - - - Last-Modified - - A transparent proxy MUST NOT modify any of the following fields in a - response: - - - Expires - - but it MAY add any of these fields if not already present. If an - Expires header is added, it MUST be given a field-value identical to - that of the Date header in that response. - - A proxy MUST NOT modify or add any of the following fields in a - message that contains the no-transform cache-control directive, or in - any request: - - - Content-Encoding - - - Content-Range - - - Content-Type - - A non-transparent proxy MAY modify or add these fields to a message - that does not include no-transform, but if it does so, it MUST add a - Warning 214 (Transformation applied) if one does not already appear - in the message (see section 14.46). 
- - Warning: unnecessary modification of end-to-end headers might - cause authentication failures if stronger authentication - mechanisms are introduced in later versions of HTTP. Such - authentication mechanisms MAY rely on the values of header fields - not listed here. - - The Content-Length field of a request or response is added or deleted - according to the rules in section 4.4. A transparent proxy MUST - preserve the entity-length (section 7.2.2) of the entity-body, - although it MAY change the transfer-length (section 4.4). - - - - - -Fielding, et al. Standards Track [Page 93] - -RFC 2616 HTTP/1.1 June 1999 - - -13.5.3 Combining Headers - - When a cache makes a validating request to a server, and the server - provides a 304 (Not Modified) response or a 206 (Partial Content) - response, the cache then constructs a response to send to the - requesting client. - - If the status code is 304 (Not Modified), the cache uses the entity- - body stored in the cache entry as the entity-body of this outgoing - response. If the status code is 206 (Partial Content) and the ETag or - Last-Modified headers match exactly, the cache MAY combine the - contents stored in the cache entry with the new contents received in - the response and use the result as the entity-body of this outgoing - response, (see 13.5.4). - - The end-to-end headers stored in the cache entry are used for the - constructed response, except that - - - any stored Warning headers with warn-code 1xx (see section - 14.46) MUST be deleted from the cache entry and the forwarded - response. - - - any stored Warning headers with warn-code 2xx MUST be retained - in the cache entry and the forwarded response. - - - any end-to-end headers provided in the 304 or 206 response MUST - replace the corresponding headers from the cache entry. 
- - Unless the cache decides to remove the cache entry, it MUST also - replace the end-to-end headers stored with the cache entry with - corresponding headers received in the incoming response, except for - Warning headers as described immediately above. If a header field- - name in the incoming response matches more than one header in the - cache entry, all such old headers MUST be replaced. - - In other words, the set of end-to-end headers received in the - incoming response overrides all corresponding end-to-end headers - stored with the cache entry (except for stored Warning headers with - warn-code 1xx, which are deleted even if not overridden). - - Note: this rule allows an origin server to use a 304 (Not - Modified) or a 206 (Partial Content) response to update any header - associated with a previous response for the same entity or sub- - ranges thereof, although it might not always be meaningful or - correct to do so. This rule does not allow an origin server to use - a 304 (Not Modified) or a 206 (Partial Content) response to - entirely delete a header that it had provided with a previous - response. - - - -Fielding, et al. Standards Track [Page 94] - -RFC 2616 HTTP/1.1 June 1999 - - -13.5.4 Combining Byte Ranges - - A response might transfer only a subrange of the bytes of an entity- - body, either because the request included one or more Range - specifications, or because a connection was broken prematurely. After - several such transfers, a cache might have received several ranges of - the same entity-body. - - If a cache has a stored non-empty set of subranges for an entity, and - an incoming response transfers another subrange, the cache MAY - combine the new subrange with the existing set if both the following - conditions are met: - - - Both the incoming response and the cache entry have a cache - validator. - - - The two cache validators match using the strong comparison - function (see section 13.3.3). 
- - If either requirement is not met, the cache MUST use only the most - recent partial response (based on the Date values transmitted with - every response, and using the incoming response if these values are - equal or missing), and MUST discard the other partial information. - -13.6 Caching Negotiated Responses - - Use of server-driven content negotiation (section 12.1), as indicated - by the presence of a Vary header field in a response, alters the - conditions and procedure by which a cache can use the response for - subsequent requests. See section 14.44 for use of the Vary header - field by servers. - - A server SHOULD use the Vary header field to inform a cache of what - request-header fields were used to select among multiple - representations of a cacheable response subject to server-driven - negotiation. The set of header fields named by the Vary field value - is known as the "selecting" request-headers. - - When the cache receives a subsequent request whose Request-URI - specifies one or more cache entries including a Vary header field, - the cache MUST NOT use such a cache entry to construct a response to - the new request unless all of the selecting request-headers present - in the new request match the corresponding stored request-headers in - the original request. - - The selecting request-headers from two requests are defined to match - if and only if the selecting request-headers in the first request can - be transformed to the selecting request-headers in the second request - - - -Fielding, et al. Standards Track [Page 95] - -RFC 2616 HTTP/1.1 June 1999 - - - by adding or removing linear white space (LWS) at places where this - is allowed by the corresponding BNF, and/or combining multiple - message-header fields with the same field name following the rules - about message headers in section 4.2. 
- - A Vary header field-value of "*" always fails to match and subsequent - requests on that resource can only be properly interpreted by the - origin server. - - If the selecting request header fields for the cached entry do not - match the selecting request header fields of the new request, then - the cache MUST NOT use a cached entry to satisfy the request unless - it first relays the new request to the origin server in a conditional - request and the server responds with 304 (Not Modified), including an - entity tag or Content-Location that indicates the entity to be used. - - If an entity tag was assigned to a cached representation, the - forwarded request SHOULD be conditional and include the entity tags - in an If-None-Match header field from all its cache entries for the - resource. This conveys to the server the set of entities currently - held by the cache, so that if any one of these entities matches the - requested entity, the server can use the ETag header field in its 304 - (Not Modified) response to tell the cache which entry is appropriate. - If the entity-tag of the new response matches that of an existing - entry, the new response SHOULD be used to update the header fields of - the existing entry, and the result MUST be returned to the client. - - If any of the existing cache entries contains only partial content - for the associated entity, its entity-tag SHOULD NOT be included in - the If-None-Match header field unless the request is for a range that - would be fully satisfied by that entry. - - If a cache receives a successful response whose Content-Location - field matches that of an existing cache entry for the same Request- - URI, whose entity-tag differs from that of the existing entry, and - whose Date is more recent than that of the existing entry, the - existing entry SHOULD NOT be returned in response to future requests - and SHOULD be deleted from the cache. 
- -13.7 Shared and Non-Shared Caches - - For reasons of security and privacy, it is necessary to make a - distinction between "shared" and "non-shared" caches. A non-shared - cache is one that is accessible only to a single user. Accessibility - in this case SHOULD be enforced by appropriate security mechanisms. - All other caches are considered to be "shared." Other sections of - - - - - -Fielding, et al. Standards Track [Page 96] - -RFC 2616 HTTP/1.1 June 1999 - - - this specification place certain constraints on the operation of - shared caches in order to prevent loss of privacy or failure of - access controls. - -13.8 Errors or Incomplete Response Cache Behavior - - A cache that receives an incomplete response (for example, with fewer - bytes of data than specified in a Content-Length header) MAY store - the response. However, the cache MUST treat this as a partial - response. Partial responses MAY be combined as described in section - 13.5.4; the result might be a full response or might still be - partial. A cache MUST NOT return a partial response to a client - without explicitly marking it as such, using the 206 (Partial - Content) status code. A cache MUST NOT return a partial response - using a status code of 200 (OK). - - If a cache receives a 5xx response while attempting to revalidate an - entry, it MAY either forward this response to the requesting client, - or act as if the server failed to respond. In the latter case, it MAY - return a previously received response unless the cached entry - includes the "must-revalidate" cache-control directive (see section - 14.9). - -13.9 Side Effects of GET and HEAD - - Unless the origin server explicitly prohibits the caching of their - responses, the application of GET and HEAD methods to any resources - SHOULD NOT have side effects that would lead to erroneous behavior if - these responses are taken from a cache. 
They MAY still have side - effects, but a cache is not required to consider such side effects in - its caching decisions. Caches are always expected to observe an - origin server's explicit restrictions on caching. - - We note one exception to this rule: since some applications have - traditionally used GETs and HEADs with query URLs (those containing a - "?" in the rel_path part) to perform operations with significant side - effects, caches MUST NOT treat responses to such URIs as fresh unless - the server provides an explicit expiration time. This specifically - means that responses from HTTP/1.0 servers for such URIs SHOULD NOT - be taken from a cache. See section 9.1.1 for related information. - -13.10 Invalidation After Updates or Deletions - - The effect of certain methods performed on a resource at the origin - server might cause one or more existing cache entries to become non- - transparently invalid. That is, although they might continue to be - "fresh," they do not accurately reflect what the origin server would - return for a new request on that resource. - - - -Fielding, et al. Standards Track [Page 97] - -RFC 2616 HTTP/1.1 June 1999 - - - There is no way for the HTTP protocol to guarantee that all such - cache entries are marked invalid. For example, the request that - caused the change at the origin server might not have gone through - the proxy where a cache entry is stored. However, several rules help - reduce the likelihood of erroneous behavior. - - In this section, the phrase "invalidate an entity" means that the - cache will either remove all instances of that entity from its - storage, or will mark these as "invalid" and in need of a mandatory - revalidation before they can be returned in response to a subsequent - request. - - Some HTTP methods MUST cause a cache to invalidate an entity. This is - either the entity referred to by the Request-URI, or by the Location - or Content-Location headers (if present). 
These methods are: - - - PUT - - - DELETE - - - POST - - In order to prevent denial of service attacks, an invalidation based - on the URI in a Location or Content-Location header MUST only be - performed if the host part is the same as in the Request-URI. - -[[ Should be: ]] -[[ An invalidation based on the URI in a Location or Content-Location ]] -[[ header MUST NOT be performed if the host part of that URI differs ]] -[[ from the host part in the Request-URI. This helps prevent denial of ]] -[[ service attacks. ]] - - A cache that passes through requests for methods it does not - understand SHOULD invalidate any entities referred to by the - Request-URI. - -13.11 Write-Through Mandatory - - All methods that might be expected to cause modifications to the - origin server's resources MUST be written through to the origin - server. This currently includes all methods except for GET and HEAD. - A cache MUST NOT reply to such a request from a client before having - transmitted the request to the inbound server, and having received a - corresponding response from the inbound server. This does not prevent - a proxy cache from sending a 100 (Continue) response before the - inbound server has sent its final reply. - - The alternative (known as "write-back" or "copy-back" caching) is not - allowed in HTTP/1.1, due to the difficulty of providing consistent - updates and the problems arising from server, cache, or network - failure prior to write-back. - - - - - - -Fielding, et al. Standards Track [Page 98] - -RFC 2616 HTTP/1.1 June 1999 - - -13.12 Cache Replacement - - If a new cacheable (see sections 14.9.2, 13.2.5, 13.2.6 and 13.8) - response is received from a resource while any existing responses for - the same resource are cached, the cache SHOULD use the new response - to reply to the current request. 
 It MAY insert it into cache storage - and MAY, if it meets all other requirements, use it to respond to any - future requests that would previously have caused the old response to - be returned. If it inserts the new response into cache storage the - rules in section 13.5.3 apply. - - Note: a new response that has an older Date header value than - existing cached responses is not cacheable. - -13.13 History Lists - - User agents often have history mechanisms, such as "Back" buttons and - history lists, which can be used to redisplay an entity retrieved - earlier in a session. - - History mechanisms and caches are different. In particular history - mechanisms SHOULD NOT try to show a semantically transparent view of - the current state of a resource. Rather, a history mechanism is meant - to show exactly what the user saw at the time when the resource was - retrieved. - - By default, an expiration time does not apply to history mechanisms. - If the entity is still in storage, a history mechanism SHOULD display - it even if the entity has expired, unless the user has specifically - configured the agent to refresh expired history documents. - - This is not to be construed to prohibit the history mechanism from - telling the user that a view might be stale. - - Note: if history list mechanisms unnecessarily prevent users from - viewing stale resources, this will tend to force service authors - to avoid using HTTP expiration controls and cache controls when - they would otherwise like to. Service authors may consider it - important that users not be presented with error messages or - warning messages when they use navigation controls (such as BACK) - to view previously fetched resources. Even though sometimes such - resources ought not to be cached, or ought to expire quickly, user - interface considerations may force service authors to resort to - other means of preventing caching (e.g. 
"once-only" URLs) in order - not to suffer the effects of improperly functioning history - mechanisms. - - - - - -Fielding, et al. Standards Track [Page 99] - -RFC 2616 HTTP/1.1 June 1999 - - -14 Header Field Definitions - - This section defines the syntax and semantics of all standard - HTTP/1.1 header fields. For entity-header fields, both sender and - recipient refer to either the client or the server, depending on who - sends and who receives the entity. - -14.1 Accept - - The Accept request-header field can be used to specify certain media - types which are acceptable for the response. Accept headers can be - used to indicate that the request is specifically limited to a small - set of desired types, as in the case of a request for an in-line - image. - - Accept = "Accept" ":" - #( media-range [ accept-params ] ) - - media-range = ( "*/*" - | ( type "/" "*" ) - | ( type "/" subtype ) - ) *( ";" parameter ) - accept-params = ";" "q" "=" qvalue *( accept-extension ) - accept-extension = ";" token [ "=" ( token | quoted-string ) ] - - The asterisk "*" character is used to group media types into ranges, - with "*/*" indicating all media types and "type/*" indicating all - subtypes of that type. The media-range MAY include media type - parameters that are applicable to that range. - - Each media-range MAY be followed by one or more accept-params, - beginning with the "q" parameter for indicating a relative quality - factor. The first "q" parameter (if any) separates the media-range - parameter(s) from the accept-params. Quality factors allow the user - or user agent to indicate the relative degree of preference for that - media-range, using the qvalue scale from 0 to 1 (section 3.9). The - default value is q=1. - - Note: Use of the "q" parameter name to separate media type - parameters from Accept extension parameters is due to historical - practice. 
Although this prevents any media type parameter named - "q" from being used with a media range, such an event is believed - to be unlikely given the lack of any "q" parameters in the IANA - media type registry and the rare usage of any media type - parameters in Accept. Future media types are discouraged from - registering any parameter named "q". - - - - - -Fielding, et al. Standards Track [Page 100] - -RFC 2616 HTTP/1.1 June 1999 - - - The example - - Accept: audio/*; q=0.2, audio/basic - - SHOULD be interpreted as "I prefer audio/basic, but send me any audio - type if it is the best available after an 80% mark-down in quality." - - If no Accept header field is present, then it is assumed that the - client accepts all media types. If an Accept header field is present, - and if the server cannot send a response which is acceptable - according to the combined Accept field value, then the server SHOULD - send a 406 (not acceptable) response. - - A more elaborate example is - - Accept: text/plain; q=0.5, text/html, - text/x-dvi; q=0.8, text/x-c - - Verbally, this would be interpreted as "text/html and text/x-c are - the preferred media types, but if they do not exist, then send the - text/x-dvi entity, and if that does not exist, send the text/plain - entity." - - Media ranges can be overridden by more specific media ranges or - specific media types. If more than one media range applies to a given - type, the most specific reference has precedence. For example, - - Accept: text/*, text/html, text/html;level=1, */* - - have the following precedence: - - 1) text/html;level=1 - 2) text/html - 3) text/* - 4) */* - - The media type quality factor associated with a given type is - determined by finding the media range with the highest precedence - which matches that type. 
For example, - - Accept: text/*;q=0.3, text/html;q=0.7, text/html;level=1, - text/html;level=2;q=0.4, */*;q=0.5 - - would cause the following values to be associated: - - text/html;level=1 = 1 - text/html = 0.7 - text/plain = 0.3 - - - -Fielding, et al. Standards Track [Page 101] - -RFC 2616 HTTP/1.1 June 1999 - - - image/jpeg = 0.5 - text/html;level=2 = 0.4 - text/html;level=3 = 0.7 - - Note: A user agent might be provided with a default set of quality - values for certain media ranges. However, unless the user agent is - a closed system which cannot interact with other rendering agents, - this default set ought to be configurable by the user. - -14.2 Accept-Charset - - The Accept-Charset request-header field can be used to indicate what - character sets are acceptable for the response. This field allows - clients capable of understanding more comprehensive or special- - purpose character sets to signal that capability to a server which is - capable of representing documents in those character sets. - - Accept-Charset = "Accept-Charset" ":" - 1#( ( charset | "*" )[ ";" "q" "=" qvalue ] ) - - - Character set values are described in section 3.4. Each charset MAY - be given an associated quality value which represents the user's - preference for that charset. The default value is q=1. An example is - - Accept-Charset: iso-8859-5, unicode-1-1;q=0.8 - - The special value "*", if present in the Accept-Charset field, - matches every character set (including ISO-8859-1) which is not - mentioned elsewhere in the Accept-Charset field. If no "*" is present - in an Accept-Charset field, then all character sets not explicitly - mentioned get a quality value of 0, except for ISO-8859-1, which gets - a quality value of 1 if not explicitly mentioned. - - If no Accept-Charset header is present, the default is that any - character set is acceptable. 
If an Accept-Charset header is present, - and if the server cannot send a response which is acceptable - according to the Accept-Charset header, then the server SHOULD send - an error response with the 406 (not acceptable) status code, though - the sending of an unacceptable response is also allowed. - -14.3 Accept-Encoding - - The Accept-Encoding request-header field is similar to Accept, but - restricts the content-codings (section 3.5) that are acceptable in - the response. - - Accept-Encoding = "Accept-Encoding" ":" - - - -Fielding, et al. Standards Track [Page 102] - -RFC 2616 HTTP/1.1 June 1999 - - - 1#( codings [ ";" "q" "=" qvalue ] ) - codings = ( content-coding | "*" ) - - [[ http://lists.w3.org/Archives/Public/ietf-http-wg/2005AprJun/0029.html ]] - [[ points out that the "1#" must be "#" to make the examples below and ]] - [[ the text of rule 4 correct. ]] - - Examples of its use are: - - Accept-Encoding: compress, gzip - Accept-Encoding: - Accept-Encoding: * - Accept-Encoding: compress;q=0.5, gzip;q=1.0 - Accept-Encoding: gzip;q=1.0, identity; q=0.5, *;q=0 - - A server tests whether a content-coding is acceptable, according to - an Accept-Encoding field, using these rules: - - 1. If the content-coding is one of the content-codings listed in - the Accept-Encoding field, then it is acceptable, unless it is - accompanied by a qvalue of 0. (As defined in section 3.9, a - qvalue of 0 means "not acceptable.") - - 2. The special "*" symbol in an Accept-Encoding field matches any - available content-coding not explicitly listed in the header - field. - - 3. If multiple content-codings are acceptable, then the acceptable - content-coding with the highest non-zero qvalue is preferred. - - 4. The "identity" content-coding is always acceptable, unless - specifically refused because the Accept-Encoding field includes - "identity;q=0", or because the field includes "*;q=0" and does - not explicitly include the "identity" content-coding. 
If the - Accept-Encoding field-value is empty, then only the "identity" - encoding is acceptable. - - If an Accept-Encoding field is present in a request, and if the - server cannot send a response which is acceptable according to the - Accept-Encoding header, then the server SHOULD send an error response - with the 406 (Not Acceptable) status code. - - If no Accept-Encoding field is present in a request, the server MAY - assume that the client will accept any content coding. In this case, - if "identity" is one of the available content-codings, then the - server SHOULD use the "identity" content-coding, unless it has - additional information that a different content-coding is meaningful - to the client. - - Note: If the request does not include an Accept-Encoding field, - and if the "identity" content-coding is unavailable, then - content-codings commonly understood by HTTP/1.0 clients (i.e., - - - -Fielding, et al. Standards Track [Page 103] - -RFC 2616 HTTP/1.1 June 1999 - - - "gzip" and "compress") are preferred; some older clients - improperly display messages sent with other content-codings. The - server might also make this decision based on information about - the particular user-agent or client. - - Note: Most HTTP/1.0 applications do not recognize or obey qvalues - associated with content-codings. This means that qvalues will not - work and are not permitted with x-gzip or x-compress. - -14.4 Accept-Language - - The Accept-Language request-header field is similar to Accept, but - restricts the set of natural languages that are preferred as a - response to the request. Language tags are defined in section 3.10. - - Accept-Language = "Accept-Language" ":" - 1#( language-range [ ";" "q" "=" qvalue ] ) - language-range = ( ( 1*8ALPHA *( "-" 1*8ALPHA ) ) | "*" ) - - Each language-range MAY be given an associated quality value which - represents an estimate of the user's preference for the languages - specified by that range. 
The quality value defaults to "q=1". For - example, - - Accept-Language: da, en-gb;q=0.8, en;q=0.7 - - would mean: "I prefer Danish, but will accept British English and - other types of English." A language-range matches a language-tag if - it exactly equals the tag, or if it exactly equals a prefix of the - tag such that the first tag character following the prefix is "-". - The special range "*", if present in the Accept-Language field, - matches every tag not matched by any other range present in the - Accept-Language field. - - Note: This use of a prefix matching rule does not imply that - language tags are assigned to languages in such a way that it is - always true that if a user understands a language with a certain - tag, then this user will also understand all languages with tags - for which this tag is a prefix. The prefix rule simply allows the - use of prefix tags if this is the case. - - The language quality factor assigned to a language-tag by the - Accept-Language field is the quality value of the longest language- - range in the field that matches the language-tag. If no language- - range in the field matches the tag, the language quality factor - assigned is 0. If no Accept-Language header is present in the - request, the server - - - - -Fielding, et al. Standards Track [Page 104] - -RFC 2616 HTTP/1.1 June 1999 - - - SHOULD assume that all languages are equally acceptable. If an - Accept-Language header is present, then all languages which are - assigned a quality factor greater than 0 are acceptable. - - It might be contrary to the privacy expectations of the user to send - an Accept-Language header with the complete linguistic preferences of - the user in every request. For a discussion of this issue, see - section 15.1.4. - - As intelligibility is highly dependent on the individual user, it is - recommended that client applications make the choice of linguistic - preference available to the user. 
If the choice is not made - available, then the Accept-Language header field MUST NOT be given in - the request. - - Note: When making the choice of linguistic preference available to - the user, we remind implementors of the fact that users are not - familiar with the details of language matching as described above, - and should provide appropriate guidance. As an example, users - might assume that on selecting "en-gb", they will be served any - kind of English document if British English is not available. A - user agent might suggest in such a case to add "en" to get the - best matching behavior. - -14.5 Accept-Ranges - - The Accept-Ranges response-header field allows the server to - indicate its acceptance of range requests for a resource: - - Accept-Ranges = "Accept-Ranges" ":" acceptable-ranges - acceptable-ranges = 1#range-unit | "none" - - Origin servers that accept byte-range requests MAY send - - Accept-Ranges: bytes - - but are not required to do so. Clients MAY generate byte-range - requests without having received this header for the resource - involved. Range units are defined in section 3.12. - - Servers that do not accept any kind of range request for a - resource MAY send - - Accept-Ranges: none - - to advise the client not to attempt a range request. - - - - - -Fielding, et al. Standards Track [Page 105] - -RFC 2616 HTTP/1.1 June 1999 - - -14.6 Age - - The Age response-header field conveys the sender's estimate of the - amount of time since the response (or its revalidation) was - generated at the origin server. A cached response is "fresh" if - its age does not exceed its freshness lifetime. Age values are - calculated as specified in section 13.2.3. - - Age = "Age" ":" age-value - age-value = delta-seconds - - Age values are non-negative decimal integers, representing time in - seconds. 
- - If a cache receives a value larger than the largest positive - integer it can represent, or if any of its age calculations - overflows, it MUST transmit an Age header with a value of - 2147483648 (2^31). An HTTP/1.1 server that includes a cache MUST - include an Age header field in every response generated from its - own cache. Caches SHOULD use an arithmetic type of at least 31 - bits of range. - -14.7 Allow - - The Allow entity-header field lists the set of methods supported - by the resource identified by the Request-URI. The purpose of this - field is strictly to inform the recipient of valid methods - associated with the resource. An Allow header field MUST be - present in a 405 (Method Not Allowed) response. - - Allow = "Allow" ":" #Method - - Example of use: - - Allow: GET, HEAD, PUT - - This field cannot prevent a client from trying other methods. - However, the indications given by the Allow header field value - SHOULD be followed. The actual set of allowed methods is defined - by the origin server at the time of each request. - - The Allow header field MAY be provided with a PUT request to - recommend the methods to be supported by the new or modified - resource. The server is not required to support these methods and - SHOULD include an Allow header in the response giving the actual - supported methods. - - - - - -Fielding, et al. Standards Track [Page 106] - -RFC 2616 HTTP/1.1 June 1999 - - - A proxy MUST NOT modify the Allow header field even if it does not - understand all the methods specified, since the user agent might - have other means of communicating with the origin server. - -14.8 Authorization - - A user agent that wishes to authenticate itself with a server-- - usually, but not necessarily, after receiving a 401 response--does - so by including an Authorization request-header field with the - request. 
The Authorization field value consists of credentials - containing the authentication information of the user agent for - the realm of the resource being requested. - - Authorization = "Authorization" ":" credentials - - HTTP access authentication is described in "HTTP Authentication: - Basic and Digest Access Authentication" [43]. If a request is - authenticated and a realm specified, the same credentials SHOULD - be valid for all other requests within this realm (assuming that - the authentication scheme itself does not require otherwise, such - as credentials that vary according to a challenge value or using - synchronized clocks). - - When a shared cache (see section 13.7) receives a request - containing an Authorization field, it MUST NOT return the - corresponding response as a reply to any other request, unless one - of the following specific exceptions holds: - - 1. If the response includes the "s-maxage" cache-control - directive, the cache MAY use that response in replying to a - subsequent request. But (if the specified maximum age has - passed) a proxy cache MUST first revalidate it with the origin - server, using the request-headers from the new request to allow - the origin server to authenticate the new request. (This is the - defined behavior for s-maxage.) If the response includes "s- - maxage=0", the proxy MUST always revalidate it before re-using - it. - - 2. If the response includes the "must-revalidate" cache-control - directive, the cache MAY use that response in replying to a - subsequent request. But if the response is stale, all caches - MUST first revalidate it with the origin server, using the - request-headers from the new request to allow the origin server - to authenticate the new request. - - 3. If the response includes the "public" cache-control directive, - it MAY be returned in reply to any subsequent request. - - - - -Fielding, et al. 
Standards Track [Page 107] - -RFC 2616 HTTP/1.1 June 1999 - - -14.9 Cache-Control - - The Cache-Control general-header field is used to specify directives - that MUST be obeyed by all caching mechanisms along the - request/response chain. The directives specify behavior intended to - prevent caches from adversely interfering with the request or - response. These directives typically override the default caching - algorithms. Cache directives are unidirectional in that the presence - of a directive in a request does not imply that the same directive is - to be given in the response. - - Note that HTTP/1.0 caches might not implement Cache-Control and - might only implement Pragma: no-cache (see section 14.32). - - Cache directives MUST be passed through by a proxy or gateway - application, regardless of their significance to that application, - since the directives might be applicable to all recipients along the - request/response chain. It is not possible to specify a cache- - directive for a specific cache. 
- - Cache-Control = "Cache-Control" ":" 1#cache-directive - - cache-directive = cache-request-directive - | cache-response-directive - - cache-request-directive = - "no-cache" ; Section 14.9.1 - | "no-store" ; Section 14.9.2 - | "max-age" "=" delta-seconds ; Section 14.9.3, 14.9.4 - | "max-stale" [ "=" delta-seconds ] ; Section 14.9.3 - | "min-fresh" "=" delta-seconds ; Section 14.9.3 - | "no-transform" ; Section 14.9.5 - | "only-if-cached" ; Section 14.9.4 - | cache-extension ; Section 14.9.6 - - cache-response-directive = - "public" ; Section 14.9.1 - | "private" [ "=" <"> 1#field-name <"> ] ; Section 14.9.1 - | "no-cache" [ "=" <"> 1#field-name <"> ]; Section 14.9.1 - | "no-store" ; Section 14.9.2 - | "no-transform" ; Section 14.9.5 - | "must-revalidate" ; Section 14.9.4 - | "proxy-revalidate" ; Section 14.9.4 - | "max-age" "=" delta-seconds ; Section 14.9.3 - | "s-maxage" "=" delta-seconds ; Section 14.9.3 - | cache-extension ; Section 14.9.6 - - cache-extension = token [ "=" ( token | quoted-string ) ] - - - -Fielding, et al. Standards Track [Page 108] - -RFC 2616 HTTP/1.1 June 1999 - - - When a directive appears without any 1#field-name parameter, the - directive applies to the entire request or response. When such a - directive appears with a 1#field-name parameter, it applies only to - the named field or fields, and not to the rest of the request or - response. This mechanism supports extensibility; implementations of - future versions of the HTTP protocol might apply these directives to - header fields not defined in HTTP/1.1. - - The cache-control directives can be broken down into these general - categories: - - - Restrictions on what are cacheable; these may only be imposed by - the origin server. - - - Restrictions on what may be stored by a cache; these may be - imposed by either the origin server or the user agent. - - - Modifications of the basic expiration mechanism; these may be - imposed by either the origin server or the user agent. 
- - - Controls over cache revalidation and reload; these may only be - imposed by a user agent. - - - Control over transformation of entities. - - - Extensions to the caching system. - -14.9.1 What is Cacheable - - By default, a response is cacheable if the requirements of the - request method, request header fields, and the response status - indicate that it is cacheable. Section 13.4 summarizes these defaults - for cacheability. The following Cache-Control response directives - allow an origin server to override the default cacheability of a - response: - - public - Indicates that the response MAY be cached by any cache, even if it - would normally be non-cacheable or cacheable only within a non- - shared cache. (See also Authorization, section 14.8, for - additional details.) - - private - Indicates that all or part of the response message is intended for - a single user and MUST NOT be cached by a shared cache. This - allows an origin server to state that the specified parts of the - - - - - -Fielding, et al. Standards Track [Page 109] - -RFC 2616 HTTP/1.1 June 1999 - - - response are intended for only one user and are not a valid - response for requests by other users. A private (non-shared) cache - MAY cache the response. - - Note: This usage of the word private only controls where the - response may be cached, and cannot ensure the privacy of the - message content. - - no-cache - If the no-cache directive does not specify a field-name, then a - cache MUST NOT use the response to satisfy a subsequent request - without successful revalidation with the origin server. This - allows an origin server to prevent caching even by caches that - have been configured to return stale responses to client requests. - - If the no-cache directive does specify one or more field-names, - then a cache MAY use the response to satisfy a subsequent request, - subject to any other restrictions on caching. 
However, the - specified field-name(s) MUST NOT be sent in the response to a - subsequent request without successful revalidation with the origin - server. This allows an origin server to prevent the re-use of - certain header fields in a response, while still allowing caching - of the rest of the response. - - Note: Most HTTP/1.0 caches will not recognize or obey this - directive. - -14.9.2 What May be Stored by Caches - - no-store - The purpose of the no-store directive is to prevent the - inadvertent release or retention of sensitive information (for - example, on backup tapes). The no-store directive applies to the - entire message, and MAY be sent either in a response or in a - request. If sent in a request, a cache MUST NOT store any part of - either this request or any response to it. If sent in a response, - a cache MUST NOT store any part of either this response or the - request that elicited it. This directive applies to both non- - shared and shared caches. "MUST NOT store" in this context means - that the cache MUST NOT intentionally store the information in - non-volatile storage, and MUST make a best-effort attempt to - remove the information from volatile storage as promptly as - possible after forwarding it. - - Even when this directive is associated with a response, users - might explicitly store such a response outside of the caching - system (e.g., with a "Save As" dialog). History buffers MAY store - such responses as part of their normal operation. - - - -Fielding, et al. Standards Track [Page 110] - -RFC 2616 HTTP/1.1 June 1999 - - - The purpose of this directive is to meet the stated requirements - of certain users and service authors who are concerned about - accidental releases of information via unanticipated accesses to - cache data structures. While the use of this directive might - improve privacy in some cases, we caution that it is NOT in any - way a reliable or sufficient mechanism for ensuring privacy. 
In - particular, malicious or compromised caches might not recognize or - obey this directive, and communications networks might be - vulnerable to eavesdropping. - -14.9.3 Modifications of the Basic Expiration Mechanism - - The expiration time of an entity MAY be specified by the origin - server using the Expires header (see section 14.21). Alternatively, - it MAY be specified using the max-age directive in a response. When - the max-age cache-control directive is present in a cached response, - the response is stale if its current age is greater than the age - value given (in seconds) at the time of a new request for that - resource. The max-age directive on a response implies that the - response is cacheable (i.e., "public") unless some other, more - restrictive cache directive is also present. - - If a response includes both an Expires header and a max-age - directive, the max-age directive overrides the Expires header, even - if the Expires header is more restrictive. This rule allows an origin - server to provide, for a given response, a longer expiration time to - an HTTP/1.1 (or later) cache than to an HTTP/1.0 cache. This might be - useful if certain HTTP/1.0 caches improperly calculate ages or - expiration times, perhaps due to desynchronized clocks. - - Many HTTP/1.0 cache implementations will treat an Expires value that - is less than or equal to the response Date value as being equivalent - to the Cache-Control response directive "no-cache". If an HTTP/1.1 - cache receives such a response, and the response does not include a - Cache-Control header field, it SHOULD consider the response to be - non-cacheable in order to retain compatibility with HTTP/1.0 servers. - - Note: An origin server might wish to use a relatively new HTTP - cache control feature, such as the "private" directive, on a - network including older caches that do not understand that - feature. 
The origin server will need to combine the new feature - with an Expires field whose value is less than or equal to the - Date value. This will prevent older caches from improperly - caching the response. - - - - - - - -Fielding, et al. Standards Track [Page 111] - -RFC 2616 HTTP/1.1 June 1999 - - - s-maxage - If a response includes an s-maxage directive, then for a shared - cache (but not for a private cache), the maximum age specified by - this directive overrides the maximum age specified by either the - max-age directive or the Expires header. The s-maxage directive - also implies the semantics of the proxy-revalidate directive (see - section 14.9.4), i.e., that the shared cache must not use the - entry after it becomes stale to respond to a subsequent request - without first revalidating it with the origin server. The s- - maxage directive is always ignored by a private cache. - - Note that most older caches, not compliant with this specification, - do not implement any cache-control directives. An origin server - wishing to use a cache-control directive that restricts, but does not - prevent, caching by an HTTP/1.1-compliant cache MAY exploit the - requirement that the max-age directive overrides the Expires header, - and the fact that pre-HTTP/1.1-compliant caches do not observe the - max-age directive. - - Other directives allow a user agent to modify the basic expiration - mechanism. These directives MAY be specified on a request: - - max-age - Indicates that the client is willing to accept a response whose - age is no greater than the specified time in seconds. Unless max- - stale directive is also included, the client is not willing to - accept a stale response. - - min-fresh - Indicates that the client is willing to accept a response whose - freshness lifetime is no less than its current age plus the - specified time in seconds. That is, the client wants a response - that will still be fresh for at least the specified number of - seconds. 
- - max-stale - Indicates that the client is willing to accept a response that has - exceeded its expiration time. If max-stale is assigned a value, - then the client is willing to accept a response that has exceeded - its expiration time by no more than the specified number of - seconds. If no value is assigned to max-stale, then the client is - willing to accept a stale response of any age. - - If a cache returns a stale response, either because of a max-stale - directive on a request, or because the cache is configured to - override the expiration time of a response, the cache MUST attach a - Warning header to the stale response, using Warning 110 (Response is - stale). - - - -Fielding, et al. Standards Track [Page 112] - -RFC 2616 HTTP/1.1 June 1999 - - - A cache MAY be configured to return stale responses without - validation, but only if this does not conflict with any "MUST"-level - requirements concerning cache validation (e.g., a "must-revalidate" - cache-control directive). - - If both the new request and the cached entry include "max-age" - directives, then the lesser of the two values is used for determining - the freshness of the cached entry for that request. - -14.9.4 Cache Revalidation and Reload Controls - - Sometimes a user agent might want or need to insist that a cache - revalidate its cache entry with the origin server (and not just with - the next cache along the path to the origin server), or to reload its - cache entry from the origin server. End-to-end revalidation might be - necessary if either the cache or the origin server has overestimated - the expiration time of the cached response. End-to-end reload may be - necessary if the cache entry has become corrupted for some reason. 
- - End-to-end revalidation may be requested either when the client does - not have its own local cached copy, in which case we call it - "unspecified end-to-end revalidation", or when the client does have a - local cached copy, in which case we call it "specific end-to-end - revalidation." - - The client can specify these three kinds of action using Cache- - Control request directives: - - End-to-end reload - The request includes a "no-cache" cache-control directive or, for - compatibility with HTTP/1.0 clients, "Pragma: no-cache". Field - names MUST NOT be included with the no-cache directive in a - request. The server MUST NOT use a cached copy when responding to - such a request. - - Specific end-to-end revalidation - The request includes a "max-age=0" cache-control directive, which - forces each cache along the path to the origin server to - revalidate its own entry, if any, with the next cache or server. - The initial request includes a cache-validating conditional with - the client's current validator. - - Unspecified end-to-end revalidation - The request includes "max-age=0" cache-control directive, which - forces each cache along the path to the origin server to - revalidate its own entry, if any, with the next cache or server. - The initial request does not include a cache-validating - - - - -Fielding, et al. Standards Track [Page 113] - -RFC 2616 HTTP/1.1 June 1999 - - - conditional; the first cache along the path (if any) that holds a - cache entry for this resource includes a cache-validating - conditional with its current validator. - - max-age - When an intermediate cache is forced, by means of a max-age=0 - directive, to revalidate its own cache entry, and the client has - supplied its own validator in the request, the supplied validator - might differ from the validator currently stored with the cache - entry. In this case, the cache MAY use either validator in making - its own request without affecting semantic transparency. 
- - However, the choice of validator might affect performance. The - best approach is for the intermediate cache to use its own - validator when making its request. If the server replies with 304 - (Not Modified), then the cache can return its now validated copy - to the client with a 200 (OK) response. If the server replies with - a new entity and cache validator, however, the intermediate cache - can compare the returned validator with the one provided in the - client's request, using the strong comparison function. If the - client's validator is equal to the origin server's, then the - intermediate cache simply returns 304 (Not Modified). Otherwise, - it returns the new entity with a 200 (OK) response. - - If a request includes the no-cache directive, it SHOULD NOT - include min-fresh, max-stale, or max-age. - - only-if-cached - In some cases, such as times of extremely poor network - connectivity, a client may want a cache to return only those - responses that it currently has stored, and not to reload or - revalidate with the origin server. To do this, the client may - include the only-if-cached directive in a request. If it receives - this directive, a cache SHOULD either respond using a cached entry - that is consistent with the other constraints of the request, or - respond with a 504 (Gateway Timeout) status. However, if a group - of caches is being operated as a unified system with good internal - connectivity, such a request MAY be forwarded within that group of - caches. - - must-revalidate - Because a cache MAY be configured to ignore a server's specified - expiration time, and because a client request MAY include a max- - stale directive (which has a similar effect), the protocol also - includes a mechanism for the origin server to require revalidation - of a cache entry on any subsequent use. 
When the must-revalidate - directive is present in a response received by a cache, that cache - MUST NOT use the entry after it becomes stale to respond to a - - - -Fielding, et al. Standards Track [Page 114] - -RFC 2616 HTTP/1.1 June 1999 - - - subsequent request without first revalidating it with the origin - server. (I.e., the cache MUST do an end-to-end revalidation every - time, if, based solely on the origin server's Expires or max-age - value, the cached response is stale.) - - The must-revalidate directive is necessary to support reliable - operation for certain protocol features. In all circumstances an - HTTP/1.1 cache MUST obey the must-revalidate directive; in - particular, if the cache cannot reach the origin server for any - reason, it MUST generate a 504 (Gateway Timeout) response. - - Servers SHOULD send the must-revalidate directive if and only if - failure to revalidate a request on the entity could result in - incorrect operation, such as a silently unexecuted financial - transaction. Recipients MUST NOT take any automated action that - violates this directive, and MUST NOT automatically provide an - unvalidated copy of the entity if revalidation fails. - - Although this is not recommended, user agents operating under - severe connectivity constraints MAY violate this directive but, if - so, MUST explicitly warn the user that an unvalidated response has - been provided. The warning MUST be provided on each unvalidated - access, and SHOULD require explicit user confirmation. - - proxy-revalidate - The proxy-revalidate directive has the same meaning as the must- - revalidate directive, except that it does not apply to non-shared - user agent caches. 
It can be used on a response to an - authenticated request to permit the user's cache to store and - later return the response without needing to revalidate it (since - it has already been authenticated once by that user), while still - requiring proxies that service many users to revalidate each time - (in order to make sure that each user has been authenticated). - Note that such authenticated responses also need the public cache - control directive in order to allow them to be cached at all. - -14.9.5 No-Transform Directive - - no-transform - Implementors of intermediate caches (proxies) have found it useful - to convert the media type of certain entity bodies. A non- - transparent proxy might, for example, convert between image - formats in order to save cache space or to reduce the amount of - traffic on a slow link. - - Serious operational problems occur, however, when these - transformations are applied to entity bodies intended for certain - kinds of applications. For example, applications for medical - - - -Fielding, et al. Standards Track [Page 115] - -RFC 2616 HTTP/1.1 June 1999 - - - imaging, scientific data analysis and those using end-to-end - authentication, all depend on receiving an entity body that is bit - for bit identical to the original entity-body. - - Therefore, if a message includes the no-transform directive, an - intermediate cache or proxy MUST NOT change those headers that are - listed in section 13.5.2 as being subject to the no-transform - directive. This implies that the cache or proxy MUST NOT change - any aspect of the entity-body that is specified by these headers, - including the value of the entity-body itself. - -14.9.6 Cache Control Extensions - - The Cache-Control header field can be extended through the use of one - or more cache-extension tokens, each with an optional assigned value. 
- Informational extensions (those which do not require a change in - cache behavior) MAY be added without changing the semantics of other - directives. Behavioral extensions are designed to work by acting as - modifiers to the existing base of cache directives. Both the new - directive and the standard directive are supplied, such that - applications which do not understand the new directive will default - to the behavior specified by the standard directive, and those that - understand the new directive will recognize it as modifying the - requirements associated with the standard directive. In this way, - extensions to the cache-control directives can be made without - requiring changes to the base protocol. - - This extension mechanism depends on an HTTP cache obeying all of the - cache-control directives defined for its native HTTP-version, obeying - certain extensions, and ignoring all directives that it does not - understand. - - For example, consider a hypothetical new response directive called - community which acts as a modifier to the private directive. We - define this new directive to mean that, in addition to any non-shared - cache, any cache which is shared only by members of the community - named within its value may cache the response. An origin server - wishing to allow the UCI community to use an otherwise private - response in their shared cache(s) could do so by including - - Cache-Control: private, community="UCI" - - A cache seeing this header field will act correctly even if the cache - does not understand the community cache-extension, since it will also - see and understand the private directive and thus default to the safe - behavior. - - - - - -Fielding, et al. 
Standards Track [Page 116] - -RFC 2616 HTTP/1.1 June 1999 - - - Unrecognized cache-directives MUST be ignored; it is assumed that any - cache-directive likely to be unrecognized by an HTTP/1.1 cache will - be combined with standard directives (or the response's default - cacheability) such that the cache behavior will remain minimally - correct even if the cache does not understand the extension(s). - -14.10 Connection - - The Connection general-header field allows the sender to specify - options that are desired for that particular connection and MUST NOT - be communicated by proxies over further connections. - - The Connection header has the following grammar: - - Connection = "Connection" ":" 1#(connection-token) - connection-token = token - - HTTP/1.1 proxies MUST parse the Connection header field before a - message is forwarded and, for each connection-token in this field, - remove any header field(s) from the message with the same name as the - connection-token. Connection options are signaled by the presence of - a connection-token in the Connection header field, not by any - corresponding additional header field(s), since the additional header - field may not be sent if there are no parameters associated with that - connection option. - - Message headers listed in the Connection header MUST NOT include - end-to-end headers, such as Cache-Control. - - HTTP/1.1 defines the "close" connection option for the sender to - signal that the connection will be closed after completion of the - response. For example, - - Connection: close - - in either the request or the response header fields indicates that - the connection SHOULD NOT be considered `persistent' (section 8.1) - after the current request/response is complete. - - HTTP/1.1 applications that do not support persistent connections MUST - include the "close" connection option in every message. 
- -[[ Should say: ]] -[[ An HTTP/1.1 client that does not support persistent connections ]] -[[ MUST include the "close" connection option in every request message. ]] -[[ ]] -[[ An HTTP/1.1 server that does not support persistent connections ]] -[[ MUST include the "close" connection option in every response ]] -[[ message that does not have a 1xx (informational) status code. ]] - - A system receiving an HTTP/1.0 (or lower-version) message that - includes a Connection header MUST, for each connection-token in this - field, remove and ignore any header field(s) from the message with - the same name as the connection-token. This protects against mistaken - forwarding of such header fields by pre-HTTP/1.1 proxies. See section - 19.6.2. - - - -Fielding, et al. Standards Track [Page 117] - -RFC 2616 HTTP/1.1 June 1999 - - -14.11 Content-Encoding - - The Content-Encoding entity-header field is used as a modifier to the - media-type. When present, its value indicates what additional content - codings have been applied to the entity-body, and thus what decoding - mechanisms must be applied in order to obtain the media-type - referenced by the Content-Type header field. Content-Encoding is - primarily used to allow a document to be compressed without losing - the identity of its underlying media type. - - Content-Encoding = "Content-Encoding" ":" 1#content-coding - - Content codings are defined in section 3.5. An example of its use is - - Content-Encoding: gzip - - The content-coding is a characteristic of the entity identified by - the Request-URI. Typically, the entity-body is stored with this - encoding and is only decoded before rendering or analogous usage. - However, a non-transparent proxy MAY modify the content-coding if the - new coding is known to be acceptable to the recipient, unless the - "no-transform" cache-control directive is present in the message. 
- - If the content-coding of an entity is not "identity", then the - response MUST include a Content-Encoding entity-header (section - 14.11) that lists the non-identity content-coding(s) used. - - If the content-coding of an entity in a request message is not - acceptable to the origin server, the server SHOULD respond with a - status code of 415 (Unsupported Media Type). - - If multiple encodings have been applied to an entity, the content - codings MUST be listed in the order in which they were applied. - Additional information about the encoding parameters MAY be provided - by other entity-header fields not defined by this specification. - -14.12 Content-Language - - The Content-Language entity-header field describes the natural - language(s) of the intended audience for the enclosed entity. Note - that this might not be equivalent to all the languages used within - the entity-body. - - Content-Language = "Content-Language" ":" 1#language-tag - - - - - - - -Fielding, et al. Standards Track [Page 118] - -RFC 2616 HTTP/1.1 June 1999 - - - Language tags are defined in section 3.10. The primary purpose of - Content-Language is to allow a user to identify and differentiate - entities according to the user's own preferred language. Thus, if the - body content is intended only for a Danish-literate audience, the - appropriate field is - - Content-Language: da - - If no Content-Language is specified, the default is that the content - is intended for all language audiences. This might mean that the - sender does not consider it to be specific to any natural language, - or that the sender does not know for which language it is intended. - - Multiple languages MAY be listed for content that is intended for - multiple audiences. 
For example, a rendition of the "Treaty of - Waitangi," presented simultaneously in the original Maori and English - versions, would call for - - Content-Language: mi, en - - However, just because multiple languages are present within an entity - does not mean that it is intended for multiple linguistic audiences. - An example would be a beginner's language primer, such as "A First - Lesson in Latin," which is clearly intended to be used by an - English-literate audience. In this case, the Content-Language would - properly only include "en". - - Content-Language MAY be applied to any media type -- it is not - limited to textual documents. - -14.13 Content-Length - - The Content-Length entity-header field indicates the size of the - entity-body, in decimal number of OCTETs, sent to the recipient or, - in the case of the HEAD method, the size of the entity-body that - would have been sent had the request been a GET. - - Content-Length = "Content-Length" ":" 1*DIGIT - - An example is - - Content-Length: 3495 - - Applications SHOULD use this field to indicate the transfer-length of - the message-body, unless this is prohibited by the rules in section - 4.4. - - - - - -Fielding, et al. Standards Track [Page 119] - -RFC 2616 HTTP/1.1 June 1999 - - - Any Content-Length greater than or equal to zero is a valid value. - Section 4.4 describes how to determine the length of a message-body - if a Content-Length is not given. - - Note that the meaning of this field is significantly different from - the corresponding definition in MIME, where it is an optional field - used within the "message/external-body" content-type. In HTTP, it - SHOULD be sent whenever the message's length can be determined prior - to being transferred, unless this is prohibited by the rules in - section 4.4. 
- -14.14 Content-Location - - The Content-Location entity-header field MAY be used to supply the - resource location for the entity enclosed in the message when that - entity is accessible from a location separate from the requested - resource's URI. A server SHOULD provide a Content-Location for the - variant corresponding to the response entity; especially in the case - where a resource has multiple entities associated with it, and those - entities actually have separate locations by which they might be - individually accessed, the server SHOULD provide a Content-Location - for the particular variant which is returned. - - Content-Location = "Content-Location" ":" - ( absoluteURI | relativeURI ) - - The value of Content-Location also defines the base URI for the - entity. - - The Content-Location value is not a replacement for the original - requested URI; it is only a statement of the location of the resource - corresponding to this particular entity at the time of the request. - Future requests MAY specify the Content-Location URI as the request- - URI if the desire is to identify the source of that particular - entity. - - A cache cannot assume that an entity with a Content-Location - different from the URI used to retrieve it can be used to respond to - later requests on that Content-Location URI. However, the Content- - Location can be used to differentiate between multiple entities - retrieved from a single requested resource, as described in section - 13.6. - - If the Content-Location is a relative URI, the relative URI is - interpreted relative to the Request-URI. - - The meaning of the Content-Location header in PUT or POST requests is - undefined; servers are free to ignore it in those cases. - - - -Fielding, et al. 
Standards Track [Page 120] - -RFC 2616 HTTP/1.1 June 1999 - - -14.15 Content-MD5 - - The Content-MD5 entity-header field, as defined in RFC 1864 [23], is - an MD5 digest of the entity-body for the purpose of providing an - end-to-end message integrity check (MIC) of the entity-body. (Note: a - MIC is good for detecting accidental modification of the entity-body - in transit, but is not proof against malicious attacks.) - - Content-MD5 = "Content-MD5" ":" md5-digest - md5-digest = - - The Content-MD5 header field MAY be generated by an origin server or - client to function as an integrity check of the entity-body. Only - origin servers or clients MAY generate the Content-MD5 header field; - proxies and gateways MUST NOT generate it, as this would defeat its - value as an end-to-end integrity check. Any recipient of the entity- - body, including gateways and proxies, MAY check that the digest value - in this header field matches that of the entity-body as received. - - The MD5 digest is computed based on the content of the entity-body, - including any content-coding that has been applied, but not including - any transfer-encoding applied to the message-body. If the message is - received with a transfer-encoding, that encoding MUST be removed - prior to checking the Content-MD5 value against the received entity. - - This has the result that the digest is computed on the octets of the - entity-body exactly as, and in the order that, they would be sent if - no transfer-encoding were being applied. - - HTTP extends RFC 1864 to permit the digest to be computed for MIME - composite media-types (e.g., multipart/* and message/rfc822), but - this does not change how the digest is computed as defined in the - preceding paragraph. - - There are several consequences of this. The entity-body for composite - types MAY contain many body-parts, each with its own MIME and HTTP - headers (including Content-MD5, Content-Transfer-Encoding, and - Content-Encoding headers). 
If a body-part has a Content-Transfer- - Encoding or Content-Encoding header, it is assumed that the content - of the body-part has had the encoding applied, and the body-part is - included in the Content-MD5 digest as is -- i.e., after the - application. The Transfer-Encoding header field is not allowed within - body-parts. - - Conversion of all line breaks to CRLF MUST NOT be done before - computing or checking the digest: the line break convention used in - the text actually transmitted MUST be left unaltered when computing - the digest. - - - -Fielding, et al. Standards Track [Page 121] - -RFC 2616 HTTP/1.1 June 1999 - - - Note: while the definition of Content-MD5 is exactly the same for - HTTP as in RFC 1864 for MIME entity-bodies, there are several ways - in which the application of Content-MD5 to HTTP entity-bodies - differs from its application to MIME entity-bodies. One is that - HTTP, unlike MIME, does not use Content-Transfer-Encoding, and - does use Transfer-Encoding and Content-Encoding. Another is that - HTTP more frequently uses binary content types than MIME, so it is - worth noting that, in such cases, the byte order used to compute - the digest is the transmission byte order defined for the type. - Lastly, HTTP allows transmission of text types with any of several - line break conventions and not just the canonical form using CRLF. - -14.16 Content-Range - - The Content-Range entity-header is sent with a partial entity-body to - specify where in the full entity-body the partial body should be - applied. Range units are defined in section 3.12. 
- - Content-Range = "Content-Range" ":" content-range-spec - - content-range-spec = byte-content-range-spec - byte-content-range-spec = bytes-unit SP - byte-range-resp-spec "/" - ( instance-length | "*" ) - - byte-range-resp-spec = (first-byte-pos "-" last-byte-pos) - | "*" - instance-length = 1*DIGIT - - The header SHOULD indicate the total length of the full entity-body, - unless this length is unknown or difficult to determine. The asterisk - "*" character means that the instance-length is unknown at the time - when the response was generated. - - Unlike byte-ranges-specifier values (see section 14.35.1), a byte- - range-resp-spec MUST only specify one range, and MUST contain - absolute byte positions for both the first and last byte of the - range. - - A byte-content-range-spec with a byte-range-resp-spec whose last- - byte-pos value is less than its first-byte-pos value, or whose - instance-length value is less than or equal to its last-byte-pos - value, is invalid. The recipient of an invalid byte-content-range- - spec MUST ignore it and any content transferred along with it. - - A server sending a response with status code 416 (Requested range not - satisfiable) SHOULD include a Content-Range field with a byte-range- - resp-spec of "*". The instance-length specifies the current length of - - - -Fielding, et al. Standards Track [Page 122] - -RFC 2616 HTTP/1.1 June 1999 - - - the selected resource. A response with status code 206 (Partial - Content) MUST NOT include a Content-Range field with a byte-range- - resp-spec of "*". - - Examples of byte-content-range-spec values, assuming that the entity - contains a total of 1234 bytes: - - . The first 500 bytes: - bytes 0-499/1234 - - . The second 500 bytes: - bytes 500-999/1234 - - . All except for the first 500 bytes: - bytes 500-1233/1234 - - . 
The last 500 bytes: - bytes 734-1233/1234 - - When an HTTP message includes the content of a single range (for - example, a response to a request for a single range, or to a request - for a set of ranges that overlap without any holes), this content is - transmitted with a Content-Range header, and a Content-Length header - showing the number of bytes actually transferred. For example, - - HTTP/1.1 206 Partial content - Date: Wed, 15 Nov 1995 06:25:24 GMT - Last-Modified: Wed, 15 Nov 1995 04:58:08 GMT - Content-Range: bytes 21010-47021/47022 - Content-Length: 26012 - Content-Type: image/gif - - When an HTTP message includes the content of multiple ranges (for - example, a response to a request for multiple non-overlapping - ranges), these are transmitted as a multipart message. The multipart - media type used for this purpose is "multipart/byteranges" as defined - in appendix 19.2. See appendix 19.6.3 for a compatibility issue. - - A response to a request for a single range MUST NOT be sent using the - multipart/byteranges media type. A response to a request for - multiple ranges, whose result is a single range, MAY be sent as a - multipart/byteranges media type with one part. A client that cannot - decode a multipart/byteranges message MUST NOT ask for multiple - byte-ranges in a single request. - - When a client requests multiple byte-ranges in one request, the - server SHOULD return them in the order that they appeared in the - request. - - - -Fielding, et al. Standards Track [Page 123] - -RFC 2616 HTTP/1.1 June 1999 - - - If the server ignores a byte-range-spec because it is syntactically - invalid, the server SHOULD treat the request as if the invalid Range - header field did not exist. (Normally, this means return a 200 - response containing the full entity). 
- - If the server receives a request (other than one including an If- - Range request-header field) with an unsatisfiable Range request- - header field (that is, all of whose byte-range-spec values have a - first-byte-pos value greater than the current length of the selected - resource), it SHOULD return a response code of 416 (Requested range - not satisfiable) (section 10.4.17). - - Note: clients cannot depend on servers to send a 416 (Requested - range not satisfiable) response instead of a 200 (OK) response for - an unsatisfiable Range request-header, since not all servers - implement this request-header. - -14.17 Content-Type - - The Content-Type entity-header field indicates the media type of the - entity-body sent to the recipient or, in the case of the HEAD method, - the media type that would have been sent had the request been a GET. - - Content-Type = "Content-Type" ":" media-type - - Media types are defined in section 3.7. An example of the field is - - Content-Type: text/html; charset=ISO-8859-4 - - Further discussion of methods for identifying the media type of an - entity is provided in section 7.2.1. - -14.18 Date - - The Date general-header field represents the date and time at which - the message was originated, having the same semantics as orig-date in - RFC 822. The field value is an HTTP-date, as described in section - 3.3.1; it MUST be sent in RFC 1123 [8]-date format. - - Date = "Date" ":" HTTP-date - - An example is - - Date: Tue, 15 Nov 1994 08:12:31 GMT - - Origin servers MUST include a Date header field in all responses, - except in these cases: - - - - -Fielding, et al. Standards Track [Page 124] - -RFC 2616 HTTP/1.1 June 1999 - - - 1. If the response status code is 100 (Continue) or 101 (Switching - Protocols), the response MAY include a Date header field, at - the server's option. - - 2. If the response status code conveys a server error, e.g. 
500 - (Internal Server Error) or 503 (Service Unavailable), and it is - inconvenient or impossible to generate a valid Date. - - 3. If the server does not have a clock that can provide a - reasonable approximation of the current time, its responses - MUST NOT include a Date header field. In this case, the rules - in section 14.18.1 MUST be followed. - - A received message that does not have a Date header field MUST be - assigned one by the recipient if the message will be cached by that - recipient or gatewayed via a protocol which requires a Date. An HTTP - implementation without a clock MUST NOT cache responses without - revalidating them on every use. An HTTP cache, especially a shared - cache, SHOULD use a mechanism, such as NTP [28], to synchronize its - clock with a reliable external standard. - - Clients SHOULD only send a Date header field in messages that include - an entity-body, as in the case of the PUT and POST requests, and even - then it is optional. A client without a clock MUST NOT send a Date - header field in a request. - - The HTTP-date sent in a Date header SHOULD NOT represent a date and - time subsequent to the generation of the message. It SHOULD represent - the best available approximation of the date and time of message - generation, unless the implementation has no means of generating a - reasonably accurate date and time. In theory, the date ought to - represent the moment just before the entity is generated. In - practice, the date can be generated at any time during the message - origination without affecting its semantic value. - -14.18.1 Clockless Origin Server Operation - - Some origin server implementations might not have a clock available. - An origin server without a clock MUST NOT assign Expires or Last- - Modified values to a response, unless these values were associated - with the resource by a system or user with a reliable clock. 
It MAY - assign an Expires value that is known, at or before server - configuration time, to be in the past (this allows "pre-expiration" - of responses without storing separate Expires values for each - resource). - - - - - - -Fielding, et al. Standards Track [Page 125] - -RFC 2616 HTTP/1.1 June 1999 - - -14.19 ETag - - The ETag response-header field provides the current value of the - entity tag for the requested variant. The headers used with entity - tags are described in sections 14.24, 14.26 and 14.44. The entity tag - MAY be used for comparison with other entities from the same resource - (see section 13.3.3). - - ETag = "ETag" ":" entity-tag - - Examples: - - ETag: "xyzzy" - ETag: W/"xyzzy" - ETag: "" - -14.20 Expect - - The Expect request-header field is used to indicate that particular - server behaviors are required by the client. - - Expect = "Expect" ":" 1#expectation - - expectation = "100-continue" | expectation-extension - expectation-extension = token [ "=" ( token | quoted-string ) - *expect-params ] - expect-params = ";" token [ "=" ( token | quoted-string ) ] - - - A server that does not understand or is unable to comply with any of - the expectation values in the Expect field of a request MUST respond - with appropriate error status. The server MUST respond with a 417 - (Expectation Failed) status if any of the expectations cannot be met - or, if there are other problems with the request, some other 4xx - status. - - This header field is defined with extensible syntax to allow for - future extensions. If a server receives a request containing an - Expect field that includes an expectation-extension that it does not - support, it MUST respond with a 417 (Expectation Failed) status. - - Comparison of expectation values is case-insensitive for unquoted - tokens (including the 100-continue token), and is case-sensitive for - quoted-string expectation-extensions. - - - - - - - -Fielding, et al. 
Standards Track [Page 126] - -RFC 2616 HTTP/1.1 June 1999 - - - The Expect mechanism is hop-by-hop: that is, an HTTP/1.1 proxy MUST - return a 417 (Expectation Failed) status if it receives a request - with an expectation that it cannot meet. However, the Expect - request-header itself is end-to-end; it MUST be forwarded if the - request is forwarded. - - Many older HTTP/1.0 and HTTP/1.1 applications do not understand the - Expect header. - - See section 8.2.3 for the use of the 100 (continue) status. - -14.21 Expires - - The Expires entity-header field gives the date/time after which the - response is considered stale. A stale cache entry may not normally be - returned by a cache (either a proxy cache or a user agent cache) - unless it is first validated with the origin server (or with an - intermediate cache that has a fresh copy of the entity). See section - 13.2 for further discussion of the expiration model. - - The presence of an Expires field does not imply that the original - resource will change or cease to exist at, before, or after that - time. - - The format is an absolute date and time as defined by HTTP-date in - section 3.3.1; it MUST be in RFC 1123 date format: - - Expires = "Expires" ":" HTTP-date - - An example of its use is - - Expires: Thu, 01 Dec 1994 16:00:00 GMT - - Note: if a response includes a Cache-Control field with the max- - age directive (see section 14.9.3), that directive overrides the - Expires field. - - HTTP/1.1 clients and caches MUST treat other invalid date formats, - especially including the value "0", as in the past (i.e., "already - expired"). - - To mark a response as "already expired," an origin server sends an - Expires date that is equal to the Date header value. (See the rules - for expiration calculations in section 13.2.4.) - - - - - - - -Fielding, et al. 
Standards Track [Page 127] - -RFC 2616 HTTP/1.1 June 1999 - - - To mark a response as "never expires," an origin server sends an - Expires date approximately one year from the time the response is - sent. HTTP/1.1 servers SHOULD NOT send Expires dates more than one - year in the future. - - The presence of an Expires header field with a date value of some - time in the future on a response that otherwise would by default be - non-cacheable indicates that the response is cacheable, unless - indicated otherwise by a Cache-Control header field (section 14.9). - -14.22 From - - The From request-header field, if given, SHOULD contain an Internet - e-mail address for the human user who controls the requesting user - agent. The address SHOULD be machine-usable, as defined by "mailbox" - in RFC 822 [9] as updated by RFC 1123 [8]: - - From = "From" ":" mailbox - - An example is: - - From: webmaster@w3.org - - This header field MAY be used for logging purposes and as a means for - identifying the source of invalid or unwanted requests. It SHOULD NOT - be used as an insecure form of access protection. The interpretation - of this field is that the request is being performed on behalf of the - person given, who accepts responsibility for the method performed. In - particular, robot agents SHOULD include this header so that the - person responsible for running the robot can be contacted if problems - occur on the receiving end. - - The Internet e-mail address in this field MAY be separate from the - Internet host which issued the request. For example, when a request - is passed through a proxy the original issuer's address SHOULD be - used. - - The client SHOULD NOT send the From header field without the user's - approval, as it might conflict with the user's privacy interests or - their site's security policy. It is strongly recommended that the - user be able to disable, enable, and modify the value of this field - at any time prior to a request. 
- -14.23 Host - - The Host request-header field specifies the Internet host and port - number of the resource being requested, as obtained from the original - URI given by the user or referring resource (generally an HTTP URL, - - - -Fielding, et al. Standards Track [Page 128] - -RFC 2616 HTTP/1.1 June 1999 - - - as described in section 3.2.2). The Host field value MUST represent - the naming authority of the origin server or gateway given by the - original URL. This allows the origin server or gateway to - differentiate between internally-ambiguous URLs, such as the root "/" - URL of a server for multiple host names on a single IP address. - - Host = "Host" ":" host [ ":" port ] ; Section 3.2.2 - - A "host" without any trailing port information implies the default - port for the service requested (e.g., "80" for an HTTP URL). For - example, a request on the origin server for - would properly include: - - GET /pub/WWW/ HTTP/1.1 - Host: www.w3.org - - A client MUST include a Host header field in all HTTP/1.1 request - messages . If the requested URI does not include an Internet host - name for the service being requested, then the Host header field MUST - be given with an empty value. An HTTP/1.1 proxy MUST ensure that any - request message it forwards does contain an appropriate Host header - field that identifies the service being requested by the proxy. All - Internet-based HTTP/1.1 servers MUST respond with a 400 (Bad Request) - status code to any HTTP/1.1 request message which lacks a Host header - field. - - See sections 5.2 and 19.6.1.1 for other requirements relating to - Host. - -14.24 If-Match - - The If-Match request-header field is used with a method to make it - conditional. A client that has one or more entities previously - obtained from the resource can verify that one of those entities is - current by including a list of their associated entity tags in the - If-Match header field. Entity tags are defined in section 3.11. 
The - purpose of this feature is to allow efficient updates of cached - information with a minimum amount of transaction overhead. It is also - used, on updating requests, to prevent inadvertent modification of - the wrong version of a resource. As a special case, the value "*" - matches any current entity of the resource. - - If-Match = "If-Match" ":" ( "*" | 1#entity-tag ) - - If any of the entity tags match the entity tag of the entity that - would have been returned in the response to a similar GET request - (without the If-Match header) on that resource, or if "*" is given - - - - -Fielding, et al. Standards Track [Page 129] - -RFC 2616 HTTP/1.1 June 1999 - - - and any current entity exists for that resource, then the server MAY - perform the requested method as if the If-Match header field did not - exist. - - A server MUST use the strong comparison function (see section 13.3.3) - to compare the entity tags in If-Match. - - If none of the entity tags match, or if "*" is given and no current - entity exists, the server MUST NOT perform the requested method, and - MUST return a 412 (Precondition Failed) response. This behavior is - most useful when the client wants to prevent an updating method, such - as PUT, from modifying a resource that has changed since the client - last retrieved it. - - If the request would, without the If-Match header field, result in - anything other than a 2xx or 412 status, then the If-Match header - MUST be ignored. - - The meaning of "If-Match: *" is that the method SHOULD be performed - if the representation selected by the origin server (or by a cache, - possibly using the Vary mechanism, see section 14.44) exists, and - MUST NOT be performed if the representation does not exist. 
- - A request intended to update a resource (e.g., a PUT) MAY include an - If-Match header field to signal that the request method MUST NOT be - applied if the entity corresponding to the If-Match value (a single - entity tag) is no longer a representation of that resource. This - allows the user to indicate that they do not wish the request to be - successful if the resource has been changed without their knowledge. - Examples: - - If-Match: "xyzzy" - If-Match: "xyzzy", "r2d2xxxx", "c3piozzzz" - If-Match: * - - The result of a request having both an If-Match header field and - either an If-None-Match or an If-Modified-Since header fields is - undefined by this specification. - -14.25 If-Modified-Since - - The If-Modified-Since request-header field is used with a method to - make it conditional: if the requested variant has not been modified - since the time specified in this field, an entity will not be - returned from the server; instead, a 304 (not modified) response will - be returned without any message-body. - - If-Modified-Since = "If-Modified-Since" ":" HTTP-date - - - -Fielding, et al. Standards Track [Page 130] - -RFC 2616 HTTP/1.1 June 1999 - - - An example of the field is: - - If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT - - A GET method with an If-Modified-Since header and no Range header - requests that the identified entity be transferred only if it has - been modified since the date given by the If-Modified-Since header. - The algorithm for determining this includes the following cases: - - a) If the request would normally result in anything other than a - 200 (OK) status, or if the passed If-Modified-Since date is - invalid, the response is exactly the same as for a normal GET. - A date which is later than the server's current time is - invalid. - - b) If the variant has been modified since the If-Modified-Since - date, the response is exactly the same as for a normal GET. 
- - c) If the variant has not been modified since a valid If- - Modified-Since date, the server SHOULD return a 304 (Not - Modified) response. - - The purpose of this feature is to allow efficient updates of cached - information with a minimum amount of transaction overhead. - - Note: The Range request-header field modifies the meaning of If- - Modified-Since; see section 14.35 for full details. - - Note: If-Modified-Since times are interpreted by the server, whose - clock might not be synchronized with the client. - - Note: When handling an If-Modified-Since header field, some - servers will use an exact date comparison function, rather than a - less-than function, for deciding whether to send a 304 (Not - Modified) response. To get best results when sending an If- - Modified-Since header field for cache validation, clients are - advised to use the exact date string received in a previous Last- - Modified header field whenever possible. - - Note: If a client uses an arbitrary date in the If-Modified-Since - header instead of a date taken from the Last-Modified header for - the same request, the client should be aware of the fact that this - date is interpreted in the server's understanding of time. The - client should consider unsynchronized clocks and rounding problems - due to the different encodings of time between the client and - server. This includes the possibility of race conditions if the - document has changed between the time it was first requested and - the If-Modified-Since date of a subsequent request, and the - - - -Fielding, et al. Standards Track [Page 131] - -RFC 2616 HTTP/1.1 June 1999 - - - possibility of clock-skew-related problems if the If-Modified- - Since date is derived from the client's clock without correction - to the server's clock. Corrections for different time bases - between client and server are at best approximate due to network - latency. 
- - The result of a request having both an If-Modified-Since header field - and either an If-Match or an If-Unmodified-Since header fields is - undefined by this specification. - -14.26 If-None-Match - - The If-None-Match request-header field is used with a method to make - it conditional. A client that has one or more entities previously - obtained from the resource can verify that none of those entities is - current by including a list of their associated entity tags in the - If-None-Match header field. The purpose of this feature is to allow - efficient updates of cached information with a minimum amount of - transaction overhead. It is also used to prevent a method (e.g. PUT) - from inadvertently modifying an existing resource when the client - believes that the resource does not exist. - - As a special case, the value "*" matches any current entity of the - resource. - - If-None-Match = "If-None-Match" ":" ( "*" | 1#entity-tag ) - - If any of the entity tags match the entity tag of the entity that - would have been returned in the response to a similar GET request - (without the If-None-Match header) on that resource, or if "*" is - given and any current entity exists for that resource, then the - server MUST NOT perform the requested method, unless required to do - so because the resource's modification date fails to match that - supplied in an If-Modified-Since header field in the request. - Instead, if the request method was GET or HEAD, the server SHOULD - respond with a 304 (Not Modified) response, including the cache- - related header fields (particularly ETag) of one of the entities that - matched. For all other request methods, the server MUST respond with - a status of 412 (Precondition Failed). - - See section 13.3.3 for rules on how to determine if two entities tags - match. The weak comparison function can only be used with GET or HEAD - requests. - - - - - - - - -Fielding, et al. 
Standards Track [Page 132] - -RFC 2616 HTTP/1.1 June 1999 - - - If none of the entity tags match, then the server MAY perform the - requested method as if the If-None-Match header field did not exist, - but MUST also ignore any If-Modified-Since header field(s) in the - request. That is, if no entity tags match, then the server MUST NOT - return a 304 (Not Modified) response. - - If the request would, without the If-None-Match header field, result - in anything other than a 2xx or 304 status, then the If-None-Match - header MUST be ignored. (See section 13.3.4 for a discussion of - server behavior when both If-Modified-Since and If-None-Match appear - in the same request.) - - The meaning of "If-None-Match: *" is that the method MUST NOT be - performed if the representation selected by the origin server (or by - a cache, possibly using the Vary mechanism, see section 14.44) - exists, and SHOULD be performed if the representation does not exist. - This feature is intended to be useful in preventing races between PUT - operations. - - Examples: - - If-None-Match: "xyzzy" - If-None-Match: W/"xyzzy" - If-None-Match: "xyzzy", "r2d2xxxx", "c3piozzzz" - If-None-Match: W/"xyzzy", W/"r2d2xxxx", W/"c3piozzzz" - If-None-Match: * - - The result of a request having both an If-None-Match header field and - either an If-Match or an If-Unmodified-Since header fields is - undefined by this specification. - -14.27 If-Range - - If a client has a partial copy of an entity in its cache, and wishes - to have an up-to-date copy of the entire entity in its cache, it - could use the Range request-header with a conditional GET (using - either or both of If-Unmodified-Since and If-Match.) However, if the - condition fails because the entity has been modified, the client - would then have to make a second request to obtain the entire current - entity-body. - - The If-Range header allows a client to "short-circuit" the second - request. 
Informally, its meaning is `if the entity is unchanged, send - me the part(s) that I am missing; otherwise, send me the entire new - entity'. - - If-Range = "If-Range" ":" ( entity-tag | HTTP-date ) - - - - -Fielding, et al. Standards Track [Page 133] - -RFC 2616 HTTP/1.1 June 1999 - - - If the client has no entity tag for an entity, but does have a Last- - Modified date, it MAY use that date in an If-Range header. (The - server can distinguish between a valid HTTP-date and any form of - entity-tag by examining no more than two characters.) The If-Range - header SHOULD only be used together with a Range header, and MUST be - ignored if the request does not include a Range header, or if the - server does not support the sub-range operation. - - If the entity tag given in the If-Range header matches the current - entity tag for the entity, then the server SHOULD provide the - specified sub-range of the entity using a 206 (Partial content) - response. If the entity tag does not match, then the server SHOULD - return the entire entity using a 200 (OK) response. - -14.28 If-Unmodified-Since - - The If-Unmodified-Since request-header field is used with a method to - make it conditional. If the requested resource has not been modified - since the time specified in this field, the server SHOULD perform the - requested operation as if the If-Unmodified-Since header were not - present. - - If the requested variant has been modified since the specified time, - the server MUST NOT perform the requested operation, and MUST return - a 412 (Precondition Failed). - - If-Unmodified-Since = "If-Unmodified-Since" ":" HTTP-date - - An example of the field is: - - If-Unmodified-Since: Sat, 29 Oct 1994 19:43:31 GMT - - If the request normally (i.e., without the If-Unmodified-Since - header) would result in anything other than a 2xx or 412 status, the - If-Unmodified-Since header SHOULD be ignored. - - If the specified date is invalid, the header is ignored. 
- - The result of a request having both an If-Unmodified-Since header - field and either an If-None-Match or an If-Modified-Since header - fields is undefined by this specification. - -14.29 Last-Modified - - The Last-Modified entity-header field indicates the date and time at - which the origin server believes the variant was last modified. - - Last-Modified = "Last-Modified" ":" HTTP-date - - - -Fielding, et al. Standards Track [Page 134] - -RFC 2616 HTTP/1.1 June 1999 - - - An example of its use is - - Last-Modified: Tue, 15 Nov 1994 12:45:26 GMT - - The exact meaning of this header field depends on the implementation - of the origin server and the nature of the original resource. For - files, it may be just the file system last-modified time. For - entities with dynamically included parts, it may be the most recent - of the set of last-modify times for its component parts. For database - gateways, it may be the last-update time stamp of the record. For - virtual objects, it may be the last time the internal state changed. - - An origin server MUST NOT send a Last-Modified date which is later - than the server's time of message origination. In such cases, where - the resource's last modification would indicate some time in the - future, the server MUST replace that date with the message - origination date. - - An origin server SHOULD obtain the Last-Modified value of the entity - as close as possible to the time that it generates the Date value of - its response. This allows a recipient to make an accurate assessment - of the entity's modification time, especially if the entity changes - near the time that the response is generated. - - HTTP/1.1 servers SHOULD send Last-Modified whenever feasible. - -14.30 Location - - The Location response-header field is used to redirect the recipient - to a location other than the Request-URI for completion of the - request or identification of a new resource. 
For 201 (Created) - responses, the Location is that of the new resource which was created - by the request. For 3xx responses, the location SHOULD indicate the - server's preferred URI for automatic redirection to the resource. The - field value consists of a single absolute URI. - - Location = "Location" ":" absoluteURI - [[ [ "#" fragment ] ]] - - An example is: - - Location: http://www.w3.org/pub/WWW/People.html - - Note: The Content-Location header field (section 14.14) differs - from Location in that the Content-Location identifies the original - location of the entity enclosed in the request. It is therefore - possible for a response to contain header fields for both Location - and Content-Location. Also see section 13.10 for cache - requirements of some methods. - - - -Fielding, et al. Standards Track [Page 135] - -RFC 2616 HTTP/1.1 June 1999 - - -14.31 Max-Forwards - - The Max-Forwards request-header field provides a mechanism with the - TRACE (section 9.8) and OPTIONS (section 9.2) methods to limit the - number of proxies or gateways that can forward the request to the - next inbound server. This can be useful when the client is attempting - to trace a request chain which appears to be failing or looping in - mid-chain. - - Max-Forwards = "Max-Forwards" ":" 1*DIGIT - - The Max-Forwards value is a decimal integer indicating the remaining - number of times this request message may be forwarded. - - Each proxy or gateway recipient of a TRACE or OPTIONS request - containing a Max-Forwards header field MUST check and update its - value prior to forwarding the request. If the received value is zero - (0), the recipient MUST NOT forward the request; instead, it MUST - respond as the final recipient. If the received Max-Forwards value is - greater than zero, then the forwarded message MUST contain an updated - Max-Forwards field with a value decremented by one (1). 
- - The Max-Forwards header field MAY be ignored for all other methods - defined by this specification and for any extension methods for which - it is not explicitly referred to as part of that method definition. - -14.32 Pragma - - The Pragma general-header field is used to include implementation- - specific directives that might apply to any recipient along the - request/response chain. All pragma directives specify optional - behavior from the viewpoint of the protocol; however, some systems - MAY require that behavior be consistent with the directives. - - Pragma = "Pragma" ":" 1#pragma-directive - pragma-directive = "no-cache" | extension-pragma - extension-pragma = token [ "=" ( token | quoted-string ) ] - - When the no-cache directive is present in a request message, an - application SHOULD forward the request toward the origin server even - if it has a cached copy of what is being requested. This pragma - directive has the same semantics as the no-cache cache-directive (see - section 14.9) and is defined here for backward compatibility with - HTTP/1.0. Clients SHOULD include both header fields when a no-cache - request is sent to a server not known to be HTTP/1.1 compliant. - - - - - - -Fielding, et al. Standards Track [Page 136] - -RFC 2616 HTTP/1.1 June 1999 - - - Pragma directives MUST be passed through by a proxy or gateway - application, regardless of their significance to that application, - since the directives might be applicable to all recipients along the - request/response chain. It is not possible to specify a pragma for a - specific recipient; however, any pragma directive not relevant to a - recipient SHOULD be ignored by that recipient. - - HTTP/1.1 caches SHOULD treat "Pragma: no-cache" as if the client had - sent "Cache-Control: no-cache". No new Pragma directives will be - defined in HTTP. 
- - Note: because the meaning of "Pragma: no-cache as a response - header field is not actually specified, it does not provide a - reliable replacement for "Cache-Control: no-cache" in a response - -14.33 Proxy-Authenticate - - The Proxy-Authenticate response-header field MUST be included as part - of a 407 (Proxy Authentication Required) response. The field value - consists of a challenge that indicates the authentication scheme and - parameters applicable to the proxy for this Request-URI. - - Proxy-Authenticate = "Proxy-Authenticate" ":" 1#challenge - - The HTTP access authentication process is described in "HTTP - Authentication: Basic and Digest Access Authentication" [43]. Unlike - WWW-Authenticate, the Proxy-Authenticate header field applies only to - the current connection and SHOULD NOT be passed on to downstream - clients. However, an intermediate proxy might need to obtain its own - credentials by requesting them from the downstream client, which in - some circumstances will appear as if the proxy is forwarding the - Proxy-Authenticate header field. - -14.34 Proxy-Authorization - - The Proxy-Authorization request-header field allows the client to - identify itself (or its user) to a proxy which requires - authentication. The Proxy-Authorization field value consists of - credentials containing the authentication information of the user - agent for the proxy and/or realm of the resource being requested. - - Proxy-Authorization = "Proxy-Authorization" ":" credentials - - The HTTP access authentication process is described in "HTTP - Authentication: Basic and Digest Access Authentication" [43] . Unlike - Authorization, the Proxy-Authorization header field applies only to - the next outbound proxy that demanded authentication using the Proxy- - Authenticate field. When multiple proxies are used in a chain, the - - - -Fielding, et al. 
Standards Track [Page 137] - -RFC 2616 HTTP/1.1 June 1999 - - - Proxy-Authorization header field is consumed by the first outbound - proxy that was expecting to receive credentials. A proxy MAY relay - the credentials from the client request to the next proxy if that is - the mechanism by which the proxies cooperatively authenticate a given - request. - -14.35 Range - -14.35.1 Byte Ranges - - Since all HTTP entities are represented in HTTP messages as sequences - of bytes, the concept of a byte range is meaningful for any HTTP - entity. (However, not all clients and servers need to support byte- - range operations.) - - Byte range specifications in HTTP apply to the sequence of bytes in - the entity-body (not necessarily the same as the message-body). - - A byte range operation MAY specify a single range of bytes, or a set - of ranges within a single entity. - - ranges-specifier = byte-ranges-specifier - byte-ranges-specifier = bytes-unit "=" byte-range-set - byte-range-set = 1#( byte-range-spec | suffix-byte-range-spec ) - byte-range-spec = first-byte-pos "-" [last-byte-pos] - first-byte-pos = 1*DIGIT - last-byte-pos = 1*DIGIT - - The first-byte-pos value in a byte-range-spec gives the byte-offset - of the first byte in a range. The last-byte-pos value gives the - byte-offset of the last byte in the range; that is, the byte - positions specified are inclusive. Byte offsets start at zero. - - If the last-byte-pos value is present, it MUST be greater than or - equal to the first-byte-pos in that byte-range-spec, or the byte- - range-spec is syntactically invalid. The recipient of a byte-range- - set that includes one or more syntactically invalid byte-range-spec - values MUST ignore the header field that includes that byte-range- - set. - - If the last-byte-pos value is absent, or if the value is greater than - or equal to the current length of the entity-body, last-byte-pos is - taken to be equal to one less than the current length of the entity- - body in bytes. 
- - By its choice of last-byte-pos, a client can limit the number of - bytes retrieved without knowing the size of the entity. - - - - -Fielding, et al. Standards Track [Page 138] - -RFC 2616 HTTP/1.1 June 1999 - - - suffix-byte-range-spec = "-" suffix-length - suffix-length = 1*DIGIT - - A suffix-byte-range-spec is used to specify the suffix of the - entity-body, of a length given by the suffix-length value. (That is, - this form specifies the last N bytes of an entity-body.) If the - entity is shorter than the specified suffix-length, the entire - entity-body is used. - - If a syntactically valid byte-range-set includes at least one byte- - range-spec whose first-byte-pos is less than the current length of - the entity-body, or at least one suffix-byte-range-spec with a non- - zero suffix-length, then the byte-range-set is satisfiable. - Otherwise, the byte-range-set is unsatisfiable. If the byte-range-set - is unsatisfiable, the server SHOULD return a response with a status - of 416 (Requested range not satisfiable). Otherwise, the server - SHOULD return a response with a status of 206 (Partial Content) - containing the satisfiable ranges of the entity-body. 
- - Examples of byte-ranges-specifier values (assuming an entity-body of - length 10000): - - - The first 500 bytes (byte offsets 0-499, inclusive): bytes=0- - 499 - - - The second 500 bytes (byte offsets 500-999, inclusive): - bytes=500-999 - - - The final 500 bytes (byte offsets 9500-9999, inclusive): - bytes=-500 - - - Or bytes=9500- - - - The first and last bytes only (bytes 0 and 9999): bytes=0-0,-1 - - - Several legal but not canonical specifications of the second 500 - bytes (byte offsets 500-999, inclusive): - bytes=500-600,601-999 - bytes=500-700,601-999 - -14.35.2 Range Retrieval Requests - - HTTP retrieval requests using conditional or unconditional GET - methods MAY request one or more sub-ranges of the entity, instead of - the entire entity, using the Range request header, which applies to - the entity returned as the result of the request: - - Range = "Range" ":" ranges-specifier - - - -Fielding, et al. Standards Track [Page 139] - -RFC 2616 HTTP/1.1 June 1999 - - - A server MAY ignore the Range header. However, HTTP/1.1 origin - servers and intermediate caches ought to support byte ranges when - possible, since Range supports efficient recovery from partially - failed transfers, and supports efficient partial retrieval of large - entities. - - If the server supports the Range header and the specified range or - ranges are appropriate for the entity: - - - The presence of a Range header in an unconditional GET modifies - what is returned if the GET is otherwise successful. In other - words, the response carries a status code of 206 (Partial - Content) instead of 200 (OK). - - - The presence of a Range header in a conditional GET (a request - using one or both of If-Modified-Since and If-None-Match, or - one or both of If-Unmodified-Since and If-Match) modifies what - is returned if the GET is otherwise successful and the - condition is true. It does not affect the 304 (Not Modified) - response returned if the conditional is false. 
- - In some cases, it might be more appropriate to use the If-Range - header (see section 14.27) in addition to the Range header. - - If a proxy that supports ranges receives a Range request, forwards - the request to an inbound server, and receives an entire entity in - reply, it SHOULD only return the requested range to its client. It - SHOULD store the entire received response in its cache if that is - consistent with its cache allocation policies. - -14.36 Referer - - The Referer[sic] request-header field allows the client to specify, - for the server's benefit, the address (URI) of the resource from - which the Request-URI was obtained (the "referrer", although the - header field is misspelled.) The Referer request-header allows a - server to generate lists of back-links to resources for interest, - logging, optimized caching, etc. It also allows obsolete or mistyped - links to be traced for maintenance. The Referer field MUST NOT be - sent if the Request-URI was obtained from a source that does not have - its own URI, such as input from the user keyboard. - - Referer = "Referer" ":" ( absoluteURI | relativeURI ) - - Example: - - Referer: http://www.w3.org/hypertext/DataSources/Overview.html - - - -Fielding, et al. Standards Track [Page 140] - -RFC 2616 HTTP/1.1 June 1999 - - - If the field value is a relative URI, it SHOULD be interpreted - relative to the Request-URI. The URI MUST NOT include a fragment. See - section 15.1.3 for security considerations. - -14.37 Retry-After - - The Retry-After response-header field can be used with a 503 (Service - Unavailable) response to indicate how long the service is expected to - be unavailable to the requesting client. This field MAY also be used - with any 3xx (Redirection) response to indicate the minimum time the - user-agent is asked to wait before issuing the redirected request. The - value of this field can be either an HTTP-date or an integer number - of seconds (in decimal) after the time of the response. 
- - Retry-After = "Retry-After" ":" ( HTTP-date | delta-seconds ) - - Two examples of its use are - - Retry-After: Fri, 31 Dec 1999 23:59:59 GMT - Retry-After: 120 - - In the latter example, the delay is 2 minutes. - -14.38 Server - - The Server response-header field contains information about the - software used by the origin server to handle the request. The field - can contain multiple product tokens (section 3.8) and comments - identifying the server and any significant subproducts. The product - tokens are listed in order of their significance for identifying the - application. - - Server = "Server" ":" 1*( product | comment ) - - Example: - - Server: CERN/3.0 libwww/2.17 - - If the response is being forwarded through a proxy, the proxy - application MUST NOT modify the Server response-header. Instead, it - SHOULD include a Via field (as described in section 14.45). - [[ Actually, it MUST ]] - - Note: Revealing the specific software version of the server might - allow the server machine to become more vulnerable to attacks - against software that is known to contain security holes. Server - implementors are encouraged to make this field a configurable - option. - - - - -Fielding, et al. Standards Track [Page 141] - -RFC 2616 HTTP/1.1 June 1999 - - -14.39 TE - - The TE request-header field indicates what extension transfer-codings - it is willing to accept in the response and whether or not it is - willing to accept trailer fields in a chunked transfer-coding. Its - value may consist of the keyword "trailers" and/or a comma-separated - list of extension transfer-coding names with optional accept - parameters (as described in section 3.6). - - TE = "TE" ":" #( t-codings ) - t-codings = "trailers" | ( transfer-extension [ accept-params ] ) - - The presence of the keyword "trailers" indicates that the client is - willing to accept trailer fields in a chunked transfer-coding, as - defined in section 3.6.1. 
This keyword is reserved for use with - transfer-coding values even though it does not itself represent a - transfer-coding. - - Examples of its use are: - - TE: deflate - TE: - TE: trailers, deflate;q=0.5 - - The TE header field only applies to the immediate connection. - Therefore, the keyword MUST be supplied within a Connection header - field (section 14.10) whenever TE is present in an HTTP/1.1 message. - - A server tests whether a transfer-coding is acceptable, according to - a TE field, using these rules: - - 1. The "chunked" transfer-coding is always acceptable. If the - keyword "trailers" is listed, the client indicates that it is - willing to accept trailer fields in the chunked response on - behalf of itself and any downstream clients. The implication is - that, if given, the client is stating that either all - downstream clients are willing to accept trailer fields in the - forwarded response, or that it will attempt to buffer the - response on behalf of downstream recipients. - - Note: HTTP/1.1 does not define any means to limit the size of a - chunked response such that a client can be assured of buffering - the entire response. - - 2. If the transfer-coding being tested is one of the transfer- - codings listed in the TE field, then it is acceptable unless it - is accompanied by a qvalue of 0. (As defined in section 3.9, a - qvalue of 0 means "not acceptable.") - - - -Fielding, et al. Standards Track [Page 142] - -RFC 2616 HTTP/1.1 June 1999 - - - 3. If multiple transfer-codings are acceptable, then the - acceptable transfer-coding with the highest non-zero qvalue is - preferred. The "chunked" transfer-coding always has a qvalue - of 1. - - If the TE field-value is empty or if no TE field is present, the only - transfer-coding is "chunked". A message with no transfer-coding is - always acceptable. 
- -14.40 Trailer - - The Trailer general field value indicates that the given set of - header fields is present in the trailer of a message encoded with - chunked transfer-coding. - - Trailer = "Trailer" ":" 1#field-name - - An HTTP/1.1 message SHOULD include a Trailer header field in a - message using chunked transfer-coding with a non-empty trailer. Doing - so allows the recipient to know which header fields to expect in the - trailer. - - If no Trailer header field is present, the trailer SHOULD NOT include - any header fields. See section 3.6.1 for restrictions on the use of - trailer fields in a "chunked" transfer-coding. - - Message header fields listed in the Trailer header field MUST NOT - include the following header fields: - - . Transfer-Encoding - - . Content-Length - - . Trailer - -14.41 Transfer-Encoding - - The Transfer-Encoding general-header field indicates what (if any) - type of transformation has been applied to the message body in order - to safely transfer it between the sender and the recipient. This - differs from the content-coding in that the transfer-coding is a - property of the message, not of the entity. - - Transfer-Encoding = "Transfer-Encoding" ":" 1#transfer-coding - - Transfer-codings are defined in section 3.6. An example is: - - Transfer-Encoding: chunked - - - -Fielding, et al. Standards Track [Page 143] - -RFC 2616 HTTP/1.1 June 1999 - - - If multiple encodings have been applied to an entity, the transfer- - codings MUST be listed in the order in which they were applied. - Additional information about the encoding parameters MAY be provided - by other entity-header fields not defined by this specification. - - Many older HTTP/1.0 applications do not understand the Transfer- - Encoding header. - -14.42 Upgrade - - The Upgrade general-header allows the client to specify what - additional communication protocols it supports and would like to use - if the server finds it appropriate to switch protocols. 
The server - MUST use the Upgrade header field within a 101 (Switching Protocols) - response to indicate which protocol(s) are being switched. - - Upgrade = "Upgrade" ":" 1#product - - For example, - - Upgrade: HTTP/2.0, SHTTP/1.3, IRC/6.9, RTA/x11 - - The Upgrade header field is intended to provide a simple mechanism - for transition from HTTP/1.1 to some other, incompatible protocol. It - does so by allowing the client to advertise its desire to use another - protocol, such as a later version of HTTP with a higher major version - number, even though the current request has been made using HTTP/1.1. - This eases the difficult transition between incompatible protocols by - allowing the client to initiate a request in the more commonly - supported protocol while indicating to the server that it would like - to use a "better" protocol if available (where "better" is determined - by the server, possibly according to the nature of the method and/or - resource being requested). - - The Upgrade header field only applies to switching application-layer - protocols upon the existing transport-layer connection. Upgrade - cannot be used to insist on a protocol change; its acceptance and use - by the server is optional. The capabilities and nature of the - application-layer communication after the protocol change is entirely - dependent upon the new protocol chosen, although the first action - after changing the protocol MUST be a response to the initial HTTP - request containing the Upgrade header field. - - The Upgrade header field only applies to the immediate connection. - Therefore, the upgrade keyword MUST be supplied within a Connection - header field (section 14.10) whenever Upgrade is present in an - HTTP/1.1 message. - - - - -Fielding, et al. Standards Track [Page 144] - -RFC 2616 HTTP/1.1 June 1999 - - - The Upgrade header field cannot be used to indicate a switch to a - protocol on a different connection. 
For that purpose, it is more - appropriate to use a 301, 302, 303, or 305 redirection response. - - This specification only defines the protocol name "HTTP" for use by - the family of Hypertext Transfer Protocols, as defined by the HTTP - version rules of section 3.1 and future updates to this - specification. Any token can be used as a protocol name; however, it - will only be useful if both the client and server associate the name - with the same protocol. - -14.43 User-Agent - - The User-Agent request-header field contains information about the - user agent originating the request. This is for statistical purposes, - the tracing of protocol violations, and automated recognition of user - agents for the sake of tailoring responses to avoid particular user - agent limitations. User agents SHOULD include this field with - requests. The field can contain multiple product tokens (section 3.8) - and comments identifying the agent and any subproducts which form a - significant part of the user agent. By convention, the product tokens - are listed in order of their significance for identifying the - application. - - User-Agent = "User-Agent" ":" 1*( product | comment ) - - Example: - - User-Agent: CERN-LineMode/2.15 libwww/2.17b3 - -14.44 Vary - - The Vary field value indicates the set of request-header fields that - fully determines, while the response is fresh, whether a cache is - permitted to use the response to reply to a subsequent request - without revalidation. For uncacheable or stale responses, the Vary - field value advises the user agent about the criteria that were used - to select the representation. A Vary field value of "*" implies that - a cache cannot determine from the request headers of a subsequent - request whether this response is the appropriate representation. See - section 13.6 for use of the Vary header field by caches. 
- - Vary = "Vary" ":" ( "*" | 1#field-name ) - - An HTTP/1.1 server SHOULD include a Vary header field with any - cacheable response that is subject to server-driven negotiation. - Doing so allows a cache to properly interpret future requests on that - resource and informs the user agent about the presence of negotiation - - - -Fielding, et al. Standards Track [Page 145] - -RFC 2616 HTTP/1.1 June 1999 - - - on that resource. A server MAY include a Vary header field with a - non-cacheable response that is subject to server-driven negotiation, - since this might provide the user agent with useful information about - the dimensions over which the response varies at the time of the - response. - - A Vary field value consisting of a list of field-names signals that - the representation selected for the response is based on a selection - algorithm which considers ONLY the listed request-header field values - in selecting the most appropriate representation. A cache MAY assume - that the same selection will be made for future requests with the - same values for the listed field names, for the duration of time for - which the response is fresh. - - The field-names given are not limited to the set of standard - request-header fields defined by this specification. Field names are - case-insensitive. - - A Vary field value of "*" signals that unspecified parameters not - limited to the request-headers (e.g., the network address of the - client), play a role in the selection of the response representation. - The "*" value MUST NOT be generated by a proxy server; it may only be - generated by an origin server. - -14.45 Via - - The Via general-header field MUST be used by gateways and proxies to - indicate the intermediate protocols and recipients between the user - agent and the server on requests, and between the origin server and - the client on responses. 
It is analogous to the "Received" field of - RFC 822 [9] and is intended to be used for tracking message forwards, - avoiding request loops, and identifying the protocol capabilities of - all senders along the request/response chain. - - Via = "Via" ":" 1#( received-protocol received-by [ comment ] ) - received-protocol = [ protocol-name "/" ] protocol-version - protocol-name = token - protocol-version = token - received-by = ( host [ ":" port ] ) | pseudonym - pseudonym = token - - The received-protocol indicates the protocol version of the message - received by the server or client along each segment of the - request/response chain. The received-protocol version is appended to - the Via field value when the message is forwarded so that information - about the protocol capabilities of upstream applications remains - visible to all recipients. - - - - -Fielding, et al. Standards Track [Page 146] - -RFC 2616 HTTP/1.1 June 1999 - - - The protocol-name is optional if and only if it would be "HTTP". The - received-by field is normally the host and optional port number of a - recipient server or client that subsequently forwarded the message. - However, if the real host is considered to be sensitive information, - it MAY be replaced by a pseudonym. If the port is not given, it MAY - be assumed to be the default port of the received-protocol. - - Multiple Via field values represents each proxy or gateway that has - forwarded the message. Each recipient MUST append its information - such that the end result is ordered according to the sequence of - forwarding applications. - - Comments MAY be used in the Via header field to identify the software - of the recipient proxy or gateway, analogous to the User-Agent and - Server header fields. However, all comments in the Via field are - optional and MAY be removed by any recipient prior to forwarding the - message. 
- - For example, a request message could be sent from an HTTP/1.0 user - agent to an internal proxy code-named "fred", which uses HTTP/1.1 to - forward the request to a public proxy at nowhere.com, which completes - the request by forwarding it to the origin server at www.ics.uci.edu. - The request received by www.ics.uci.edu would then have the following - Via header field: - - Via: 1.0 fred, 1.1 nowhere.com (Apache/1.1) - - Proxies and gateways used as a portal through a network firewall - SHOULD NOT, by default, forward the names and ports of hosts within - the firewall region. This information SHOULD only be propagated if - explicitly enabled. If not enabled, the received-by host of any host - behind the firewall SHOULD be replaced by an appropriate pseudonym - for that host. - - For organizations that have strong privacy requirements for hiding - internal structures, a proxy MAY combine an ordered subsequence of - Via header field entries with identical received-protocol values into - a single such entry. For example, - - Via: 1.0 ricky, 1.1 ethel, 1.1 fred, 1.0 lucy - - could be collapsed to - - Via: 1.0 ricky, 1.1 mertz, 1.0 lucy - - - - - - - -Fielding, et al. Standards Track [Page 147] - -RFC 2616 HTTP/1.1 June 1999 - - - Applications SHOULD NOT combine multiple entries unless they are all - under the same organizational control and the hosts have already been - replaced by pseudonyms. Applications MUST NOT combine entries which - have different received-protocol values. - -14.46 Warning - - The Warning general-header field is used to carry additional - information about the status or transformation of a message which - might not be reflected in the message. This information is typically - used to warn about a possible lack of semantic transparency from - caching operations or transformations applied to the entity body of - the message. 
- - Warning headers are sent with responses using: - - Warning = "Warning" ":" 1#warning-value - - warning-value = warn-code SP warn-agent SP warn-text - [SP warn-date] - - warn-code = 3DIGIT - warn-agent = ( host [ ":" port ] ) | pseudonym - ; the name or pseudonym of the server adding - ; the Warning header, for use in debugging - warn-text = quoted-string - warn-date = <"> HTTP-date <"> - - A response MAY carry more than one Warning header. - - The warn-text SHOULD be in a natural language and character set that - is most likely to be intelligible to the human user receiving the - response. This decision MAY be based on any available knowledge, such - as the location of the cache or user, the Accept-Language field in a - request, the Content-Language field in a response, etc. The default - language is English and the default character set is ISO-8859-1. - - If a character set other than ISO-8859-1 is used, it MUST be encoded - in the warn-text using the method described in RFC 2047 [14]. - - Warning headers can in general be applied to any message, however - some specific warn-codes are specific to caches and can only be - applied to response messages. New Warning headers SHOULD be added - after any existing Warning headers. A cache MUST NOT delete any - Warning header that it received with a message. However, if a cache - successfully validates a cache entry, it SHOULD remove any Warning - headers previously attached to that entry except as specified for - - - - -Fielding, et al. Standards Track [Page 148] - -RFC 2616 HTTP/1.1 June 1999 - - - specific Warning codes. It MUST then add any Warning headers received - in the validating response. In other words, Warning headers are those - that would be attached to the most recent relevant response. - - When multiple Warning headers are attached to a response, the user - agent ought to inform the user of as many of them as possible, in the - order that they appear in the response. 
If it is not possible to - inform the user of all of the warnings, the user agent SHOULD follow - these heuristics: - - - Warnings that appear early in the response take priority over - those appearing later in the response. - - - Warnings in the user's preferred character set take priority - over warnings in other character sets but with identical warn- - codes and warn-agents. - - Systems that generate multiple Warning headers SHOULD order them with - this user agent behavior in mind. - - Requirements for the behavior of caches with respect to Warnings are - stated in section 13.1.2. - - This is a list of the currently-defined warn-codes, each with a - recommended warn-text in English, and a description of its meaning. - - 110 Response is stale - MUST be included whenever the returned response is stale. - - 111 Revalidation failed - MUST be included if a cache returns a stale response because an - attempt to revalidate the response failed, due to an inability to - reach the server. - - 112 Disconnected operation - SHOULD be included if the cache is intentionally disconnected from - the rest of the network for a period of time. - - 113 Heuristic expiration - MUST be included if the cache heuristically chose a freshness - lifetime greater than 24 hours and the response's age is greater - than 24 hours. - - 199 Miscellaneous warning - The warning text MAY include arbitrary information to be presented - to a human user, or logged. A system receiving this warning MUST - NOT take any automated action, besides presenting the warning to - the user. - - - -Fielding, et al. 
Standards Track [Page 149] - -RFC 2616 HTTP/1.1 June 1999 - - - 214 Transformation applied - MUST be added by an intermediate cache or proxy if it applies any - transformation changing the content-coding (as specified in the - Content-Encoding header) or media-type (as specified in the - Content-Type header) of the response, or the entity-body of the - response, unless this Warning code already appears in the response. - - 299 Miscellaneous persistent warning - The warning text MAY include arbitrary information to be presented - to a human user, or logged. A system receiving this warning MUST - NOT take any automated action. - - If an implementation sends a message with one or more Warning headers - whose version is HTTP/1.0 or lower, then the sender MUST include in - each warning-value a warn-date that matches the date in the response. - - If an implementation receives a message with a warning-value that - includes a warn-date, and that warn-date is different from the Date - value in the response, then that warning-value MUST be deleted from - the message before storing, forwarding, or using it. (This prevents - bad consequences of naive caching of Warning header fields.) If all - of the warning-values are deleted for this reason, the Warning header - MUST be deleted as well. - -14.47 WWW-Authenticate - - The WWW-Authenticate response-header field MUST be included in 401 - (Unauthorized) response messages. The field value consists of at - least one challenge that indicates the authentication scheme(s) and - parameters applicable to the Request-URI. - - WWW-Authenticate = "WWW-Authenticate" ":" 1#challenge - - The HTTP access authentication process is described in "HTTP - Authentication: Basic and Digest Access Authentication" [43]. 
User - agents are advised to take special care in parsing the WWW- - Authenticate field value as it might contain more than one challenge, - or if more than one WWW-Authenticate header field is provided, the - contents of a challenge itself can contain a comma-separated list of - authentication parameters. - -15 Security Considerations - - This section is meant to inform application developers, information - providers, and users of the security limitations in HTTP/1.1 as - described by this document. The discussion does not include - definitive solutions to the problems revealed, though it does make - some suggestions for reducing security risks. - - - -Fielding, et al. Standards Track [Page 150] - -RFC 2616 HTTP/1.1 June 1999 - - -15.1 Personal Information - - HTTP clients are often privy to large amounts of personal information - (e.g. the user's name, location, mail address, passwords, encryption - keys, etc.), and SHOULD be very careful to prevent unintentional - leakage of this information via the HTTP protocol to other sources. - We very strongly recommend that a convenient interface be provided - for the user to control dissemination of such information, and that - designers and implementors be particularly careful in this area. - History shows that errors in this area often create serious security - and/or privacy problems and generate highly adverse publicity for the - implementor's company. - -15.1.1 Abuse of Server Log Information - - A server is in the position to save personal data about a user's - requests which might identify their reading patterns or subjects of - interest. This information is clearly confidential in nature and its - handling can be constrained by law in certain countries. People using - the HTTP protocol to provide data are responsible for ensuring that - such material is not distributed without the permission of any - individuals that are identifiable by the published results. 
- -15.1.2 Transfer of Sensitive Information - - Like any generic data transfer protocol, HTTP cannot regulate the - content of the data that is transferred, nor is there any a priori - method of determining the sensitivity of any particular piece of - information within the context of any given request. Therefore, - applications SHOULD supply as much control over this information as - possible to the provider of that information. Four header fields are - worth special mention in this context: Server, Via, Referer and From. - - Revealing the specific software version of the server might allow the - server machine to become more vulnerable to attacks against software - that is known to contain security holes. Implementors SHOULD make the - Server header field a configurable option. - - Proxies which serve as a portal through a network firewall SHOULD - take special precautions regarding the transfer of header information - that identifies the hosts behind the firewall. In particular, they - SHOULD remove, or replace with sanitized versions, any Via fields - generated behind the firewall. - - The Referer header allows reading patterns to be studied and reverse - links drawn. Although it can be very useful, its power can be abused - if user details are not separated from the information contained in - - - - -Fielding, et al. Standards Track [Page 151] - -RFC 2616 HTTP/1.1 June 1999 - - - the Referer. Even when the personal information has been removed, the - Referer header might indicate a private document's URI whose - publication would be inappropriate. - - The information sent in the From field might conflict with the user's - privacy interests or their site's security policy, and hence it - SHOULD NOT be transmitted without the user being able to disable, - enable, and modify the contents of the field. The user MUST be able - to set the contents of this field within a user preference or - application defaults configuration. 
- - We suggest, though do not require, that a convenient toggle interface - be provided for the user to enable or disable the sending of From and - Referer information. - - The User-Agent (section 14.43) or Server (section 14.38) header - fields can sometimes be used to determine that a specific client or - server have a particular security hole which might be exploited. - Unfortunately, this same information is often used for other valuable - purposes for which HTTP currently has no better mechanism. - -15.1.3 Encoding Sensitive Information in URI's - - Because the source of a link might be private information or might - reveal an otherwise private information source, it is strongly - recommended that the user be able to select whether or not the - Referer field is sent. For example, a browser client could have a - toggle switch for browsing openly/anonymously, which would - respectively enable/disable the sending of Referer and From - information. - - Clients SHOULD NOT include a Referer header field in a (non-secure) - HTTP request if the referring page was transferred with a secure - protocol. - - Authors of services which use the HTTP protocol SHOULD NOT use GET - based forms for the submission of sensitive data, because this will - cause this data to be encoded in the Request-URI. Many existing - servers, proxies, and user agents will log the request URI in some - place where it might be visible to third parties. Servers can use - POST-based form submission instead. - -15.1.4 Privacy Issues Connected to Accept Headers - - Accept request-headers can reveal information about the user to all - servers which are accessed. The Accept-Language header in particular - can reveal information the user would consider to be of a private - nature, because the understanding of particular languages is often - - - -Fielding, et al. Standards Track [Page 152] - -RFC 2616 HTTP/1.1 June 1999 - - - strongly correlated to the membership of a particular ethnic group. 
- User agents which offer the option to configure the contents of an - Accept-Language header to be sent in every request are strongly - encouraged to let the configuration process include a message which - makes the user aware of the loss of privacy involved. - - An approach that limits the loss of privacy would be for a user agent - to omit the sending of Accept-Language headers by default, and to ask - the user whether or not to start sending Accept-Language headers to a - server if it detects, by looking for any Vary response-header fields - generated by the server, that such sending could improve the quality - of service. - - Elaborate user-customized accept header fields sent in every request, - in particular if these include quality values, can be used by servers - as relatively reliable and long-lived user identifiers. Such user - identifiers would allow content providers to do click-trail tracking, - and would allow collaborating content providers to match cross-server - click-trails or form submissions of individual users. Note that for - many users not behind a proxy, the network address of the host - running the user agent will also serve as a long-lived user - identifier. In environments where proxies are used to enhance - privacy, user agents ought to be conservative in offering accept - header configuration options to end users. As an extreme privacy - measure, proxies could filter the accept headers in relayed requests. - General purpose user agents which provide a high degree of header - configurability SHOULD warn users about the loss of privacy which can - be involved. - -15.2 Attacks Based On File and Path Names - - Implementations of HTTP origin servers SHOULD be careful to restrict - the documents returned by HTTP requests to be only those that were - intended by the server administrators. 
If an HTTP server translates - HTTP URIs directly into file system calls, the server MUST take - special care not to serve files that were not intended to be - delivered to HTTP clients. For example, UNIX, Microsoft Windows, and - other operating systems use ".." as a path component to indicate a - directory level above the current one. On such a system, an HTTP - server MUST disallow any such construct in the Request-URI if it - would otherwise allow access to a resource outside those intended to - be accessible via the HTTP server. Similarly, files intended for - reference only internally to the server (such as access control - files, configuration files, and script code) MUST be protected from - inappropriate retrieval, since they might contain sensitive - information. Experience has shown that minor bugs in such HTTP server - implementations have turned into security risks. - - - - -Fielding, et al. Standards Track [Page 153] - -RFC 2616 HTTP/1.1 June 1999 - - -15.3 DNS Spoofing - - Clients using HTTP rely heavily on the Domain Name Service, and are - thus generally prone to security attacks based on the deliberate - mis-association of IP addresses and DNS names. Clients need to be - cautious in assuming the continuing validity of an IP number/DNS name - association. - - In particular, HTTP clients SHOULD rely on their name resolver for - confirmation of an IP number/DNS name association, rather than - caching the result of previous host name lookups. Many platforms - already can cache host name lookups locally when appropriate, and - they SHOULD be configured to do so. It is proper for these lookups to - be cached, however, only when the TTL (Time To Live) information - reported by the name server makes it likely that the cached - information will remain useful. - - If HTTP clients cache the results of host name lookups in order to - achieve a performance improvement, they MUST observe the TTL - information reported by DNS. 
- - If HTTP clients do not observe this rule, they could be spoofed when - a previously-accessed server's IP address changes. As network - renumbering is expected to become increasingly common [24], the - possibility of this form of attack will grow. Observing this - requirement thus reduces this potential security vulnerability. - - This requirement also improves the load-balancing behavior of clients - for replicated servers using the same DNS name and reduces the - likelihood of a user's experiencing failure in accessing sites which - use that strategy. - -15.4 Location Headers and Spoofing - - If a single server supports multiple organizations that do not trust - one another, then it MUST check the values of Location and Content- - Location headers in responses that are generated under control of - said organizations to make sure that they do not attempt to - invalidate resources over which they have no authority. - -15.5 Content-Disposition Issues - - RFC 1806 [35], from which the often implemented Content-Disposition - (see section 19.5.1) header in HTTP is derived, has a number of very - serious security considerations. Content-Disposition is not part of - the HTTP standard, but since it is widely implemented, we are - documenting its use and risks for implementors. See RFC 2183 [49] - (which updates RFC 1806) for details. - - - -Fielding, et al. Standards Track [Page 154] - -RFC 2616 HTTP/1.1 June 1999 - - -15.6 Authentication Credentials and Idle Clients - - Existing HTTP clients and user agents typically retain authentication - information indefinitely. HTTP/1.1. does not provide a method for a - server to direct clients to discard these cached credentials. This is - a significant defect that requires further extensions to HTTP. 
- Circumstances under which credential caching can interfere with the - application's security model include but are not limited to: - - - Clients which have been idle for an extended period following - which the server might wish to cause the client to reprompt the - user for credentials. - - - Applications which include a session termination indication - (such as a `logout' or `commit' button on a page) after which - the server side of the application `knows' that there is no - further reason for the client to retain the credentials. - - This is currently under separate study. There are a number of work- - arounds to parts of this problem, and we encourage the use of - password protection in screen savers, idle time-outs, and other - methods which mitigate the security problems inherent in this - problem. In particular, user agents which cache credentials are - encouraged to provide a readily accessible mechanism for discarding - cached credentials under user control. - -15.7 Proxies and Caching - - By their very nature, HTTP proxies are men-in-the-middle, and - represent an opportunity for man-in-the-middle attacks. Compromise of - the systems on which the proxies run can result in serious security - and privacy problems. Proxies have access to security-related - information, personal information about individual users and - organizations, and proprietary information belonging to users and - content providers. A compromised proxy, or a proxy implemented or - configured without regard to security and privacy considerations, - might be used in the commission of a wide range of potential attacks. - - Proxy operators should protect the systems on which proxies run as - they would protect any system that contains or transports sensitive - information. In particular, log information gathered at proxies often - contains highly sensitive personal information, and/or information - about organizations. 
Log information should be carefully guarded, and - appropriate guidelines for use developed and followed. (Section - 15.1.1). - - - - - - -Fielding, et al. Standards Track [Page 155] - -RFC 2616 HTTP/1.1 June 1999 - - - Caching proxies provide additional potential vulnerabilities, since - the contents of the cache represent an attractive target for - malicious exploitation. Because cache contents persist after an HTTP - request is complete, an attack on the cache can reveal information - long after a user believes that the information has been removed from - the network. Therefore, cache contents should be protected as - sensitive information. - - Proxy implementors should consider the privacy and security - implications of their design and coding decisions, and of the - configuration options they provide to proxy operators (especially the - default configuration). - - Users of a proxy need to be aware that they are no trustworthier than - the people who run the proxy; HTTP itself cannot solve this problem. - - The judicious use of cryptography, when appropriate, may suffice to - protect against a broad range of security and privacy attacks. Such - cryptography is beyond the scope of the HTTP/1.1 specification. - -15.7.1 Denial of Service Attacks on Proxies - - They exist. They are hard to defend against. Research continues. - Beware. - -16 Acknowledgments - - This specification makes heavy use of the augmented BNF and generic - constructs defined by David H. Crocker for RFC 822 [9]. Similarly, it - reuses many of the definitions provided by Nathaniel Borenstein and - Ned Freed for MIME [7]. We hope that their inclusion in this - specification will help reduce past confusion over the relationship - between HTTP and Internet mail message formats. - - The HTTP protocol has evolved considerably over the years. 
It has - benefited from a large and active developer community--the many - people who have participated on the www-talk mailing list--and it is - that community which has been most responsible for the success of - HTTP and of the World-Wide Web in general. Marc Andreessen, Robert - Cailliau, Daniel W. Connolly, Bob Denny, John Franks, Jean-Francois - Groff, Phillip M. Hallam-Baker, Hakon W. Lie, Ari Luotonen, Rob - McCool, Lou Montulli, Dave Raggett, Tony Sanders, and Marc - VanHeyningen deserve special recognition for their efforts in - defining early aspects of the protocol. - - This document has benefited greatly from the comments of all those - participating in the HTTP-WG. In addition to those already mentioned, - the following individuals have contributed to this specification: - - - -Fielding, et al. Standards Track [Page 156] - -RFC 2616 HTTP/1.1 June 1999 - - - Gary Adams Ross Patterson - Harald Tveit Alvestrand Albert Lunde - Keith Ball John C. Mallery - Brian Behlendorf Jean-Philippe Martin-Flatin - Paul Burchard Mitra - Maurizio Codogno David Morris - Mike Cowlishaw Gavin Nicol - Roman Czyborra Bill Perry - Michael A. Dolan Jeffrey Perry - David J. Fiander Scott Powers - Alan Freier Owen Rees - Marc Hedlund Luigi Rizzo - Greg Herlihy David Robinson - Koen Holtman Marc Salomon - Alex Hopmann Rich Salz - Bob Jernigan Allan M. Schiffman - Shel Kaphan Jim Seidman - Rohit Khare Chuck Shotton - John Klensin Eric W. Sink - Martijn Koster Simon E. Spero - Alexei Kosut Richard N. Taylor - David M. Kristol Robert S. Thau - Daniel LaLiberte Bill (BearHeart) Weinman - Ben Laurie Francois Yergeau - Paul J. Leach Mary Ellen Zurko - Daniel DuBois Josh Cohen - - - Much of the content and presentation of the caching design is due to - suggestions and comments from individuals including: Shel Kaphan, - Paul Leach, Koen Holtman, David Morris, and Larry Masinter. 
- - Most of the specification of ranges is based on work originally done - by Ari Luotonen and John Franks, with additional input from Steve - Zilles. - - Thanks to the "cave men" of Palo Alto. You know who you are. - - Jim Gettys (the current editor of this document) wishes particularly - to thank Roy Fielding, the previous editor of this document, along - with John Klensin, Jeff Mogul, Paul Leach, Dave Kristol, Koen - Holtman, John Franks, Josh Cohen, Alex Hopmann, Scott Lawrence, and - Larry Masinter for their help. And thanks go particularly to Jeff - Mogul and Scott Lawrence for performing the "MUST/MAY/SHOULD" audit. - - - - - - - -Fielding, et al. Standards Track [Page 157] - -RFC 2616 HTTP/1.1 June 1999 - - - The Apache Group, Anselm Baird-Smith, author of Jigsaw, and Henrik - Frystyk implemented RFC 2068 early, and we wish to thank them for the - discovery of many of the problems that this document attempts to - rectify. - -17 References - - [1] Alvestrand, H., "Tags for the Identification of Languages", RFC - 1766, March 1995. - - [2] Anklesaria, F., McCahill, M., Lindner, P., Johnson, D., Torrey, - D. and B. Alberti, "The Internet Gopher Protocol (a distributed - document search and retrieval protocol)", RFC 1436, March 1993. - - [3] Berners-Lee, T., "Universal Resource Identifiers in WWW", RFC - 1630, June 1994. - - [4] Berners-Lee, T., Masinter, L. and M. McCahill, "Uniform Resource - Locators (URL)", RFC 1738, December 1994. - - [5] Berners-Lee, T. and D. Connolly, "Hypertext Markup Language - - 2.0", RFC 1866, November 1995. - - [6] Berners-Lee, T., Fielding, R. and H. Frystyk, "Hypertext Transfer - Protocol -- HTTP/1.0", RFC 1945, May 1996. - - [7] Freed, N. and N. Borenstein, "Multipurpose Internet Mail - Extensions (MIME) Part One: Format of Internet Message Bodies", - RFC 2045, November 1996. - - [8] Braden, R., "Requirements for Internet Hosts -- Communication - Layers", STD 3, RFC 1123, October 1989. 
- - [9] Crocker, D., "Standard for The Format of ARPA Internet Text - Messages", STD 11, RFC 822, August 1982. - - [10] Davis, F., Kahle, B., Morris, H., Salem, J., Shen, T., Wang, R., - Sui, J., and M. Grinbaum, "WAIS Interface Protocol Prototype - Functional Specification," (v1.5), Thinking Machines - Corporation, April 1990. - - [11] Fielding, R., "Relative Uniform Resource Locators", RFC 1808, - June 1995. - - [12] Horton, M. and R. Adams, "Standard for Interchange of USENET - Messages", RFC 1036, December 1987. - - - - - -Fielding, et al. Standards Track [Page 158] - -RFC 2616 HTTP/1.1 June 1999 - - - [13] Kantor, B. and P. Lapsley, "Network News Transfer Protocol", RFC - 977, February 1986. - - [14] Moore, K., "MIME (Multipurpose Internet Mail Extensions) Part - Three: Message Header Extensions for Non-ASCII Text", RFC 2047, - November 1996. - - [15] Nebel, E. and L. Masinter, "Form-based File Upload in HTML", RFC - 1867, November 1995. - - [16] Postel, J., "Simple Mail Transfer Protocol", STD 10, RFC 821, - August 1982. - - [17] Postel, J., "Media Type Registration Procedure", RFC 1590, - November 1996. - -[[ Should be: ]] -[[ [17] Freed, N., Klensin, J., and Postel, J., "Multipurpose Internet ]] -[[ Mail Extensions (MIME) Part Four: "Registration Procedure", ]] -[[ RFC 2048, November 1996. ]] - - [18] Postel, J. and J. Reynolds, "File Transfer Protocol", STD 9, RFC - 959, October 1985. - - [19] Reynolds, J. and J. Postel, "Assigned Numbers", STD 2, RFC 1700, - October 1994. - - [20] Sollins, K. and L. Masinter, "Functional Requirements for - Uniform Resource Names", RFC 1737, December 1994. - - [21] US-ASCII. Coded Character Set - 7-Bit American Standard Code for - Information Interchange. Standard ANSI X3.4-1986, ANSI, 1986. - - [22] ISO-8859. International Standard -- Information Processing -- - 8-bit Single-Byte Coded Graphic Character Sets -- - Part 1: Latin alphabet No. 1, ISO-8859-1:1987. - Part 2: Latin alphabet No. 2, ISO-8859-2, 1987. 
- Part 3: Latin alphabet No. 3, ISO-8859-3, 1988. - Part 4: Latin alphabet No. 4, ISO-8859-4, 1988. - Part 5: Latin/Cyrillic alphabet, ISO-8859-5, 1988. - Part 6: Latin/Arabic alphabet, ISO-8859-6, 1987. - Part 7: Latin/Greek alphabet, ISO-8859-7, 1987. - Part 8: Latin/Hebrew alphabet, ISO-8859-8, 1988. - Part 9: Latin alphabet No. 5, ISO-8859-9, 1990. - - [23] Meyers, J. and M. Rose, "The Content-MD5 Header Field", RFC - 1864, October 1995. - - [24] Carpenter, B. and Y. Rekhter, "Renumbering Needs Work", RFC - 1900, February 1996. - - [25] Deutsch, P., "GZIP file format specification version 4.3", RFC - 1952, May 1996. - - - -Fielding, et al. Standards Track [Page 159] - -RFC 2616 HTTP/1.1 June 1999 - - - [26] Venkata N. Padmanabhan, and Jeffrey C. Mogul. "Improving HTTP - Latency", Computer Networks and ISDN Systems, v. 28, pp. 25-35, - Dec. 1995. Slightly revised version of paper in Proc. 2nd - International WWW Conference '94: Mosaic and the Web, Oct. 1994, - which is available at - http://www.ncsa.uiuc.edu/SDG/IT94/Proceedings/DDay/mogul/HTTPLat - ency.html. - - [27] Joe Touch, John Heidemann, and Katia Obraczka. "Analysis of HTTP - Performance", , - ISI Research Report ISI/RR-98-463, (original report dated Aug. - 1996), USC/Information Sciences Institute, August 1998. - - [28] Mills, D., "Network Time Protocol (Version 3) Specification, - Implementation and Analysis", RFC 1305, March 1992. - - [29] Deutsch, P., "DEFLATE Compressed Data Format Specification - version 1.3", RFC 1951, May 1996. - - [30] S. Spero, "Analysis of HTTP Performance Problems," - http://sunsite.unc.edu/mdma-release/http-prob.html. - - [31] Deutsch, P. and J. Gailly, "ZLIB Compressed Data Format - Specification version 3.3", RFC 1950, May 1996. - - [32] Franks, J., Hallam-Baker, P., Hostetler, J., Leach, P., - Luotonen, A., Sink, E. and L. Stewart, "An Extension to HTTP: - Digest Access Authentication", RFC 2069, January 1997. - - [33] Fielding, R., Gettys, J., Mogul, J., Frystyk, H. 
and T. - Berners-Lee, "Hypertext Transfer Protocol -- HTTP/1.1", RFC - 2068, January 1997. - - [34] Bradner, S., "Key words for use in RFCs to Indicate Requirement - Levels", BCP 14, RFC 2119, March 1997. - - [35] Troost, R. and Dorner, S., "Communicating Presentation - Information in Internet Messages: The Content-Disposition - Header", RFC 1806, June 1995. - - [36] Mogul, J., Fielding, R., Gettys, J. and H. Frystyk, "Use and - Interpretation of HTTP Version Numbers", RFC 2145, May 1997. - [jg639] - - [37] Palme, J., "Common Internet Message Headers", RFC 2076, February - 1997. [jg640] - - - - - -Fielding, et al. Standards Track [Page 160] - -RFC 2616 HTTP/1.1 June 1999 - - - [38] Yergeau, F., "UTF-8, a transformation format of Unicode and - ISO-10646", RFC 2279, January 1998. [jg641] - - [39] Nielsen, H.F., Gettys, J., Baird-Smith, A., Prud'hommeaux, E., - Lie, H., and C. Lilley. "Network Performance Effects of - HTTP/1.1, CSS1, and PNG," Proceedings of ACM SIGCOMM '97, Cannes - France, September 1997.[jg642] - - [40] Freed, N. and N. Borenstein, "Multipurpose Internet Mail - Extensions (MIME) Part Two: Media Types", RFC 2046, November - 1996. [jg643] - - [41] Alvestrand, H., "IETF Policy on Character Sets and Languages", - BCP 18, RFC 2277, January 1998. [jg644] - - [42] Berners-Lee, T., Fielding, R. and L. Masinter, "Uniform Resource - Identifiers (URI): Generic Syntax and Semantics", RFC 2396, - August 1998. [jg645] - - [43] Franks, J., Hallam-Baker, P., Hostetler, J., Lawrence, S., - Leach, P., Luotonen, A., Sink, E. and L. Stewart, "HTTP - Authentication: Basic and Digest Access Authentication", RFC - 2617, June 1999. [jg646] - - [44] Luotonen, A., "Tunneling TCP based protocols through Web proxy - servers," Work in Progress. [jg647] - - [45] Palme, J. and A. Hopmann, "MIME E-mail Encapsulation of - Aggregate Documents, such as HTML (MHTML)", RFC 2110, March - 1997. 
- - [46] Bradner, S., "The Internet Standards Process -- Revision 3", BCP - 9, RFC 2026, October 1996. - - [47] Masinter, L., "Hyper Text Coffee Pot Control Protocol - (HTCPCP/1.0)", RFC 2324, 1 April 1998. - - [48] Freed, N. and N. Borenstein, "Multipurpose Internet Mail - Extensions (MIME) Part Five: Conformance Criteria and Examples", - RFC 2049, November 1996. - - [49] Troost, R., Dorner, S. and K. Moore, "Communicating Presentation - Information in Internet Messages: The Content-Disposition Header - Field", RFC 2183, August 1997. - - - - - - - -Fielding, et al. Standards Track [Page 161] - -RFC 2616 HTTP/1.1 June 1999 - - -18 Authors' Addresses - - Roy T. Fielding - Information and Computer Science - University of California, Irvine - Irvine, CA 92697-3425, USA - - Fax: +1 (949) 824-1715 - EMail: fielding@ics.uci.edu - - - James Gettys - World Wide Web Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - - Fax: +1 (617) 258 8682 - EMail: jg@w3.org - - - Jeffrey C. Mogul - Western Research Laboratory - Compaq Computer Corporation - 250 University Avenue - Palo Alto, California, 94305, USA - - EMail: mogul@wrl.dec.com - - - Henrik Frystyk Nielsen - World Wide Web Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - - Fax: +1 (617) 258 8682 - EMail: frystyk@w3.org - - - Larry Masinter - Xerox Corporation - 3333 Coyote Hill Road - Palo Alto, CA 94034, USA - - EMail: masinter@parc.xerox.com - - - - - -Fielding, et al. Standards Track [Page 162] - -RFC 2616 HTTP/1.1 June 1999 - - - Paul J. 
Leach - Microsoft Corporation - 1 Microsoft Way - Redmond, WA 98052, USA - - EMail: paulle@microsoft.com - - - Tim Berners-Lee - Director, World Wide Web Consortium - MIT Laboratory for Computer Science - 545 Technology Square - Cambridge, MA 02139, USA - - Fax: +1 (617) 258 8682 - EMail: timbl@w3.org - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Fielding, et al. Standards Track [Page 163] - -RFC 2616 HTTP/1.1 June 1999 - - -19 Appendices - -19.1 Internet Media Type message/http and application/http - - In addition to defining the HTTP/1.1 protocol, this document serves - as the specification for the Internet media type "message/http" and - "application/http". The message/http type can be used to enclose a - single HTTP request or response message, provided that it obeys the - MIME restrictions for all "message" types regarding line length and - encodings. The application/http type can be used to enclose a - pipeline of one or more HTTP request or response messages (not - intermixed). The following is to be registered with IANA [17]. - - Media Type name: message - Media subtype name: http - Required parameters: none - Optional parameters: version, msgtype - version: The HTTP-Version number of the enclosed message - (e.g., "1.1"). If not present, the version can be - determined from the first line of the body. - msgtype: The message type -- "request" or "response". If not - present, the type can be determined from the first - line of the body. - Encoding considerations: only "7bit", "8bit", or "binary" are - permitted - Security considerations: none - - Media Type name: application - Media subtype name: http - Required parameters: none - Optional parameters: version, msgtype - version: The HTTP-Version number of the enclosed messages - (e.g., "1.1"). If not present, the version can be - determined from the first line of the body. - msgtype: The message type -- "request" or "response". 
If not - present, the type can be determined from the first - line of the body. - Encoding considerations: HTTP messages enclosed by this type - are in "binary" format; use of an appropriate - Content-Transfer-Encoding is required when - transmitted via E-mail. - Security considerations: none - - - - - - - - - -Fielding, et al. Standards Track [Page 164] - -RFC 2616 HTTP/1.1 June 1999 - - -19.2 Internet Media Type multipart/byteranges - - When an HTTP 206 (Partial Content) response message includes the - content of multiple ranges (a response to a request for multiple - non-overlapping ranges), these are transmitted as a multipart - message-body. The media type for this purpose is called - "multipart/byteranges". - - The multipart/byteranges media type includes two or more parts, each - with its own Content-Type and Content-Range fields. The required - boundary parameter specifies the boundary string used to separate - each body-part. - - Media Type name: multipart - Media subtype name: byteranges - Required parameters: boundary - Optional parameters: none - Encoding considerations: only "7bit", "8bit", or "binary" are - permitted - Security considerations: none - - - For example: - - HTTP/1.1 206 Partial Content - Date: Wed, 15 Nov 1995 06:25:24 GMT - Last-Modified: Wed, 15 Nov 1995 04:58:08 GMT - Content-type: multipart/byteranges; boundary=THIS_STRING_SEPARATES - - --THIS_STRING_SEPARATES - Content-type: application/pdf - Content-range: bytes 500-999/8000 - - ...the first range... - --THIS_STRING_SEPARATES - Content-type: application/pdf - Content-range: bytes 7000-7999/8000 - - ...the second range - --THIS_STRING_SEPARATES-- - - Notes: - - 1) Additional CRLFs may precede the first boundary string in the - entity. - - - - - - -Fielding, et al. Standards Track [Page 165] - -RFC 2616 HTTP/1.1 June 1999 - - - 2) Although RFC 2046 [40] permits the boundary string to be - quoted, some existing implementations handle a quoted boundary - string incorrectly. 
- - 3) A number of browsers and servers were coded to an early draft - of the byteranges specification to use a media type of - multipart/x-byteranges, which is almost, but not quite - compatible with the version documented in HTTP/1.1. - -19.3 Tolerant Applications - - Although this document specifies the requirements for the generation - of HTTP/1.1 messages, not all applications will be correct in their - implementation. We therefore recommend that operational applications - be tolerant of deviations whenever those deviations can be - interpreted unambiguously. - - Clients SHOULD be tolerant in parsing the Status-Line and servers - tolerant when parsing the Request-Line. In particular, they SHOULD - accept any amount of SP or HT characters between fields, even though - only a single SP is required. - - The line terminator for message-header fields is the sequence CRLF. - However, we recommend that applications, when parsing such headers, - recognize a single LF as a line terminator and ignore the leading CR. - - The character set of an entity-body SHOULD be labeled as the lowest - common denominator of the character codes used within that body, with - the exception that not labeling the entity is preferred over labeling - the entity with the labels US-ASCII or ISO-8859-1. See section 3.7.1 - and 3.4.1. - - Additional rules for requirements on parsing and encoding of dates - and other potential problems with date encodings include: - - - HTTP/1.1 clients and caches SHOULD assume that an RFC-850 date - which appears to be more than 50 years in the future is in fact - in the past (this helps solve the "year 2000" problem). - - - An HTTP/1.1 implementation MAY internally represent a parsed - Expires date as earlier than the proper value, but MUST NOT - internally represent a parsed Expires date as later than the - proper value. - - - All expiration-related calculations MUST be done in GMT. 
The - local time zone MUST NOT influence the calculation or comparison - of an age or expiration time. - - - - -Fielding, et al. Standards Track [Page 166] - -RFC 2616 HTTP/1.1 June 1999 - - - - If an HTTP header incorrectly carries a date value with a time - zone other than GMT, it MUST be converted into GMT using the - most conservative possible conversion. - -19.4 Differences Between HTTP Entities and RFC 2045 Entities - - HTTP/1.1 uses many of the constructs defined for Internet Mail (RFC - 822 [9]) and the Multipurpose Internet Mail Extensions (MIME [7]) to - allow entities to be transmitted in an open variety of - representations and with extensible mechanisms. However, RFC 2045 - discusses mail, and HTTP has a few features that are different from - those described in RFC 2045. These differences were carefully chosen - to optimize performance over binary connections, to allow greater - freedom in the use of new media types, to make date comparisons - easier, and to acknowledge the practice of some early HTTP servers - and clients. - - This appendix describes specific areas where HTTP differs from RFC - 2045. Proxies and gateways to strict MIME environments SHOULD be - aware of these differences and provide the appropriate conversions - where necessary. Proxies and gateways from MIME environments to HTTP - also need to be aware of the differences because some conversions - might be required. - -19.4.1 MIME-Version - - HTTP is not a MIME-compliant protocol. However, HTTP/1.1 messages MAY - include a single MIME-Version general-header field to indicate what - version of the MIME protocol was used to construct the message. Use - of the MIME-Version header field indicates that the message is in - full compliance with the MIME protocol (as defined in RFC 2045[7]). - Proxies/gateways are responsible for ensuring full compliance (where - possible) when exporting HTTP messages to strict MIME environments. - - MIME-Version = "MIME-Version" ":" 1*DIGIT "." 
1*DIGIT - - MIME version "1.0" is the default for use in HTTP/1.1. However, - HTTP/1.1 message parsing and semantics are defined by this document - and not the MIME specification. - -19.4.2 Conversion to Canonical Form - - RFC 2045 [7] requires that an Internet mail entity be converted to - canonical form prior to being transferred, as described in section 4 - of RFC 2049 [48]. Section 3.7.1 of this document describes the forms - allowed for subtypes of the "text" media type when transmitted over - HTTP. RFC 2046 requires that content with a type of "text" represent - line breaks as CRLF and forbids the use of CR or LF outside of line - - - -Fielding, et al. Standards Track [Page 167] - -RFC 2616 HTTP/1.1 June 1999 - - - break sequences. HTTP allows CRLF, bare CR, and bare LF to indicate a - line break within text content when a message is transmitted over - HTTP. - - Where it is possible, a proxy or gateway from HTTP to a strict MIME - environment SHOULD translate all line breaks within the text media - types described in section 3.7.1 of this document to the RFC 2049 - canonical form of CRLF. Note, however, that this might be complicated - by the presence of a Content-Encoding and by the fact that HTTP - allows the use of some character sets which do not use octets 13 and - 10 to represent CR and LF, as is the case for some multi-byte - character sets. - - Implementors should note that conversion will break any cryptographic - checksums applied to the original content unless the original content - is already in canonical form. Therefore, the canonical form is - recommended for any content that uses such checksums in HTTP. - -19.4.3 Conversion of Date Formats - - HTTP/1.1 uses a restricted set of date formats (section 3.3.1) to - simplify the process of date comparison. Proxies and gateways from - other protocols SHOULD ensure that any Date header field present in a - message conforms to one of the HTTP/1.1 formats and rewrite the date - if necessary. 
- -19.4.4 Introduction of Content-Encoding - - RFC 2045 does not include any concept equivalent to HTTP/1.1's - Content-Encoding header field. Since this acts as a modifier on the - media type, proxies and gateways from HTTP to MIME-compliant - protocols MUST either change the value of the Content-Type header - field or decode the entity-body before forwarding the message. (Some - experimental applications of Content-Type for Internet mail have used - a media-type parameter of ";conversions=" to perform - a function equivalent to Content-Encoding. However, this parameter is - not part of RFC 2045.) - -19.4.5 No Content-Transfer-Encoding - - HTTP does not use the Content-Transfer-Encoding (CTE) field of RFC - 2045. Proxies and gateways from MIME-compliant protocols to HTTP MUST - remove any non-identity CTE ("quoted-printable" or "base64") encoding - prior to delivering the response message to an HTTP client. - - [[ "MUST remove any CTE encoding prior to delivering the response ]] - [[ message to an HTTP client." ]] - - Proxies and gateways from HTTP to MIME-compliant protocols are - responsible for ensuring that the message is in the correct format - and encoding for safe transport on that protocol, where "safe - - - -Fielding, et al. Standards Track [Page 168] - -RFC 2616 HTTP/1.1 June 1999 - - - transport" is defined by the limitations of the protocol being used. - Such a proxy or gateway SHOULD label the data with an appropriate - Content-Transfer-Encoding if doing so will improve the likelihood of - safe transport over the destination protocol. - -19.4.6 Introduction of Transfer-Encoding - - HTTP/1.1 introduces the Transfer-Encoding header field (section - 14.41). Proxies/gateways MUST remove any transfer-coding prior to - forwarding a message via a MIME-compliant protocol. 
- - A process for decoding the "chunked" transfer-coding (section 3.6) - can be represented in pseudo-code as: - - length := 0 - read chunk-size, chunk-extension (if any) and CRLF - while (chunk-size > 0) { - read chunk-data and CRLF - append chunk-data to entity-body - length := length + chunk-size - read chunk-size and CRLF - } - read entity-header - while (entity-header not empty) { - append entity-header to existing header fields - read entity-header - } - Content-Length := length - Remove "chunked" from Transfer-Encoding - -19.4.7 MHTML and Line Length Limitations - - HTTP implementations which share code with MHTML [45] implementations - need to be aware of MIME line length limitations. Since HTTP does not - have this limitation, HTTP does not fold long lines. MHTML messages - being transported by HTTP follow all conventions of MHTML, including - line length limitations and folding, canonicalization, etc., since - HTTP transports all message-bodies as payload (see section 3.7.2) and - does not interpret the content or any MIME header lines that might be - contained therein. - -19.5 Additional Features - - RFC 1945 and RFC 2068 document protocol elements used by some - existing HTTP implementations, but not consistently and correctly - across most HTTP/1.1 applications. Implementors are advised to be - aware of these features, but cannot rely upon their presence in, or - interoperability with, other HTTP/1.1 applications. Some of these - - - -Fielding, et al. Standards Track [Page 169] - -RFC 2616 HTTP/1.1 June 1999 - - - describe proposed experimental features, and some describe features - that experimental deployment found lacking that are now addressed in - the base HTTP/1.1 specification. - - A number of other headers, such as Content-Disposition and Title, - from SMTP and MIME are also often implemented (see RFC 2076 [37]). 
- -19.5.1 Content-Disposition - - The Content-Disposition response-header field has been proposed as a - means for the origin server to suggest a default filename if the user - requests that the content is saved to a file. This usage is derived - from the definition of Content-Disposition in RFC 1806 [35]. - - content-disposition = "Content-Disposition" ":" - disposition-type *( ";" disposition-parm ) - disposition-type = "attachment" | disp-extension-token - disposition-parm = filename-parm | disp-extension-parm - filename-parm = "filename" "=" quoted-string - disp-extension-token = token - disp-extension-parm = token "=" ( token | quoted-string ) - - An example is - - Content-Disposition: attachment; filename="fname.ext" - - The receiving user agent SHOULD NOT respect any directory path - information present in the filename-parm parameter, which is the only - parameter believed to apply to HTTP implementations at this time. The - filename SHOULD be treated as a terminal component only. - - If this header is used in a response with the application/octet- - stream content-type, the implied suggestion is that the user agent - should not display the response, but directly enter a `save response - as...' dialog. - - See section 15.5 for Content-Disposition security issues. - -19.6 Compatibility with Previous Versions - - It is beyond the scope of a protocol specification to mandate - compliance with previous versions. HTTP/1.1 was deliberately - designed, however, to make supporting previous versions easy. It is - worth noting that, at the time of composing this specification - (1996), we would expect commercial HTTP/1.1 servers to: - - - recognize the format of the Request-Line for HTTP/0.9, 1.0, and - 1.1 requests; - - - -Fielding, et al. Standards Track [Page 170] - -RFC 2616 HTTP/1.1 June 1999 - - - - understand any valid request in the format of HTTP/0.9, 1.0, or - 1.1; - - - respond appropriately with a message in the same major version - used by the client. 
- - And we would expect HTTP/1.1 clients to: - - - recognize the format of the Status-Line for HTTP/1.0 and 1.1 - responses; - - - understand any valid response in the format of HTTP/0.9, 1.0, or - 1.1. - - For most implementations of HTTP/1.0, each connection is established - by the client prior to the request and closed by the server after - sending the response. Some implementations implement the Keep-Alive - version of persistent connections described in section 19.7.1 of RFC - 2068 [33]. - -19.6.1 Changes from HTTP/1.0 - - This section summarizes major differences between versions HTTP/1.0 - and HTTP/1.1. - -19.6.1.1 Changes to Simplify Multi-homed Web Servers and Conserve IP - Addresses - - The requirements that clients and servers support the Host request- - header, report an error if the Host request-header (section 14.23) is - missing from an HTTP/1.1 request, and accept absolute URIs (section - 5.1.2) are among the most important changes defined by this - specification. - - Older HTTP/1.0 clients assumed a one-to-one relationship of IP - addresses and servers; there was no other established mechanism for - distinguishing the intended server of a request than the IP address - to which that request was directed. The changes outlined above will - allow the Internet, once older HTTP clients are no longer common, to - support multiple Web sites from a single IP address, greatly - simplifying large operational Web servers, where allocation of many - IP addresses to a single host has created serious problems. The - Internet will also be able to recover the IP addresses that have been - allocated for the sole purpose of allowing special-purpose domain - names to be used in root-level HTTP URLs. Given the rate of growth of - the Web, and the number of servers already deployed, it is extremely - - - - - -Fielding, et al. 
Standards Track [Page 171] - -RFC 2616 HTTP/1.1 June 1999 - - - important that all implementations of HTTP (including updates to - existing HTTP/1.0 applications) correctly implement these - requirements: - - - Both clients and servers MUST support the Host request-header. - - - A client that sends an HTTP/1.1 request MUST send a Host header. - - - Servers MUST report a 400 (Bad Request) error if an HTTP/1.1 - request does not include a Host request-header. - - - Servers MUST accept absolute URIs. - -19.6.2 Compatibility with HTTP/1.0 Persistent Connections - - Some clients and servers might wish to be compatible with some - previous implementations of persistent connections in HTTP/1.0 - clients and servers. Persistent connections in HTTP/1.0 are - explicitly negotiated as they are not the default behavior. HTTP/1.0 - experimental implementations of persistent connections are faulty, - and the new facilities in HTTP/1.1 are designed to rectify these - problems. The problem was that some existing 1.0 clients may be - sending Keep-Alive to a proxy server that doesn't understand - Connection, which would then erroneously forward it to the next - inbound server, which would establish the Keep-Alive connection and - result in a hung HTTP/1.0 proxy waiting for the close on the - response. The result is that HTTP/1.0 clients must be prevented from - using Keep-Alive when talking to proxies. - - However, talking to proxies is the most important use of persistent - connections, so that prohibition is clearly unacceptable. Therefore, - we need some other mechanism for indicating a persistent connection - is desired, which is safe to use even when talking to an old proxy - that ignores Connection. Persistent connections are the default for - HTTP/1.1 messages; we introduce a new keyword (Connection: close) for - declaring non-persistence. See section 14.10. 
- - The original HTTP/1.0 form of persistent connections (the Connection: - Keep-Alive and Keep-Alive header) is documented in RFC 2068. [33] - -19.6.3 Changes from RFC 2068 - - This specification has been carefully audited to correct and - disambiguate key word usage; RFC 2068 had many problems in respect to - the conventions laid out in RFC 2119 [34]. - - Clarified which error code should be used for inbound server failures - (e.g. DNS failures). (Section 10.5.5). - - - -Fielding, et al. Standards Track [Page 172] - -RFC 2616 HTTP/1.1 June 1999 - - - CREATE had a race that required an Etag be sent when a resource is - first created. (Section 10.2.2). - - Content-Base was deleted from the specification: it was not - implemented widely, and there is no simple, safe way to introduce it - without a robust extension mechanism. In addition, it is used in a - similar, but not identical fashion in MHTML [45]. - - Transfer-coding and message lengths all interact in ways that - required fixing exactly when chunked encoding is used (to allow for - transfer encoding that may not be self delimiting); it was important - to straighten out exactly how message lengths are computed. (Sections - 3.6, 4.4, 7.2.2, 13.5.2, 14.13, 14.16) - - A content-coding of "identity" was introduced, to solve problems - discovered in caching. (section 3.5) - - Quality Values of zero should indicate that "I don't want something" - to allow clients to refuse a representation. (Section 3.9) - - The use and interpretation of HTTP version numbers has been clarified - by RFC 2145. Require proxies to upgrade requests to highest protocol - version they support to deal with problems discovered in HTTP/1.0 - implementations (Section 3.1) - - Charset wildcarding is introduced to avoid explosion of character set - names in accept headers. (Section 14.2) - - A case was missed in the Cache-Control model of HTTP/1.1; s-maxage - was introduced to add this missing case. 
(Sections 13.4, 14.8, 14.9, - 14.9.3) - - The Cache-Control: max-age directive was not properly defined for - responses. (Section 14.9.3) - - There are situations where a server (especially a proxy) does not - know the full length of a response but is capable of serving a - byterange request. We therefore need a mechanism to allow byteranges - with a content-range not indicating the full length of the message. - (Section 14.16) - - Range request responses would become very verbose if all meta-data - were always returned; by allowing the server to only send needed - headers in a 206 response, this problem can be avoided. (Section - 10.2.7, 13.5.3, and 14.27) - - - - - - -Fielding, et al. Standards Track [Page 173] - -RFC 2616 HTTP/1.1 June 1999 - - - Fix problem with unsatisfiable range requests; there are two cases: - syntactic problems, and range doesn't exist in the document. The 416 - status code was needed to resolve this ambiguity needed to indicate - an error for a byte range request that falls outside of the actual - contents of a document. (Section 10.4.17, 14.16) - - Rewrite of message transmission requirements to make it much harder - for implementors to get it wrong, as the consequences of errors here - can have significant impact on the Internet, and to deal with the - following problems: - - 1. Changing "HTTP/1.1 or later" to "HTTP/1.1", in contexts where - this was incorrectly placing a requirement on the behavior of - an implementation of a future version of HTTP/1.x - - 2. Made it clear that user-agents should retry requests, not - "clients" in general. - - 3. Converted requirements for clients to ignore unexpected 100 - (Continue) responses, and for proxies to forward 100 responses, - into a general requirement for 1xx responses. - - 4. Modified some TCP-specific language, to make it clearer that - non-TCP transports are possible for HTTP. - - 5. 
Require that the origin server MUST NOT wait for the request - body before it sends a required 100 (Continue) response. - - 6. Allow, rather than require, a server to omit 100 (Continue) if - it has already seen some of the request body. - - 7. Allow servers to defend against denial-of-service attacks and - broken clients. - - This change adds the Expect header and 417 status code. The message - transmission requirements fixes are in sections 8.2, 10.4.18, - 8.1.2.2, 13.11, and 14.20. - - Proxies should be able to add Content-Length when appropriate. - (Section 13.5.2) - - Clean up confusion between 403 and 404 responses. (Section 10.4.4, - 10.4.5, and 10.4.11) - - Warnings could be cached incorrectly, or not updated appropriately. - (Section 13.1.2, 13.2.4, 13.5.2, 13.5.3, 14.9.3, and 14.46) Warning - also needed to be a general header, as PUT or other methods may have - need for it in requests. - - - -Fielding, et al. Standards Track [Page 174] - -RFC 2616 HTTP/1.1 June 1999 - - - Transfer-coding had significant problems, particularly with - interactions with chunked encoding. The solution is that transfer- - codings become as full fledged as content-codings. This involves - adding an IANA registry for transfer-codings (separate from content - codings), a new header field (TE) and enabling trailer headers in the - future. Transfer encoding is a major performance benefit, so it was - worth fixing [39]. TE also solves another, obscure, downward - interoperability problem that could have occurred due to interactions - between authentication trailers, chunked encoding and HTTP/1.0 - clients.(Section 3.6, 3.6.1, and 14.39) - - The PATCH, LINK, UNLINK methods were defined but not commonly - implemented in previous versions of this specification. See RFC 2068 - [33]. - - The Alternates, Content-Version, Derived-From, Link, URI, Public and - Content-Base header fields were defined in previous versions of this - specification, but not commonly implemented. 
See RFC 2068 [33]. - -20 Index - - Please see the PostScript version of this RFC for the INDEX. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Fielding, et al. Standards Track [Page 175] - -RFC 2616 HTTP/1.1 June 1999 - - -21. Full Copyright Statement - - Copyright (C) The Internet Society (1999). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - -Acknowledgement - - Funding for the RFC Editor function is currently provided by the - Internet Society. - - - - - - - - - - - - - - - - - - - -Fielding, et al. 
Standards Track [Page 176] - diff --git a/docs/specs/rfc2617.txt b/docs/specs/rfc2617.txt deleted file mode 100644 index b8fdf59..0000000 --- a/docs/specs/rfc2617.txt +++ /dev/null @@ -1,1909 +0,0 @@ - -[[ Text in double brackets is from the unofficial errata at ]] -[[ http://skrb.org/ietf/http_errata.html ]] - -Network Working Group J. Franks -Request for Comments: 2617 Northwestern University -Obsoletes: 2069 P. Hallam-Baker -Category: Standards Track Verisign, Inc. - J. Hostetler - AbiSource, Inc. - S. Lawrence - Agranat Systems, Inc. - P. Leach - Microsoft Corporation - A. Luotonen - Netscape Communications Corporation - L. Stewart - Open Market, Inc. - June 1999 - - - HTTP Authentication: Basic and Digest Access Authentication - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (1999). All Rights Reserved. - -Abstract - - "HTTP/1.0", includes the specification for a Basic Access - Authentication scheme. This scheme is not considered to be a secure - method of user authentication (unless used in conjunction with some - external secure system such as SSL [5]), as the user name and - password are passed over the network as cleartext. - - This document also provides the specification for HTTP's - authentication framework, the original Basic authentication scheme - and a scheme based on cryptographic hashes, referred to as "Digest - Access Authentication". It is therefore also intended to serve as a - replacement for RFC 2069 [6]. 
Some optional elements specified by - RFC 2069 have been removed from this specification due to problems - found since its publication; other new elements have been added for - compatibility, those new elements have been made optional, but are - strongly recommended. - - - -Franks, et al. Standards Track [Page 1] - -RFC 2617 HTTP Authentication June 1999 - - - Like Basic, Digest access authentication verifies that both parties - to a communication know a shared secret (a password); unlike Basic, - this verification can be done without sending the password in the - clear, which is Basic's biggest weakness. As with most other - authentication protocols, the greatest sources of risks are usually - found not in the core protocol itself but in policies and procedures - surrounding its use. - -Table of Contents - - 1 Access Authentication................................ 3 - 1.1 Reliance on the HTTP/1.1 Specification............ 3 - 1.2 Access Authentication Framework................... 3 - 2 Basic Authentication Scheme.......................... 5 - 3 Digest Access Authentication Scheme.................. 6 - 3.1 Introduction...................................... 6 - 3.1.1 Purpose......................................... 6 - 3.1.2 Overall Operation............................... 6 - 3.1.3 Representation of digest values................. 7 - 3.1.4 Limitations..................................... 7 - 3.2 Specification of Digest Headers................... 7 - 3.2.1 The WWW-Authenticate Response Header............ 8 - 3.2.2 The Authorization Request Header................ 11 - 3.2.3 The Authentication-Info Header.................. 15 - 3.3 Digest Operation.................................. 17 - 3.4 Security Protocol Negotiation..................... 18 - 3.5 Example........................................... 18 - 3.6 Proxy-Authentication and Proxy-Authorization...... 19 - 4 Security Considerations.............................. 
19 - 4.1 Authentication of Clients using Basic - Authentication.................................... 19 - 4.2 Authentication of Clients using Digest - Authentication.................................... 20 - 4.3 Limited Use Nonce Values.......................... 21 - 4.4 Comparison of Digest with Basic Authentication.... 22 - 4.5 Replay Attacks.................................... 22 - 4.6 Weakness Created by Multiple Authentication - Schemes........................................... 23 - 4.7 Online dictionary attacks......................... 23 - 4.8 Man in the Middle................................. 24 - 4.9 Chosen plaintext attacks.......................... 24 - 4.10 Precomputed dictionary attacks.................... 25 - 4.11 Batch brute force attacks......................... 25 - 4.12 Spoofing by Counterfeit Servers................... 25 - 4.13 Storing passwords................................. 26 - 4.14 Summary........................................... 26 - 5 Sample implementation................................ 27 - 6 Acknowledgments...................................... 31 - - - -Franks, et al. Standards Track [Page 2] - -RFC 2617 HTTP Authentication June 1999 - - - 7 References........................................... 31 - 8 Authors' Addresses................................... 32 - 9 Full Copyright Statement............................. 34 - -1 Access Authentication - -1.1 Reliance on the HTTP/1.1 Specification - - This specification is a companion to the HTTP/1.1 specification [2]. - It uses the augmented BNF section 2.1 of that document, and relies on - both the non-terminals defined in that document and other aspects of - the HTTP/1.1 specification. - -1.2 Access Authentication Framework - - HTTP provides a simple challenge-response authentication mechanism - that MAY be used by a server to challenge a client request and by a - client to provide authentication information. 
It uses an extensible, - case-insensitive token to identify the authentication scheme, - followed by a comma-separated list of attribute-value pairs which - carry the parameters necessary for achieving authentication via that - scheme. - - auth-scheme = token - auth-param = token "=" ( token | quoted-string ) - - The 401 (Unauthorized) response message is used by an origin server - to challenge the authorization of a user agent. This response MUST - include a WWW-Authenticate header field containing at least one - challenge applicable to the requested resource. The 407 (Proxy - Authentication Required) response message is used by a proxy to - challenge the authorization of a client and MUST include a Proxy- - Authenticate header field containing at least one challenge - applicable to the proxy for the requested resource. - - challenge = auth-scheme 1*SP 1#auth-param - - Note: User agents will need to take special care in parsing the WWW- - Authenticate or Proxy-Authenticate header field value if it contains - more than one challenge, or if more than one WWW-Authenticate header - field is provided, since the contents of a challenge may itself - contain a comma-separated list of authentication parameters. - - The authentication parameter realm is defined for all authentication - schemes: - - realm = "realm" "=" realm-value - realm-value = quoted-string - - - -Franks, et al. Standards Track [Page 3] - -RFC 2617 HTTP Authentication June 1999 - - - The realm directive (case-insensitive) is required for all - authentication schemes that issue a challenge. The realm value - (case-sensitive), in combination with the canonical root URL (the - absoluteURI for the server whose abs_path is empty; see section 5.1.2 - of [2]) of the server being accessed, defines the protection space. - These realms allow the protected resources on a server to be - partitioned into a set of protection spaces, each with its own - authentication scheme and/or authorization database. 
The realm value - is a string, generally assigned by the origin server, which may have - additional semantics specific to the authentication scheme. Note that - there may be multiple challenges with the same auth-scheme but - different realms. - - A user agent that wishes to authenticate itself with an origin - server--usually, but not necessarily, after receiving a 401 - (Unauthorized)--MAY do so by including an Authorization header field - with the request. A client that wishes to authenticate itself with a - proxy--usually, but not necessarily, after receiving a 407 (Proxy - Authentication Required)--MAY do so by including a Proxy- - Authorization header field with the request. Both the Authorization - field value and the Proxy-Authorization field value consist of - credentials containing the authentication information of the client - for the realm of the resource being requested. The user agent MUST - choose to use one of the challenges with the strongest auth-scheme it - understands and request credentials from the user based upon that - challenge. - - credentials = auth-scheme #auth-param - - Note that many browsers will only recognize Basic and will require - that it be the first auth-scheme presented. Servers should only - include Basic if it is minimally acceptable. - - The protection space determines the domain over which credentials can - be automatically applied. If a prior request has been authorized, the - same credentials MAY be reused for all other requests within that - protection space for a period of time determined by the - authentication scheme, parameters, and/or user preference. Unless - otherwise defined by the authentication scheme, a single protection - space cannot extend outside the scope of its server. - - If the origin server does not wish to accept the credentials sent - with a request, it SHOULD return a 401 (Unauthorized) response. 
The - response MUST include a WWW-Authenticate header field containing at - least one (possibly new) challenge applicable to the requested - resource. If a proxy does not accept the credentials sent with a - request, it SHOULD return a 407 (Proxy Authentication Required). The - response MUST include a Proxy-Authenticate header field containing a - - - -Franks, et al. Standards Track [Page 4] - -RFC 2617 HTTP Authentication June 1999 - - - (possibly new) challenge applicable to the proxy for the requested - resource. - - The HTTP protocol does not restrict applications to this simple - challenge-response mechanism for access authentication. Additional - mechanisms MAY be used, such as encryption at the transport level or - via message encapsulation, and with additional header fields - specifying authentication information. However, these additional - mechanisms are not defined by this specification. - - Proxies MUST be completely transparent regarding user agent - authentication by origin servers. That is, they must forward the - WWW-Authenticate and Authorization headers untouched, and follow the - rules found in section 14.8 of [2]. Both the Proxy-Authenticate and - the Proxy-Authorization header fields are hop-by-hop headers (see - section 13.5.1 of [2]). - -2 Basic Authentication Scheme - - The "basic" authentication scheme is based on the model that the - client must authenticate itself with a user-ID and a password for - each realm. The realm value should be considered an opaque string - which can only be compared for equality with other realms on that - server. The server will service the request only if it can validate - the user-ID and password for the protection space of the Request-URI. - There are no optional authentication parameters. 
- - For Basic, the framework above is utilized as follows: - - challenge = "Basic" realm - credentials = "Basic" basic-credentials - - Upon receipt of an unauthorized request for a URI within the - protection space, the origin server MAY respond with a challenge like - the following: - - WWW-Authenticate: Basic realm="WallyWorld" - - where "WallyWorld" is the string assigned by the server to identify - the protection space of the Request-URI. A proxy may respond with the - same challenge using the Proxy-Authenticate header field. - - To receive authorization, the client sends the userid and password, - separated by a single colon (":") character, within a base64 [7] - encoded string in the credentials. - - basic-credentials = base64-user-pass - base64-user-pass = - user-pass = userid ":" password - userid = * - password = *TEXT - - Userids might be case sensitive. - - If the user agent wishes to send the userid "Aladdin" and password - "open sesame", it would use the following header field: - - Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ== - - A client SHOULD assume that all paths at or deeper than the depth of - the last symbolic element in the path field of the Request-URI also - are within the protection space specified by the Basic realm value of - the current challenge. A client MAY preemptively send the - corresponding Authorization header with requests for resources in - that space without receipt of another challenge from the server. - Similarly, when a client sends a request to a proxy, it may reuse a - userid and password in the Proxy-Authorization header field without - receiving another challenge from the proxy server. See section 4 for - security considerations associated with Basic authentication. - -3 Digest Access Authentication Scheme - -3.1 Introduction - -3.1.1 Purpose - - The protocol referred to as "HTTP/1.0" includes the specification for - a Basic Access Authentication scheme[1]. 
That scheme is not - considered to be a secure method of user authentication, as the user - name and password are passed over the network in an unencrypted form. - This section provides the specification for a scheme that does not - send the password in cleartext, referred to as "Digest Access - Authentication". - - The Digest Access Authentication scheme is not intended to be a - complete answer to the need for security in the World Wide Web. This - scheme provides no encryption of message content. The intent is - simply to create an access authentication method that avoids the most - serious flaws of Basic authentication. - -3.1.2 Overall Operation - - Like Basic Access Authentication, the Digest scheme is based on a - simple challenge-response paradigm. The Digest scheme challenges - using a nonce value. A valid response contains a checksum (by - - - -Franks, et al. Standards Track [Page 6] - -RFC 2617 HTTP Authentication June 1999 - - - default, the MD5 checksum) of the username, the password, the given - nonce value, the HTTP method, and the requested URI. In this way, the - password is never sent in the clear. Just as with the Basic scheme, - the username and password must be prearranged in some fashion not - addressed by this document. - -3.1.3 Representation of digest values - - An optional header allows the server to specify the algorithm used to - create the checksum or digest. By default the MD5 algorithm is used - and that is the only algorithm described in this document. - - For the purposes of this document, an MD5 digest of 128 bits is - represented as 32 ASCII printable characters. The bits in the 128 bit - digest are converted from most significant to least significant bit, - four bits at a time to their ASCII presentation as follows. Each four - bits is represented by its familiar hexadecimal notation from the - characters 0123456789abcdef. 
That is, binary 0000 gets represented by - the character '0', 0001, by '1', and so on up to the representation - of 1111 as 'f'. - -3.1.4 Limitations - - The Digest authentication scheme described in this document suffers - from many known limitations. It is intended as a replacement for - Basic authentication and nothing more. It is a password-based system - and (on the server side) suffers from all the same problems of any - password system. In particular, no provision is made in this protocol - for the initial secure arrangement between user and server to - establish the user's password. - - Users and implementors should be aware that this protocol is not as - secure as Kerberos, and not as secure as any client-side private-key - scheme. Nevertheless it is better than nothing, better than what is - commonly used with telnet and ftp, and better than Basic - authentication. - -3.2 Specification of Digest Headers - - The Digest Access Authentication scheme is conceptually similar to - the Basic scheme. The formats of the modified WWW-Authenticate header - line and the Authorization header line are specified below. In - addition, a new header, Authentication-Info, is specified. - - - - - - - - -Franks, et al. 
Standards Track [Page 7] - -RFC 2617 HTTP Authentication June 1999 - - -3.2.1 The WWW-Authenticate Response Header - - If a server receives a request for an access-protected object, and an - acceptable Authorization header is not sent, the server responds with - a "401 Unauthorized" status code, and a WWW-Authenticate header as - per the framework defined above, which for the digest scheme is - utilized as follows: - - challenge = "Digest" digest-challenge - - digest-challenge = 1#( realm | [ domain ] | nonce | - [ opaque ] |[ stale ] | [ algorithm ] | - [ qop-options ] | [auth-param] ) - - - domain = "domain" "=" <"> URI ( 1*SP URI ) <"> - [[ Should be: ]] - [[ domain = "domain" "=" <"> URI *( 1*SP URI ) <"> ]] - URI = absoluteURI | abs_path - nonce = "nonce" "=" nonce-value - nonce-value = quoted-string - opaque = "opaque" "=" quoted-string - stale = "stale" "=" ( "true" | "false" ) - algorithm = "algorithm" "=" ( "MD5" | "MD5-sess" | - token ) - qop-options = "qop" "=" <"> 1#qop-value <"> - qop-value = "auth" | "auth-int" | token - - The meanings of the values of the directives used above are as - follows: - - realm - A string to be displayed to users so they know which username and - password to use. This string should contain at least the name of - the host performing the authentication and might additionally - indicate the collection of users who might have access. An example - might be "registered_users@gotham.news.com". - - domain - A quoted, space-separated list of URIs, as specified in RFC XURI - [7], that define the protection space. If a URI is an abs_path, it - is relative to the canonical root URL (see section 1.2 above) of - the server being accessed. An absoluteURI in this list may refer to - a different server than the one being accessed. 
The client can use - this list to determine the set of URIs for which the same - authentication information may be sent: any URI that has a URI in - this list as a prefix (after both have been made absolute) may be - assumed to be in the same protection space. If this directive is - omitted or its value is empty, the client should assume that the - protection space consists of all URIs on the responding server. - - - -Franks, et al. Standards Track [Page 8] - -RFC 2617 HTTP Authentication June 1999 - - - This directive is not meaningful in Proxy-Authenticate headers, for - which the protection space is always the entire proxy; if present - it should be ignored. - - nonce - A server-specified data string which should be uniquely generated - each time a 401 response is made. It is recommended that this - string be base64 or hexadecimal data. Specifically, since the - string is passed in the header lines as a quoted string, the - double-quote character is not allowed. - - The contents of the nonce are implementation dependent. The quality - of the implementation depends on a good choice. A nonce might, for - example, be constructed as the base 64 encoding of - - time-stamp H(time-stamp ":" ETag ":" private-key) - - where time-stamp is a server-generated time or other non-repeating - value, ETag is the value of the HTTP ETag header associated with - the requested entity, and private-key is data known only to the - server. With a nonce of this form a server would recalculate the - hash portion after receiving the client authentication header and - reject the request if it did not match the nonce from that header - or if the time-stamp value is not recent enough. In this way the - server can limit the time of the nonce's validity. The inclusion of - the ETag prevents a replay request for an updated version of the - resource. 
(Note: including the IP address of the client in the - nonce would appear to offer the server the ability to limit the - reuse of the nonce to the same client that originally got it. - However, that would break proxy farms, where requests from a single - user often go through different proxies in the farm. Also, IP - address spoofing is not that hard.) - - An implementation might choose not to accept a previously used - nonce or a previously used digest, in order to protect against a - replay attack. Or, an implementation might choose to use one-time - nonces or digests for POST or PUT requests and a time-stamp for GET - requests. For more details on the issues involved see section 4. - of this document. - - The nonce is opaque to the client. - - opaque - A string of data, specified by the server, which should be returned - by the client unchanged in the Authorization header of subsequent - requests with URIs in the same protection space. It is recommended - that this string be base64 or hexadecimal data. - - - - -Franks, et al. Standards Track [Page 9] - -RFC 2617 HTTP Authentication June 1999 - - - stale - A flag, indicating that the previous request from the client was - rejected because the nonce value was stale. If stale is TRUE - (case-insensitive), the client may wish to simply retry the request - with a new encrypted response, without reprompting the user for a - new username and password. The server should only set stale to TRUE - if it receives a request for which the nonce is invalid but with a - valid digest for that nonce (indicating that the client knows the - correct username/password). If stale is FALSE, or anything other - than TRUE, or the stale directive is not present, the username - and/or password are invalid, and new values must be obtained. - - algorithm - A string indicating a pair of algorithms used to produce the digest - and a checksum. If this is not present it is assumed to be "MD5". 
- If the algorithm is not understood, the challenge should be ignored - (and a different one used, if there is more than one). - - In this document the string obtained by applying the digest - algorithm to the data "data" with secret "secret" will be denoted - by KD(secret, data), and the string obtained by applying the - checksum algorithm to the data "data" will be denoted H(data). The - notation unq(X) means the value of the quoted-string X without the - surrounding quotes. - - For the "MD5" and "MD5-sess" algorithms - - H(data) = MD5(data) - - and - - KD(secret, data) = H(concat(secret, ":", data)) - - i.e., the digest is the MD5 of the secret concatenated with a colon - concatenated with the data. The "MD5-sess" algorithm is intended to - allow efficient 3rd party authentication servers; for the - difference in usage, see the description in section 3.2.2.2. - - qop-options - This directive is optional, but is made so only for backward - compatibility with RFC 2069 [6]; it SHOULD be used by all - implementations compliant with this version of the Digest scheme. - If present, it is a quoted string of one or more tokens indicating - the "quality of protection" values supported by the server. The - value "auth" indicates authentication; the value "auth-int" - indicates authentication with integrity protection; see the - - - - - -Franks, et al. Standards Track [Page 10] - -RFC 2617 HTTP Authentication June 1999 - - - descriptions below for calculating the response directive value for - the application of this choice. Unrecognized options MUST be - ignored. - - auth-param - This directive allows for future extensions. Any unrecognized - directive MUST be ignored. - -3.2.2 The Authorization Request Header - - The client is expected to retry the request, passing an Authorization - header line, which is defined according to the framework above, - utilized as follows. 
- - credentials = "Digest" digest-response - digest-response = 1#( username | realm | nonce | digest-uri - | response | [ algorithm ] | [cnonce] | - [opaque] | [message-qop] | - [nonce-count] | [auth-param] ) - - username = "username" "=" username-value - username-value = quoted-string - digest-uri = "uri" "=" digest-uri-value - digest-uri-value = request-uri ; As specified by HTTP/1.1 - message-qop = "qop" "=" qop-value - cnonce = "cnonce" "=" cnonce-value - cnonce-value = nonce-value - nonce-count = "nc" "=" nc-value - nc-value = 8LHEX - response = "response" "=" request-digest - request-digest = <"> 32LHEX <"> - LHEX = "0" | "1" | "2" | "3" | - "4" | "5" | "6" | "7" | - "8" | "9" | "a" | "b" | - "c" | "d" | "e" | "f" - - The values of the opaque and algorithm fields must be those supplied - in the WWW-Authenticate response header for the entity being - requested. - - response - A string of 32 hex digits computed as defined below, which proves - that the user knows a password - - username - The user's name in the specified realm. - - - - - -Franks, et al. Standards Track [Page 11] - -RFC 2617 HTTP Authentication June 1999 - - - digest-uri - The URI from Request-URI of the Request-Line; duplicated here - because proxies are allowed to change the Request-Line in transit. - - qop - Indicates what "quality of protection" the client has applied to - the message. If present, its value MUST be one of the alternatives - the server indicated it supports in the WWW-Authenticate header. - These values affect the computation of the request-digest. Note - that this is a single token, not a quoted list of alternatives as - in WWW- Authenticate. This directive is optional in order to - preserve backward compatibility with a minimal implementation of - RFC 2069 [6], but SHOULD be used if the server indicated that qop - is supported by providing a qop directive in the WWW-Authenticate - header field. 
- - cnonce - This MUST be specified if a qop directive is sent (see above), and - MUST NOT be specified if the server did not send a qop directive in - the WWW-Authenticate header field. The cnonce-value is an opaque - quoted string value provided by the client and used by both client - and server to avoid chosen plaintext attacks, to provide mutual - authentication, and to provide some message integrity protection. - See the descriptions below of the calculation of the response- - digest and request-digest values. - - nonce-count - This MUST be specified if a qop directive is sent (see above), and - MUST NOT be specified if the server did not send a qop directive in - the WWW-Authenticate header field. The nc-value is the hexadecimal - count of the number of requests (including the current request) - that the client has sent with the nonce value in this request. For - example, in the first request sent in response to a given nonce - value, the client sends "nc=00000001". The purpose of this - directive is to allow the server to detect request replays by - maintaining its own copy of this count - if the same nc-value is - seen twice, then the request is a replay. See the description - below of the construction of the request-digest value. - - auth-param - This directive allows for future extensions. Any unrecognized - directive MUST be ignored. - - If a directive or its value is improper, or required directives are - missing, the proper response is 400 Bad Request. If the request- - digest is invalid, then a login failure should be logged, since - repeated login failures from a single client may indicate an attacker - attempting to guess passwords. - - - -Franks, et al. Standards Track [Page 12] - -RFC 2617 HTTP Authentication June 1999 - - - The definition of request-digest above indicates the encoding for its - value. The following definitions show how the value is computed. 
- -3.2.2.1 Request-Digest - - If the "qop" value is "auth" or "auth-int": - - request-digest = <"> < KD ( H(A1), unq(nonce-value) - ":" nc-value - ":" unq(cnonce-value) - ":" unq(qop-value) - ":" H(A2) - ) <"> - - If the "qop" directive is not present (this construction is for - compatibility with RFC 2069): - - request-digest = - <"> < KD ( H(A1), unq(nonce-value) ":" H(A2) ) > - <"> - - See below for the definitions for A1 and A2. - -3.2.2.2 A1 - - If the "algorithm" directive's value is "MD5" or is unspecified, then - A1 is: - - A1 = unq(username-value) ":" unq(realm-value) ":" passwd - - where - - passwd = < user's password > - - If the "algorithm" directive's value is "MD5-sess", then A1 is - calculated only once - on the first request by the client following - receipt of a WWW-Authenticate challenge from the server. It uses the - server nonce from that challenge, and the first client nonce value to - construct A1 as follows: - - A1 = H( unq(username-value) ":" unq(realm-value) - ":" passwd ) - ":" unq(nonce-value) ":" unq(cnonce-value) - - This creates a 'session key' for the authentication of subsequent - requests and responses which is different for each "authentication - session", thus limiting the amount of material hashed with any one - key. (Note: see further discussion of the authentication session in - - - -Franks, et al. Standards Track [Page 13] - -RFC 2617 HTTP Authentication June 1999 - - - section 3.3.) Because the server need only use the hash of the user - credentials in order to create the A1 value, this construction could - be used in conjunction with a third party authentication service so - that the web server would not need the actual password value. The - specification of such a protocol is beyond the scope of this - specification. 
- -3.2.2.3 A2 - - If the "qop" directive's value is "auth" or is unspecified, then A2 - is: - - A2 = Method ":" digest-uri-value - - If the "qop" value is "auth-int", then A2 is: - - A2 = Method ":" digest-uri-value ":" H(entity-body) - -3.2.2.4 Directive values and quoted-string - - Note that the value of many of the directives, such as "username- - value", are defined as a "quoted-string". However, the "unq" notation - indicates that surrounding quotation marks are removed in forming the - string A1. Thus if the Authorization header includes the fields - - username="Mufasa", realm=myhost@testrealm.com - - and the user Mufasa has password "Circle Of Life" then H(A1) would be - H(Mufasa:myhost@testrealm.com:Circle Of Life) with no quotation marks - in the digested string. - - No white space is allowed in any of the strings to which the digest - function H() is applied unless that white space exists in the quoted - strings or entity body whose contents make up the string to be - digested. For example, the string A1 illustrated above must be - - Mufasa:myhost@testrealm.com:Circle Of Life - - with no white space on either side of the colons, but with the white - space between the words used in the password value. Likewise, the - other strings digested by H() must not have white space on either - side of the colons which delimit their fields unless that white space - was in the quoted strings or entity body being digested. - - Also note that if integrity protection is applied (qop=auth-int), the - H(entity-body) is the hash of the entity body, not the message body - - it is computed before any transfer encoding is applied by the sender - - - - -Franks, et al. Standards Track [Page 14] - -RFC 2617 HTTP Authentication June 1999 - - - and after it has been removed by the recipient. Note that this - includes multipart boundaries and embedded headers in each part of - any multipart content-type. 
- -3.2.2.5 Various considerations - - The "Method" value is the HTTP request method as specified in section - 5.1.1 of [2]. The "request-uri" value is the Request-URI from the - request line as specified in section 5.1.2 of [2]. This may be "*", - an "absoluteURL" or an "abs_path" as specified in section 5.1.2 of - [2], but it MUST agree with the Request-URI. In particular, it MUST - be an "absoluteURL" if the Request-URI is an "absoluteURL". The - "cnonce-value" is an optional client-chosen value whose purpose is - to foil chosen plaintext attacks. - - The authenticating server must assure that the resource designated by - the "uri" directive is the same as the resource specified in the - Request-Line; if they are not, the server SHOULD return a 400 Bad - Request error. (Since this may be a symptom of an attack, server - implementers may want to consider logging such errors.) The purpose - of duplicating information from the request URL in this field is to - deal with the possibility that an intermediate proxy may alter the - client's Request-Line. This altered (but presumably semantically - equivalent) request would not result in the same digest as that - calculated by the client. - - Implementers should be aware of how authenticated transactions - interact with shared caches. The HTTP/1.1 protocol specifies that - when a shared cache (see section 13.7 of [2]) has received a request - containing an Authorization header and a response from relaying that - request, it MUST NOT return that response as a reply to any other - request, unless one of two Cache-Control (see section 14.9 of [2]) - directives was present in the response. If the original response - included the "must-revalidate" Cache-Control directive, the cache MAY - use the entity of that response in replying to a subsequent request, - but MUST first revalidate it with the origin server, using the - request headers from the new request to allow the origin server to - authenticate the new request. 
Alternatively, if the original response - included the "public" Cache-Control directive, the response entity - MAY be returned in reply to any subsequent request. - -3.2.3 The Authentication-Info Header - - The Authentication-Info header is used by the server to communicate - some information regarding the successful authentication in the - response. - - - - - -Franks, et al. Standards Track [Page 15] - -RFC 2617 HTTP Authentication June 1999 - - - AuthenticationInfo = "Authentication-Info" ":" auth-info - auth-info = 1#(nextnonce | [ message-qop ] - | [ response-auth ] | [ cnonce ] - | [nonce-count] ) - nextnonce = "nextnonce" "=" nonce-value - response-auth = "rspauth" "=" response-digest - response-digest = <"> *LHEX <"> - - The value of the nextnonce directive is the nonce the server wishes - the client to use for a future authentication response. The server - may send the Authentication-Info header with a nextnonce field as a - means of implementing one-time or otherwise changing nonces. If the - nextnonce field is present the client SHOULD use it when constructing - the Authorization header for its next request. Failure of the client - to do so may result in a request to re-authenticate from the server - with the "stale=TRUE". - - Server implementations should carefully consider the performance - implications of the use of this mechanism; pipelined requests will - not be possible if every response includes a nextnonce directive - that must be used on the next request received by the server. - Consideration should be given to the performance vs. security - tradeoffs of allowing an old nonce value to be used for a limited - time to permit request pipelining. Use of the nonce-count can - retain most of the security advantages of a new server nonce - without the deleterious affects on pipelining. - - message-qop - Indicates the "quality of protection" options applied to the - response by the server. 
The value "auth" indicates authentication; - the value "auth-int" indicates authentication with integrity - protection. The server SHOULD use the same value for the message- - qop directive in the response as was sent by the client in the - corresponding request. - - The optional response digest in the "response-auth" directive - supports mutual authentication -- the server proves that it knows the - user's secret, and with qop=auth-int also provides limited integrity - protection of the response. The "response-digest" value is calculated - as for the "request-digest" in the Authorization header, except that - if "qop=auth" or is not specified in the Authorization header for the - request, A2 is - - A2 = ":" digest-uri-value - - and if "qop=auth-int", then A2 is - - A2 = ":" digest-uri-value ":" H(entity-body) - - - -Franks, et al. Standards Track [Page 16] - -RFC 2617 HTTP Authentication June 1999 - - - where "digest-uri-value" is the value of the "uri" directive on the - Authorization header in the request. The "cnonce-value" and "nc- - value" MUST be the ones for the client request to which this message - is the response. The "response-auth", "cnonce", and "nonce-count" - directives MUST BE present if "qop=auth" or "qop=auth-int" is - specified. - - The Authentication-Info header is allowed in the trailer of an HTTP - message transferred via chunked transfer-coding. - -3.3 Digest Operation - - Upon receiving the Authorization header, the server may check its - validity by looking up the password that corresponds to the submitted - username. Then, the server must perform the same digest operation - (e.g., MD5) performed by the client, and compare the result to the - given request-digest value. - - Note that the HTTP server does not actually need to know the user's - cleartext password. As long as H(A1) is available to the server, the - validity of an Authorization header may be verified. 
- - The client response to a WWW-Authenticate challenge for a protection - space starts an authentication session with that protection space. - The authentication session lasts until the client receives another - WWW-Authenticate challenge from any server in the protection space. A - client should remember the username, password, nonce, nonce count and - opaque values associated with an authentication session to use to - construct the Authorization header in future requests within that - protection space. The Authorization header may be included - preemptively; doing so improves server efficiency and avoids extra - round trips for authentication challenges. The server may choose to - accept the old Authorization header information, even though the - nonce value included might not be fresh. Alternatively, the server - may return a 401 response with a new nonce value, causing the client - to retry the request; by specifying stale=TRUE with this response, - the server tells the client to retry with the new nonce, but without - prompting for a new username and password. - - Because the client is required to return the value of the opaque - directive given to it by the server for the duration of a session, - the opaque data may be used to transport authentication session state - information. (Note that any such use can also be accomplished more - easily and safely by including the state in the nonce.) For example, - a server could be responsible for authenticating content that - actually sits on another server. It would achieve this by having the - first 401 response include a domain directive whose value includes a - URI on the second server, and an opaque directive whose value - - - -Franks, et al. Standards Track [Page 17] - -RFC 2617 HTTP Authentication June 1999 - - - contains the state information. The client will retry the request, at - which time the server might respond with a 301/302 redirection, - pointing to the URI on the second server. 
The client will follow the - redirection, and pass an Authorization header , including the - data. - - As with the basic scheme, proxies must be completely transparent in - the Digest access authentication scheme. That is, they must forward - the WWW-Authenticate, Authentication-Info and Authorization headers - untouched. If a proxy wants to authenticate a client before a request - is forwarded to the server, it can be done using the Proxy- - Authenticate and Proxy-Authorization headers described in section 3.6 - below. - -3.4 Security Protocol Negotiation - - It is useful for a server to be able to know which security schemes a - client is capable of handling. - - It is possible that a server may want to require Digest as its - authentication method, even if the server does not know that the - client supports it. A client is encouraged to fail gracefully if the - server specifies only authentication schemes it cannot handle. - -3.5 Example - - The following example assumes that an access-protected document is - being requested from the server via a GET request. The URI of the - document is "http://www.nowhere.org/dir/index.html". Both client and - server know that the username for this document is "Mufasa", and the - password is "Circle Of Life" (with one space between each of the - three words). - - The first time the client requests the document, no Authorization - header is sent, so the server responds with: - - HTTP/1.1 401 Unauthorized - WWW-Authenticate: Digest - realm="testrealm@host.com", - qop="auth,auth-int", - nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", - opaque="5ccc069c403ebaf9f0171e9517f40e41" - - The client may prompt the user for the username and password, after - which it will respond with a new request, including the following - Authorization header: - - - - - -Franks, et al. 
Standards Track [Page 18] - -RFC 2617 HTTP Authentication June 1999 - - - Authorization: Digest username="Mufasa", - realm="testrealm@host.com", - nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", - uri="/dir/index.html", - qop=auth, - nc=00000001, - cnonce="0a4f113b", - response="6629fae49393a05397450978507c4ef1", - opaque="5ccc069c403ebaf9f0171e9517f40e41" - -3.6 Proxy-Authentication and Proxy-Authorization - - The digest authentication scheme may also be used for authenticating - users to proxies, proxies to proxies, or proxies to origin servers by - use of the Proxy-Authenticate and Proxy-Authorization headers. These - headers are instances of the Proxy-Authenticate and Proxy- - Authorization headers specified in sections 10.33 and 10.34 of the - HTTP/1.1 specification [2] and their behavior is subject to - restrictions described there. The transactions for proxy - authentication are very similar to those already described. Upon - receiving a request which requires authentication, the proxy/server - must issue the "407 Proxy Authentication Required" response with a - "Proxy-Authenticate" header. The digest-challenge used in the - Proxy-Authenticate header is the same as that for the WWW- - Authenticate header as defined above in section 3.2.1. - - The client/proxy must then re-issue the request with a Proxy- - Authorization header, with directives as specified for the - Authorization header in section 3.2.2 above. - - On subsequent responses, the server sends Proxy-Authentication-Info - with directives the same as those for the Authentication-Info header - field. - - Note that in principle a client could be asked to authenticate itself - to both a proxy and an end-server, but never in the same response. 
- -4 Security Considerations - -4.1 Authentication of Clients using Basic Authentication - - The Basic authentication scheme is not a secure method of user - authentication, nor does it in any way protect the entity, which is - transmitted in cleartext across the physical network used as the - carrier. HTTP does not prevent additional authentication schemes and - encryption mechanisms from being employed to increase security or the - addition of enhancements (such as schemes to use one-time passwords) - to Basic authentication. - - - -Franks, et al. Standards Track [Page 19] - -RFC 2617 HTTP Authentication June 1999 - - - The most serious flaw in Basic authentication is that it results in - the essentially cleartext transmission of the user's password over - the physical network. It is this problem which Digest Authentication - attempts to address. - - Because Basic authentication involves the cleartext transmission of - passwords it SHOULD NOT be used (without enhancements) to protect - sensitive or valuable information. - - A common use of Basic authentication is for identification purposes - -- requiring the user to provide a user name and password as a means - of identification, for example, for purposes of gathering accurate - usage statistics on a server. When used in this way it is tempting to - think that there is no danger in its use if illicit access to the - protected documents is not a major concern. This is only correct if - the server issues both user name and password to the users and in - particular does not allow the user to choose his or her own password. - The danger arises because naive users frequently reuse a single - password to avoid the task of maintaining multiple passwords. - - If a server permits users to select their own passwords, then the - threat is not only unauthorized access to documents on the server but - also unauthorized access to any other resources on other systems that - the user protects with the same password. 
Furthermore, in the - server's password database, many of the passwords may also be users' - passwords for other sites. The owner or administrator of such a - system could therefore expose all users of the system to the risk of - unauthorized access to all those sites if this information is not - maintained in a secure fashion. - - Basic Authentication is also vulnerable to spoofing by counterfeit - servers. If a user can be led to believe that he is connecting to a - host containing information protected by Basic authentication when, - in fact, he is connecting to a hostile server or gateway, then the - attacker can request a password, store it for later use, and feign an - error. This type of attack is not possible with Digest - Authentication. Server implementers SHOULD guard against the - possibility of this sort of counterfeiting by gateways or CGI - scripts. In particular it is very dangerous for a server to simply - turn over a connection to a gateway. That gateway can then use the - persistent connection mechanism to engage in multiple transactions - with the client while impersonating the original server in a way that - is not detectable by the client. - -4.2 Authentication of Clients using Digest Authentication - - Digest Authentication does not provide a strong authentication - mechanism, when compared to public key based mechanisms, for example. - - - -Franks, et al. Standards Track [Page 20] - -RFC 2617 HTTP Authentication June 1999 - - - However, it is significantly stronger than (e.g.) CRAM-MD5, which has - been proposed for use with LDAP [10], POP and IMAP (see RFC 2195 - [9]). It is intended to replace the much weaker and even more - dangerous Basic mechanism. - - Digest Authentication offers no confidentiality protection beyond - protecting the actual password. All of the rest of the request and - response are available to an eavesdropper. - - Digest Authentication offers only limited integrity protection for - the messages in either direction. 
If qop=auth-int mechanism is used, - those parts of the message used in the calculation of the WWW- - Authenticate and Authorization header field response directive values - (see section 3.2 above) are protected. Most header fields and their - values could be modified as a part of a man-in-the-middle attack. - - Many needs for secure HTTP transactions cannot be met by Digest - Authentication. For those needs TLS or SHTTP are more appropriate - protocols. In particular Digest authentication cannot be used for any - transaction requiring confidentiality protection. Nevertheless many - functions remain for which Digest authentication is both useful and - appropriate. Any service in present use that uses Basic should be - switched to Digest as soon as practical. - -4.3 Limited Use Nonce Values - - The Digest scheme uses a server-specified nonce to seed the - generation of the request-digest value (as specified in section - 3.2.2.1 above). As shown in the example nonce in section 3.2.1, the - server is free to construct the nonce such that it may only be used - from a particular client, for a particular resource, for a limited - period of time or number of uses, or any other restrictions. Doing - so strengthens the protection provided against, for example, replay - attacks (see 4.5). However, it should be noted that the method - chosen for generating and checking the nonce also has performance and - resource implications. For example, a server may choose to allow - each nonce value to be used only once by maintaining a record of - whether or not each recently issued nonce has been returned and - sending a next-nonce directive in the Authentication-Info header - field of every response. This protects against even an immediate - replay attack, but has a high cost checking nonce values, and perhaps - more important will cause authentication failures for any pipelined - requests (presumably returning a stale nonce indication). 
Similarly, - incorporating a request-specific element such as the Etag value for a - resource limits the use of the nonce to that version of the resource - and also defeats pipelining. Thus it may be useful to do so for - methods with side effects but have unacceptable performance for those - that do not. - - - -Franks, et al. Standards Track [Page 21] - -RFC 2617 HTTP Authentication June 1999 - - -4.4 Comparison of Digest with Basic Authentication - - Both Digest and Basic Authentication are very much on the weak end of - the security strength spectrum. But a comparison between the two - points out the utility, even necessity, of replacing Basic by Digest. - - The greatest threat to the type of transactions for which these - protocols are used is network snooping. This kind of transaction - might involve, for example, online access to a database whose use is - restricted to paying subscribers. With Basic authentication an - eavesdropper can obtain the password of the user. This not only - permits him to access anything in the database, but, often worse, - will permit access to anything else the user protects with the same - password. - - By contrast, with Digest Authentication the eavesdropper only gets - access to the transaction in question and not to the user's password. - The information gained by the eavesdropper would permit a replay - attack, but only with a request for the same document, and even that - may be limited by the server's choice of nonce. - -4.5 Replay Attacks - - A replay attack against Digest authentication would usually be - pointless for a simple GET request since an eavesdropper would - already have seen the only document he could obtain with a replay. - This is because the URI of the requested document is digested in the - client request and the server will only deliver that document. By - contrast under Basic Authentication once the eavesdropper has the - user's password, any document protected by that password is open to - him. 
- - Thus, for some purposes, it is necessary to protect against replay - attacks. A good Digest implementation can do this in various ways. - The server created "nonce" value is implementation dependent, but if - it contains a digest of the client IP, a time-stamp, the resource - ETag, and a private server key (as recommended above) then a replay - attack is not simple. An attacker must convince the server that the - request is coming from a false IP address and must cause the server - to deliver the document to an IP address different from the address - to which it believes it is sending the document. An attack can only - succeed in the period before the time-stamp expires. Digesting the - client IP and time-stamp in the nonce permits an implementation which - does not maintain state between transactions. - - For applications where no possibility of replay attack can be - tolerated the server can use one-time nonce values which will not be - honored for a second use. This requires the overhead of the server - - - -Franks, et al. Standards Track [Page 22] - -RFC 2617 HTTP Authentication June 1999 - - - remembering which nonce values have been used until the nonce time- - stamp (and hence the digest built with it) has expired, but it - effectively protects against replay attacks. - - An implementation must give special attention to the possibility of - replay attacks with POST and PUT requests. Unless the server employs - one-time or otherwise limited-use nonces and/or insists on the use of - the integrity protection of qop=auth-int, an attacker could replay - valid credentials from a successful request with counterfeit form - data or other message body. Even with the use of integrity protection - most metadata in header fields is not protected. Proper nonce - generation and checking provides some protection against replay of - previously used valid credentials, but see 4.8. 
- -4.6 Weakness Created by Multiple Authentication Schemes - - An HTTP/1.1 server may return multiple challenges with a 401 - (Authenticate) response, and each challenge may use a different - auth-scheme. A user agent MUST choose to use the strongest auth- - scheme it understands and request credentials from the user based - upon that challenge. - - Note that many browsers will only recognize Basic and will require - that it be the first auth-scheme presented. Servers should only - include Basic if it is minimally acceptable. - - When the server offers choices of authentication schemes using the - WWW-Authenticate header, the strength of the resulting authentication - is only as good as that of the of the weakest of the authentication - schemes. See section 4.8 below for discussion of particular attack - scenarios that exploit multiple authentication schemes. - -4.7 Online dictionary attacks - - If the attacker can eavesdrop, then it can test any overheard - nonce/response pairs against a list of common words. Such a list is - usually much smaller than the total number of possible passwords. The - cost of computing the response for each password on the list is paid - once for each challenge. - - The server can mitigate this attack by not allowing users to select - passwords that are in a dictionary. - - - - - - - - - -Franks, et al. Standards Track [Page 23] - -RFC 2617 HTTP Authentication June 1999 - - -4.8 Man in the Middle - - Both Basic and Digest authentication are vulnerable to "man in the - middle" (MITM) attacks, for example, from a hostile or compromised - proxy. Clearly, this would present all the problems of eavesdropping. - But it also offers some additional opportunities to the attacker. - - A possible man-in-the-middle attack would be to add a weak - authentication scheme to the set of choices, hoping that the client - will use one that exposes the user's credentials (e.g. password). 
For - this reason, the client should always use the strongest scheme that - it understands from the choices offered. - - An even better MITM attack would be to remove all offered choices, - replacing them with a challenge that requests only Basic - authentication, then uses the cleartext credentials from the Basic - authentication to authenticate to the origin server using the - stronger scheme it requested. A particularly insidious way to mount - such a MITM attack would be to offer a "free" proxy caching service - to gullible users. - - User agents should consider measures such as presenting a visual - indication at the time of the credentials request of what - authentication scheme is to be used, or remembering the strongest - authentication scheme ever requested by a server and produce a - warning message before using a weaker one. It might also be a good - idea for the user agent to be configured to demand Digest - authentication in general, or from specific sites. - - Or, a hostile proxy might spoof the client into making a request the - attacker wanted rather than one the client wanted. Of course, this is - still much harder than a comparable attack against Basic - Authentication. - -4.9 Chosen plaintext attacks - - With Digest authentication, a MITM or a malicious server can - arbitrarily choose the nonce that the client will use to compute the - response. This is called a "chosen plaintext" attack. The ability to - choose the nonce is known to make cryptanalysis much easier [8]. - - However, no way to analyze the MD5 one-way function used by Digest - using chosen plaintext is currently known. - - The countermeasure against this attack is for clients to be - configured to require the use of the optional "cnonce" directive; - this allows the client to vary the input to the hash in a way not - chosen by the attacker. - - - -Franks, et al. 
Standards Track [Page 24] - -RFC 2617 HTTP Authentication June 1999 - - -4.10 Precomputed dictionary attacks - - With Digest authentication, if the attacker can execute a chosen - plaintext attack, the attacker can precompute the response for many - common words to a nonce of its choice, and store a dictionary of - (response, password) pairs. Such precomputation can often be done in - parallel on many machines. It can then use the chosen plaintext - attack to acquire a response corresponding to that challenge, and - just look up the password in the dictionary. Even if most passwords - are not in the dictionary, some might be. Since the attacker gets to - pick the challenge, the cost of computing the response for each - password on the list can be amortized over finding many passwords. A - dictionary with 100 million password/response pairs would take about - 3.2 gigabytes of disk storage. - - The countermeasure against this attack is to for clients to be - configured to require the use of the optional "cnonce" directive. - -4.11 Batch brute force attacks - - With Digest authentication, a MITM can execute a chosen plaintext - attack, and can gather responses from many users to the same nonce. - It can then find all the passwords within any subset of password - space that would generate one of the nonce/response pairs in a single - pass over that space. It also reduces the time to find the first - password by a factor equal to the number of nonce/response pairs - gathered. This search of the password space can often be done in - parallel on many machines, and even a single machine can search large - subsets of the password space very quickly -- reports exist of - searching all passwords with six or fewer letters in a few hours. - - The countermeasure against this attack is to for clients to be - configured to require the use of the optional "cnonce" directive. 
- -4.12 Spoofing by Counterfeit Servers - - Basic Authentication is vulnerable to spoofing by counterfeit - servers. If a user can be led to believe that she is connecting to a - host containing information protected by a password she knows, when - in fact she is connecting to a hostile server, then the hostile - server can request a password, store it away for later use, and feign - an error. This type of attack is more difficult with Digest - Authentication -- but the client must know to demand that Digest - authentication be used, perhaps using some of the techniques - described above to counter "man-in-the-middle" attacks. Again, the - user can be helped in detecting this attack by a visual indication of - the authentication mechanism in use with appropriate guidance in - interpreting the implications of each scheme. - - - -Franks, et al. Standards Track [Page 25] - -RFC 2617 HTTP Authentication June 1999 - - -4.13 Storing passwords - - Digest authentication requires that the authenticating agent (usually - the server) store some data derived from the user's name and password - in a "password file" associated with a given realm. Normally this - might contain pairs consisting of username and H(A1), where H(A1) is - the digested value of the username, realm, and password as described - above. - - The security implications of this are that if this password file is - compromised, then an attacker gains immediate access to documents on - the server using this realm. Unlike, say a standard UNIX password - file, this information need not be decrypted in order to access - documents in the server realm associated with this file. On the other - hand, decryption, or more likely a brute force attack, would be - necessary to obtain the user's password. This is the reason that the - realm is part of the digested data stored in the password file. 
It - means that if one Digest authentication password file is compromised, - it does not automatically compromise others with the same username - and password (though it does expose them to brute force attack). - - There are two important security consequences of this. First the - password file must be protected as if it contained unencrypted - passwords, because for the purpose of accessing documents in its - realm, it effectively does. - - A second consequence of this is that the realm string should be - unique among all realms which any single user is likely to use. In - particular a realm string should include the name of the host doing - the authentication. The inability of the client to authenticate the - server is a weakness of Digest Authentication. - -4.14 Summary - - By modern cryptographic standards Digest Authentication is weak. But - for a large range of purposes it is valuable as a replacement for - Basic Authentication. It remedies some, but not all, weaknesses of - Basic Authentication. Its strength may vary depending on the - implementation. In particular the structure of the nonce (which is - dependent on the server implementation) may affect the ease of - mounting a replay attack. A range of server options is appropriate - since, for example, some implementations may be willing to accept the - server overhead of one-time nonces or digests to eliminate the - possibility of replay. Others may satisfied with a nonce like the one - recommended above restricted to a single IP address and a single ETag - or with a limited lifetime. - - - - - -Franks, et al. Standards Track [Page 26] - -RFC 2617 HTTP Authentication June 1999 - - - The bottom line is that *any* compliant implementation will be - relatively weak by cryptographic standards, but *any* compliant - implementation will be far superior to Basic Authentication. 
- -5 Sample implementation - - [[ WARNING: DigestCalcHA1 IS WRONG ]] - - The following code implements the calculations of H(A1), H(A2), - request-digest and response-digest, and a test program which computes - the values used in the example of section 3.5. It uses the MD5 - implementation from RFC 1321. - - File "digcalc.h": - -#define HASHLEN 16 -typedef char HASH[HASHLEN]; -#define HASHHEXLEN 32 -typedef char HASHHEX[HASHHEXLEN+1]; -#define IN -#define OUT - -/* calculate H(A1) as per HTTP Digest spec */ -void DigestCalcHA1( - IN char * pszAlg, - IN char * pszUserName, - IN char * pszRealm, - IN char * pszPassword, - IN char * pszNonce, - IN char * pszCNonce, - OUT HASHHEX SessionKey - ); - -/* calculate request-digest/response-digest as per HTTP Digest spec */ -void DigestCalcResponse( - IN HASHHEX HA1, /* H(A1) */ - IN char * pszNonce, /* nonce from server */ - IN char * pszNonceCount, /* 8 hex digits */ - IN char * pszCNonce, /* client nonce */ - IN char * pszQop, /* qop-value: "", "auth", "auth-int" */ - IN char * pszMethod, /* method from the request */ - IN char * pszDigestUri, /* requested URL */ - IN HASHHEX HEntity, /* H(entity body) if qop="auth-int" */ - OUT HASHHEX Response /* request-digest or response-digest */ - ); - -File "digcalc.c": - -#include -#include - - - -Franks, et al. 
Standards Track [Page 27] - -RFC 2617 HTTP Authentication June 1999 - - -#include -#include "digcalc.h" - -void CvtHex( - IN HASH Bin, - OUT HASHHEX Hex - ) -{ - unsigned short i; - unsigned char j; - - for (i = 0; i < HASHLEN; i++) { - j = (Bin[i] >> 4) & 0xf; - if (j <= 9) - Hex[i*2] = (j + '0'); - else - Hex[i*2] = (j + 'a' - 10); - j = Bin[i] & 0xf; - if (j <= 9) - Hex[i*2+1] = (j + '0'); - else - Hex[i*2+1] = (j + 'a' - 10); - }; - Hex[HASHHEXLEN] = '\0'; -}; - -/* calculate H(A1) as per spec */ -void DigestCalcHA1( - IN char * pszAlg, - IN char * pszUserName, - IN char * pszRealm, - IN char * pszPassword, - IN char * pszNonce, - IN char * pszCNonce, - OUT HASHHEX SessionKey - ) -{ - MD5_CTX Md5Ctx; - HASH HA1; - - MD5Init(&Md5Ctx); - MD5Update(&Md5Ctx, pszUserName, strlen(pszUserName)); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszRealm, strlen(pszRealm)); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszPassword, strlen(pszPassword)); - MD5Final(HA1, &Md5Ctx); - if (stricmp(pszAlg, "md5-sess") == 0) { - - - -Franks, et al. 
Standards Track [Page 28] - -RFC 2617 HTTP Authentication June 1999 - - - MD5Init(&Md5Ctx); - MD5Update(&Md5Ctx, HA1, HASHLEN); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszNonce, strlen(pszNonce)); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszCNonce, strlen(pszCNonce)); - MD5Final(HA1, &Md5Ctx); - }; - CvtHex(HA1, SessionKey); -}; - -/* calculate request-digest/response-digest as per HTTP Digest spec */ -void DigestCalcResponse( - IN HASHHEX HA1, /* H(A1) */ - IN char * pszNonce, /* nonce from server */ - IN char * pszNonceCount, /* 8 hex digits */ - IN char * pszCNonce, /* client nonce */ - IN char * pszQop, /* qop-value: "", "auth", "auth-int" */ - IN char * pszMethod, /* method from the request */ - IN char * pszDigestUri, /* requested URL */ - IN HASHHEX HEntity, /* H(entity body) if qop="auth-int" */ - OUT HASHHEX Response /* request-digest or response-digest */ - ) -{ - MD5_CTX Md5Ctx; - HASH HA2; - HASH RespHash; - HASHHEX HA2Hex; - - // calculate H(A2) - MD5Init(&Md5Ctx); - MD5Update(&Md5Ctx, pszMethod, strlen(pszMethod)); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszDigestUri, strlen(pszDigestUri)); - if (stricmp(pszQop, "auth-int") == 0) { - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, HEntity, HASHHEXLEN); - }; - MD5Final(HA2, &Md5Ctx); - CvtHex(HA2, HA2Hex); - - // calculate response - MD5Init(&Md5Ctx); - MD5Update(&Md5Ctx, HA1, HASHHEXLEN); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszNonce, strlen(pszNonce)); - MD5Update(&Md5Ctx, ":", 1); - if (*pszQop) { - - - -Franks, et al. 
Standards Track [Page 29] - -RFC 2617 HTTP Authentication June 1999 - - - MD5Update(&Md5Ctx, pszNonceCount, strlen(pszNonceCount)); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszCNonce, strlen(pszCNonce)); - MD5Update(&Md5Ctx, ":", 1); - MD5Update(&Md5Ctx, pszQop, strlen(pszQop)); - MD5Update(&Md5Ctx, ":", 1); - }; - MD5Update(&Md5Ctx, HA2Hex, HASHHEXLEN); - MD5Final(RespHash, &Md5Ctx); - CvtHex(RespHash, Response); -}; - -File "digtest.c": - - -#include -#include "digcalc.h" - -void main(int argc, char ** argv) { - - char * pszNonce = "dcd98b7102dd2f0e8b11d0f600bfb0c093"; - char * pszCNonce = "0a4f113b"; - char * pszUser = "Mufasa"; - char * pszRealm = "testrealm@host.com"; - char * pszPass = "Circle Of Life"; - char * pszAlg = "md5"; - char szNonceCount[9] = "00000001"; - char * pszMethod = "GET"; - char * pszQop = "auth"; - char * pszURI = "/dir/index.html"; - HASHHEX HA1; - HASHHEX HA2 = ""; - HASHHEX Response; - - DigestCalcHA1(pszAlg, pszUser, pszRealm, pszPass, pszNonce, -pszCNonce, HA1); - DigestCalcResponse(HA1, pszNonce, szNonceCount, pszCNonce, pszQop, - pszMethod, pszURI, HA2, Response); - printf("Response = %s\n", Response); -}; - - - - - - - - - - - -Franks, et al. Standards Track [Page 30] - -RFC 2617 HTTP Authentication June 1999 - - -6 Acknowledgments - - Eric W. Sink, of AbiSource, Inc., was one of the original authors - before the specification underwent substantial revision. - - In addition to the authors, valuable discussion instrumental in - creating this document has come from Peter J. Churchyard, Ned Freed, - and David M. Kristol. - - Jim Gettys and Larry Masinter edited this document for update. - -7 References - - [1] Berners-Lee, T., Fielding, R. and H. Frystyk, "Hypertext - Transfer Protocol -- HTTP/1.0", RFC 1945, May 1996. - - [2] Fielding, R., Gettys, J., Mogul, J., Frysyk, H., Masinter, L., - Leach, P. and T. Berners-Lee, "Hypertext Transfer Protocol -- - HTTP/1.1", RFC 2616, June 1999. 
- - [3] Rivest, R., "The MD5 Message-Digest Algorithm", RFC 1321, April - 1992. - - [4] Freed, N. and N. Borenstein. "Multipurpose Internet Mail - Extensions (MIME) Part One: Format of Internet Message Bodies", - RFC 2045, November 1996. - - [5] Dierks, T. and C. Allen "The TLS Protocol, Version 1.0", RFC - 2246, January 1999. - - [6] Franks, J., Hallam-Baker, P., Hostetler, J., Leach, P., - Luotonen, A., Sink, E. and L. Stewart, "An Extension to HTTP : - Digest Access Authentication", RFC 2069, January 1997. - - [7] Berners Lee, T, Fielding, R. and L. Masinter, "Uniform Resource - Identifiers (URI): Generic Syntax", RFC 2396, August 1998. - - [8] Kaliski, B.,Robshaw, M., "Message Authentication with MD5", - CryptoBytes, Sping 1995, RSA Inc, - (http://www.rsa.com/rsalabs/pubs/cryptobytes/spring95/md5.htm) - - [9] Klensin, J., Catoe, R. and P. Krumviede, "IMAP/POP AUTHorize - Extension for Simple Challenge/Response", RFC 2195, September - 1997. - - [10] Morgan, B., Alvestrand, H., Hodges, J., Wahl, M., - "Authentication Methods for LDAP", Work in Progress. - - - - -Franks, et al. Standards Track [Page 31] - -RFC 2617 HTTP Authentication June 1999 - - -8 Authors' Addresses - - John Franks - Professor of Mathematics - Department of Mathematics - Northwestern University - Evanston, IL 60208-2730, USA - - EMail: john@math.nwu.edu - - - Phillip M. Hallam-Baker - Principal Consultant - Verisign Inc. - 301 Edgewater Place - Suite 210 - Wakefield MA 01880, USA - - EMail: pbaker@verisign.com - - - Jeffery L. Hostetler - Software Craftsman - AbiSource, Inc. - 6 Dunlap Court - Savoy, IL 61874 - - EMail: jeff@AbiSource.com - - - Scott D. Lawrence - Agranat Systems, Inc. - 5 Clocktower Place, Suite 400 - Maynard, MA 01754, USA - - EMail: lawrence@agranat.com - - - Paul J. Leach - Microsoft Corporation - 1 Microsoft Way - Redmond, WA 98052, USA - - EMail: paulle@microsoft.com - - - - - - - -Franks, et al. 
Standards Track [Page 32] - -RFC 2617 HTTP Authentication June 1999 - - - Ari Luotonen - Member of Technical Staff - Netscape Communications Corporation - 501 East Middlefield Road - Mountain View, CA 94043, USA - - - Lawrence C. Stewart - Open Market, Inc. - 215 First Street - Cambridge, MA 02142, USA - - EMail: stewart@OpenMarket.com - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Franks, et al. Standards Track [Page 33] - -RFC 2617 HTTP Authentication June 1999 - - -9. Full Copyright Statement - - Copyright (C) The Internet Society (1999). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
- -Acknowledgement - - Funding for the RFC Editor function is currently provided by the - Internet Society. - - - - - - - - - - - - - - - - - - - -Franks, et al. Standards Track [Page 34] - diff --git a/docs/specs/rfc2817.txt b/docs/specs/rfc2817.txt deleted file mode 100644 index d7b7e70..0000000 --- a/docs/specs/rfc2817.txt +++ /dev/null @@ -1,731 +0,0 @@ - - - - - - -Network Working Group R. Khare -Request for Comments: 2817 4K Associates / UC Irvine -Updates: 2616 S. Lawrence -Category: Standards Track Agranat Systems, Inc. - May 2000 - - - Upgrading to TLS Within HTTP/1.1 - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (2000). All Rights Reserved. - -Abstract - - This memo explains how to use the Upgrade mechanism in HTTP/1.1 to - initiate Transport Layer Security (TLS) over an existing TCP - connection. This allows unsecured and secured HTTP traffic to share - the same well known port (in this case, http: at 80 rather than - https: at 443). It also enables "virtual hosting", so a single HTTP + - TLS server can disambiguate traffic intended for several hostnames at - a single IP address. - - Since HTTP/1.1 [1] defines Upgrade as a hop-by-hop mechanism, this - memo also documents the HTTP CONNECT method for establishing end-to- - end tunnels across HTTP proxies. Finally, this memo establishes new - IANA registries for public HTTP status codes, as well as public or - private Upgrade product tokens. 
- - This memo does NOT affect the current definition of the 'https' URI - scheme, which already defines a separate namespace - (http://example.org/ and https://example.org/ are not equivalent). - - - - - - - - - - - -Khare & Lawrence Standards Track [Page 1] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - -Table of Contents - - 1. Motivation . . . . . . . . . . . . . . . . . . . . . . . . . . 2 - 2. Introduction . . . . . . . . . . . . . . . . . . . . . . . . . 3 - 2.1 Requirements Terminology . . . . . . . . . . . . . . . . . . . 4 - 3. Client Requested Upgrade to HTTP over TLS . . . . . . . . . . 4 - 3.1 Optional Upgrade . . . . . . . . . . . . . . . . . . . . . . . 4 - 3.2 Mandatory Upgrade . . . . . . . . . . . . . . . . . . . . . . 4 - 3.3 Server Acceptance of Upgrade Request . . . . . . . . . . . . . 4 - 4. Server Requested Upgrade to HTTP over TLS . . . . . . . . . . 5 - 4.1 Optional Advertisement . . . . . . . . . . . . . . . . . . . . 5 - 4.2 Mandatory Advertisement . . . . . . . . . . . . . . . . . . . 5 - 5. Upgrade across Proxies . . . . . . . . . . . . . . . . . . . . 6 - 5.1 Implications of Hop By Hop Upgrade . . . . . . . . . . . . . . 6 - 5.2 Requesting a Tunnel with CONNECT . . . . . . . . . . . . . . . 6 - 5.3 Establishing a Tunnel with CONNECT . . . . . . . . . . . . . . 7 - 6. Rationale for the use of a 4xx (client error) Status Code . . 7 - 7. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 8 - 7.1 HTTP Status Code Registry . . . . . . . . . . . . . . . . . . 8 - 7.2 HTTP Upgrade Token Registry . . . . . . . . . . . . . . . . . 8 - 8. Security Considerations . . . . . . . . . . . . . . . . . . . 9 - 8.1 Implications for the https: URI Scheme . . . . . . . . . . . . 10 - 8.2 Security Considerations for CONNECT . . . . . . . . . . . . . 10 - References . . . . . . . . . . . . . . . . . . . . . . . . . . 10 - Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . 11 - A. Acknowledgments . . . . . . . . . . . . . . . . . . . . . 
. . 12 - Full Copyright Statement . . . . . . . . . . . . . . . . . . . 13 - -1. Motivation - - The historical practice of deploying HTTP over SSL3 [3] has - distinguished the combination from HTTP alone by a unique URI scheme - and the TCP port number. The scheme 'http' meant the HTTP protocol - alone on port 80, while 'https' meant the HTTP protocol over SSL on - port 443. Parallel well-known port numbers have similarly been - requested -- and in some cases, granted -- to distinguish between - secured and unsecured use of other application protocols (e.g. - snews, ftps). This approach effectively halves the number of - available well known ports. - - At the Washington DC IETF meeting in December 1997, the Applications - Area Directors and the IESG reaffirmed that the practice of issuing - parallel "secure" port numbers should be deprecated. The HTTP/1.1 - Upgrade mechanism can apply Transport Layer Security [6] to an open - HTTP connection. - - - - - - -Khare & Lawrence Standards Track [Page 2] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - - In the nearly two years since, there has been broad acceptance of the - concept behind this proposal, but little interest in implementing - alternatives to port 443 for generic Web browsing. In fact, nothing - in this memo affects the current interpretation of https: URIs. - However, new application protocols built atop HTTP, such as the - Internet Printing Protocol [7], call for just such a mechanism in - order to move ahead in the IETF standards process. - - The Upgrade mechanism also solves the "virtual hosting" problem. - Rather than allocating multiple IP addresses to a single host, an - HTTP/1.1 server will use the Host: header to disambiguate the - intended web service. As HTTP/1.1 usage has grown more prevalent, - more ISPs are offering name-based virtual hosting, thus delaying IP - address space exhaustion. 
- - TLS (and SSL) have been hobbled by the same limitation as earlier - versions of HTTP: the initial handshake does not specify the intended - hostname, relying exclusively on the IP address. Using a cleartext - HTTP/1.1 Upgrade: preamble to the TLS handshake -- choosing the - certificates based on the initial Host: header -- will allow ISPs to - provide secure name-based virtual hosting as well. - -2. Introduction - - TLS, a.k.a., SSL (Secure Sockets Layer), establishes a private end- - to-end connection, optionally including strong mutual authentication, - using a variety of cryptosystems. Initially, a handshake phase uses - three subprotocols to set up a record layer, authenticate endpoints, - set parameters, as well as report errors. Then, there is an ongoing - layered record protocol that handles encryption, compression, and - reassembly for the remainder of the connection. The latter is - intended to be completely transparent. For example, there is no - dependency between TLS's record markers and or certificates and - HTTP/1.1's chunked encoding or authentication. - - Either the client or server can use the HTTP/1.1 [1] Upgrade - mechanism (Section 14.42) to indicate that a TLS-secured connection - is desired or necessary. This memo defines the "TLS/1.0" Upgrade - token, and a new HTTP Status Code, "426 Upgrade Required". - - Section 3 and Section 4 describe the operation of a directly - connected client and server. Intermediate proxies must establish an - end-to-end tunnel before applying those operations, as explained in - Section 5. - - - - - - - -Khare & Lawrence Standards Track [Page 3] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - -2.1 Requirements Terminology - - Keywords "MUST", "MUST NOT", "REQUIRED", "SHOULD", "SHOULD NOT" and - "MAY" that appear in this document are to be interpreted as described - in RFC 2119 [11]. - -3. 
Client Requested Upgrade to HTTP over TLS - - When the client sends an HTTP/1.1 request with an Upgrade header - field containing the token "TLS/1.0", it is requesting the server to - complete the current HTTP/1.1 request after switching to TLS/1.0. - -3.1 Optional Upgrade - - A client MAY offer to switch to secured operation during any clear - HTTP request when an unsecured response would be acceptable: - - GET http://example.bank.com/acct_stat.html?749394889300 HTTP/1.1 - Host: example.bank.com - Upgrade: TLS/1.0 - Connection: Upgrade - - In this case, the server MAY respond to the clear HTTP operation - normally, OR switch to secured operation (as detailed in the next - section). - - Note that HTTP/1.1 [1] specifies "the upgrade keyword MUST be - supplied within a Connection header field (section 14.10) whenever - Upgrade is present in an HTTP/1.1 message". - -3.2 Mandatory Upgrade - - If an unsecured response would be unacceptable, a client MUST send an - OPTIONS request first to complete the switch to TLS/1.0 (if - possible). - - OPTIONS * HTTP/1.1 - Host: example.bank.com - Upgrade: TLS/1.0 - Connection: Upgrade - -3.3 Server Acceptance of Upgrade Request - - As specified in HTTP/1.1 [1], if the server is prepared to initiate - the TLS handshake, it MUST send the intermediate "101 Switching - Protocol" and MUST include an Upgrade response header specifying the - tokens of the protocol stack it is switching to: - - - - -Khare & Lawrence Standards Track [Page 4] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - - HTTP/1.1 101 Switching Protocols - Upgrade: TLS/1.0, HTTP/1.1 - Connection: Upgrade - - Note that the protocol tokens listed in the Upgrade header of a 101 - Switching Protocols response specify an ordered 'bottom-up' stack. - - As specified in HTTP/1.1 [1], Section 10.1.2: "The server will - switch protocols to those defined by the response's Upgrade header - field immediately after the empty line which terminates the 101 - response". 
- - Once the TLS handshake completes successfully, the server MUST - continue with the response to the original request. Any TLS handshake - failure MUST lead to disconnection, per the TLS error alert - specification. - -4. Server Requested Upgrade to HTTP over TLS - - The Upgrade response header field advertises possible protocol - upgrades a server MAY accept. In conjunction with the "426 Upgrade - Required" status code, a server can advertise the exact protocol - upgrade(s) that a client MUST accept to complete the request. - -4.1 Optional Advertisement - - As specified in HTTP/1.1 [1], the server MAY include an Upgrade - header in any response other than 101 or 426 to indicate a - willingness to switch to any (combination) of the protocols listed. - -4.2 Mandatory Advertisement - - A server MAY indicate that a client request can not be completed - without TLS using the "426 Upgrade Required" status code, which MUST - include an an Upgrade header field specifying the token of the - required TLS version. - - HTTP/1.1 426 Upgrade Required - Upgrade: TLS/1.0, HTTP/1.1 - Connection: Upgrade - - The server SHOULD include a message body in the 426 response which - indicates in human readable form the reason for the error and - describes any alternative courses which may be available to the user. - - Note that even if a client is willing to use TLS, it must use the - operations in Section 3 to proceed; the TLS handshake cannot begin - immediately after the 426 response. - - - -Khare & Lawrence Standards Track [Page 5] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - -5. Upgrade across Proxies - - As a hop-by-hop header, Upgrade is negotiated between each pair of - HTTP counterparties. If a User Agent sends a request with an Upgrade - header to a proxy, it is requesting a change to the protocol between - itself and the proxy, not an end-to-end change. 
- - Since TLS, in particular, requires end-to-end connectivity to provide - authentication and prevent man-in-the-middle attacks, this memo - specifies the CONNECT method to establish a tunnel across proxies. - - Once a tunnel is established, any of the operations in Section 3 can - be used to establish a TLS connection. - -5.1 Implications of Hop By Hop Upgrade - - If an origin server receives an Upgrade header from a proxy and - responds with a 101 Switching Protocols response, it is changing the - protocol only on the connection between the proxy and itself. - Similarly, a proxy might return a 101 response to its client to - change the protocol on that connection independently of the protocols - it is using to communicate toward the origin server. - - These scenarios also complicate diagnosis of a 426 response. Since - Upgrade is a hop-by-hop header, a proxy that does not recognize 426 - might remove the accompanying Upgrade header and prevent the client - from determining the required protocol switch. If a client receives - a 426 status without an accompanying Upgrade header, it will need to - request an end to end tunnel connection as described in Section 5.2 - and repeat the request in order to obtain the required upgrade - information. - - This hop-by-hop definition of Upgrade was a deliberate choice. It - allows for incremental deployment on either side of proxies, and for - optimized protocols between cascaded proxies without the knowledge of - the parties that are not a part of the change. - -5.2 Requesting a Tunnel with CONNECT - - A CONNECT method requests that a proxy establish a tunnel connection - on its behalf. 
The Request-URI portion of the Request-Line is always - an 'authority' as defined by URI Generic Syntax [2], which is to say - the host name and port number destination of the requested connection - separated by a colon: - - CONNECT server.example.com:80 HTTP/1.1 - Host: server.example.com:80 - - - - -Khare & Lawrence Standards Track [Page 6] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - - Other HTTP mechanisms can be used normally with the CONNECT method -- - except end-to-end protocol Upgrade requests, of course, since the - tunnel must be established first. - - For example, proxy authentication might be used to establish the - authority to create a tunnel: - - CONNECT server.example.com:80 HTTP/1.1 - Host: server.example.com:80 - Proxy-Authorization: basic aGVsbG86d29ybGQ= - - Like any other pipelined HTTP/1.1 request, data to be tunneled may be - sent immediately after the blank line. The usual caveats also apply: - data may be discarded if the eventual response is negative, and the - connection may be reset with no response if more than one TCP segment - is outstanding. - -5.3 Establishing a Tunnel with CONNECT - - Any successful (2xx) response to a CONNECT request indicates that the - proxy has established a connection to the requested host and port, - and has switched to tunneling the current connection to that server - connection. - - It may be the case that the proxy itself can only reach the requested - origin server through another proxy. In this case, the first proxy - SHOULD make a CONNECT request of that next proxy, requesting a tunnel - to the authority. A proxy MUST NOT respond with any 2xx status code - unless it has either a direct or tunnel connection established to the - authority. - - An origin server which receives a CONNECT request for itself MAY - respond with a 2xx status code to indicate that a connection is - established. 
- - If at any point either one of the peers gets disconnected, any - outstanding data that came from that peer will be passed to the other - one, and after that also the other connection will be terminated by - the proxy. If there is outstanding data to that peer undelivered, - that data will be discarded. - -6. Rationale for the use of a 4xx (client error) Status Code - - Reliable, interoperable negotiation of Upgrade features requires an - unambiguous failure signal. The 426 Upgrade Required status code - allows a server to definitively state the precise protocol extensions - a given resource must be served with. - - - - -Khare & Lawrence Standards Track [Page 7] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - - It might at first appear that the response should have been some form - of redirection (a 3xx code), by analogy to an old-style redirection - to an https: URI. User agents that do not understand Upgrade: - preclude this. - - Suppose that a 3xx code had been assigned for "Upgrade Required"; a - user agent that did not recognize it would treat it as 300. It would - then properly look for a "Location" header in the response and - attempt to repeat the request at the URL in that header field. Since - it did not know to Upgrade to incorporate the TLS layer, it would at - best fail again at the new URL. - -7. IANA Considerations - - IANA shall create registries for two name spaces, as described in BCP - 26 [10]: - - o HTTP Status Codes - o HTTP Upgrade Tokens - -7.1 HTTP Status Code Registry - - The HTTP Status Code Registry defines the name space for the Status- - Code token in the Status line of an HTTP response. The initial - values for this name space are those specified by: - - 1. Draft Standard for HTTP/1.1 [1] - 2. Web Distributed Authoring and Versioning [4] [defines 420-424] - 3. WebDAV Advanced Collections [5] (Work in Progress) [defines 425] - 4. 
Section 6 [defines 426] - - Values to be added to this name space SHOULD be subject to review in - the form of a standards track document within the IETF Applications - Area. Any such document SHOULD be traceable through statuses of - either 'Obsoletes' or 'Updates' to the Draft Standard for - HTTP/1.1 [1]. - -7.2 HTTP Upgrade Token Registry - - The HTTP Upgrade Token Registry defines the name space for product - tokens used to identify protocols in the Upgrade HTTP header field. - Each registered token should be associated with one or a set of - specifications, and with contact information. - - The Draft Standard for HTTP/1.1 [1] specifies that these tokens obey - the production for 'product': - - - - - -Khare & Lawrence Standards Track [Page 8] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - - product = token ["/" product-version] - product-version = token - - Registrations should be allowed on a First Come First Served basis as - described in BCP 26 [10]. These specifications need not be IETF - documents or be subject to IESG review, but should obey the following - rules: - - 1. A token, once registered, stays registered forever. - 2. The registration MUST name a responsible party for the - registration. - 3. The registration MUST name a point of contact. - 4. The registration MAY name the documentation required for the - token. - 5. The responsible party MAY change the registration at any time. - The IANA will keep a record of all such changes, and make them - available upon request. - 6. The responsible party for the first registration of a "product" - token MUST approve later registrations of a "version" token - together with that "product" token before they can be registered. - 7. If absolutely required, the IESG MAY reassign the responsibility - for a token. This will normally only be used in the case when a - responsible party cannot be contacted. 
- - This specification defines the protocol token "TLS/1.0" as the - identifier for the protocol specified by The TLS Protocol [6]. - - It is NOT required that specifications for upgrade tokens be made - publicly available, but the contact information for the registration - SHOULD be. - -8. Security Considerations - - The potential for a man-in-the-middle attack (deleting the Upgrade - header) remains the same as current, mixed http/https practice: - - o Removing the Upgrade header is similar to rewriting web pages to - change https:// links to http:// links. - o The risk is only present if the server is willing to vend such - information over both a secure and an insecure channel in the - first place. - o If the client knows for a fact that a server is TLS-compliant, it - can insist on it by only sending an Upgrade request with a no-op - method like OPTIONS. - o Finally, as the https: specification warns, "users should - carefully examine the certificate presented by the server to - determine if it meets their expectations". - - - - -Khare & Lawrence Standards Track [Page 9] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - - Furthermore, for clients that do not explicitly try to invoke TLS, - servers can use the Upgrade header in any response other than 101 or - 426 to advertise TLS compliance. Since TLS compliance should be - considered a feature of the server and not the resource at hand, it - should be sufficient to send it once, and let clients cache that - fact. - -8.1 Implications for the https: URI Scheme - - While nothing in this memo affects the definition of the 'https' URI - scheme, widespread adoption of this mechanism for HyperText content - could use 'http' to identify both secure and non-secure resources. - - The choice of what security characteristics are required on the - connection is left to the client and server. This allows either - party to use any information available in making this determination. 
- For example, user agents may rely on user preference settings or - information about the security of the network such as 'TLS required - on all POST operations not on my local net', or servers may apply - resource access rules such as 'the FORM on this page must be served - and submitted using TLS'. - -8.2 Security Considerations for CONNECT - - A generic TCP tunnel is fraught with security risks. First, such - authorization should be limited to a small number of known ports. - The Upgrade: mechanism defined here only requires onward tunneling at - port 80. Second, since tunneled data is opaque to the proxy, there - are additional risks to tunneling to other well-known or reserved - ports. A putative HTTP client CONNECTing to port 25 could relay spam - via SMTP, for example. - -References - - [1] Fielding, R., Gettys, J., Mogul, J., Frystyk, H., Masinter, L., - Leach, P. and T. Berners-Lee, "Hypertext Transfer Protocol -- - HTTP/1.1", RFC 2616, June 1999. - - [2] Berners-Lee, T., Fielding, R. and L. Masinter, "URI Generic - Syntax", RFC 2396, August 1998. - - [3] Rescorla, E., "HTTP Over TLS", RFC 2818, May 2000. - - [4] Goland, Y., Whitehead, E., Faizi, A., Carter, S. and D. Jensen, - "Web Distributed Authoring and Versioning", RFC 2518, February - 1999. - - - - - -Khare & Lawrence Standards Track [Page 10] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - - [5] Slein, J., Whitehead, E.J., et al., "WebDAV Advanced Collections - Protocol", Work In Progress. - - [6] Dierks, T. and C. Allen, "The TLS Protocol", RFC 2246, January - 1999. - - [7] Herriot, R., Butler, S., Moore, P. and R. Turner, "Internet - Printing Protocol/1.0: Encoding and Transport", RFC 2565, April - 1999. - - [8] Luotonen, A., "Tunneling TCP based protocols through Web proxy - servers", Work In Progress. (Also available in: Luotonen, Ari. - Web Proxy Servers, Prentice-Hall, 1997 ISBN:0136806120.) - - [9] Rose, M., "Writing I-Ds and RFCs using XML", RFC 2629, June - 1999. - - [10] Narten, T. and H. 
Alvestrand, "Guidelines for Writing an IANA - Considerations Section in RFCs", BCP 26, RFC 2434, October 1998. - - [11] Bradner, S., "Key words for use in RFCs to Indicate Requirement - Levels", BCP 14, RFC 2119, March 1997. - -Authors' Addresses - - Rohit Khare - 4K Associates / UC Irvine - 3207 Palo Verde - Irvine, CA 92612 - US - - Phone: +1 626 806 7574 - EMail: rohit@4K-associates.com - URI: http://www.4K-associates.com/ - - - Scott Lawrence - Agranat Systems, Inc. - 5 Clocktower Place - Suite 400 - Maynard, MA 01754 - US - - Phone: +1 978 461 0888 - EMail: lawrence@agranat.com - URI: http://www.agranat.com/ - - - - - -Khare & Lawrence Standards Track [Page 11] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - -Appendix A. Acknowledgments - - The CONNECT method was originally described in a Work in Progress - titled, "Tunneling TCP based protocols through Web proxy servers", - [8] by Ari Luotonen of Netscape Communications Corporation. It was - widely implemented by HTTP proxies, but was never made a part of any - IETF Standards Track document. The method name CONNECT was reserved, - but not defined in [1]. - - The definition provided here is derived directly from that earlier - memo, with some editorial changes and conformance to the stylistic - conventions since established in other HTTP specifications. - - Additional Thanks to: - - o Paul Hoffman for his work on the STARTTLS command extension for - ESMTP. - o Roy Fielding for assistance with the rationale behind Upgrade: - and its interaction with OPTIONS. - o Eric Rescorla for his work on standardizing the existing https: - practice to compare with. - o Marshall Rose, for the xml2rfc document type description and tools - [9]. - o Jim Whitehead, for sorting out the current range of available HTTP - status codes. - o Henrik Frystyk Nielsen, whose work on the Mandatory extension - mechanism pointed out a hop-by-hop Upgrade still requires - tunneling. 
- o Harald Alvestrand for improvements to the token registration - rules. - - - - - - - - - - - - - - - - - - - - - -Khare & Lawrence Standards Track [Page 12] - -RFC 2817 HTTP Upgrade to TLS May 2000 - - -Full Copyright Statement - - Copyright (C) The Internet Society (2000). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - -Acknowledgement - - Funding for the RFC Editor function is currently provided by the - Internet Society. 
- - - - - - - - - - - - - - - - - - - -Khare & Lawrence Standards Track [Page 13] - diff --git a/docs/specs/rfc2818.txt b/docs/specs/rfc2818.txt deleted file mode 100644 index 219a1c4..0000000 --- a/docs/specs/rfc2818.txt +++ /dev/null @@ -1,395 +0,0 @@ - - - - - - -Network Working Group E. Rescorla -Request for Comments: 2818 RTFM, Inc. -Category: Informational May 2000 - - - HTTP Over TLS - -Status of this Memo - - This memo provides information for the Internet community. It does - not specify an Internet standard of any kind. Distribution of this - memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (2000). All Rights Reserved. - -Abstract - - This memo describes how to use TLS to secure HTTP connections over - the Internet. Current practice is to layer HTTP over SSL (the - predecessor to TLS), distinguishing secured traffic from insecure - traffic by the use of a different server port. This document - documents that practice using TLS. A companion document describes a - method for using HTTP/TLS over the same port as normal HTTP - [RFC2817]. - -Table of Contents - - 1. Introduction . . . . . . . . . . . . . . . . . . . . . . 2 - 1.1. Requirements Terminology . . . . . . . . . . . . . . . 2 - 2. HTTP Over TLS . . . . . . . . . . . . . . . . . . . . . . 2 - 2.1. Connection Initiation . . . . . . . . . . . . . . . . . 2 - 2.2. Connection Closure . . . . . . . . . . . . . . . . . . 2 - 2.2.1. Client Behavior . . . . . . . . . . . . . . . . . . . 3 - 2.2.2. Server Behavior . . . . . . . . . . . . . . . . . . . 3 - 2.3. Port Number . . . . . . . . . . . . . . . . . . . . . . 4 - 2.4. URI Format . . . . . . . . . . . . . . . . . . . . . . 4 - 3. Endpoint Identification . . . . . . . . . . . . . . . . . 4 - 3.1. Server Identity . . . . . . . . . . . . . . . . . . . . 4 - 3.2. Client Identity . . . . . . . . . . . . . . . . . . . . 5 - References . . . . . . . . . . . . . . . . . . . . . . . . . 6 - Security Considerations . . . . . . . . . . 
. . . . . . . . 6 - Author's Address . . . . . . . . . . . . . . . . . . . . . . 6 - Full Copyright Statement . . . . . . . . . . . . . . . . . . 7 - - - - - - -Rescorla Informational [Page 1] - -RFC 2818 HTTP Over TLS May 2000 - - -1. Introduction - - HTTP [RFC2616] was originally used in the clear on the Internet. - However, increased use of HTTP for sensitive applications has - required security measures. SSL, and its successor TLS [RFC2246] were - designed to provide channel-oriented security. This document - describes how to use HTTP over TLS. - -1.1. Requirements Terminology - - Keywords "MUST", "MUST NOT", "REQUIRED", "SHOULD", "SHOULD NOT" and - "MAY" that appear in this document are to be interpreted as described - in [RFC2119]. - -2. HTTP Over TLS - - Conceptually, HTTP/TLS is very simple. Simply use HTTP over TLS - precisely as you would use HTTP over TCP. - -2.1. Connection Initiation - - The agent acting as the HTTP client should also act as the TLS - client. It should initiate a connection to the server on the - appropriate port and then send the TLS ClientHello to begin the TLS - handshake. When the TLS handshake has finished. The client may then - initiate the first HTTP request. All HTTP data MUST be sent as TLS - "application data". Normal HTTP behavior, including retained - connections should be followed. - -2.2. Connection Closure - - TLS provides a facility for secure connection closure. When a valid - closure alert is received, an implementation can be assured that no - further data will be received on that connection. TLS - implementations MUST initiate an exchange of closure alerts before - closing a connection. A TLS implementation MAY, after sending a - closure alert, close the connection without waiting for the peer to - send its closure alert, generating an "incomplete close". Note that - an implementation which does this MAY choose to reuse the session. 
- This SHOULD only be done when the application knows (typically - through detecting HTTP message boundaries) that it has received all - the message data that it cares about. - - As specified in [RFC2246], any implementation which receives a - connection close without first receiving a valid closure alert (a - "premature close") MUST NOT reuse that session. Note that a - premature close does not call into question the security of the data - already received, but simply indicates that subsequent data might - - - -Rescorla Informational [Page 2] - -RFC 2818 HTTP Over TLS May 2000 - - - have been truncated. Because TLS is oblivious to HTTP - request/response boundaries, it is necessary to examine the HTTP data - itself (specifically the Content-Length header) to determine whether - the truncation occurred inside a message or between messages. - -2.2.1. Client Behavior - - Because HTTP uses connection closure to signal end of server data, - client implementations MUST treat any premature closes as errors and - the data received as potentially truncated. While in some cases the - HTTP protocol allows the client to find out whether truncation took - place so that, if it received the complete reply, it may tolerate - such errors following the principle to "[be] strict when sending and - tolerant when receiving" [RFC1958], often truncation does not show in - the HTTP protocol data; two cases in particular deserve special note: - - A HTTP response without a Content-Length header. Since data length - in this situation is signalled by connection close a premature - close generated by the server cannot be distinguished from a - spurious close generated by an attacker. - - A HTTP response with a valid Content-Length header closed before - all data has been read. Because TLS does not provide document - oriented protection, it is impossible to determine whether the - server has miscomputed the Content-Length or an attacker has - truncated the connection. 
- - There is one exception to the above rule. When encountering a - premature close, a client SHOULD treat as completed all requests for - which it has received as much data as specified in the Content-Length - header. - - A client detecting an incomplete close SHOULD recover gracefully. It - MAY resume a TLS session closed in this fashion. - - Clients MUST send a closure alert before closing the connection. - Clients which are unprepared to receive any more data MAY choose not - to wait for the server's closure alert and simply close the - connection, thus generating an incomplete close on the server side. - -2.2.2. Server Behavior - - RFC 2616 permits an HTTP client to close the connection at any time, - and requires servers to recover gracefully. In particular, servers - SHOULD be prepared to receive an incomplete close from the client, - since the client can often determine when the end of server data is. - Servers SHOULD be willing to resume TLS sessions closed in this - fashion. - - - -Rescorla Informational [Page 3] - -RFC 2818 HTTP Over TLS May 2000 - - - Implementation note: In HTTP implementations which do not use - persistent connections, the server ordinarily expects to be able to - signal end of data by closing the connection. When Content-Length is - used, however, the client may have already sent the closure alert and - dropped the connection. - - Servers MUST attempt to initiate an exchange of closure alerts with - the client before closing the connection. Servers MAY close the - connection after sending the closure alert, thus generating an - incomplete close on the client side. - -2.3. Port Number - - The first data that an HTTP server expects to receive from the client - is the Request-Line production. The first data that a TLS server (and - hence an HTTP/TLS server) expects to receive is the ClientHello. - Consequently, common practice has been to run HTTP/TLS over a - separate port in order to distinguish which protocol is being used. 
- When HTTP/TLS is being run over a TCP/IP connection, the default port - is 443. This does not preclude HTTP/TLS from being run over another - transport. TLS only presumes a reliable connection-oriented data - stream. - -2.4. URI Format - - HTTP/TLS is differentiated from HTTP URIs by using the 'https' - protocol identifier in place of the 'http' protocol identifier. An - example URI specifying HTTP/TLS is: - - https://www.example.com/~smith/home.html - -3. Endpoint Identification - -3.1. Server Identity - - In general, HTTP/TLS requests are generated by dereferencing a URI. - As a consequence, the hostname for the server is known to the client. - If the hostname is available, the client MUST check it against the - server's identity as presented in the server's Certificate message, - in order to prevent man-in-the-middle attacks. - - If the client has external information as to the expected identity of - the server, the hostname check MAY be omitted. (For instance, a - client may be connecting to a machine whose address and hostname are - dynamic but the client knows the certificate that the server will - present.) In such cases, it is important to narrow the scope of - acceptable certificates as much as possible in order to prevent man - - - - -Rescorla Informational [Page 4] - -RFC 2818 HTTP Over TLS May 2000 - - - in the middle attacks. In special cases, it may be appropriate for - the client to simply ignore the server's identity, but it must be - understood that this leaves the connection open to active attack. - - If a subjectAltName extension of type dNSName is present, that MUST - be used as the identity. Otherwise, the (most specific) Common Name - field in the Subject field of the certificate MUST be used. Although - the use of the Common Name is existing practice, it is deprecated and - Certification Authorities are encouraged to use the dNSName instead. - - Matching is performed using the matching rules specified by - [RFC2459]. 
If more than one identity of a given type is present in - the certificate (e.g., more than one dNSName name, a match in any one - of the set is considered acceptable.) Names may contain the wildcard - character * which is considered to match any single domain name - component or component fragment. E.g., *.a.com matches foo.a.com but - not bar.foo.a.com. f*.com matches foo.com but not bar.com. - - In some cases, the URI is specified as an IP address rather than a - hostname. In this case, the iPAddress subjectAltName must be present - in the certificate and must exactly match the IP in the URI. - - If the hostname does not match the identity in the certificate, user - oriented clients MUST either notify the user (clients MAY give the - user the opportunity to continue with the connection in any case) or - terminate the connection with a bad certificate error. Automated - clients MUST log the error to an appropriate audit log (if available) - and SHOULD terminate the connection (with a bad certificate error). - Automated clients MAY provide a configuration setting that disables - this check, but MUST provide a setting which enables it. - - Note that in many cases the URI itself comes from an untrusted - source. The above-described check provides no protection against - attacks where this source is compromised. For example, if the URI was - obtained by clicking on an HTML page which was itself obtained - without using HTTP/TLS, a man in the middle could have replaced the - URI. In order to prevent this form of attack, users should carefully - examine the certificate presented by the server to determine if it - meets their expectations. - -3.2. Client Identity - - Typically, the server has no external knowledge of what the client's - identity ought to be and so checks (other than that the client has a - certificate chain rooted in an appropriate CA) are not possible. 
If a - server has such knowledge (typically from some source external to - HTTP or TLS) it SHOULD check the identity as described above. - - - - -Rescorla Informational [Page 5] - -RFC 2818 HTTP Over TLS May 2000 - - -References - - [RFC2459] Housley, R., Ford, W., Polk, W. and D. Solo, "Internet - Public Key Infrastructure: Part I: X.509 Certificate and - CRL Profile", RFC 2459, January 1999. - - [RFC2616] Fielding, R., Gettys, J., Mogul, J., Frystyk, H., Masinter, - L., Leach, P. and T. Berners-Lee, "Hypertext Transfer - Protocol, HTTP/1.1", RFC 2616, June 1999. - - [RFC2119] Bradner, S., "Key Words for use in RFCs to indicate - Requirement Levels", BCP 14, RFC 2119, March 1997. - - [RFC2246] Dierks, T. and C. Allen, "The TLS Protocol", RFC 2246, - January 1999. - - [RFC2817] Khare, R. and S. Lawrence, "Upgrading to TLS Within - HTTP/1.1", RFC 2817, May 2000. - -Security Considerations - - This entire document is about security. - -Author's Address - - Eric Rescorla - RTFM, Inc. - 30 Newell Road, #16 - East Palo Alto, CA 94303 - - Phone: (650) 328-8631 - EMail: ekr@rtfm.com - - - - - - - - - - - - - - - - - - - -Rescorla Informational [Page 6] - -RFC 2818 HTTP Over TLS May 2000 - - -Full Copyright Statement - - Copyright (C) The Internet Society (2000). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. 
However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - -Acknowledgement - - Funding for the RFC Editor function is currently provided by the - Internet Society. - - - - - - - - - - - - - - - - - - - -Rescorla Informational [Page 7] - diff --git a/docs/specs/rfc2965.txt b/docs/specs/rfc2965.txt deleted file mode 100644 index 8a4d02b..0000000 --- a/docs/specs/rfc2965.txt +++ /dev/null @@ -1,1459 +0,0 @@ - - - - - - -Network Working Group D. Kristol -Request for Comments: 2965 Bell Laboratories, Lucent Technologies -Obsoletes: 2109 L. Montulli -Category: Standards Track Epinions.com, Inc. - October 2000 - - - HTTP State Management Mechanism - -Status of this Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (2000). 
All Rights Reserved. - -IESG Note - - The IESG notes that this mechanism makes use of the .local top-level - domain (TLD) internally when handling host names that don't contain - any dots, and that this mechanism might not work in the expected way - should an actual .local TLD ever be registered. - -Abstract - - This document specifies a way to create a stateful session with - Hypertext Transfer Protocol (HTTP) requests and responses. It - describes three new headers, Cookie, Cookie2, and Set-Cookie2, which - carry state information between participating origin servers and user - agents. The method described here differs from Netscape's Cookie - proposal [Netscape], but it can interoperate with HTTP/1.0 user - agents that use Netscape's method. (See the HISTORICAL section.) - - This document reflects implementation experience with RFC 2109 and - obsoletes it. - -1. TERMINOLOGY - - The terms user agent, client, server, proxy, origin server, and - http_URL have the same meaning as in the HTTP/1.1 specification - [RFC2616]. The terms abs_path and absoluteURI have the same meaning - as in the URI Syntax specification [RFC2396]. - - - - -Kristol & Montulli Standards Track [Page 1] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - Host name (HN) means either the host domain name (HDN) or the numeric - Internet Protocol (IP) address of a host. The fully qualified domain - name is preferred; use of numeric IP addresses is strongly - discouraged. - - The terms request-host and request-URI refer to the values the client - would send to the server as, respectively, the host (but not port) - and abs_path portions of the absoluteURI (http_URL) of the HTTP - request line. Note that request-host is a HN. - - The term effective host name is related to host name. If a host name - contains no dots, the effective host name is that name with the - string .local appended to it. Otherwise the effective host name is - the same as the host name. 
Note that all effective host names - contain at least one dot. - - The term request-port refers to the port portion of the absoluteURI - (http_URL) of the HTTP request line. If the absoluteURI has no - explicit port, the request-port is the HTTP default, 80. The - request-port of a cookie is the request-port of the request in which - a Set-Cookie2 response header was returned to the user agent. - - Host names can be specified either as an IP address or a HDN string. - Sometimes we compare one host name with another. (Such comparisons - SHALL be case-insensitive.) Host A's name domain-matches host B's if - - * their host name strings string-compare equal; or - - * A is a HDN string and has the form NB, where N is a non-empty - name string, B has the form .B', and B' is a HDN string. (So, - x.y.com domain-matches .Y.com but not Y.com.) - - Note that domain-match is not a commutative operation: a.b.c.com - domain-matches .c.com, but not the reverse. - - The reach R of a host name H is defined as follows: - - * If - - - H is the host domain name of a host; and, - - - H has the form A.B; and - - - A has no embedded (that is, interior) dots; and - - - B has at least one embedded dot, or B is the string "local". - then the reach of H is .B. - - - - -Kristol & Montulli Standards Track [Page 2] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - * Otherwise, the reach of H is H. - - For two strings that represent paths, P1 and P2, P1 path-matches P2 - if P2 is a prefix of P1 (including the case where P1 and P2 string- - compare equal). Thus, the string /tec/waldo path-matches /tec. - - Because it was used in Netscape's original implementation of state - management, we will use the term cookie to refer to the state - information that passes between an origin server and user agent, and - that gets stored by the user agent. 
- -1.1 Requirements - - The key words "MAY", "MUST", "MUST NOT", "OPTIONAL", "RECOMMENDED", - "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT" in this - document are to be interpreted as described in RFC 2119 [RFC2119]. - -2. STATE AND SESSIONS - - This document describes a way to create stateful sessions with HTTP - requests and responses. Currently, HTTP servers respond to each - client request without relating that request to previous or - subsequent requests; the state management mechanism allows clients - and servers that wish to exchange state information to place HTTP - requests and responses within a larger context, which we term a - "session". This context might be used to create, for example, a - "shopping cart", in which user selections can be aggregated before - purchase, or a magazine browsing system, in which a user's previous - reading affects which offerings are presented. - - Neither clients nor servers are required to support cookies. A - server MAY refuse to provide content to a client that does not return - the cookies it sends. - -3. DESCRIPTION - - We describe here a way for an origin server to send state information - to the user agent, and for the user agent to return the state - information to the origin server. The goal is to have a minimal - impact on HTTP and user agents. - -3.1 Syntax: General - - The two state management headers, Set-Cookie2 and Cookie, have common - syntactic properties involving attribute-value pairs. The following - grammar uses the notation, and tokens DIGIT (decimal digits), token - - - - - -Kristol & Montulli Standards Track [Page 3] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - (informally, a sequence of non-special, non-white space characters), - and http_URL from the HTTP/1.1 specification [RFC2616] to describe - their syntax. 
- - av-pairs = av-pair *(";" av-pair) - av-pair = attr ["=" value] ; optional value - attr = token - value = token | quoted-string - - Attributes (names) (attr) are case-insensitive. White space is - permitted between tokens. Note that while the above syntax - description shows value as optional, most attrs require them. - - NOTE: The syntax above allows whitespace between the attribute and - the = sign. - -3.2 Origin Server Role - - 3.2.1 General The origin server initiates a session, if it so - desires. To do so, it returns an extra response header to the - client, Set-Cookie2. (The details follow later.) - - A user agent returns a Cookie request header (see below) to the - origin server if it chooses to continue a session. The origin server - MAY ignore it or use it to determine the current state of the - session. It MAY send back to the client a Set-Cookie2 response - header with the same or different information, or it MAY send no - Set-Cookie2 header at all. The origin server effectively ends a - session by sending the client a Set-Cookie2 header with Max-Age=0. - - Servers MAY return Set-Cookie2 response headers with any response. - User agents SHOULD send Cookie request headers, subject to other - rules detailed below, with every request. - - An origin server MAY include multiple Set-Cookie2 headers in a - response. Note that an intervening gateway could fold multiple such - headers into a single header. 
- - - - - - - - - - - - - - -Kristol & Montulli Standards Track [Page 4] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - 3.2.2 Set-Cookie2 Syntax The syntax for the Set-Cookie2 response - header is - - set-cookie = "Set-Cookie2:" cookies - cookies = 1#cookie - cookie = NAME "=" VALUE *(";" set-cookie-av) - NAME = attr - VALUE = value - set-cookie-av = "Comment" "=" value - | "CommentURL" "=" <"> http_URL <"> - | "Discard" - | "Domain" "=" value - | "Max-Age" "=" value - | "Path" "=" value - | "Port" [ "=" <"> portlist <"> ] - | "Secure" - | "Version" "=" 1*DIGIT - portlist = 1#portnum - portnum = 1*DIGIT - - Informally, the Set-Cookie2 response header comprises the token Set- - Cookie2:, followed by a comma-separated list of one or more cookies. - Each cookie begins with a NAME=VALUE pair, followed by zero or more - semi-colon-separated attribute-value pairs. The syntax for - attribute-value pairs was shown earlier. The specific attributes and - the semantics of their values follows. The NAME=VALUE attribute- - value pair MUST come first in each cookie. The others, if present, - can occur in any order. If an attribute appears more than once in a - cookie, the client SHALL use only the value associated with the first - appearance of the attribute; a client MUST ignore values after the - first. - - The NAME of a cookie MAY be the same as one of the attributes in this - specification. However, because the cookie's NAME must come first in - a Set-Cookie2 response header, the NAME and its VALUE cannot be - confused with an attribute-value pair. - - NAME=VALUE - REQUIRED. The name of the state information ("cookie") is NAME, - and its value is VALUE. NAMEs that begin with $ are reserved and - MUST NOT be used by applications. - - The VALUE is opaque to the user agent and may be anything the - origin server chooses to send, possibly in a server-selected - printable ASCII encoding. 
"Opaque" implies that the content is of - interest and relevance only to the origin server. The content - may, in fact, be readable by anyone that examines the Set-Cookie2 - header. - - - -Kristol & Montulli Standards Track [Page 5] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - Comment=value - OPTIONAL. Because cookies can be used to derive or store private - information about a user, the value of the Comment attribute - allows an origin server to document how it intends to use the - cookie. The user can inspect the information to decide whether to - initiate or continue a session with this cookie. Characters in - value MUST be in UTF-8 encoding. [RFC2279] - - CommentURL="http_URL" - OPTIONAL. Because cookies can be used to derive or store private - information about a user, the CommentURL attribute allows an - origin server to document how it intends to use the cookie. The - user can inspect the information identified by the URL to decide - whether to initiate or continue a session with this cookie. - - Discard - OPTIONAL. The Discard attribute instructs the user agent to - discard the cookie unconditionally when the user agent terminates. - - Domain=value - OPTIONAL. The value of the Domain attribute specifies the domain - for which the cookie is valid. If an explicitly specified value - does not start with a dot, the user agent supplies a leading dot. - - Max-Age=value - OPTIONAL. The value of the Max-Age attribute is delta-seconds, - the lifetime of the cookie in seconds, a decimal non-negative - integer. To handle cached cookies correctly, a client SHOULD - calculate the age of the cookie according to the age calculation - rules in the HTTP/1.1 specification [RFC2616]. When the age is - greater than delta-seconds seconds, the client SHOULD discard the - cookie. A value of zero means the cookie SHOULD be discarded - immediately. - - Path=value - OPTIONAL. 
The value of the Path attribute specifies the subset of - URLs on the origin server to which this cookie applies. - - Port[="portlist"] - OPTIONAL. The Port attribute restricts the port to which a cookie - may be returned in a Cookie request header. Note that the syntax - REQUIREs quotes around the OPTIONAL portlist even if there is only - one portnum in portlist. - - - - - - - - -Kristol & Montulli Standards Track [Page 6] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - Secure - OPTIONAL. The Secure attribute (with no value) directs the user - agent to use only (unspecified) secure means to contact the origin - server whenever it sends back this cookie, to protect the - confidentially and authenticity of the information in the cookie. - - The user agent (possibly with user interaction) MAY determine what - level of security it considers appropriate for "secure" cookies. - The Secure attribute should be considered security advice from the - server to the user agent, indicating that it is in the session's - interest to protect the cookie contents. When it sends a "secure" - cookie back to a server, the user agent SHOULD use no less than - the same level of security as was used when it received the cookie - from the server. - - Version=value - REQUIRED. The value of the Version attribute, a decimal integer, - identifies the version of the state management specification to - which the cookie conforms. For this specification, Version=1 - applies. - - 3.2.3 Controlling Caching An origin server must be cognizant of the - effect of possible caching of both the returned resource and the - Set-Cookie2 header. Caching "public" documents is desirable. 
For - example, if the origin server wants to use a public document such as - a "front door" page as a sentinel to indicate the beginning of a - session for which a Set-Cookie2 response header must be generated, - the page SHOULD be stored in caches "pre-expired" so that the origin - server will see further requests. "Private documents", for example - those that contain information strictly private to a session, SHOULD - NOT be cached in shared caches. - - If the cookie is intended for use by a single user, the Set-Cookie2 - header SHOULD NOT be cached. A Set-Cookie2 header that is intended - to be shared by multiple users MAY be cached. - - The origin server SHOULD send the following additional HTTP/1.1 - response headers, depending on circumstances: - - * To suppress caching of the Set-Cookie2 header: - - Cache-control: no-cache="set-cookie2" - - and one of the following: - - * To suppress caching of a private document in shared caches: - - Cache-control: private - - - -Kristol & Montulli Standards Track [Page 7] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - * To allow caching of a document and require that it be validated - before returning it to the client: - - Cache-Control: must-revalidate, max-age=0 - - * To allow caching of a document, but to require that proxy - caches (not user agent caches) validate it before returning it - to the client: - - Cache-Control: proxy-revalidate, max-age=0 - - * To allow caching of a document and request that it be validated - before returning it to the client (by "pre-expiring" it): - - Cache-control: max-age=0 - - Not all caches will revalidate the document in every case. - - HTTP/1.1 servers MUST send Expires: old-date (where old-date is a - date long in the past) on responses containing Set-Cookie2 response - headers unless they know for certain (by out of band means) that - there are no HTTP/1.0 proxies in the response chain. 
HTTP/1.1 - servers MAY send other Cache-Control directives that permit caching - by HTTP/1.1 proxies in addition to the Expires: old-date directive; - the Cache-Control directive will override the Expires: old-date for - HTTP/1.1 proxies. - -3.3 User Agent Role - - 3.3.1 Interpreting Set-Cookie2 The user agent keeps separate track - of state information that arrives via Set-Cookie2 response headers - from each origin server (as distinguished by name or IP address and - port). The user agent MUST ignore attribute-value pairs whose - attribute it does not recognize. The user agent applies these - defaults for optional attributes that are missing: - - Discard The default behavior is dictated by the presence or absence - of a Max-Age attribute. - - Domain Defaults to the effective request-host. (Note that because - there is no dot at the beginning of effective request-host, - the default Domain can only domain-match itself.) - - Max-Age The default behavior is to discard the cookie when the user - agent exits. - - Path Defaults to the path of the request URL that generated the - Set-Cookie2 response, up to and including the right-most /. - - - -Kristol & Montulli Standards Track [Page 8] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - Port The default behavior is that a cookie MAY be returned to any - request-port. - - Secure If absent, the user agent MAY send the cookie over an - insecure channel. - - 3.3.2 Rejecting Cookies To prevent possible security or privacy - violations, a user agent rejects a cookie according to rules below. - The goal of the rules is to try to limit the set of servers for which - a cookie is valid, based on the values of the Path, Domain, and Port - attributes and the request-URI, request-host and request-port. - - A user agent rejects (SHALL NOT store its information) if the Version - attribute is missing. 
Moreover, a user agent rejects (SHALL NOT - store its information) if any of the following is true of the - attributes explicitly present in the Set-Cookie2 response header: - - * The value for the Path attribute is not a prefix of the - request-URI. - - * The value for the Domain attribute contains no embedded dots, - and the value is not .local. - - * The effective host name that derives from the request-host does - not domain-match the Domain attribute. - - * The request-host is a HDN (not IP address) and has the form HD, - where D is the value of the Domain attribute, and H is a string - that contains one or more dots. - - * The Port attribute has a "port-list", and the request-port was - not in the list. - - Examples: - - * A Set-Cookie2 from request-host y.x.foo.com for Domain=.foo.com - would be rejected, because H is y.x and contains a dot. - - * A Set-Cookie2 from request-host x.foo.com for Domain=.foo.com - would be accepted. - - * A Set-Cookie2 with Domain=.com or Domain=.com., will always be - rejected, because there is no embedded dot. - - * A Set-Cookie2 with Domain=ajax.com will be accepted, and the - value for Domain will be taken to be .ajax.com, because a dot - gets prepended to the value. - - - - -Kristol & Montulli Standards Track [Page 9] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - * A Set-Cookie2 with Port="80,8000" will be accepted if the - request was made to port 80 or 8000 and will be rejected - otherwise. - - * A Set-Cookie2 from request-host example for Domain=.local will - be accepted, because the effective host name for the request- - host is example.local, and example.local domain-matches .local. 
- - 3.3.3 Cookie Management If a user agent receives a Set-Cookie2 - response header whose NAME is the same as that of a cookie it has - previously stored, the new cookie supersedes the old when: the old - and new Domain attribute values compare equal, using a case- - insensitive string-compare; and, the old and new Path attribute - values string-compare equal (case-sensitive). However, if the Set- - Cookie2 has a value for Max-Age of zero, the (old and new) cookie is - discarded. Otherwise a cookie persists (resources permitting) until - whichever happens first, then gets discarded: its Max-Age lifetime is - exceeded; or, if the Discard attribute is set, the user agent - terminates the session. - - Because user agents have finite space in which to store cookies, they - MAY also discard older cookies to make space for newer ones, using, - for example, a least-recently-used algorithm, along with constraints - on the maximum number of cookies that each origin server may set. - - If a Set-Cookie2 response header includes a Comment attribute, the - user agent SHOULD store that information in a human-readable form - with the cookie and SHOULD display the comment text as part of a - cookie inspection user interface. - - If a Set-Cookie2 response header includes a CommentURL attribute, the - user agent SHOULD store that information in a human-readable form - with the cookie, or, preferably, SHOULD allow the user to follow the - http_URL link as part of a cookie inspection user interface. - - The cookie inspection user interface may include a facility whereby a - user can decide, at the time the user agent receives the Set-Cookie2 - response header, whether or not to accept the cookie. 
A potentially - confusing situation could arise if the following sequence occurs: - - * the user agent receives a cookie that contains a CommentURL - attribute; - - * the user agent's cookie inspection interface is configured so - that it presents a dialog to the user before the user agent - accepts the cookie; - - - - - -Kristol & Montulli Standards Track [Page 10] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - * the dialog allows the user to follow the CommentURL link when - the user agent receives the cookie; and, - - * when the user follows the CommentURL link, the origin server - (or another server, via other links in the returned content) - returns another cookie. - - The user agent SHOULD NOT send any cookies in this context. The user - agent MAY discard any cookie it receives in this context that the - user has not, through some user agent mechanism, deemed acceptable. - - User agents SHOULD allow the user to control cookie destruction, but - they MUST NOT extend the cookie's lifetime beyond that controlled by - the Discard and Max-Age attributes. An infrequently-used cookie may - function as a "preferences file" for network applications, and a user - may wish to keep it even if it is the least-recently-used cookie. One - possible implementation would be an interface that allows the - permanent storage of a cookie through a checkbox (or, conversely, its - immediate destruction). - - Privacy considerations dictate that the user have considerable - control over cookie management. The PRIVACY section contains more - information. - - 3.3.4 Sending Cookies to the Origin Server When it sends a request - to an origin server, the user agent includes a Cookie request header - if it has stored cookies that are applicable to the request, based on - - * the request-host and request-port; - - * the request-URI; - - * the cookie's age. 
- - The syntax for the header is: - -cookie = "Cookie:" cookie-version 1*((";" | ",") cookie-value) -cookie-value = NAME "=" VALUE [";" path] [";" domain] [";" port] -cookie-version = "$Version" "=" value -NAME = attr -VALUE = value -path = "$Path" "=" value -domain = "$Domain" "=" value -port = "$Port" [ "=" <"> value <"> ] - - The value of the cookie-version attribute MUST be the value from the - Version attribute of the corresponding Set-Cookie2 response header. - Otherwise the value for cookie-version is 0. The value for the path - - - -Kristol & Montulli Standards Track [Page 11] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - attribute MUST be the value from the Path attribute, if one was - present, of the corresponding Set-Cookie2 response header. Otherwise - the attribute SHOULD be omitted from the Cookie request header. The - value for the domain attribute MUST be the value from the Domain - attribute, if one was present, of the corresponding Set-Cookie2 - response header. Otherwise the attribute SHOULD be omitted from the - Cookie request header. - - The port attribute of the Cookie request header MUST mirror the Port - attribute, if one was present, in the corresponding Set-Cookie2 - response header. That is, the port attribute MUST be present if the - Port attribute was present in the Set-Cookie2 header, and it MUST - have the same value, if any. Otherwise, if the Port attribute was - absent from the Set-Cookie2 header, the attribute likewise MUST be - omitted from the Cookie request header. - - Note that there is neither a Comment nor a CommentURL attribute in - the Cookie request header corresponding to the ones in the Set- - Cookie2 response header. The user agent does not return the comment - information to the origin server. - - The user agent applies the following rules to choose applicable - cookie-values to send in Cookie request headers from among all the - cookies it has received. 
- - Domain Selection - The origin server's effective host name MUST domain-match the - Domain attribute of the cookie. - - Port Selection - There are three possible behaviors, depending on the Port - attribute in the Set-Cookie2 response header: - - 1. By default (no Port attribute), the cookie MAY be sent to any - port. - - 2. If the attribute is present but has no value (e.g., Port), the - cookie MUST only be sent to the request-port it was received - from. - - 3. If the attribute has a port-list, the cookie MUST only be - returned if the new request-port is one of those listed in - port-list. - - Path Selection - The request-URI MUST path-match the Path attribute of the cookie. - - - - - -Kristol & Montulli Standards Track [Page 12] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - Max-Age Selection - Cookies that have expired should have been discarded and thus are - not forwarded to an origin server. - - If multiple cookies satisfy the criteria above, they are ordered in - the Cookie header such that those with more specific Path attributes - precede those with less specific. Ordering with respect to other - attributes (e.g., Domain) is unspecified. - - Note: For backward compatibility, the separator in the Cookie header - is semi-colon (;) everywhere. A server SHOULD also accept comma (,) - as the separator between cookie-values for future compatibility. - - 3.3.5 Identifying What Version is Understood: Cookie2 The Cookie2 - request header facilitates interoperation between clients and servers - that understand different versions of the cookie specification. 
When - the client sends one or more cookies to an origin server, if at least - one of those cookies contains a $Version attribute whose value is - different from the version that the client understands, then the - client MUST also send a Cookie2 request header, the syntax for which - is - - cookie2 = "Cookie2:" cookie-version - - Here the value for cookie-version is the highest version of cookie - specification (currently 1) that the client understands. The client - needs to send at most one such request header per request. - - 3.3.6 Sending Cookies in Unverifiable Transactions Users MUST have - control over sessions in order to ensure privacy. (See PRIVACY - section below.) To simplify implementation and to prevent an - additional layer of complexity where adequate safeguards exist, - however, this document distinguishes between transactions that are - verifiable and those that are unverifiable. A transaction is - verifiable if the user, or a user-designated agent, has the option to - review the request-URI prior to its use in the transaction. A - transaction is unverifiable if the user does not have that option. - Unverifiable transactions typically arise when a user agent - automatically requests inlined or embedded entities or when it - resolves redirection (3xx) responses from an origin server. - Typically the origin transaction, the transaction that the user - initiates, is verifiable, and that transaction may directly or - indirectly induce the user agent to make unverifiable transactions. - - An unverifiable transaction is to a third-party host if its request- - host U does not domain-match the reach R of the request-host O in the - origin transaction. 
- - - - -Kristol & Montulli Standards Track [Page 13] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - When it makes an unverifiable transaction, a user agent MUST disable - all cookie processing (i.e., MUST NOT send cookies, and MUST NOT - accept any received cookies) if the transaction is to a third-party - host. - - This restriction prevents a malicious service author from using - unverifiable transactions to induce a user agent to start or continue - a session with a server in a different domain. The starting or - continuation of such sessions could be contrary to the privacy - expectations of the user, and could also be a security problem. - - User agents MAY offer configurable options that allow the user agent, - or any autonomous programs that the user agent executes, to ignore - the above rule, so long as these override options default to "off". - - (N.B. Mechanisms may be proposed that will automate overriding the - third-party restrictions under controlled conditions.) - - Many current user agents already provide a review option that would - render many links verifiable. For instance, some user agents display - the URL that would be referenced for a particular link when the mouse - pointer is placed over that link. The user can therefore determine - whether to visit that site before causing the browser to do so. - (Though not implemented on current user agents, a similar technique - could be used for a button used to submit a form -- the user agent - could display the action to be taken if the user were to select that - button.) However, even this would not make all links verifiable; for - example, links to automatically loaded images would not normally be - subject to "mouse pointer" verification. - - Many user agents also provide the option for a user to view the HTML - source of a document, or to save the source to an external file where - it can be viewed by another application. 
While such an option does - provide a crude review mechanism, some users might not consider it - acceptable for this purpose. - -3.4 How an Origin Server Interprets the Cookie Header - - A user agent returns much of the information in the Set-Cookie2 - header to the origin server when the request-URI path-matches the - Path attribute of the cookie. When it receives a Cookie header, the - origin server SHOULD treat cookies with NAMEs whose prefix is $ - specially, as an attribute for the cookie. - - - - - - - - -Kristol & Montulli Standards Track [Page 14] - -RFC 2965 HTTP State Management Mechanism October 2000 - - -3.5 Caching Proxy Role - - One reason for separating state information from both a URL and - document content is to facilitate the scaling that caching permits. - To support cookies, a caching proxy MUST obey these rules already in - the HTTP specification: - - * Honor requests from the cache, if possible, based on cache - validity rules. - - * Pass along a Cookie request header in any request that the - proxy must make of another server. - - * Return the response to the client. Include any Set-Cookie2 - response header. - - * Cache the received response subject to the control of the usual - headers, such as Expires, - - Cache-control: no-cache - - and - - Cache-control: private - - * Cache the Set-Cookie2 subject to the control of the usual - header, - - Cache-control: no-cache="set-cookie2" - - (The Set-Cookie2 header should usually not be cached.) - - Proxies MUST NOT introduce Set-Cookie2 (Cookie) headers of their own - in proxy responses (requests). - -4. EXAMPLES - -4.1 Example 1 - - Most detail of request and response headers has been omitted. Assume - the user agent has no stored cookies. - - 1. User Agent -> Server - - POST /acme/login HTTP/1.1 - [form data] - - User identifies self via a form. - - - -Kristol & Montulli Standards Track [Page 15] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - 2. 
Server -> User Agent - - HTTP/1.1 200 OK - Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme" - - Cookie reflects user's identity. - - 3. User Agent -> Server - - POST /acme/pickitem HTTP/1.1 - Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme" - [form data] - - User selects an item for "shopping basket". - - 4. Server -> User Agent - - HTTP/1.1 200 OK - Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1"; - Path="/acme" - - Shopping basket contains an item. - - 5. User Agent -> Server - - POST /acme/shipping HTTP/1.1 - Cookie: $Version="1"; - Customer="WILE_E_COYOTE"; $Path="/acme"; - Part_Number="Rocket_Launcher_0001"; $Path="/acme" - [form data] - - User selects shipping method from form. - - 6. Server -> User Agent - - HTTP/1.1 200 OK - Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme" - - New cookie reflects shipping method. - - 7. User Agent -> Server - - POST /acme/process HTTP/1.1 - Cookie: $Version="1"; - Customer="WILE_E_COYOTE"; $Path="/acme"; - Part_Number="Rocket_Launcher_0001"; $Path="/acme"; - Shipping="FedEx"; $Path="/acme" - [form data] - - - -Kristol & Montulli Standards Track [Page 16] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - User chooses to process order. - - 8. Server -> User Agent - - HTTP/1.1 200 OK - - Transaction is complete. - - The user agent makes a series of requests on the origin server, after - each of which it receives a new cookie. All the cookies have the - same Path attribute and (default) domain. Because the request-URIs - all path-match /acme, the Path attribute of each cookie, each request - contains all the cookies received so far. - -4.2 Example 2 - - This example illustrates the effect of the Path attribute. All - detail of request and response headers has been omitted. Assume the - user agent has no stored cookies. 
- - Imagine the user agent has received, in response to earlier requests, - the response headers - - Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1"; - Path="/acme" - - and - - Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1"; - Path="/acme/ammo" - - A subsequent request by the user agent to the (same) server for URLs - of the form /acme/ammo/... would include the following request - header: - - Cookie: $Version="1"; - Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo"; - Part_Number="Rocket_Launcher_0001"; $Path="/acme" - - Note that the NAME=VALUE pair for the cookie with the more specific - Path attribute, /acme/ammo, comes before the one with the less - specific Path attribute, /acme. Further note that the same cookie - name appears more than once. - - A subsequent request by the user agent to the (same) server for a URL - of the form /acme/parts/ would include the following request header: - - - - - -Kristol & Montulli Standards Track [Page 17] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; - $Path="/acme" - - Here, the second cookie's Path attribute /acme/ammo is not a prefix - of the request URL, /acme/parts/, so the cookie does not get - forwarded to the server. - -5. IMPLEMENTATION CONSIDERATIONS - - Here we provide guidance on likely or desirable details for an origin - server that implements state management. - -5.1 Set-Cookie2 Content - - An origin server's content should probably be divided into disjoint - application areas, some of which require the use of state - information. The application areas can be distinguished by their - request URLs. The Set-Cookie2 header can incorporate information - about the application areas by setting the Path attribute for each - one. - - The session information can obviously be clear or encoded text that - describes state. However, if it grows too large, it can become - unwieldy. 
Therefore, an implementor might choose for the session - information to be a key to a server-side resource. Of course, using - a database creates some problems that this state management - specification was meant to avoid, namely: - - 1. keeping real state on the server side; - - 2. how and when to garbage-collect the database entry, in case the - user agent terminates the session by, for example, exiting. - -5.2 Stateless Pages - - Caching benefits the scalability of WWW. Therefore it is important - to reduce the number of documents that have state embedded in them - inherently. For example, if a shopping-basket-style application - always displays a user's current basket contents on each page, those - pages cannot be cached, because each user's basket's contents would - be different. On the other hand, if each page contains just a link - that allows the user to "Look at My Shopping Basket", the page can be - cached. - - - - - - - - -Kristol & Montulli Standards Track [Page 18] - -RFC 2965 HTTP State Management Mechanism October 2000 - - -5.3 Implementation Limits - - Practical user agent implementations have limits on the number and - size of cookies that they can store. In general, user agents' cookie - support should have no fixed limits. They should strive to store as - many frequently-used cookies as possible. Furthermore, general-use - user agents SHOULD provide each of the following minimum capabilities - individually, although not necessarily simultaneously: - - * at least 300 cookies - - * at least 4096 bytes per cookie (as measured by the characters - that comprise the cookie non-terminal in the syntax description - of the Set-Cookie2 header, and as received in the Set-Cookie2 - header) - - * at least 20 cookies per unique host or domain name - - User agents created for specific purposes or for limited-capacity - devices SHOULD provide at least 20 cookies of 4096 bytes, to ensure - that the user can interact with a session-based origin server. 
- - The information in a Set-Cookie2 response header MUST be retained in - its entirety. If for some reason there is inadequate space to store - the cookie, it MUST be discarded, not truncated. - - Applications should use as few and as small cookies as possible, and - they should cope gracefully with the loss of a cookie. - - 5.3.1 Denial of Service Attacks User agents MAY choose to set an - upper bound on the number of cookies to be stored from a given host - or domain name or on the size of the cookie information. Otherwise a - malicious server could attempt to flood a user agent with many - cookies, or large cookies, on successive responses, which would force - out cookies the user agent had received from other servers. However, - the minima specified above SHOULD still be supported. - -6. PRIVACY - - Informed consent should guide the design of systems that use cookies. - A user should be able to find out how a web site plans to use - information in a cookie and should be able to choose whether or not - those policies are acceptable. Both the user agent and the origin - server must assist informed consent. - - - - - - - -Kristol & Montulli Standards Track [Page 19] - -RFC 2965 HTTP State Management Mechanism October 2000 - - -6.1 User Agent Control - - An origin server could create a Set-Cookie2 header to track the path - of a user through the server. Users may object to this behavior as - an intrusive accumulation of information, even if their identity is - not evident. (Identity might become evident, for example, if a user - subsequently fills out a form that contains identifying information.) - This state management specification therefore requires that a user - agent give the user control over such a possible intrusion, although - the interface through which the user is given this control is left - unspecified. However, the control mechanisms provided SHALL at least - allow the user - - * to completely disable the sending and saving of cookies. 
- - * to determine whether a stateful session is in progress. - - * to control the saving of a cookie on the basis of the cookie's - Domain attribute. - - Such control could be provided, for example, by mechanisms - - * to notify the user when the user agent is about to send a - cookie to the origin server, to offer the option not to begin a - session. - - * to display a visual indication that a stateful session is in - progress. - - * to let the user decide which cookies, if any, should be saved - when the user concludes a window or user agent session. - - * to let the user examine and delete the contents of a cookie at - any time. - - A user agent usually begins execution with no remembered state - information. It SHOULD be possible to configure a user agent never - to send Cookie headers, in which case it can never sustain state with - an origin server. (The user agent would then behave like one that is - unaware of how to handle Set-Cookie2 response headers.) - - When the user agent terminates execution, it SHOULD let the user - discard all state information. Alternatively, the user agent MAY ask - the user whether state information should be retained; the default - should be "no". If the user chooses to retain state information, it - would be restored the next time the user agent runs. - - - - - -Kristol & Montulli Standards Track [Page 20] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - NOTE: User agents should probably be cautious about using files to - store cookies long-term. If a user runs more than one instance of - the user agent, the cookies could be commingled or otherwise - corrupted. - -6.2 Origin Server Role - - An origin server SHOULD promote informed consent by adding CommentURL - or Comment information to the cookies it sends. CommentURL is - preferred because of the opportunity to provide richer information in - a multiplicity of languages. 
- -6.3 Clear Text - - The information in the Set-Cookie2 and Cookie headers is unprotected. - As a consequence: - - 1. Any sensitive information that is conveyed in them is exposed - to intruders. - - 2. A malicious intermediary could alter the headers as they travel - in either direction, with unpredictable results. - - These facts imply that information of a personal and/or financial - nature should only be sent over a secure channel. For less sensitive - information, or when the content of the header is a database key, an - origin server should be vigilant to prevent a bad Cookie value from - causing failures. - - A user agent in a shared user environment poses a further risk. - Using a cookie inspection interface, User B could examine the - contents of cookies that were saved when User A used the machine. - -7. SECURITY CONSIDERATIONS - -7.1 Protocol Design - - The restrictions on the value of the Domain attribute, and the rules - concerning unverifiable transactions, are meant to reduce the ways - that cookies can "leak" to the "wrong" site. The intent is to - restrict cookies to one host, or a closely related set of hosts. - Therefore a request-host is limited as to what values it can set for - Domain. We consider it acceptable for hosts host1.foo.com and - host2.foo.com to share cookies, but not a.com and b.com. - - Similarly, a server can set a Path only for cookies that are related - to the request-URI. - - - - -Kristol & Montulli Standards Track [Page 21] - -RFC 2965 HTTP State Management Mechanism October 2000 - - -7.2 Cookie Spoofing - - Proper application design can avoid spoofing attacks from related - domains. Consider: - - 1. User agent makes request to victim.cracker.edu, gets back - cookie session_id="1234" and sets the default domain - victim.cracker.edu. - - 2. User agent makes request to spoof.cracker.edu, gets back cookie - session-id="1111", with Domain=".cracker.edu". - - 3. 
User agent makes request to victim.cracker.edu again, and - passes - - Cookie: $Version="1"; session_id="1234", - $Version="1"; session_id="1111"; $Domain=".cracker.edu" - - The server at victim.cracker.edu should detect that the second - cookie was not one it originated by noticing that the Domain - attribute is not for itself and ignore it. - -7.3 Unexpected Cookie Sharing - - A user agent SHOULD make every attempt to prevent the sharing of - session information between hosts that are in different domains. - Embedded or inlined objects may cause particularly severe privacy - problems if they can be used to share cookies between disparate - hosts. For example, a malicious server could embed cookie - information for host a.com in a URI for a CGI on host b.com. User - agent implementors are strongly encouraged to prevent this sort of - exchange whenever possible. - -7.4 Cookies For Account Information - - While it is common practice to use them this way, cookies are not - designed or intended to be used to hold authentication information, - such as account names and passwords. Unless such cookies are - exchanged over an encrypted path, the account information they - contain is highly vulnerable to perusal and theft. - -8. OTHER, SIMILAR, PROPOSALS - - Apart from RFC 2109, three other proposals have been made to - accomplish similar goals. This specification began as an amalgam of - Kristol's State-Info proposal [DMK95] and Netscape's Cookie proposal - [Netscape]. - - - - -Kristol & Montulli Standards Track [Page 22] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - Brian Behlendorf proposed a Session-ID header that would be user- - agent-initiated and could be used by an origin server to track - "clicktrails". It would not carry any origin-server-defined state, - however. Phillip Hallam-Baker has proposed another client-defined - session ID mechanism for similar purposes. 
- - While both session IDs and cookies can provide a way to sustain - stateful sessions, their intended purpose is different, and, - consequently, the privacy requirements for them are different. A - user initiates session IDs to allow servers to track progress through - them, or to distinguish multiple users on a shared machine. Cookies - are server-initiated, so the cookie mechanism described here gives - users control over something that would otherwise take place without - the users' awareness. Furthermore, cookies convey rich, server- - selected information, whereas session IDs comprise user-selected, - simple information. - -9. HISTORICAL - -9.1 Compatibility with Existing Implementations - - Existing cookie implementations, based on the Netscape specification, - use the Set-Cookie (not Set-Cookie2) header. User agents that - receive in the same response both a Set-Cookie and Set-Cookie2 - response header for the same cookie MUST discard the Set-Cookie - information and use only the Set-Cookie2 information. Furthermore, a - user agent MUST assume, if it received a Set-Cookie2 response header, - that the sending server complies with this document and will - understand Cookie request headers that also follow this - specification. - - New cookies MUST replace both equivalent old- and new-style cookies. - That is, if a user agent that follows both this specification and - Netscape's original specification receives a Set-Cookie2 response - header, and the NAME and the Domain and Path attributes match (per - the Cookie Management section) a Netscape-style cookie, the - Netscape-style cookie MUST be discarded, and the user agent MUST - retain only the cookie adhering to this specification. - - Older user agents that do not understand this specification, but that - do understand Netscape's original specification, will not recognize - the Set-Cookie2 response header and will receive and send cookies - according to the older specification. 
- - - - - - - - -Kristol & Montulli Standards Track [Page 23] - -RFC 2965 HTTP State Management Mechanism October 2000 - - - A user agent that supports both this specification and Netscape-style - cookies SHOULD send a Cookie request header that follows the older - Netscape specification if it received the cookie in a Set-Cookie - response header and not in a Set-Cookie2 response header. However, - it SHOULD send the following request header as well: - - Cookie2: $Version="1" - - The Cookie2 header advises the server that the user agent understands - new-style cookies. If the server understands new-style cookies, as - well, it SHOULD continue the stateful session by sending a Set- - Cookie2 response header, rather than Set-Cookie. A server that does - not understand new-style cookies will simply ignore the Cookie2 - request header. - -9.2 Caching and HTTP/1.0 - - Some caches, such as those conforming to HTTP/1.0, will inevitably - cache the Set-Cookie2 and Set-Cookie headers, because there was no - mechanism to suppress caching of headers prior to HTTP/1.1. This - caching can lead to security problems. Documents transmitted by an - origin server along with Set-Cookie2 and Set-Cookie headers usually - either will be uncachable, or will be "pre-expired". As long as - caches obey instructions not to cache documents (following Expires: - or Pragma: no-cache (HTTP/1.0), or Cache- - control: no-cache (HTTP/1.1)) uncachable documents present no - problem. However, pre-expired documents may be stored in caches. - They require validation (a conditional GET) on each new request, but - some cache operators loosen the rules for their caches, and sometimes - serve expired documents without first validating them. This - combination of factors can lead to cookies meant for one user later - being sent to another user. 
The Set-Cookie2 and Set-Cookie headers - are stored in the cache, and, although the document is stale - (expired), the cache returns the document in response to later - requests, including cached headers. - -10. ACKNOWLEDGEMENTS - - This document really represents the collective efforts of the HTTP - Working Group of the IETF and, particularly, the following people, in - addition to the authors: Roy Fielding, Yaron Goland, Marc Hedlund, - Ted Hardie, Koen Holtman, Shel Kaphan, Rohit Khare, Foteos Macrides, - David W. Morris. - - - - - - - - -Kristol & Montulli Standards Track [Page 24] - -RFC 2965 HTTP State Management Mechanism October 2000 - - -11. AUTHORS' ADDRESSES - - David M. Kristol - Bell Laboratories, Lucent Technologies - 600 Mountain Ave. Room 2A-333 - Murray Hill, NJ 07974 - - Phone: (908) 582-2250 - Fax: (908) 582-1239 - EMail: dmk@bell-labs.com - - - Lou Montulli - Epinions.com, Inc. - 2037 Landings Dr. - Mountain View, CA 94301 - - EMail: lou@montulli.org - -12. REFERENCES - - [DMK95] Kristol, D.M., "Proposed HTTP State-Info Mechanism", - available at , September, 1995. - - [Netscape] "Persistent Client State -- HTTP Cookies", available at - , - undated. - - [RFC2109] Kristol, D. and L. Montulli, "HTTP State Management - Mechanism", RFC 2109, February 1997. - - [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate - Requirement Levels", BCP 14, RFC 2119, March 1997. - - [RFC2279] Yergeau, F., "UTF-8, a transformation format of Unicode - and ISO-10646", RFC 2279, January 1998. - - [RFC2396] Berners-Lee, T., Fielding, R. and L. Masinter, "Uniform - Resource Identifiers (URI): Generic Syntax", RFC 2396, - August 1998. - - [RFC2616] Fielding, R., Gettys, J., Mogul, J., Frystyk, H. and T. - Berners-Lee, "Hypertext Transfer Protocol -- HTTP/1.1", - RFC 2616, June 1999. - - - - - - -Kristol & Montulli Standards Track [Page 25] - -RFC 2965 HTTP State Management Mechanism October 2000 - - -13. 
Full Copyright Statement - - Copyright (C) The Internet Society (2000). All Rights Reserved. - - This document and translations of it may be copied and furnished to - others, and derivative works that comment on or otherwise explain it - or assist in its implementation may be prepared, copied, published - and distributed, in whole or in part, without restriction of any - kind, provided that the above copyright notice and this paragraph are - included on all such copies and derivative works. However, this - document itself may not be modified in any way, such as by removing - the copyright notice or references to the Internet Society or other - Internet organizations, except as needed for the purpose of - developing Internet standards in which case the procedures for - copyrights defined in the Internet Standards process must be - followed, or as required to translate it into languages other than - English. - - The limited permissions granted above are perpetual and will not be - revoked by the Internet Society or its successors or assigns. - - This document and the information contained herein is provided on an - "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING - TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING - BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION - HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF - MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - -Acknowledgement - - Funding for the RFC Editor function is currently provided by the - Internet Society. - - - - - - - - - - - - - - - - - - - -Kristol & Montulli Standards Track [Page 26] - diff --git a/docs/specs/rfc3986.txt b/docs/specs/rfc3986.txt deleted file mode 100644 index c56ed4e..0000000 --- a/docs/specs/rfc3986.txt +++ /dev/null @@ -1,3419 +0,0 @@ - - - - - - -Network Working Group T. Berners-Lee -Request for Comments: 3986 W3C/MIT -STD: 66 R. Fielding -Updates: 1738 Day Software -Obsoletes: 2732, 2396, 1808 L. 
Masinter -Category: Standards Track Adobe Systems - January 2005 - - - Uniform Resource Identifier (URI): Generic Syntax - -Status of This Memo - - This document specifies an Internet standards track protocol for the - Internet community, and requests discussion and suggestions for - improvements. Please refer to the current edition of the "Internet - Official Protocol Standards" (STD 1) for the standardization state - and status of this protocol. Distribution of this memo is unlimited. - -Copyright Notice - - Copyright (C) The Internet Society (2005). - -Abstract - - A Uniform Resource Identifier (URI) is a compact sequence of - characters that identifies an abstract or physical resource. This - specification defines the generic URI syntax and a process for - resolving URI references that might be in relative form, along with - guidelines and security considerations for the use of URIs on the - Internet. The URI syntax defines a grammar that is a superset of all - valid URIs, allowing an implementation to parse the common components - of a URI reference without knowing the scheme-specific requirements - of every possible identifier. This specification does not define a - generative grammar for URIs; that task is performed by the individual - specifications of each URI scheme. - - - - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 1] - -RFC 3986 URI Generic Syntax January 2005 - - -Table of Contents - - 1. Introduction . . . . . . . . . . . . . . . . . . . . . . . . . 4 - 1.1. Overview of URIs . . . . . . . . . . . . . . . . . . . . 4 - 1.1.1. Generic Syntax . . . . . . . . . . . . . . . . . 6 - 1.1.2. Examples . . . . . . . . . . . . . . . . . . . . 7 - 1.1.3. URI, URL, and URN . . . . . . . . . . . . . . . 7 - 1.2. Design Considerations . . . . . . . . . . . . . . . . . 8 - 1.2.1. Transcription . . . . . . . . . . . . . . . . . 8 - 1.2.2. Separating Identification from Interaction . . . 9 - 1.2.3. Hierarchical Identifiers . . . . . . . . . . . . 
10 - 1.3. Syntax Notation . . . . . . . . . . . . . . . . . . . . 11 - 2. Characters . . . . . . . . . . . . . . . . . . . . . . . . . . 11 - 2.1. Percent-Encoding . . . . . . . . . . . . . . . . . . . . 12 - 2.2. Reserved Characters . . . . . . . . . . . . . . . . . . 12 - 2.3. Unreserved Characters . . . . . . . . . . . . . . . . . 13 - 2.4. When to Encode or Decode . . . . . . . . . . . . . . . . 14 - 2.5. Identifying Data . . . . . . . . . . . . . . . . . . . . 14 - 3. Syntax Components . . . . . . . . . . . . . . . . . . . . . . 16 - 3.1. Scheme . . . . . . . . . . . . . . . . . . . . . . . . . 17 - 3.2. Authority . . . . . . . . . . . . . . . . . . . . . . . 17 - 3.2.1. User Information . . . . . . . . . . . . . . . . 18 - 3.2.2. Host . . . . . . . . . . . . . . . . . . . . . . 18 - 3.2.3. Port . . . . . . . . . . . . . . . . . . . . . . 22 - 3.3. Path . . . . . . . . . . . . . . . . . . . . . . . . . . 22 - 3.4. Query . . . . . . . . . . . . . . . . . . . . . . . . . 23 - 3.5. Fragment . . . . . . . . . . . . . . . . . . . . . . . . 24 - 4. Usage . . . . . . . . . . . . . . . . . . . . . . . . . . . . 25 - 4.1. URI Reference . . . . . . . . . . . . . . . . . . . . . 25 - 4.2. Relative Reference . . . . . . . . . . . . . . . . . . . 26 - 4.3. Absolute URI . . . . . . . . . . . . . . . . . . . . . . 27 - 4.4. Same-Document Reference . . . . . . . . . . . . . . . . 27 - 4.5. Suffix Reference . . . . . . . . . . . . . . . . . . . . 27 - 5. Reference Resolution . . . . . . . . . . . . . . . . . . . . . 28 - 5.1. Establishing a Base URI . . . . . . . . . . . . . . . . 28 - 5.1.1. Base URI Embedded in Content . . . . . . . . . . 29 - 5.1.2. Base URI from the Encapsulating Entity . . . . . 29 - 5.1.3. Base URI from the Retrieval URI . . . . . . . . 30 - 5.1.4. Default Base URI . . . . . . . . . . . . . . . . 30 - 5.2. Relative Resolution . . . . . . . . . . . . . . . . . . 30 - 5.2.1. Pre-parse the Base URI . . . . . . . . . . . . . 31 - 5.2.2. 
Transform References . . . . . . . . . . . . . . 31 - 5.2.3. Merge Paths . . . . . . . . . . . . . . . . . . 32 - 5.2.4. Remove Dot Segments . . . . . . . . . . . . . . 33 - 5.3. Component Recomposition . . . . . . . . . . . . . . . . 35 - 5.4. Reference Resolution Examples . . . . . . . . . . . . . 35 - 5.4.1. Normal Examples . . . . . . . . . . . . . . . . 36 - 5.4.2. Abnormal Examples . . . . . . . . . . . . . . . 36 - - - -Berners-Lee, et al. Standards Track [Page 2] - -RFC 3986 URI Generic Syntax January 2005 - - - 6. Normalization and Comparison . . . . . . . . . . . . . . . . . 38 - 6.1. Equivalence . . . . . . . . . . . . . . . . . . . . . . 38 - 6.2. Comparison Ladder . . . . . . . . . . . . . . . . . . . 39 - 6.2.1. Simple String Comparison . . . . . . . . . . . . 39 - 6.2.2. Syntax-Based Normalization . . . . . . . . . . . 40 - 6.2.3. Scheme-Based Normalization . . . . . . . . . . . 41 - 6.2.4. Protocol-Based Normalization . . . . . . . . . . 42 - 7. Security Considerations . . . . . . . . . . . . . . . . . . . 43 - 7.1. Reliability and Consistency . . . . . . . . . . . . . . 43 - 7.2. Malicious Construction . . . . . . . . . . . . . . . . . 43 - 7.3. Back-End Transcoding . . . . . . . . . . . . . . . . . . 44 - 7.4. Rare IP Address Formats . . . . . . . . . . . . . . . . 45 - 7.5. Sensitive Information . . . . . . . . . . . . . . . . . 45 - 7.6. Semantic Attacks . . . . . . . . . . . . . . . . . . . . 45 - 8. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 46 - 9. Acknowledgements . . . . . . . . . . . . . . . . . . . . . . . 46 - 10. References . . . . . . . . . . . . . . . . . . . . . . . . . . 46 - 10.1. Normative References . . . . . . . . . . . . . . . . . . 46 - 10.2. Informative References . . . . . . . . . . . . . . . . . 47 - A. Collected ABNF for URI . . . . . . . . . . . . . . . . . . . . 49 - B. Parsing a URI Reference with a Regular Expression . . . . . . 50 - C. Delimiting a URI in Context . . . . . . . . . . . . . . . . . 
51 - D. Changes from RFC 2396 . . . . . . . . . . . . . . . . . . . . 53 - D.1. Additions . . . . . . . . . . . . . . . . . . . . . . . 53 - D.2. Modifications . . . . . . . . . . . . . . . . . . . . . 53 - Index . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 56 - Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . . 60 - Full Copyright Statement . . . . . . . . . . . . . . . . . . . . . 61 - - - - - - - - - - - - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 3] - -RFC 3986 URI Generic Syntax January 2005 - - -1. Introduction - - A Uniform Resource Identifier (URI) provides a simple and extensible - means for identifying a resource. This specification of URI syntax - and semantics is derived from concepts introduced by the World Wide - Web global information initiative, whose use of these identifiers - dates from 1990 and is described in "Universal Resource Identifiers - in WWW" [RFC1630]. The syntax is designed to meet the - recommendations laid out in "Functional Recommendations for Internet - Resource Locators" [RFC1736] and "Functional Requirements for Uniform - Resource Names" [RFC1737]. - - This document obsoletes [RFC2396], which merged "Uniform Resource - Locators" [RFC1738] and "Relative Uniform Resource Locators" - [RFC1808] in order to define a single, generic syntax for all URIs. - It obsoletes [RFC2732], which introduced syntax for an IPv6 address. - It excludes portions of RFC 1738 that defined the specific syntax of - individual URI schemes; those portions will be updated as separate - documents. The process for registration of new URI schemes is - defined separately by [BCP35]. Advice for designers of new URI - schemes can be found in [RFC2718]. All significant changes from RFC - 2396 are noted in Appendix D. 
- - This specification uses the terms "character" and "coded character - set" in accordance with the definitions provided in [BCP19], and - "character encoding" in place of what [BCP19] refers to as a - "charset". - -1.1. Overview of URIs - - URIs are characterized as follows: - - Uniform - - Uniformity provides several benefits. It allows different types - of resource identifiers to be used in the same context, even when - the mechanisms used to access those resources may differ. It - allows uniform semantic interpretation of common syntactic - conventions across different types of resource identifiers. It - allows introduction of new types of resource identifiers without - interfering with the way that existing identifiers are used. It - allows the identifiers to be reused in many different contexts, - thus permitting new applications or protocols to leverage a pre- - existing, large, and widely used set of resource identifiers. - - - - - - - -Berners-Lee, et al. Standards Track [Page 4] - -RFC 3986 URI Generic Syntax January 2005 - - - Resource - - This specification does not limit the scope of what might be a - resource; rather, the term "resource" is used in a general sense - for whatever might be identified by a URI. Familiar examples - include an electronic document, an image, a source of information - with a consistent purpose (e.g., "today's weather report for Los - Angeles"), a service (e.g., an HTTP-to-SMS gateway), and a - collection of other resources. A resource is not necessarily - accessible via the Internet; e.g., human beings, corporations, and - bound books in a library can also be resources. Likewise, - abstract concepts can be resources, such as the operators and - operands of a mathematical equation, the types of a relationship - (e.g., "parent" or "employee"), or numeric values (e.g., zero, - one, and infinity). 
- - Identifier - - An identifier embodies the information required to distinguish - what is being identified from all other things within its scope of - identification. Our use of the terms "identify" and "identifying" - refer to this purpose of distinguishing one resource from all - other resources, regardless of how that purpose is accomplished - (e.g., by name, address, or context). These terms should not be - mistaken as an assumption that an identifier defines or embodies - the identity of what is referenced, though that may be the case - for some identifiers. Nor should it be assumed that a system - using URIs will access the resource identified: in many cases, - URIs are used to denote resources without any intention that they - be accessed. Likewise, the "one" resource identified might not be - singular in nature (e.g., a resource might be a named set or a - mapping that varies over time). - - A URI is an identifier consisting of a sequence of characters - matching the syntax rule named in Section 3. It enables - uniform identification of resources via a separately defined - extensible set of naming schemes (Section 3.1). How that - identification is accomplished, assigned, or enabled is delegated to - each scheme specification. - - This specification does not place any limits on the nature of a - resource, the reasons why an application might seek to refer to a - resource, or the kinds of systems that might use URIs for the sake of - identifying resources. This specification does not require that a - URI persists in identifying the same resource over time, though that - is a common goal of all URI schemes. Nevertheless, nothing in this - - - - - -Berners-Lee, et al. Standards Track [Page 5] - -RFC 3986 URI Generic Syntax January 2005 - - - specification prevents an application from limiting itself to - particular types of resources, or to a subset of URIs that maintains - characteristics desired by that application. 
- - URIs have a global scope and are interpreted consistently regardless - of context, though the result of that interpretation may be in - relation to the end-user's context. For example, "http://localhost/" - has the same interpretation for every user of that reference, even - though the network interface corresponding to "localhost" may be - different for each end-user: interpretation is independent of access. - However, an action made on the basis of that reference will take - place in relation to the end-user's context, which implies that an - action intended to refer to a globally unique thing must use a URI - that distinguishes that resource from all other things. URIs that - identify in relation to the end-user's local context should only be - used when the context itself is a defining aspect of the resource, - such as when an on-line help manual refers to a file on the end- - user's file system (e.g., "file:///etc/hosts"). - -1.1.1. Generic Syntax - - Each URI begins with a scheme name, as defined in Section 3.1, that - refers to a specification for assigning identifiers within that - scheme. As such, the URI syntax is a federated and extensible naming - system wherein each scheme's specification may further restrict the - syntax and semantics of identifiers using that scheme. - - This specification defines those elements of the URI syntax that are - required of all URI schemes or are common to many URI schemes. It - thus defines the syntax and semantics needed to implement a scheme- - independent parsing mechanism for URI references, by which the - scheme-dependent handling of a URI can be postponed until the - scheme-dependent semantics are needed. Likewise, protocols and data - formats that make use of URI references can refer to this - specification as a definition for the range of syntax allowed for all - URIs, including those schemes that have yet to be defined. 
This - decouples the evolution of identification schemes from the evolution - of protocols, data formats, and implementations that make use of - URIs. - - A parser of the generic URI syntax can parse any URI reference into - its major components. Once the scheme is determined, further - scheme-specific parsing can be performed on the components. In other - words, the URI generic syntax is a superset of the syntax of all URI - schemes. - - - - - - -Berners-Lee, et al. Standards Track [Page 6] - -RFC 3986 URI Generic Syntax January 2005 - - -1.1.2. Examples - - The following example URIs illustrate several URI schemes and - variations in their common syntax components: - - ftp://ftp.is.co.za/rfc/rfc1808.txt - - http://www.ietf.org/rfc/rfc2396.txt - - ldap://[2001:db8::7]/c=GB?objectClass?one - - mailto:John.Doe@example.com - - news:comp.infosystems.www.servers.unix - - tel:+1-816-555-1212 - - telnet://192.0.2.16:80/ - - urn:oasis:names:specification:docbook:dtd:xml:4.1.2 - - -1.1.3. URI, URL, and URN - - A URI can be further classified as a locator, a name, or both. The - term "Uniform Resource Locator" (URL) refers to the subset of URIs - that, in addition to identifying a resource, provide a means of - locating the resource by describing its primary access mechanism - (e.g., its network "location"). The term "Uniform Resource Name" - (URN) has been used historically to refer to both URIs under the - "urn" scheme [RFC2141], which are required to remain globally unique - and persistent even when the resource ceases to exist or becomes - unavailable, and to any other URI with the properties of a name. - - An individual scheme does not have to be classified as being just one - of "name" or "locator". Instances of URIs from any given scheme may - have the characteristics of names or locators or both, often - depending on the persistence and care in the assignment of - identifiers by the naming authority, rather than on any quality of - the scheme. 
Future specifications and related documentation should - use the general term "URI" rather than the more restrictive terms - "URL" and "URN" [RFC3305]. - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 7] - -RFC 3986 URI Generic Syntax January 2005 - - -1.2. Design Considerations - -1.2.1. Transcription - - The URI syntax has been designed with global transcription as one of - its main considerations. A URI is a sequence of characters from a - very limited set: the letters of the basic Latin alphabet, digits, - and a few special characters. A URI may be represented in a variety - of ways; e.g., ink on paper, pixels on a screen, or a sequence of - character encoding octets. The interpretation of a URI depends only - on the characters used and not on how those characters are - represented in a network protocol. - - The goal of transcription can be described by a simple scenario. - Imagine two colleagues, Sam and Kim, sitting in a pub at an - international conference and exchanging research ideas. Sam asks Kim - for a location to get more information, so Kim writes the URI for the - research site on a napkin. Upon returning home, Sam takes out the - napkin and types the URI into a computer, which then retrieves the - information to which Kim referred. - - There are several design considerations revealed by the scenario: - - o A URI is a sequence of characters that is not always represented - as a sequence of octets. - - o A URI might be transcribed from a non-network source and thus - should consist of characters that are most likely able to be - entered into a computer, within the constraints imposed by - keyboards (and related input devices) across languages and - locales. - - o A URI often has to be remembered by people, and it is easier for - people to remember a URI when it consists of meaningful or - familiar components. - - These design considerations are not always in alignment. 
For - example, it is often the case that the most meaningful name for a URI - component would require characters that cannot be typed into some - systems. The ability to transcribe a resource identifier from one - medium to another has been considered more important than having a - URI consist of the most meaningful of components. - - In local or regional contexts and with improving technology, users - might benefit from being able to use a wider range of characters; - such use is not defined by this specification. Percent-encoded - octets (Section 2.1) may be used within a URI to represent characters - outside the range of the US-ASCII coded character set if this - - - -Berners-Lee, et al. Standards Track [Page 8] - -RFC 3986 URI Generic Syntax January 2005 - - - representation is allowed by the scheme or by the protocol element in - which the URI is referenced. Such a definition should specify the - character encoding used to map those characters to octets prior to - being percent-encoded for the URI. - -1.2.2. Separating Identification from Interaction - - A common misunderstanding of URIs is that they are only used to refer - to accessible resources. The URI itself only provides - identification; access to the resource is neither guaranteed nor - implied by the presence of a URI. Instead, any operation associated - with a URI reference is defined by the protocol element, data format - attribute, or natural language text in which it appears. - - Given a URI, a system may attempt to perform a variety of operations - on the resource, as might be characterized by words such as "access", - "update", "replace", or "find attributes". Such operations are - defined by the protocols that make use of URIs, not by this - specification. However, we do use a few general terms for describing - common operations on URIs. 
URI "resolution" is the process of - determining an access mechanism and the appropriate parameters - necessary to dereference a URI; this resolution may require several - iterations. To use that access mechanism to perform an action on the - URI's resource is to "dereference" the URI. - - When URIs are used within information retrieval systems to identify - sources of information, the most common form of URI dereference is - "retrieval": making use of a URI in order to retrieve a - representation of its associated resource. A "representation" is a - sequence of octets, along with representation metadata describing - those octets, that constitutes a record of the state of the resource - at the time when the representation is generated. Retrieval is - achieved by a process that might include using the URI as a cache key - to check for a locally cached representation, resolution of the URI - to determine an appropriate access mechanism (if any), and - dereference of the URI for the sake of applying a retrieval - operation. Depending on the protocols used to perform the retrieval, - additional information might be supplied about the resource (resource - metadata) and its relation to other resources. - - URI references in information retrieval systems are designed to be - late-binding: the result of an access is generally determined when it - is accessed and may vary over time or due to other aspects of the - interaction. These references are created in order to be used in the - future: what is being identified is not some specific result that was - obtained in the past, but rather some characteristic that is expected - to be true for future results. In such cases, the resource referred - to by the URI is actually a sameness of characteristics as observed - - - -Berners-Lee, et al. Standards Track [Page 9] - -RFC 3986 URI Generic Syntax January 2005 - - - over time, perhaps elucidated by additional comments or assertions - made by the resource provider. 
- - Although many URI schemes are named after protocols, this does not - imply that use of these URIs will result in access to the resource - via the named protocol. URIs are often used simply for the sake of - identification. Even when a URI is used to retrieve a representation - of a resource, that access might be through gateways, proxies, - caches, and name resolution services that are independent of the - protocol associated with the scheme name. The resolution of some - URIs may require the use of more than one protocol (e.g., both DNS - and HTTP are typically used to access an "http" URI's origin server - when a representation isn't found in a local cache). - -1.2.3. Hierarchical Identifiers - - The URI syntax is organized hierarchically, with components listed in - order of decreasing significance from left to right. For some URI - schemes, the visible hierarchy is limited to the scheme itself: - everything after the scheme component delimiter (":") is considered - opaque to URI processing. Other URI schemes make the hierarchy - explicit and visible to generic parsing algorithms. - - The generic syntax uses the slash ("/"), question mark ("?"), and - number sign ("#") characters to delimit components that are - significant to the generic parser's hierarchical interpretation of an - identifier. In addition to aiding the readability of such - identifiers through the consistent use of familiar syntax, this - uniform representation of hierarchy across naming schemes allows - scheme-independent references to be made relative to that hierarchy. - - It is often the case that a group or "tree" of documents has been - constructed to serve a common purpose, wherein the vast majority of - URI references in these documents point to resources within the tree - rather than outside it. Similarly, documents located at a particular - site are much more likely to refer to other resources at that site - than to resources at remote sites. 
Relative referencing of URIs - allows document trees to be partially independent of their location - and access scheme. For instance, it is possible for a single set of - hypertext documents to be simultaneously accessible and traversable - via each of the "file", "http", and "ftp" schemes if the documents - refer to each other with relative references. Furthermore, such - document trees can be moved, as a whole, without changing any of the - relative references. - - A relative reference (Section 4.2) refers to a resource by describing - the difference within a hierarchical name space between the reference - context and the target URI. The reference resolution algorithm, - - - -Berners-Lee, et al. Standards Track [Page 10] - -RFC 3986 URI Generic Syntax January 2005 - - - presented in Section 5, defines how such a reference is transformed - to the target URI. As relative references can only be used within - the context of a hierarchical URI, designers of new URI schemes - should use a syntax consistent with the generic syntax's hierarchical - components unless there are compelling reasons to forbid relative - referencing within that scheme. - - NOTE: Previous specifications used the terms "partial URI" and - "relative URI" to denote a relative reference to a URI. As some - readers misunderstood those terms to mean that relative URIs are a - subset of URIs rather than a method of referencing URIs, this - specification simply refers to them as relative references. - - All URI references are parsed by generic syntax parsers when used. - However, because hierarchical processing has no effect on an absolute - URI used in a reference unless it contains one or more dot-segments - (complete path segments of "." or "..", as described in Section 3.3), - URI scheme specifications can define opaque identifiers by - disallowing use of slash characters, question mark characters, and - the URIs "scheme:." and "scheme:..". - -1.3. 
Syntax Notation - - This specification uses the Augmented Backus-Naur Form (ABNF) - notation of [RFC2234], including the following core ABNF syntax rules - defined by that specification: ALPHA (letters), CR (carriage return), - DIGIT (decimal digits), DQUOTE (double quote), HEXDIG (hexadecimal - digits), LF (line feed), and SP (space). The complete URI syntax is - collected in Appendix A. - -2. Characters - - The URI syntax provides a method of encoding data, presumably for the - sake of identifying a resource, as a sequence of characters. The URI - characters are, in turn, frequently encoded as octets for transport - or presentation. This specification does not mandate any particular - character encoding for mapping between URI characters and the octets - used to store or transmit those characters. When a URI appears in a - protocol element, the character encoding is defined by that protocol; - without such a definition, a URI is assumed to be in the same - character encoding as the surrounding text. - - The ABNF notation defines its terminal values to be non-negative - integers (codepoints) based on the US-ASCII coded character set - [ASCII]. Because a URI is a sequence of characters, we must invert - that relation in order to understand the URI syntax. Therefore, the - - - - - -Berners-Lee, et al. Standards Track [Page 11] - -RFC 3986 URI Generic Syntax January 2005 - - - integer values used by the ABNF must be mapped back to their - corresponding characters via US-ASCII in order to complete the syntax - rules. - - A URI is composed from a limited set of characters consisting of - digits, letters, and a few graphic symbols. A reserved subset of - those characters may be used to delimit syntax components within a - URI while the remaining characters, including both the unreserved set - and those reserved characters not acting as delimiters, define each - component's identifying data. - -2.1. 
Percent-Encoding - - A percent-encoding mechanism is used to represent a data octet in a - component when that octet's corresponding character is outside the - allowed set or is being used as a delimiter of, or within, the - component. A percent-encoded octet is encoded as a character - triplet, consisting of the percent character "%" followed by the two - hexadecimal digits representing that octet's numeric value. For - example, "%20" is the percent-encoding for the binary octet - "00100000" (ABNF: %x20), which in US-ASCII corresponds to the space - character (SP). Section 2.4 describes when percent-encoding and - decoding is applied. - - pct-encoded = "%" HEXDIG HEXDIG - - The uppercase hexadecimal digits 'A' through 'F' are equivalent to - the lowercase digits 'a' through 'f', respectively. If two URIs - differ only in the case of hexadecimal digits used in percent-encoded - octets, they are equivalent. For consistency, URI producers and - normalizers should use uppercase hexadecimal digits for all percent- - encodings. - -2.2. Reserved Characters - - URIs include components and subcomponents that are delimited by - characters in the "reserved" set. These characters are called - "reserved" because they may (or may not) be defined as delimiters by - the generic syntax, by each scheme-specific syntax, or by the - implementation-specific syntax of a URI's dereferencing algorithm. - If data for a URI component would conflict with a reserved - character's purpose as a delimiter, then the conflicting data must be - percent-encoded before the URI is formed. - - - - - - - - -Berners-Lee, et al. Standards Track [Page 12] - -RFC 3986 URI Generic Syntax January 2005 - - - reserved = gen-delims / sub-delims - - gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" - - sub-delims = "!" 
/ "$" / "&" / "'" / "(" / ")" - / "*" / "+" / "," / ";" / "=" - - The purpose of reserved characters is to provide a set of delimiting - characters that are distinguishable from other data within a URI. - URIs that differ in the replacement of a reserved character with its - corresponding percent-encoded octet are not equivalent. Percent- - encoding a reserved character, or decoding a percent-encoded octet - that corresponds to a reserved character, will change how the URI is - interpreted by most applications. Thus, characters in the reserved - set are protected from normalization and are therefore safe to be - used by scheme-specific and producer-specific algorithms for - delimiting data subcomponents within a URI. - - A subset of the reserved characters (gen-delims) is used as - delimiters of the generic URI components described in Section 3. A - component's ABNF syntax rule will not use the reserved or gen-delims - rule names directly; instead, each syntax rule lists the characters - allowed within that component (i.e., not delimiting it), and any of - those characters that are also in the reserved set are "reserved" for - use as subcomponent delimiters within the component. Only the most - common subcomponents are defined by this specification; other - subcomponents may be defined by a URI scheme's specification, or by - the implementation-specific syntax of a URI's dereferencing - algorithm, provided that such subcomponents are delimited by - characters in the reserved set allowed within that component. - - URI producing applications should percent-encode data octets that - correspond to characters in the reserved set unless these characters - are specifically allowed by the URI scheme to represent data in that - component. If a reserved character is found in a URI component and - no delimiting role is known for that character, then it must be - interpreted as representing the data octet corresponding to that - character's encoding in US-ASCII. - -2.3. 
Unreserved Characters - - Characters that are allowed in a URI but do not have a reserved - purpose are called unreserved. These include uppercase and lowercase - letters, decimal digits, hyphen, period, underscore, and tilde. - - unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" - - - - - -Berners-Lee, et al. Standards Track [Page 13] - -RFC 3986 URI Generic Syntax January 2005 - - - URIs that differ in the replacement of an unreserved character with - its corresponding percent-encoded US-ASCII octet are equivalent: they - identify the same resource. However, URI comparison implementations - do not always perform normalization prior to comparison (see Section - 6). For consistency, percent-encoded octets in the ranges of ALPHA - (%41-%5A and %61-%7A), DIGIT (%30-%39), hyphen (%2D), period (%2E), - underscore (%5F), or tilde (%7E) should not be created by URI - producers and, when found in a URI, should be decoded to their - corresponding unreserved characters by URI normalizers. - -2.4. When to Encode or Decode - - Under normal circumstances, the only time when octets within a URI - are percent-encoded is during the process of producing the URI from - its component parts. This is when an implementation determines which - of the reserved characters are to be used as subcomponent delimiters - and which can be safely used as data. Once produced, a URI is always - in its percent-encoded form. - - When a URI is dereferenced, the components and subcomponents - significant to the scheme-specific dereferencing process (if any) - must be parsed and separated before the percent-encoded octets within - those components can be safely decoded, as otherwise the data may be - mistaken for component delimiters. The only exception is for - percent-encoded octets corresponding to characters in the unreserved - set, which can be decoded at any time. 
For example, the octet - corresponding to the tilde ("~") character is often encoded as "%7E" - by older URI processing implementations; the "%7E" can be replaced by - "~" without changing its interpretation. - - Because the percent ("%") character serves as the indicator for - percent-encoded octets, it must be percent-encoded as "%25" for that - octet to be used as data within a URI. Implementations must not - percent-encode or decode the same string more than once, as decoding - an already decoded string might lead to misinterpreting a percent - data octet as the beginning of a percent-encoding, or vice versa in - the case of percent-encoding an already percent-encoded string. - -2.5. Identifying Data - - URI characters provide identifying data for each of the URI - components, serving as an external interface for identification - between systems. Although the presence and nature of the URI - production interface is hidden from clients that use its URIs (and is - thus beyond the scope of the interoperability requirements defined by - this specification), it is a frequent source of confusion and errors - in the interpretation of URI character issues. Implementers have to - be aware that there are multiple character encodings involved in the - - - -Berners-Lee, et al. Standards Track [Page 14] - -RFC 3986 URI Generic Syntax January 2005 - - - production and transmission of URIs: local name and data encoding, - public interface encoding, URI character encoding, data format - encoding, and protocol encoding. - - Local names, such as file system names, are stored with a local - character encoding. URI producing applications (e.g., origin - servers) will typically use the local encoding as the basis for - producing meaningful names. 
The URI producer will transform the - local encoding to one that is suitable for a public interface and - then transform the public interface encoding into the restricted set - of URI characters (reserved, unreserved, and percent-encodings). - Those characters are, in turn, encoded as octets to be used as a - reference within a data format (e.g., a document charset), and such - data formats are often subsequently encoded for transmission over - Internet protocols. - - For most systems, an unreserved character appearing within a URI - component is interpreted as representing the data octet corresponding - to that character's encoding in US-ASCII. Consumers of URIs assume - that the letter "X" corresponds to the octet "01011000", and even - when that assumption is incorrect, there is no harm in making it. A - system that internally provides identifiers in the form of a - different character encoding, such as EBCDIC, will generally perform - character translation of textual identifiers to UTF-8 [STD63] (or - some other superset of the US-ASCII character encoding) at an - internal interface, thereby providing more meaningful identifiers - than those resulting from simply percent-encoding the original - octets. - - For example, consider an information service that provides data, - stored locally using an EBCDIC-based file system, to clients on the - Internet through an HTTP server. When an author creates a file with - the name "Laguna Beach" on that file system, the "http" URI - corresponding to that resource is expected to contain the meaningful - string "Laguna%20Beach". If, however, that server produces URIs by - using an overly simplistic raw octet mapping, then the result would - be a URI containing "%D3%81%87%A4%95%81@%C2%85%81%83%88". An - internal transcoding interface fixes this problem by transcoding the - local name to a superset of US-ASCII prior to producing the URI. 
- Naturally, proper interpretation of an incoming URI on such an - interface requires that percent-encoded octets be decoded (e.g., - "%20" to SP) before the reverse transcoding is applied to obtain the - local name. - - In some cases, the internal interface between a URI component and the - identifying data that it has been crafted to represent is much less - direct than a character encoding translation. For example, portions - of a URI might reflect a query on non-ASCII data, or numeric - - - -Berners-Lee, et al. Standards Track [Page 15] - -RFC 3986 URI Generic Syntax January 2005 - - - coordinates on a map. Likewise, a URI scheme may define components - with additional encoding requirements that are applied prior to - forming the component and producing the URI. - - When a new URI scheme defines a component that represents textual - data consisting of characters from the Universal Character Set [UCS], - the data should first be encoded as octets according to the UTF-8 - character encoding [STD63]; then only those octets that do not - correspond to characters in the unreserved set should be percent- - encoded. For example, the character A would be represented as "A", - the character LATIN CAPITAL LETTER A WITH GRAVE would be represented - as "%C3%80", and the character KATAKANA LETTER A would be represented - as "%E3%82%A2". - -3. Syntax Components - - The generic URI syntax consists of a hierarchical sequence of - components referred to as the scheme, authority, path, query, and - fragment. - - URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ] - - hier-part = "//" authority path-abempty - / path-absolute - / path-rootless - / path-empty - - The scheme and path components are required, though the path may be - empty (no characters). When authority is present, the path must - either be empty or begin with a slash ("/") character. When - authority is not present, the path cannot begin with two slash - characters ("//"). 
These restrictions result in five different ABNF - rules for a path (Section 3.3), only one of which will match any - given URI reference. - - The following are two example URIs and their component parts: - - foo://example.com:8042/over/there?name=ferret#nose - \_/ \______________/\_________/ \_________/ \__/ - | | | | | - scheme authority path query fragment - | _____________________|__ - / \ / \ - urn:example:animal:ferret:nose - - - - - - - -Berners-Lee, et al. Standards Track [Page 16] - -RFC 3986 URI Generic Syntax January 2005 - - -3.1. Scheme - - Each URI begins with a scheme name that refers to a specification for - assigning identifiers within that scheme. As such, the URI syntax is - a federated and extensible naming system wherein each scheme's - specification may further restrict the syntax and semantics of - identifiers using that scheme. - - Scheme names consist of a sequence of characters beginning with a - letter and followed by any combination of letters, digits, plus - ("+"), period ("."), or hyphen ("-"). Although schemes are case- - insensitive, the canonical form is lowercase and documents that - specify schemes must do so with lowercase letters. An implementation - should accept uppercase letters as equivalent to lowercase in scheme - names (e.g., allow "HTTP" as well as "http") for the sake of - robustness but should only produce lowercase scheme names for - consistency. - - scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) - - Individual schemes are not specified by this document. The process - for registration of new URI schemes is defined separately by [BCP35]. - The scheme registry maintains the mapping between scheme names and - their specifications. Advice for designers of new URI schemes can be - found in [RFC2718]. URI scheme specifications must define their own - syntax so that all strings matching their scheme-specific syntax will - also match the grammar, as described in Section 4.3. 
- - When presented with a URI that violates one or more scheme-specific - restrictions, the scheme-specific resolution process should flag the - reference as an error rather than ignore the unused parts; doing so - reduces the number of equivalent URIs and helps detect abuses of the - generic syntax, which might indicate that the URI has been - constructed to mislead the user (Section 7.6). - -3.2. Authority - - Many URI schemes include a hierarchical element for a naming - authority so that governance of the name space defined by the - remainder of the URI is delegated to that authority (which may, in - turn, delegate it further). The generic syntax provides a common - means for distinguishing an authority based on a registered name or - server address, along with optional port and user information. - - The authority component is preceded by a double slash ("//") and is - terminated by the next slash ("/"), question mark ("?"), or number - sign ("#") character, or by the end of the URI. - - - - -Berners-Lee, et al. Standards Track [Page 17] - -RFC 3986 URI Generic Syntax January 2005 - - - authority = [ userinfo "@" ] host [ ":" port ] - - URI producers and normalizers should omit the ":" delimiter that - separates host from port if the port component is empty. Some - schemes do not allow the userinfo and/or port subcomponents. - - If a URI contains an authority component, then the path component - must either be empty or begin with a slash ("/") character. Non- - validating parsers (those that merely separate a URI reference into - its major components) will often ignore the subcomponent structure of - authority, treating it as an opaque string from the double-slash to - the first terminating delimiter, until such time as the URI is - dereferenced. - -3.2.1. User Information - - The userinfo subcomponent may consist of a user name and, optionally, - scheme-specific information about how to gain authorization to access - the resource. 
The user information, if present, is followed by a - commercial at-sign ("@") that delimits it from the host. - - userinfo = *( unreserved / pct-encoded / sub-delims / ":" ) - - Use of the format "user:password" in the userinfo field is - deprecated. Applications should not render as clear text any data - after the first colon (":") character found within a userinfo - subcomponent unless the data after the colon is the empty string - (indicating no password). Applications may choose to ignore or - reject such data when it is received as part of a reference and - should reject the storage of such data in unencrypted form. The - passing of authentication information in clear text has proven to be - a security risk in almost every case where it has been used. - - Applications that render a URI for the sake of user feedback, such as - in graphical hypertext browsing, should render userinfo in a way that - is distinguished from the rest of a URI, when feasible. Such - rendering will assist the user in cases where the userinfo has been - misleadingly crafted to look like a trusted domain name - (Section 7.6). - -3.2.2. Host - - The host subcomponent of authority is identified by an IP literal - encapsulated within square brackets, an IPv4 address in dotted- - decimal form, or a registered name. The host subcomponent is case- - insensitive. The presence of a host subcomponent within a URI does - not imply that the scheme requires access to the given host on the - Internet. In many cases, the host syntax is used only for the sake - - - -Berners-Lee, et al. Standards Track [Page 18] - -RFC 3986 URI Generic Syntax January 2005 - - - of reusing the existing registration process created and deployed for - DNS, thus obtaining a globally unique name without the cost of - deploying another registry. However, such use comes with its own - costs: domain name ownership may change over time for reasons not - anticipated by the URI producer. 
In other cases, the data within the - host component identifies a registered name that has nothing to do - with an Internet host. We use the name "host" for the ABNF rule - because that is its most common purpose, not its only purpose. - - host = IP-literal / IPv4address / reg-name - - The syntax rule for host is ambiguous because it does not completely - distinguish between an IPv4address and a reg-name. In order to - disambiguate the syntax, we apply the "first-match-wins" algorithm: - If host matches the rule for IPv4address, then it should be - considered an IPv4 address literal and not a reg-name. Although host - is case-insensitive, producers and normalizers should use lowercase - for registered names and hexadecimal addresses for the sake of - uniformity, while only using uppercase letters for percent-encodings. - - A host identified by an Internet Protocol literal address, version 6 - [RFC3513] or later, is distinguished by enclosing the IP literal - within square brackets ("[" and "]"). This is the only place where - square bracket characters are allowed in the URI syntax. In - anticipation of future, as-yet-undefined IP literal address formats, - an implementation may use an optional version flag to indicate such a - format explicitly rather than rely on heuristic determination. - - IP-literal = "[" ( IPv6address / IPvFuture ) "]" - - IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" ) - - The version flag does not indicate the IP version; rather, it - indicates future versions of the literal format. As such, - implementations must not provide the version flag for the existing - IPv4 and IPv6 literal address forms described below. If a URI - containing an IP-literal that starts with "v" (case-insensitive), - indicating that the version flag is present, is dereferenced by an - application that does not know the meaning of that version flag, then - the application should return an appropriate error for "address - mechanism not supported". 
- - A host identified by an IPv6 literal address is represented inside - the square brackets without a preceding version flag. The ABNF - provided here is a translation of the text definition of an IPv6 - literal address provided in [RFC3513]. This syntax does not support - IPv6 scoped addressing zone identifiers. - - - - -Berners-Lee, et al. Standards Track [Page 19] - -RFC 3986 URI Generic Syntax January 2005 - - - A 128-bit IPv6 address is divided into eight 16-bit pieces. Each - piece is represented numerically in case-insensitive hexadecimal, - using one to four hexadecimal digits (leading zeroes are permitted). - The eight encoded pieces are given most-significant first, separated - by colon characters. Optionally, the least-significant two pieces - may instead be represented in IPv4 address textual format. A - sequence of one or more consecutive zero-valued 16-bit pieces within - the address may be elided, omitting all their digits and leaving - exactly two consecutive colons in their place to mark the elision. - - IPv6address = 6( h16 ":" ) ls32 - / "::" 5( h16 ":" ) ls32 - / [ h16 ] "::" 4( h16 ":" ) ls32 - / [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 - / [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 - / [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 - / [ *4( h16 ":" ) h16 ] "::" ls32 - / [ *5( h16 ":" ) h16 ] "::" h16 - / [ *6( h16 ":" ) h16 ] "::" - - ls32 = ( h16 ":" h16 ) / IPv4address - ; least-significant 32 bits of address - - h16 = 1*4HEXDIG - ; 16 bits of address represented in hexadecimal - - A host identified by an IPv4 literal address is represented in - dotted-decimal notation (a sequence of four decimal numbers in the - range 0 to 255, separated by "."), as described in [RFC1123] by - reference to [RFC0952]. Note that other forms of dotted notation may - be interpreted on some platforms, as described in Section 7.4, but - only the dotted-decimal form of four octets is allowed by this - grammar. - - IPv4address = dec-octet "." dec-octet "." 
dec-octet "." dec-octet - - dec-octet = DIGIT ; 0-9 - / %x31-39 DIGIT ; 10-99 - / "1" 2DIGIT ; 100-199 - / "2" %x30-34 DIGIT ; 200-249 - / "25" %x30-35 ; 250-255 - - A host identified by a registered name is a sequence of characters - usually intended for lookup within a locally defined host or service - name registry, though the URI's scheme-specific semantics may require - that a specific registry (or fixed name table) be used instead. The - most common name registry mechanism is the Domain Name System (DNS). - A registered name intended for lookup in the DNS uses the syntax - - - -Berners-Lee, et al. Standards Track [Page 20] - -RFC 3986 URI Generic Syntax January 2005 - - - defined in Section 3.5 of [RFC1034] and Section 2.1 of [RFC1123]. - Such a name consists of a sequence of domain labels separated by ".", - each domain label starting and ending with an alphanumeric character - and possibly also containing "-" characters. The rightmost domain - label of a fully qualified domain name in DNS may be followed by a - single "." and should be if it is necessary to distinguish between - the complete domain name and some local domain. - - reg-name = *( unreserved / pct-encoded / sub-delims ) - - If the URI scheme defines a default for host, then that default - applies when the host subcomponent is undefined or when the - registered name is empty (zero length). For example, the "file" URI - scheme is defined so that no authority, an empty host, and - "localhost" all mean the end-user's machine, whereas the "http" - scheme considers a missing authority or empty host invalid. - - This specification does not mandate a particular registered name - lookup technology and therefore does not restrict the syntax of reg- - name beyond what is necessary for interoperability. 
Instead, it - delegates the issue of registered name syntax conformance to the - operating system of each application performing URI resolution, and - that operating system decides what it will allow for the purpose of - host identification. A URI resolution implementation might use DNS, - host tables, yellow pages, NetInfo, WINS, or any other system for - lookup of registered names. However, a globally scoped naming - system, such as DNS fully qualified domain names, is necessary for - URIs intended to have global scope. URI producers should use names - that conform to the DNS syntax, even when use of DNS is not - immediately apparent, and should limit these names to no more than - 255 characters in length. - - The reg-name syntax allows percent-encoded octets in order to - represent non-ASCII registered names in a uniform way that is - independent of the underlying name resolution technology. Non-ASCII - characters must first be encoded according to UTF-8 [STD63], and then - each octet of the corresponding UTF-8 sequence must be percent- - encoded to be represented as URI characters. URI producing - applications must not use percent-encoding in host unless it is used - to represent a UTF-8 character sequence. When a non-ASCII registered - name represents an internationalized domain name intended for - resolution via the DNS, the name must be transformed to the IDNA - encoding [RFC3490] prior to name lookup. URI producers should - provide these registered names in the IDNA encoding, rather than a - percent-encoding, if they wish to maximize interoperability with - legacy URI resolvers. - - - - - -Berners-Lee, et al. Standards Track [Page 21] - -RFC 3986 URI Generic Syntax January 2005 - - -3.2.3. Port - - The port subcomponent of authority is designated by an optional port - number in decimal following the host and delimited from it by a - single colon (":") character. - - port = *DIGIT - - A scheme may define a default port. 
For example, the "http" scheme - defines a default port of "80", corresponding to its reserved TCP - port number. The type of port designated by the port number (e.g., - TCP, UDP, SCTP) is defined by the URI scheme. URI producers and - normalizers should omit the port component and its ":" delimiter if - port is empty or if its value would be the same as that of the - scheme's default. - -3.3. Path - - The path component contains data, usually organized in hierarchical - form, that, along with data in the non-hierarchical query component - (Section 3.4), serves to identify a resource within the scope of the - URI's scheme and naming authority (if any). The path is terminated - by the first question mark ("?") or number sign ("#") character, or - by the end of the URI. - - If a URI contains an authority component, then the path component - must either be empty or begin with a slash ("/") character. If a URI - does not contain an authority component, then the path cannot begin - with two slash characters ("//"). In addition, a URI reference - (Section 4.1) may be a relative-path reference, in which case the - first path segment cannot contain a colon (":") character. The ABNF - requires five separate rules to disambiguate these cases, only one of - which will match the path substring within a given URI reference. We - use the generic term "path component" to describe the URI substring - matched by the parser to one of these rules. - - path = path-abempty ; begins with "/" or is empty - / path-absolute ; begins with "/" but not "//" - / path-noscheme ; begins with a non-colon segment - / path-rootless ; begins with a segment - / path-empty ; zero characters - - path-abempty = *( "/" segment ) - path-absolute = "/" [ segment-nz *( "/" segment ) ] - path-noscheme = segment-nz-nc *( "/" segment ) - path-rootless = segment-nz *( "/" segment ) - path-empty = 0 - - - - -Berners-Lee, et al. 
Standards Track [Page 22] - -RFC 3986 URI Generic Syntax January 2005 - - - segment = *pchar - segment-nz = 1*pchar - segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" ) - ; non-zero-length segment without any colon ":" - - pchar = unreserved / pct-encoded / sub-delims / ":" / "@" - - A path consists of a sequence of path segments separated by a slash - ("/") character. A path is always defined for a URI, though the - defined path may be empty (zero length). Use of the slash character - to indicate hierarchy is only required when a URI will be used as the - context for relative references. For example, the URI - <mailto:fred@example.com> has a path of "fred@example.com", whereas - the URI <http://www.example.com> has an empty path. - - The path segments "." and "..", also known as dot-segments, are - defined for relative reference within the path name hierarchy. They - are intended for use at the beginning of a relative-path reference - (Section 4.2) to indicate relative position within the hierarchical - tree of names. This is similar to their role within some operating - systems' file directory structures to indicate the current directory - and parent directory, respectively. However, unlike in a file - system, these dot-segments are only interpreted within the URI path - hierarchy and are removed as part of the resolution process (Section - 5.2). - - Aside from dot-segments in hierarchical paths, a path segment is - considered opaque by the generic syntax. URI producing applications - often use the reserved characters allowed in a segment to delimit - scheme-specific or dereference-handler-specific subcomponents. For - example, the semicolon (";") and equals ("=") reserved characters are - often used to delimit parameters and parameter values applicable to - that segment. The comma (",") reserved character is often used for - similar purposes. 
For example, one URI producer might use a segment - such as "name;v=1.1" to indicate a reference to version 1.1 of - "name", whereas another might use a segment such as "name,1.1" to - indicate the same. Parameter types may be defined by scheme-specific - semantics, but in most cases the syntax of a parameter is specific to - the implementation of the URI's dereferencing algorithm. - -3.4. Query - - The query component contains non-hierarchical data that, along with - data in the path component (Section 3.3), serves to identify a - resource within the scope of the URI's scheme and naming authority - (if any). The query component is indicated by the first question - mark ("?") character and terminated by a number sign ("#") character - or by the end of the URI. - - - -Berners-Lee, et al. Standards Track [Page 23] - -RFC 3986 URI Generic Syntax January 2005 - - - query = *( pchar / "/" / "?" ) - - The characters slash ("/") and question mark ("?") may represent data - within the query component. Beware that some older, erroneous - implementations may not handle such data correctly when it is used as - the base URI for relative references (Section 5.1), apparently - because they fail to distinguish query data from path data when - looking for hierarchical separators. However, as query components - are often used to carry identifying information in the form of - "key=value" pairs and one frequently used value is a reference to - another URI, it is sometimes better for usability to avoid percent- - encoding those characters. - -3.5. Fragment - - The fragment identifier component of a URI allows indirect - identification of a secondary resource by reference to a primary - resource and additional identifying information. The identified - secondary resource may be some portion or subset of the primary - resource, some view on representations of the primary resource, or - some other resource defined or described by those representations. 
A - fragment identifier component is indicated by the presence of a - number sign ("#") character and terminated by the end of the URI. - - fragment = *( pchar / "/" / "?" ) - - The semantics of a fragment identifier are defined by the set of - representations that might result from a retrieval action on the - primary resource. The fragment's format and resolution is therefore - dependent on the media type [RFC2046] of a potentially retrieved - representation, even though such a retrieval is only performed if the - URI is dereferenced. If no such representation exists, then the - semantics of the fragment are considered unknown and are effectively - unconstrained. Fragment identifier semantics are independent of the - URI scheme and thus cannot be redefined by scheme specifications. - - Individual media types may define their own restrictions on or - structures within the fragment identifier syntax for specifying - different types of subsets, views, or external references that are - identifiable as secondary resources by that media type. If the - primary resource has multiple representations, as is often the case - for resources whose representation is selected based on attributes of - the retrieval request (a.k.a., content negotiation), then whatever is - identified by the fragment should be consistent across all of those - representations. Each representation should either define the - fragment so that it corresponds to the same secondary resource, - regardless of how it is represented, or should leave the fragment - undefined (i.e., not found). - - - -Berners-Lee, et al. Standards Track [Page 24] - -RFC 3986 URI Generic Syntax January 2005 - - - As with any URI, use of a fragment identifier component does not - imply that a retrieval action will take place. A URI with a fragment - identifier may be used to refer to the secondary resource without any - implication that the primary resource is accessible or will ever be - accessed. 
- - Fragment identifiers have a special role in information retrieval - systems as the primary form of client-side indirect referencing, - allowing an author to specifically identify aspects of an existing - resource that are only indirectly provided by the resource owner. As - such, the fragment identifier is not used in the scheme-specific - processing of a URI; instead, the fragment identifier is separated - from the rest of the URI prior to a dereference, and thus the - identifying information within the fragment itself is dereferenced - solely by the user agent, regardless of the URI scheme. Although - this separate handling is often perceived to be a loss of - information, particularly for accurate redirection of references as - resources move over time, it also serves to prevent information - providers from denying reference authors the right to refer to - information within a resource selectively. Indirect referencing also - provides additional flexibility and extensibility to systems that use - URIs, as new media types are easier to define and deploy than new - schemes of identification. - - The characters slash ("/") and question mark ("?") are allowed to - represent data within the fragment identifier. Beware that some - older, erroneous implementations may not handle this data correctly - when it is used as the base URI for relative references (Section - 5.1). - -4. Usage - - When applications make reference to a URI, they do not always use the - full form of reference defined by the "URI" syntax rule. To save - space and take advantage of hierarchical locality, many Internet - protocol elements and media type formats allow an abbreviation of a - URI, whereas others restrict the syntax to a particular form of URI. - We define the most common forms of reference syntax in this - specification because they impact and depend upon the design of the - generic syntax, requiring a uniform parsing algorithm in order to be - interpreted consistently. - -4.1. 
URI Reference - - URI-reference is used to denote the most common usage of a resource - identifier. - - URI-reference = URI / relative-ref - - - -Berners-Lee, et al. Standards Track [Page 25] - -RFC 3986 URI Generic Syntax January 2005 - - - A URI-reference is either a URI or a relative reference. If the - URI-reference's prefix does not match the syntax of a scheme followed - by its colon separator, then the URI-reference is a relative - reference. - - A URI-reference is typically parsed first into the five URI - components, in order to determine what components are present and - whether the reference is relative. Then, each component is parsed - for its subparts and their validation. The ABNF of URI-reference, - along with the "first-match-wins" disambiguation rule, is sufficient - to define a validating parser for the generic syntax. Readers - familiar with regular expressions should see Appendix B for an - example of a non-validating URI-reference parser that will take any - given string and extract the URI components. - -4.2. Relative Reference - - A relative reference takes advantage of the hierarchical syntax - (Section 1.2.3) to express a URI reference relative to the name space - of another hierarchical URI. - - relative-ref = relative-part [ "?" query ] [ "#" fragment ] - - relative-part = "//" authority path-abempty - / path-absolute - / path-noscheme - / path-empty - - The URI referred to by a relative reference, also known as the target - URI, is obtained by applying the reference resolution algorithm of - Section 5. - - A relative reference that begins with two slash characters is termed - a network-path reference; such references are rarely used. A - relative reference that begins with a single slash character is - termed an absolute-path reference. A relative reference that does - not begin with a slash character is termed a relative-path reference. 
- - A path segment that contains a colon character (e.g., "this:that") - cannot be used as the first segment of a relative-path reference, as - it would be mistaken for a scheme name. Such a segment must be - preceded by a dot-segment (e.g., "./this:that") to make a relative- - path reference. - - - - - - - - -Berners-Lee, et al. Standards Track [Page 26] - -RFC 3986 URI Generic Syntax January 2005 - - -4.3. Absolute URI - - Some protocol elements allow only the absolute form of a URI without - a fragment identifier. For example, defining a base URI for later - use by relative references calls for an absolute-URI syntax rule that - does not allow a fragment. - - absolute-URI = scheme ":" hier-part [ "?" query ] - - URI scheme specifications must define their own syntax so that all - strings matching their scheme-specific syntax will also match the - <absolute-URI> grammar. Scheme specifications will not define - fragment identifier syntax or usage, regardless of its applicability - to resources identifiable via that scheme, as fragment identification - is orthogonal to scheme definition. However, scheme specifications - are encouraged to include a wide range of examples, including - examples that show use of the scheme's URIs with fragment identifiers - when such usage is appropriate. - -4.4. Same-Document Reference - - When a URI reference refers to a URI that is, aside from its fragment - component (if any), identical to the base URI (Section 5.1), that - reference is called a "same-document" reference. The most frequent - examples of same-document references are relative references that are - empty or include only the number sign ("#") separator followed by a - fragment identifier. - - When a same-document reference is dereferenced for a retrieval - action, the target of that reference is defined to be within the same - entity (representation, document, or message) as the reference; - therefore, a dereference should not result in a new retrieval action. 
- - Normalization of the base and target URIs prior to their comparison, - as described in Sections 6.2.2 and 6.2.3, is allowed but rarely - performed in practice. Normalization may increase the set of same- - document references, which may be of benefit to some caching - applications. As such, reference authors should not assume that a - slightly different, though equivalent, reference URI will (or will - not) be interpreted as a same-document reference by any given - application. - -4.5. Suffix Reference - - The URI syntax is designed for unambiguous reference to resources and - extensibility via the URI scheme. However, as URI identification and - usage have become commonplace, traditional media (television, radio, - newspapers, billboards, etc.) have increasingly used a suffix of the - - - -Berners-Lee, et al. Standards Track [Page 27] - -RFC 3986 URI Generic Syntax January 2005 - - - URI as a reference, consisting of only the authority and path - portions of the URI, such as - - www.w3.org/Addressing/ - - or simply a DNS registered name on its own. Such references are - primarily intended for human interpretation rather than for machines, - with the assumption that context-based heuristics are sufficient to - complete the URI (e.g., most registered names beginning with "www" - are likely to have a URI prefix of "http://"). Although there is no - standard set of heuristics for disambiguating a URI suffix, many - client implementations allow them to be entered by the user and - heuristically resolved. - - Although this practice of using suffix references is common, it - should be avoided whenever possible and should never be used in - situations where long-term references are expected. The heuristics - noted above will change over time, particularly when a new URI scheme - becomes popular, and are often incorrect when used out of context. - Furthermore, they can lead to security issues along the lines of - those described in [RFC1535]. 
- - As a URI suffix has the same syntax as a relative-path reference, a - suffix reference cannot be used in contexts where a relative - reference is expected. As a result, suffix references are limited to - places where there is no defined base URI, such as dialog boxes and - off-line advertisements. - -5. Reference Resolution - - This section defines the process of resolving a URI reference within - a context that allows relative references so that the result is a - string matching the <URI> syntax rule of Section 3. - -5.1. Establishing a Base URI - - The term "relative" implies that a "base URI" exists against which - the relative reference is applied. Aside from fragment-only - references (Section 4.4), relative references are only usable when a - base URI is known. A base URI must be established by the parser - prior to parsing URI references that might be relative. A base URI - must conform to the <absolute-URI> syntax rule (Section 4.3). If the - base URI is obtained from a URI reference, then that reference must - be converted to absolute form and stripped of any fragment component - prior to its use as a base URI. - - - - - - -Berners-Lee, et al. Standards Track [Page 28] - -RFC 3986 URI Generic Syntax January 2005 - - - The base URI of a reference can be established in one of four ways, - discussed below in order of precedence. The order of precedence can - be thought of in terms of layers, where the innermost defined base - URI has the highest precedence. This can be visualized graphically - as follows: - - .----------------------------------------------------------. - | .----------------------------------------------------. | - | | .----------------------------------------------. | | - | | | .----------------------------------------. | | | - | | | | .----------------------------------. 
| | | | - | | | | | | | | | | - | | | | `----------------------------------' | | | | - | | | | (5.1.1) Base URI embedded in content | | | | - | | | `----------------------------------------' | | | - | | | (5.1.2) Base URI of the encapsulating entity | | | - | | | (message, representation, or none) | | | - | | `----------------------------------------------' | | - | | (5.1.3) URI used to retrieve the entity | | - | `----------------------------------------------------' | - | (5.1.4) Default Base URI (application-dependent) | - `----------------------------------------------------------' - -5.1.1. Base URI Embedded in Content - - Within certain media types, a base URI for relative references can be - embedded within the content itself so that it can be readily obtained - by a parser. This can be useful for descriptive documents, such as - tables of contents, which may be transmitted to others through - protocols other than their usual retrieval context (e.g., email or - USENET news). - - It is beyond the scope of this specification to specify how, for each - media type, a base URI can be embedded. The appropriate syntax, when - available, is described by the data format specification associated - with each media type. - -5.1.2. Base URI from the Encapsulating Entity - - If no base URI is embedded, the base URI is defined by the - representation's retrieval context. For a document that is enclosed - within another entity, such as a message or archive, the retrieval - context is that entity. Thus, the default base URI of a - representation is the base URI of the entity in which the - representation is encapsulated. - - - - - - -Berners-Lee, et al. Standards Track [Page 29] - -RFC 3986 URI Generic Syntax January 2005 - - - A mechanism for embedding a base URI within MIME container types - (e.g., the message and multipart types) is defined by MHTML - [RFC2557]. 
Protocols that do not use the MIME message header syntax, - but that do allow some form of tagged metadata to be included within - messages, may define their own syntax for defining a base URI as part - of a message. - -5.1.3. Base URI from the Retrieval URI - - If no base URI is embedded and the representation is not encapsulated - within some other entity, then, if a URI was used to retrieve the - representation, that URI shall be considered the base URI. Note that - if the retrieval was the result of a redirected request, the last URI - used (i.e., the URI that resulted in the actual retrieval of the - representation) is the base URI. - -5.1.4. Default Base URI - - If none of the conditions described above apply, then the base URI is - defined by the context of the application. As this definition is - necessarily application-dependent, failing to define a base URI by - using one of the other methods may result in the same content being - interpreted differently by different types of applications. - - A sender of a representation containing relative references is - responsible for ensuring that a base URI for those references can be - established. Aside from fragment-only references, relative - references can only be used reliably in situations where the base URI - is well defined. - -5.2. Relative Resolution - - This section describes an algorithm for converting a URI reference - that might be relative to a given base URI into the parsed components - of the reference's target. The components can then be recomposed, as - described in Section 5.3, to form the target URI. This algorithm - provides definitive results that can be used to test the output of - other implementations. Applications may implement relative reference - resolution by using some other algorithm, provided that the results - match what would be given by this one. - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 30] - -RFC 3986 URI Generic Syntax January 2005 - - -5.2.1. 
Pre-parse the Base URI - - The base URI (Base) is established according to the procedure of - Section 5.1 and parsed into the five main components described in - Section 3. Note that only the scheme component is required to be - present in a base URI; the other components may be empty or - undefined. A component is undefined if its associated delimiter does - not appear in the URI reference; the path component is never - undefined, though it may be empty. - - Normalization of the base URI, as described in Sections 6.2.2 and - 6.2.3, is optional. A URI reference must be transformed to its - target URI before it can be normalized. - -5.2.2. Transform References - - For each URI reference (R), the following pseudocode describes an - algorithm for transforming R into its target URI (T): - - -- The URI reference is parsed into the five URI components - -- - (R.scheme, R.authority, R.path, R.query, R.fragment) = parse(R); - - -- A non-strict parser may ignore a scheme in the reference - -- if it is identical to the base URI's scheme. - -- - if ((not strict) and (R.scheme == Base.scheme)) then - undefine(R.scheme); - endif; - - - - - - - - - - - - - - - - - - - - - - -Berners-Lee, et al. 
Standards Track [Page 31] - -RFC 3986 URI Generic Syntax January 2005 - - - if defined(R.scheme) then - T.scheme = R.scheme; - T.authority = R.authority; - T.path = remove_dot_segments(R.path); - T.query = R.query; - else - if defined(R.authority) then - T.authority = R.authority; - T.path = remove_dot_segments(R.path); - T.query = R.query; - else - if (R.path == "") then - T.path = Base.path; - if defined(R.query) then - T.query = R.query; - else - T.query = Base.query; - endif; - else - if (R.path starts-with "/") then - T.path = remove_dot_segments(R.path); - else - T.path = merge(Base.path, R.path); - T.path = remove_dot_segments(T.path); - endif; - T.query = R.query; - endif; - T.authority = Base.authority; - endif; - T.scheme = Base.scheme; - endif; - - T.fragment = R.fragment; - -5.2.3. Merge Paths - - The pseudocode above refers to a "merge" routine for merging a - relative-path reference with the path of the base URI. This is - accomplished as follows: - - o If the base URI has a defined authority component and an empty - path, then return a string consisting of "/" concatenated with the - reference's path; otherwise, - - - - - - - - -Berners-Lee, et al. Standards Track [Page 32] - -RFC 3986 URI Generic Syntax January 2005 - - - o return a string consisting of the reference's path component - appended to all but the last segment of the base URI's path (i.e., - excluding any characters after the right-most "/" in the base URI - path, or excluding the entire base URI path if it does not contain - any "/" characters). - -5.2.4. Remove Dot Segments - - The pseudocode also refers to a "remove_dot_segments" routine for - interpreting and removing the special "." and ".." complete path - segments from a referenced path. This is done after the path is - extracted from a reference, whether or not the path was relative, in - order to remove any invalid or extraneous dot-segments prior to - forming the target URI. 
Although there are many ways to accomplish - this removal process, we describe a simple method using two string - buffers. - - 1. The input buffer is initialized with the now-appended path - components and the output buffer is initialized to the empty - string. - - 2. While the input buffer is not empty, loop as follows: - - A. If the input buffer begins with a prefix of "../" or "./", - then remove that prefix from the input buffer; otherwise, - - B. if the input buffer begins with a prefix of "/./" or "/.", - where "." is a complete path segment, then replace that - prefix with "/" in the input buffer; otherwise, - - C. if the input buffer begins with a prefix of "/../" or "/..", - where ".." is a complete path segment, then replace that - prefix with "/" in the input buffer and remove the last - segment and its preceding "/" (if any) from the output - buffer; otherwise, - - D. if the input buffer consists only of "." or "..", then remove - that from the input buffer; otherwise, - - E. move the first path segment in the input buffer to the end of - the output buffer, including the initial "/" character (if - any) and any subsequent characters up to, but not including, - the next "/" character or the end of the input buffer. - - 3. Finally, the output buffer is returned as the result of - remove_dot_segments. - - - - - -Berners-Lee, et al. Standards Track [Page 33] - -RFC 3986 URI Generic Syntax January 2005 - - - Note that dot-segments are intended for use in URI references to - express an identifier relative to the hierarchy of names in the base - URI. The remove_dot_segments algorithm respects that hierarchy by - removing extra dot-segments rather than treat them as an error or - leaving them to be misinterpreted by dereference implementations. - - The following illustrates how the above steps are applied for two - examples of merged paths, showing the state of the two buffers after - each step. 
- - STEP OUTPUT BUFFER INPUT BUFFER - - 1 : /a/b/c/./../../g - 2E: /a /b/c/./../../g - 2E: /a/b /c/./../../g - 2E: /a/b/c /./../../g - 2B: /a/b/c /../../g - 2C: /a/b /../g - 2C: /a /g - 2E: /a/g - - STEP OUTPUT BUFFER INPUT BUFFER - - 1 : mid/content=5/../6 - 2E: mid /content=5/../6 - 2E: mid/content=5 /../6 - 2C: mid /6 - 2E: mid/6 - - Some applications may find it more efficient to implement the - remove_dot_segments algorithm by using two segment stacks rather than - strings. - - Note: Beware that some older, erroneous implementations will fail - to separate a reference's query component from its path component - prior to merging the base and reference paths, resulting in an - interoperability failure if the query component contains the - strings "/../" or "/./". - - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 34] - -RFC 3986 URI Generic Syntax January 2005 - - -5.3. Component Recomposition - - Parsed URI components can be recomposed to obtain the corresponding - URI reference string. Using pseudocode, this would be: - - result = "" - - if defined(scheme) then - append scheme to result; - append ":" to result; - endif; - - if defined(authority) then - append "//" to result; - append authority to result; - endif; - - append path to result; - - if defined(query) then - append "?" to result; - append query to result; - endif; - - if defined(fragment) then - append "#" to result; - append fragment to result; - endif; - - return result; - - Note that we are careful to preserve the distinction between a - component that is undefined, meaning that its separator was not - present in the reference, and a component that is empty, meaning that - the separator was present and was immediately followed by the next - component separator or the end of the reference. - -5.4. Reference Resolution Examples - - Within a representation with a well defined base URI of - - http://a/b/c/d;p?q - - a relative reference is transformed to its target URI as follows. 
- - - - - - - -Berners-Lee, et al. Standards Track [Page 35] - -RFC 3986 URI Generic Syntax January 2005 - - -5.4.1. Normal Examples - - "g:h" = "g:h" - "g" = "http://a/b/c/g" - "./g" = "http://a/b/c/g" - "g/" = "http://a/b/c/g/" - "/g" = "http://a/g" - "//g" = "http://g" - "?y" = "http://a/b/c/d;p?y" - "g?y" = "http://a/b/c/g?y" - "#s" = "http://a/b/c/d;p?q#s" - "g#s" = "http://a/b/c/g#s" - "g?y#s" = "http://a/b/c/g?y#s" - ";x" = "http://a/b/c/;x" - "g;x" = "http://a/b/c/g;x" - "g;x?y#s" = "http://a/b/c/g;x?y#s" - "" = "http://a/b/c/d;p?q" - "." = "http://a/b/c/" - "./" = "http://a/b/c/" - ".." = "http://a/b/" - "../" = "http://a/b/" - "../g" = "http://a/b/g" - "../.." = "http://a/" - "../../" = "http://a/" - "../../g" = "http://a/g" - -5.4.2. Abnormal Examples - - Although the following abnormal examples are unlikely to occur in - normal practice, all URI parsers should be capable of resolving them - consistently. Each example uses the same base as that above. - - Parsers must be careful in handling cases where there are more ".." - segments in a relative-path reference than there are hierarchical - levels in the base URI's path. Note that the ".." syntax cannot be - used to change the authority component of a URI. - - "../../../g" = "http://a/g" - "../../../../g" = "http://a/g" - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 36] - -RFC 3986 URI Generic Syntax January 2005 - - - Similarly, parsers must remove the dot-segments "." and ".." when - they are complete components of a path, but not when they are only - part of a segment. - - "/./g" = "http://a/g" - "/../g" = "http://a/g" - "g." = "http://a/b/c/g." - ".g" = "http://a/b/c/.g" - "g.." = "http://a/b/c/g.." - "..g" = "http://a/b/c/..g" - - Less likely are cases where the relative reference uses unnecessary - or nonsensical forms of the "." and ".." complete path segments. - - "./../g" = "http://a/b/g" - "./g/." 
= "http://a/b/c/g/" - "g/./h" = "http://a/b/c/g/h" - "g/../h" = "http://a/b/c/h" - "g;x=1/./y" = "http://a/b/c/g;x=1/y" - "g;x=1/../y" = "http://a/b/c/y" - - Some applications fail to separate the reference's query and/or - fragment components from the path component before merging it with - the base path and removing dot-segments. This error is rarely - noticed, as typical usage of a fragment never includes the hierarchy - ("/") character and the query component is not normally used within - relative references. - - "g?y/./x" = "http://a/b/c/g?y/./x" - "g?y/../x" = "http://a/b/c/g?y/../x" - "g#s/./x" = "http://a/b/c/g#s/./x" - "g#s/../x" = "http://a/b/c/g#s/../x" - - Some parsers allow the scheme name to be present in a relative - reference if it is the same as the base URI scheme. This is - considered to be a loophole in prior specifications of partial URI - [RFC1630]. Its use should be avoided but is allowed for backward - compatibility. - - "http:g" = "http:g" ; for strict parsers - / "http://a/b/c/g" ; for backward compatibility - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 37] - -RFC 3986 URI Generic Syntax January 2005 - - -6. Normalization and Comparison - - One of the most common operations on URIs is simple comparison: - determining whether two URIs are equivalent without using the URIs to - access their respective resource(s). A comparison is performed every - time a response cache is accessed, a browser checks its history to - color a link, or an XML parser processes tags within a namespace. - Extensive normalization prior to comparison of URIs is often used by - spiders and indexing engines to prune a search space or to reduce - duplication of request actions and response storage. - - URI comparison is performed for some particular purpose. 
Protocols - or implementations that compare URIs for different purposes will - often be subject to differing design trade-offs in regards to how - much effort should be spent in reducing aliased identifiers. This - section describes various methods that may be used to compare URIs, - the trade-offs between them, and the types of applications that might - use them. - -6.1. Equivalence - - Because URIs exist to identify resources, presumably they should be - considered equivalent when they identify the same resource. However, - this definition of equivalence is not of much practical use, as there - is no way for an implementation to compare two resources unless it - has full knowledge or control of them. For this reason, - determination of equivalence or difference of URIs is based on string - comparison, perhaps augmented by reference to additional rules - provided by URI scheme definitions. We use the terms "different" and - "equivalent" to describe the possible outcomes of such comparisons, - but there are many application-dependent versions of equivalence. - - Even though it is possible to determine that two URIs are equivalent, - URI comparison is not sufficient to determine whether two URIs - identify different resources. For example, an owner of two different - domain names could decide to serve the same resource from both, - resulting in two different URIs. Therefore, comparison methods are - designed to minimize false negatives while strictly avoiding false - positives. - - In testing for equivalence, applications should not directly compare - relative references; the references should be converted to their - respective target URIs before comparison. When URIs are compared to - select (or avoid) a network action, such as retrieval of a - representation, fragment components (if any) should be excluded from - the comparison. - - - - - -Berners-Lee, et al. Standards Track [Page 38] - -RFC 3986 URI Generic Syntax January 2005 - - -6.2. 
Comparison Ladder - - A variety of methods are used in practice to test URI equivalence. - These methods fall into a range, distinguished by the amount of - processing required and the degree to which the probability of false - negatives is reduced. As noted above, false negatives cannot be - eliminated. In practice, their probability can be reduced, but this - reduction requires more processing and is not cost-effective for all - applications. - - If this range of comparison practices is considered as a ladder, the - following discussion will climb the ladder, starting with practices - that are cheap but have a relatively higher chance of producing false - negatives, and proceeding to those that have higher computational - cost and lower risk of false negatives. - -6.2.1. Simple String Comparison - - If two URIs, when considered as character strings, are identical, - then it is safe to conclude that they are equivalent. This type of - equivalence test has very low computational cost and is in wide use - in a variety of applications, particularly in the domain of parsing. - - Testing strings for equivalence requires some basic precautions. - This procedure is often referred to as "bit-for-bit" or - "byte-for-byte" comparison, which is potentially misleading. Testing - strings for equality is normally based on pair comparison of the - characters that make up the strings, starting from the first and - proceeding until both strings are exhausted and all characters are - found to be equal, until a pair of characters compares unequal, or - until one of the strings is exhausted before the other. - - This character comparison requires that each pair of characters be - put in comparable form. For example, should one URI be stored in a - byte array in EBCDIC encoding and the second in a Java String object - (UTF-16), bit-for-bit comparisons applied naively will produce - errors. 
It is better to speak of equality on a character-for- - character basis rather than on a byte-for-byte or bit-for-bit basis. - In practical terms, character-by-character comparisons should be done - codepoint-by-codepoint after conversion to a common character - encoding. - - False negatives are caused by the production and use of URI aliases. - Unnecessary aliases can be reduced, regardless of the comparison - method, by consistently providing URI references in an already- - normalized form (i.e., a form identical to what would be produced - after normalization is applied, as described below). - - - - -Berners-Lee, et al. Standards Track [Page 39] - -RFC 3986 URI Generic Syntax January 2005 - - - Protocols and data formats often limit some URI comparisons to simple - string comparison, based on the theory that people and - implementations will, in their own best interest, be consistent in - providing URI references, or at least consistent enough to negate any - efficiency that might be obtained from further normalization. - -6.2.2. Syntax-Based Normalization - - Implementations may use logic based on the definitions provided by - this specification to reduce the probability of false negatives. - This processing is moderately higher in cost than character-for- - character string comparison. For example, an application using this - approach could reasonably consider the following two URIs equivalent: - - example://a/b/c/%7Bfoo%7D - eXAMPLE://a/./b/../b/%63/%7bfoo%7d - - Web user agents, such as browsers, typically apply this type of URI - normalization when determining whether a cached response is - available. Syntax-based normalization includes such techniques as - case normalization, percent-encoding normalization, and removal of - dot-segments. - -6.2.2.1. 
Case Normalization - - For all URIs, the hexadecimal digits within a percent-encoding - triplet (e.g., "%3a" versus "%3A") are case-insensitive and therefore - should be normalized to use uppercase letters for the digits A-F. - - When a URI uses components of the generic syntax, the component - syntax equivalence rules always apply; namely, that the scheme and - host are case-insensitive and therefore should be normalized to - lowercase. For example, the URI <HTTP://www.EXAMPLE.com/> is - equivalent to <http://www.example.com/>. The other generic syntax - components are assumed to be case-sensitive unless specifically - defined otherwise by the scheme (see Section 6.2.3). - -6.2.2.2. Percent-Encoding Normalization - - The percent-encoding mechanism (Section 2.1) is a frequent source of - variance among otherwise identical URIs. In addition to the case - normalization issue noted above, some URI producers percent-encode - octets that do not require percent-encoding, resulting in URIs that - are equivalent to their non-encoded counterparts. These URIs should - be normalized by decoding any percent-encoded octet that corresponds - to an unreserved character, as described in Section 2.3. - - - - - -Berners-Lee, et al. Standards Track [Page 40] - -RFC 3986 URI Generic Syntax January 2005 - - -6.2.2.3. Path Segment Normalization - - The complete path segments "." and ".." are intended only for use - within relative references (Section 4.1) and are removed as part of - the reference resolution process (Section 5.2). However, some - deployed implementations incorrectly assume that reference resolution - is not necessary when the reference is already a URI and thus fail to - remove dot-segments when they occur in non-relative paths. URI - normalizers should remove dot-segments by applying the - remove_dot_segments algorithm to the path, as described in - Section 5.2.4. - -6.2.3. 
Scheme-Based Normalization - - The syntax and semantics of URIs vary from scheme to scheme, as - described by the defining specification for each scheme. - Implementations may use scheme-specific rules, at further processing - cost, to reduce the probability of false negatives. For example, - because the "http" scheme makes use of an authority component, has a - default port of "80", and defines an empty path to be equivalent to - "/", the following four URIs are equivalent: - - http://example.com - http://example.com/ - http://example.com:/ - http://example.com:80/ - - In general, a URI that uses the generic syntax for authority with an - empty path should be normalized to a path of "/". Likewise, an - explicit ":port", for which the port is empty or the default for the - scheme, is equivalent to one where the port and its ":" delimiter are - elided and thus should be removed by scheme-based normalization. For - example, the second URI above is the normal form for the "http" - scheme. - - Another case where normalization varies by scheme is in the handling - of an empty authority component or empty host subcomponent. For many - scheme specifications, an empty authority or host is considered an - error; for others, it is considered equivalent to "localhost" or the - end-user's host. When a scheme defines a default for authority and a - URI reference to that default is desired, the reference should be - normalized to an empty authority for the sake of uniformity, brevity, - and internationalization. If, however, either the userinfo or port - subcomponents are non-empty, then the host should be given explicitly - even if it matches the default. - - Normalization should not remove delimiters when their associated - component is empty unless licensed to do so by the scheme - - - -Berners-Lee, et al. Standards Track [Page 41] - -RFC 3986 URI Generic Syntax January 2005 - - - specification. For example, the URI "http://example.com/?" 
cannot be - assumed to be equivalent to any of the examples above. Likewise, the - presence or absence of delimiters within a userinfo subcomponent is - usually significant to its interpretation. The fragment component is - not subject to any scheme-based normalization; thus, two URIs that - differ only by the suffix "#" are considered different regardless of - the scheme. - - Some schemes define additional subcomponents that consist of case- - insensitive data, giving an implicit license to normalizers to - convert this data to a common case (e.g., all lowercase). For - example, URI schemes that define a subcomponent of path to contain an - Internet hostname, such as the "mailto" URI scheme, cause that - subcomponent to be case-insensitive and thus subject to case - normalization (e.g., "mailto:Joe@Example.COM" is equivalent to - "mailto:Joe@example.com", even though the generic syntax considers - the path component to be case-sensitive). - - Other scheme-specific normalizations are possible. - -6.2.4. Protocol-Based Normalization - - Substantial effort to reduce the incidence of false negatives is - often cost-effective for web spiders. Therefore, they implement even - more aggressive techniques in URI comparison. For example, if they - observe that a URI such as - - http://example.com/data - - redirects to a URI differing only in the trailing slash - - http://example.com/data/ - - they will likely regard the two as equivalent in the future. This - kind of technique is only appropriate when equivalence is clearly - indicated by both the result of accessing the resources and the - common conventions of their scheme's dereference algorithm (in this - case, use of redirection by HTTP origin servers to avoid problems - with relative references). - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 42] - -RFC 3986 URI Generic Syntax January 2005 - - -7. Security Considerations - - A URI does not in itself pose a security threat. 
However, as URIs - are often used to provide a compact set of instructions for access to - network resources, care must be taken to properly interpret the data - within a URI, to prevent that data from causing unintended access, - and to avoid including data that should not be revealed in plain - text. - -7.1. Reliability and Consistency - - There is no guarantee that once a URI has been used to retrieve - information, the same information will be retrievable by that URI in - the future. Nor is there any guarantee that the information - retrievable via that URI in the future will be observably similar to - that retrieved in the past. The URI syntax does not constrain how a - given scheme or authority apportions its namespace or maintains it - over time. Such guarantees can only be obtained from the person(s) - controlling that namespace and the resource in question. A specific - URI scheme may define additional semantics, such as name persistence, - if those semantics are required of all naming authorities for that - scheme. - -7.2. Malicious Construction - - It is sometimes possible to construct a URI so that an attempt to - perform a seemingly harmless, idempotent operation, such as the - retrieval of a representation, will in fact cause a possibly damaging - remote operation. The unsafe URI is typically constructed by - specifying a port number other than that reserved for the network - protocol in question. The client unwittingly contacts a site running - a different protocol service, and data within the URI contains - instructions that, when interpreted according to this other protocol, - cause an unexpected operation. A frequent example of such abuse has - been the use of a protocol-based scheme with a port component of - "25", thereby fooling user agent software into sending an unintended - or impersonating message via an SMTP server. 
- - Applications should prevent dereference of a URI that specifies a TCP - port number within the "well-known port" range (0 - 1023) unless the - protocol being used to dereference that URI is compatible with the - protocol expected on that well-known port. Although IANA maintains a - registry of well-known ports, applications should make such - restrictions user-configurable to avoid preventing the deployment of - new services. - - - - - - -Berners-Lee, et al. Standards Track [Page 43] - -RFC 3986 URI Generic Syntax January 2005 - - - When a URI contains percent-encoded octets that match the delimiters - for a given resolution or dereference protocol (for example, CR and - LF characters for the TELNET protocol), these percent-encodings must - not be decoded before transmission across that protocol. Transfer of - the percent-encoding, which might violate the protocol, is less - harmful than allowing decoded octets to be interpreted as additional - operations or parameters, perhaps triggering an unexpected and - possibly harmful remote operation. - -7.3. Back-End Transcoding - - When a URI is dereferenced, the data within it is often parsed by - both the user agent and one or more servers. In HTTP, for example, a - typical user agent will parse a URI into its five major components, - access the authority's server, and send it the data within the - authority, path, and query components. A typical server will take - that information, parse the path into segments and the query into - key/value pairs, and then invoke implementation-specific handlers to - respond to the request. As a result, a common security concern for - server implementations that handle a URI, either as a whole or split - into separate components, is proper interpretation of the octet data - represented by the characters and percent-encodings within that URI. - - Percent-encoded octets must be decoded at some point during the - dereference process. 
Applications must split the URI into its - components and subcomponents prior to decoding the octets, as - otherwise the decoded octets might be mistaken for delimiters. - Security checks of the data within a URI should be applied after - decoding the octets. Note, however, that the "%00" percent-encoding - (NUL) may require special handling and should be rejected if the - application is not expecting to receive raw data within a component. - - Special care should be taken when the URI path interpretation process - involves the use of a back-end file system or related system - functions. File systems typically assign an operational meaning to - special characters, such as the "/", "\", ":", "[", and "]" - characters, and to special device names like ".", "..", "...", "aux", - "lpt", etc. In some cases, merely testing for the existence of such - a name will cause the operating system to pause or invoke unrelated - system calls, leading to significant security concerns regarding - denial of service and unintended data transfer. It would be - impossible for this specification to list all such significant - characters and device names. Implementers should research the - reserved names and characters for the types of storage device that - may be attached to their applications and restrict the use of data - obtained from URI components accordingly. - - - - - -Berners-Lee, et al. Standards Track [Page 44] - -RFC 3986 URI Generic Syntax January 2005 - - -7.4. Rare IP Address Formats - - Although the URI syntax for IPv4address only allows the common - dotted-decimal form of IPv4 address literal, many implementations - that process URIs make use of platform-dependent system routines, - such as gethostbyname() and inet_aton(), to translate the string - literal to an actual IP address. Unfortunately, such system routines - often allow and process a much larger set of formats than those - described in Section 3.2.2. 
- - For example, many implementations allow dotted forms of three - numbers, wherein the last part is interpreted as a 16-bit quantity - and placed in the right-most two bytes of the network address (e.g., - a Class B network). Likewise, a dotted form of two numbers means - that the last part is interpreted as a 24-bit quantity and placed in - the right-most three bytes of the network address (Class A), and a - single number (without dots) is interpreted as a 32-bit quantity and - stored directly in the network address. Adding further to the - confusion, some implementations allow each dotted part to be - interpreted as decimal, octal, or hexadecimal, as specified in the C - language (i.e., a leading 0x or 0X implies hexadecimal; a leading 0 - implies octal; otherwise, the number is interpreted as decimal). - - These additional IP address formats are not allowed in the URI syntax - due to differences between platform implementations. However, they - can become a security concern if an application attempts to filter - access to resources based on the IP address in string literal format. - If this filtering is performed, literals should be converted to - numeric form and filtered based on the numeric value, and not on a - prefix or suffix of the string form. - -7.5. Sensitive Information - - URI producers should not provide a URI that contains a username or - password that is intended to be secret. URIs are frequently - displayed by browsers, stored in clear text bookmarks, and logged by - user agent history and intermediary applications (proxies). A - password appearing within the userinfo component is deprecated and - should be considered an error (or simply ignored) except in those - rare cases where the 'password' parameter is intended to be public. - -7.6. 
Semantic Attacks - - Because the userinfo subcomponent is rarely used and appears before - the host in the authority component, it can be used to construct a - URI intended to mislead a human user by appearing to identify one - (trusted) naming authority while actually identifying a different - authority hidden behind the noise. For example - - - -Berners-Lee, et al. Standards Track [Page 45] - -RFC 3986 URI Generic Syntax January 2005 - - - ftp://cnn.example.com&story=breaking_news@10.0.0.1/top_story.htm - - might lead a human user to assume that the host is 'cnn.example.com', - whereas it is actually '10.0.0.1'. Note that a misleading userinfo - subcomponent could be much longer than the example above. - - A misleading URI, such as that above, is an attack on the user's - preconceived notions about the meaning of a URI rather than an attack - on the software itself. User agents may be able to reduce the impact - of such attacks by distinguishing the various components of the URI - when they are rendered, such as by using a different color or tone to - render userinfo if any is present, though there is no panacea. More - information on URI-based semantic attacks can be found in [Siedzik]. - -8. IANA Considerations - - URI scheme names, as defined by <scheme> in Section 3.1, form a - registered namespace that is managed by IANA according to the - procedures defined in [BCP35]. No IANA actions are required by this - document. - -9. Acknowledgements - - This specification is derived from RFC 2396 [RFC2396], RFC 1808 - [RFC1808], and RFC 1738 [RFC1738]; the acknowledgements in those - documents still apply. It also incorporates the update (with - corrections) for IPv6 literals in the host syntax, as defined by - Robert M. Hinden, Brian E. Carpenter, and Larry Masinter in - [RFC2732]. In addition, contributions by Gisle Aas, Reese Anschultz, - Daniel Barclay, Tim Bray, Mike Brown, Rob Cameron, Jeremy Carroll, - Dan Connolly, Adam M. 
Costello, John Cowan, Jason Diamond, Martin - Duerst, Stefan Eissing, Clive D.W. Feather, Al Gilman, Tony Hammond, - Elliotte Harold, Pat Hayes, Henry Holtzman, Ian B. Jacobs, Michael - Kay, John C. Klensin, Graham Klyne, Dan Kohn, Bruce Lilly, Andrew - Main, Dave McAlpin, Ira McDonald, Michael Mealling, Ray Merkert, - Stephen Pollei, Julian Reschke, Tomas Rokicki, Miles Sabin, Kai - Schaetzl, Mark Thomson, Ronald Tschalaer, Norm Walsh, Marc Warne, - Stuart Williams, and Henry Zongaro are gratefully acknowledged. - -10. References - -10.1. Normative References - - [ASCII] American National Standards Institute, "Coded Character - Set -- 7-bit American Standard Code for Information - Interchange", ANSI X3.4, 1986. - - - - - -Berners-Lee, et al. Standards Track [Page 46] - -RFC 3986 URI Generic Syntax January 2005 - - - [RFC2234] Crocker, D. and P. Overell, "Augmented BNF for Syntax - Specifications: ABNF", RFC 2234, November 1997. - - [STD63] Yergeau, F., "UTF-8, a transformation format of - ISO 10646", STD 63, RFC 3629, November 2003. - - [UCS] International Organization for Standardization, - "Information Technology - Universal Multiple-Octet Coded - Character Set (UCS)", ISO/IEC 10646:2003, December 2003. - -10.2. Informative References - - [BCP19] Freed, N. and J. Postel, "IANA Charset Registration - Procedures", BCP 19, RFC 2978, October 2000. - - [BCP35] Petke, R. and I. King, "Registration Procedures for URL - Scheme Names", BCP 35, RFC 2717, November 1999. - - [RFC0952] Harrenstien, K., Stahl, M., and E. Feinler, "DoD Internet - host table specification", RFC 952, October 1985. - - [RFC1034] Mockapetris, P., "Domain names - concepts and facilities", - STD 13, RFC 1034, November 1987. - - [RFC1123] Braden, R., "Requirements for Internet Hosts - Application - and Support", STD 3, RFC 1123, October 1989. - - [RFC1535] Gavron, E., "A Security Problem and Proposed Correction - With Widely Deployed DNS Software", RFC 1535, - October 1993. 
- - [RFC1630] Berners-Lee, T., "Universal Resource Identifiers in WWW: A - Unifying Syntax for the Expression of Names and Addresses - of Objects on the Network as used in the World-Wide Web", - RFC 1630, June 1994. - - [RFC1736] Kunze, J., "Functional Recommendations for Internet - Resource Locators", RFC 1736, February 1995. - - [RFC1737] Sollins, K. and L. Masinter, "Functional Requirements for - Uniform Resource Names", RFC 1737, December 1994. - - [RFC1738] Berners-Lee, T., Masinter, L., and M. McCahill, "Uniform - Resource Locators (URL)", RFC 1738, December 1994. - - [RFC1808] Fielding, R., "Relative Uniform Resource Locators", - RFC 1808, June 1995. - - - - -Berners-Lee, et al. Standards Track [Page 47] - -RFC 3986 URI Generic Syntax January 2005 - - - [RFC2046] Freed, N. and N. Borenstein, "Multipurpose Internet Mail - Extensions (MIME) Part Two: Media Types", RFC 2046, - November 1996. - - [RFC2141] Moats, R., "URN Syntax", RFC 2141, May 1997. - - [RFC2396] Berners-Lee, T., Fielding, R., and L. Masinter, "Uniform - Resource Identifiers (URI): Generic Syntax", RFC 2396, - August 1998. - - [RFC2518] Goland, Y., Whitehead, E., Faizi, A., Carter, S., and D. - Jensen, "HTTP Extensions for Distributed Authoring -- - WEBDAV", RFC 2518, February 1999. - - [RFC2557] Palme, J., Hopmann, A., and N. Shelness, "MIME - Encapsulation of Aggregate Documents, such as HTML - (MHTML)", RFC 2557, March 1999. - - [RFC2718] Masinter, L., Alvestrand, H., Zigmond, D., and R. Petke, - "Guidelines for new URL Schemes", RFC 2718, November 1999. - - [RFC2732] Hinden, R., Carpenter, B., and L. Masinter, "Format for - Literal IPv6 Addresses in URL's", RFC 2732, December 1999. - - [RFC3305] Mealling, M. and R. Denenberg, "Report from the Joint - W3C/IETF URI Planning Interest Group: Uniform Resource - Identifiers (URIs), URLs, and Uniform Resource Names - (URNs): Clarifications and Recommendations", RFC 3305, - August 2002. - - [RFC3490] Faltstrom, P., Hoffman, P., and A. 
Costello, - "Internationalizing Domain Names in Applications (IDNA)", - RFC 3490, March 2003. - - [RFC3513] Hinden, R. and S. Deering, "Internet Protocol Version 6 - (IPv6) Addressing Architecture", RFC 3513, April 2003. - - [Siedzik] Siedzik, R., "Semantic Attacks: What's in a URL?", - April 2001, . - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 48] - -RFC 3986 URI Generic Syntax January 2005 - - -Appendix A. Collected ABNF for URI - - URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ] - - hier-part = "//" authority path-abempty - / path-absolute - / path-rootless - / path-empty - - URI-reference = URI / relative-ref - - absolute-URI = scheme ":" hier-part [ "?" query ] - - relative-ref = relative-part [ "?" query ] [ "#" fragment ] - - relative-part = "//" authority path-abempty - / path-absolute - / path-noscheme - / path-empty - - scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) - - authority = [ userinfo "@" ] host [ ":" port ] - userinfo = *( unreserved / pct-encoded / sub-delims / ":" ) - host = IP-literal / IPv4address / reg-name - port = *DIGIT - - IP-literal = "[" ( IPv6address / IPvFuture ) "]" - - IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" ) - - IPv6address = 6( h16 ":" ) ls32 - / "::" 5( h16 ":" ) ls32 - / [ h16 ] "::" 4( h16 ":" ) ls32 - / [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32 - / [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32 - / [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32 - / [ *4( h16 ":" ) h16 ] "::" ls32 - / [ *5( h16 ":" ) h16 ] "::" h16 - / [ *6( h16 ":" ) h16 ] "::" - - h16 = 1*4HEXDIG - ls32 = ( h16 ":" h16 ) / IPv4address - IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet - - - - - - - -Berners-Lee, et al. 
Standards Track [Page 49] - -RFC 3986 URI Generic Syntax January 2005 - - - dec-octet = DIGIT ; 0-9 - / %x31-39 DIGIT ; 10-99 - / "1" 2DIGIT ; 100-199 - / "2" %x30-34 DIGIT ; 200-249 - / "25" %x30-35 ; 250-255 - - reg-name = *( unreserved / pct-encoded / sub-delims ) - - path = path-abempty ; begins with "/" or is empty - / path-absolute ; begins with "/" but not "//" - / path-noscheme ; begins with a non-colon segment - / path-rootless ; begins with a segment - / path-empty ; zero characters - - path-abempty = *( "/" segment ) - path-absolute = "/" [ segment-nz *( "/" segment ) ] - path-noscheme = segment-nz-nc *( "/" segment ) - path-rootless = segment-nz *( "/" segment ) - path-empty = 0 - - segment = *pchar - segment-nz = 1*pchar - segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" ) - ; non-zero-length segment without any colon ":" - - pchar = unreserved / pct-encoded / sub-delims / ":" / "@" - - query = *( pchar / "/" / "?" ) - - fragment = *( pchar / "/" / "?" ) - - pct-encoded = "%" HEXDIG HEXDIG - - unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" - reserved = gen-delims / sub-delims - gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" - sub-delims = "!" / "$" / "&" / "'" / "(" / ")" - / "*" / "+" / "," / ";" / "=" - -Appendix B. Parsing a URI Reference with a Regular Expression - - As the "first-match-wins" algorithm is identical to the "greedy" - disambiguation method used by POSIX regular expressions, it is - natural and commonplace to use a regular expression for parsing the - potential five components of a URI reference. - - The following line is the regular expression for breaking-down a - well-formed URI reference into its components. - - - -Berners-Lee, et al. Standards Track [Page 50] - -RFC 3986 URI Generic Syntax January 2005 - - - ^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))? 
- 12 3 4 5 6 7 8 9 - - The numbers in the second line above are only to assist readability; - they indicate the reference points for each subexpression (i.e., each - paired parenthesis). We refer to the value matched for subexpression - <n> as $<n>. For example, matching the above expression to - - http://www.ics.uci.edu/pub/ietf/uri/#Related - - results in the following subexpression matches: - - $1 = http: - $2 = http - $3 = //www.ics.uci.edu - $4 = www.ics.uci.edu - $5 = /pub/ietf/uri/ - $6 = <undefined> - $7 = <undefined> - $8 = #Related - $9 = Related - - where <undefined> indicates that the component is not present, as is - the case for the query component in the above example. Therefore, we - can determine the value of the five components as - - scheme = $2 - authority = $4 - path = $5 - query = $7 - fragment = $9 - - Going in the opposite direction, we can recreate a URI reference from - its components by using the algorithm of Section 5.3. - -Appendix C. Delimiting a URI in Context - - URIs are often transmitted through formats that do not provide a - clear context for their interpretation. For example, there are many - occasions when a URI is included in plain text; examples include text - sent in email, USENET news, and on printed paper. In such cases, it - is important to be able to delimit the URI from the rest of the text, - and in particular from punctuation marks that might be mistaken for - part of the URI. - - In practice, URIs are delimited in a variety of ways, but usually - within double-quotes "http://example.com/", angle brackets - <http://example.com/>, or just by using whitespace: - - - -Berners-Lee, et al. Standards Track [Page 51] - -RFC 3986 URI Generic Syntax January 2005 - - - http://example.com/ - - These wrappers do not form part of the URI. - - In some cases, extra whitespace (spaces, line-breaks, tabs, etc.) may - have to be added to break a long URI across lines. The whitespace - should be ignored when the URI is extracted. - - No whitespace should be introduced after a hyphen ("-") character. 
- Because some typesetters and printers may (erroneously) introduce a - hyphen at the end of line when breaking it, the interpreter of a URI - containing a line break immediately after a hyphen should ignore all - whitespace around the line break and should be aware that the hyphen - may or may not actually be part of the URI. - - Using <> angle brackets around each URI is especially recommended as - a delimiting style for a reference that contains embedded whitespace. - - The prefix "URL:" (with or without a trailing space) was formerly - recommended as a way to help distinguish a URI from other bracketed - designators, though it is not commonly used in practice and is no - longer recommended. - - For robustness, software that accepts user-typed URI should attempt - to recognize and strip both delimiters and embedded whitespace. - - For example, the text - - Yes, Jim, I found it under "http://www.w3.org/Addressing/", - but you can probably pick it up from . Note the warning in . - - contains the URI references - - http://www.w3.org/Addressing/ - ftp://foo.example.com/rfc/ - http://www.ics.uci.edu/pub/ietf/uri/historical.html#WARNING - - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 52] - -RFC 3986 URI Generic Syntax January 2005 - - -Appendix D. Changes from RFC 2396 - -D.1. Additions - - An ABNF rule for URI has been introduced to correspond to one common - usage of the term: an absolute URI with optional fragment. - - IPv6 (and later) literals have been added to the list of possible - identifiers for the host portion of an authority component, as - described by [RFC2732], with the addition of "[" and "]" to the - reserved set and a version flag to anticipate future versions of IP - literals. Square brackets are now specified as reserved within the - authority component and are not allowed outside their use as - delimiters for an IP literal within host. 
In order to make this - change without changing the technical definition of the path, query, - and fragment components, those rules were redefined to directly - specify the characters allowed. - - As [RFC2732] defers to [RFC3513] for definition of an IPv6 literal - address, which, unfortunately, lacks an ABNF description of - IPv6address, we created a new ABNF rule for IPv6address that matches - the text representations defined by Section 2.2 of [RFC3513]. - Likewise, the definition of IPv4address has been improved in order to - limit each decimal octet to the range 0-255. - - Section 6, on URI normalization and comparison, has been completely - rewritten and extended by using input from Tim Bray and discussion - within the W3C Technical Architecture Group. - -D.2. Modifications - - The ad-hoc BNF syntax of RFC 2396 has been replaced with the ABNF of - [RFC2234]. This change required all rule names that formerly - included underscore characters to be renamed with a dash instead. In - addition, a number of syntax rules have been eliminated or simplified - to make the overall grammar more comprehensible. Specifications that - refer to the obsolete grammar rules may be understood by replacing - those rules according to the following table: - - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 53] - -RFC 3986 URI Generic Syntax January 2005 - - - +----------------+--------------------------------------------------+ - | obsolete rule | translation | - +----------------+--------------------------------------------------+ - | absoluteURI | absolute-URI | - | relativeURI | relative-part [ "?" query ] | - | hier_part | ( "//" authority path-abempty / | - | | path-absolute ) [ "?" query ] | - | | | - | opaque_part | path-rootless [ "?" 
query ] | - | net_path | "//" authority path-abempty | - | abs_path | path-absolute | - | rel_path | path-rootless | - | rel_segment | segment-nz-nc | - | reg_name | reg-name | - | server | authority | - | hostport | host [ ":" port ] | - | hostname | reg-name | - | path_segments | path-abempty | - | param | * | - | | | - | uric | unreserved / pct-encoded / ";" / "?" / ":" | - | | / "@" / "&" / "=" / "+" / "$" / "," / "/" | - | | | - | uric_no_slash | unreserved / pct-encoded / ";" / "?" / ":" | - | | / "@" / "&" / "=" / "+" / "$" / "," | - | | | - | mark | "-" / "_" / "." / "!" / "~" / "*" / "'" | - | | / "(" / ")" | - | | | - | escaped | pct-encoded | - | hex | HEXDIG | - | alphanum | ALPHA / DIGIT | - +----------------+--------------------------------------------------+ - - Use of the above obsolete rules for the definition of scheme-specific - syntax is deprecated. - - Section 2, on characters, has been rewritten to explain what - characters are reserved, when they are reserved, and why they are - reserved, even when they are not used as delimiters by the generic - syntax. The mark characters that are typically unsafe to decode, - including the exclamation mark ("!"), asterisk ("*"), single-quote - ("'"), and open and close parentheses ("(" and ")"), have been moved - to the reserved set in order to clarify the distinction between - reserved and unreserved and, hopefully, to answer the most common - question of scheme designers. Likewise, the section on - percent-encoded characters has been rewritten, and URI normalizers - are now given license to decode any percent-encoded octets - - - -Berners-Lee, et al. Standards Track [Page 54] - -RFC 3986 URI Generic Syntax January 2005 - - - corresponding to unreserved characters. In general, the terms - "escaped" and "unescaped" have been replaced with "percent-encoded" - and "decoded", respectively, to reduce confusion with other forms of - escape mechanisms. 
- - The ABNF for URI and URI-reference has been redesigned to make them - more friendly to LALR parsers and to reduce complexity. As a result, - the layout form of syntax description has been removed, along with - the uric, uric_no_slash, opaque_part, net_path, abs_path, rel_path, - path_segments, rel_segment, and mark rules. All references to - "opaque" URIs have been replaced with a better description of how the - path component may be opaque to hierarchy. The relativeURI rule has - been replaced with relative-ref to avoid unnecessary confusion over - whether they are a subset of URI. The ambiguity regarding the - parsing of URI-reference as a URI or a relative-ref with a colon in - the first segment has been eliminated through the use of five - separate path matching rules. - - The fragment identifier has been moved back into the section on - generic syntax components and within the URI and relative-ref rules, - though it remains excluded from absolute-URI. The number sign ("#") - character has been moved back to the reserved set as a result of - reintegrating the fragment syntax. - - The ABNF has been corrected to allow the path component to be empty. - This also allows an absolute-URI to consist of nothing after the - "scheme:", as is present in practice with the "dav:" namespace - [RFC2518] and with the "about:" scheme used internally by many WWW - browser implementations. The ambiguity regarding the boundary - between authority and path has been eliminated through the use of - five separate path matching rules. - - Registry-based naming authorities that use the generic syntax are now - defined within the host rule. This change allows current - implementations, where whatever name provided is simply fed to the - local name resolution mechanism, to be consistent with the - specification. It also removes the need to re-specify DNS name - formats here. 
Furthermore, it allows the host component to contain - percent-encoded octets, which is necessary to enable - internationalized domain names to be provided in URIs, processed in - their native character encodings at the application layers above URI - processing, and passed to an IDNA library as a registered name in the - UTF-8 character encoding. The server, hostport, hostname, - domainlabel, toplabel, and alphanum rules have been removed. - - The resolving relative references algorithm of [RFC2396] has been - rewritten with pseudocode for this revision to improve clarity and - fix the following issues: - - - -Berners-Lee, et al. Standards Track [Page 55] - -RFC 3986 URI Generic Syntax January 2005 - - - o [RFC2396] section 5.2, step 6a, failed to account for a base URI - with no path. - - o Restored the behavior of [RFC1808] where, if the reference - contains an empty path and a defined query component, the target - URI inherits the base URI's path component. - - o The determination of whether a URI reference is a same-document - reference has been decoupled from the URI parser, simplifying the - URI processing interface within applications in a way consistent - with the internal architecture of deployed URI processing - implementations. The determination is now based on comparison to - the base URI after transforming a reference to absolute form, - rather than on the format of the reference itself. This change - may result in more references being considered "same-document" - under this specification than there would be under the rules given - in RFC 2396, especially when normalization is used to reduce - aliases. However, it does not change the status of existing - same-document references. - - o Separated the path merge routine into two routines: merge, for - describing combination of the base URI path with a relative-path - reference, and remove_dot_segments, for describing how to remove - the special "." and ".." segments from a composed path. 
The - remove_dot_segments algorithm is now applied to all URI reference - paths in order to match common implementations and to improve the - normalization of URIs in practice. This change only impacts the - parsing of abnormal references and same-scheme references wherein - the base URI has a non-hierarchical path. - -Index - - A - ABNF 11 - absolute 27 - absolute-path 26 - absolute-URI 27 - access 9 - authority 17, 18 - - B - base URI 28 - - C - character encoding 4 - character 4 - characters 8, 11 - coded character set 4 - - - -Berners-Lee, et al. Standards Track [Page 56] - -RFC 3986 URI Generic Syntax January 2005 - - - D - dec-octet 20 - dereference 9 - dot-segments 23 - - F - fragment 16, 24 - - G - gen-delims 13 - generic syntax 6 - - H - h16 20 - hier-part 16 - hierarchical 10 - host 18 - - I - identifier 5 - IP-literal 19 - IPv4 20 - IPv4address 19, 20 - IPv6 19 - IPv6address 19, 20 - IPvFuture 19 - - L - locator 7 - ls32 20 - - M - merge 32 - - N - name 7 - network-path 26 - - P - path 16, 22, 26 - path-abempty 22 - path-absolute 22 - path-empty 22 - path-noscheme 22 - path-rootless 22 - path-abempty 16, 22, 26 - path-absolute 16, 22, 26 - path-empty 16, 22, 26 - - - -Berners-Lee, et al. Standards Track [Page 57] - -RFC 3986 URI Generic Syntax January 2005 - - - path-rootless 16, 22 - pchar 23 - pct-encoded 12 - percent-encoding 12 - port 22 - - Q - query 16, 23 - - R - reg-name 21 - registered name 20 - relative 10, 28 - relative-path 26 - relative-ref 26 - remove_dot_segments 33 - representation 9 - reserved 12 - resolution 9, 28 - resource 5 - retrieval 9 - - S - same-document 27 - sameness 9 - scheme 16, 17 - segment 22, 23 - segment-nz 23 - segment-nz-nc 23 - sub-delims 13 - suffix 27 - - T - transcription 8 - - U - uniform 4 - unreserved 13 - URI grammar - absolute-URI 27 - ALPHA 11 - authority 18 - CR 11 - dec-octet 20 - DIGIT 11 - DQUOTE 11 - fragment 24 - gen-delims 13 - - - -Berners-Lee, et al. 
Standards Track [Page 58] - -RFC 3986 URI Generic Syntax January 2005 - - - h16 20 - HEXDIG 11 - hier-part 16 - host 19 - IP-literal 19 - IPv4address 20 - IPv6address 20 - IPvFuture 19 - LF 11 - ls32 20 - OCTET 11 - path 22 - path-abempty 22 - path-absolute 22 - path-empty 22 - path-noscheme 22 - path-rootless 22 - pchar 23 - pct-encoded 12 - port 22 - query 24 - reg-name 21 - relative-ref 26 - reserved 13 - scheme 17 - segment 23 - segment-nz 23 - segment-nz-nc 23 - SP 11 - sub-delims 13 - unreserved 13 - URI 16 - URI-reference 25 - userinfo 18 - URI 16 - URI-reference 25 - URL 7 - URN 7 - userinfo 18 - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 59] - -RFC 3986 URI Generic Syntax January 2005 - - -Authors' Addresses - - Tim Berners-Lee - World Wide Web Consortium - Massachusetts Institute of Technology - 77 Massachusetts Avenue - Cambridge, MA 02139 - USA - - Phone: +1-617-253-5702 - Fax: +1-617-258-5999 - EMail: timbl@w3.org - URI: http://www.w3.org/People/Berners-Lee/ - - - Roy T. Fielding - Day Software - 5251 California Ave., Suite 110 - Irvine, CA 92617 - USA - - Phone: +1-949-679-2960 - Fax: +1-949-679-2972 - EMail: fielding@gbiv.com - URI: http://roy.gbiv.com/ - - - Larry Masinter - Adobe Systems Incorporated - 345 Park Ave - San Jose, CA 95110 - USA - - Phone: +1-408-536-3024 - EMail: LMM@acm.org - URI: http://larry.masinter.net/ - - - - - - - - - - - - - - - -Berners-Lee, et al. Standards Track [Page 60] - -RFC 3986 URI Generic Syntax January 2005 - - -Full Copyright Statement - - Copyright (C) The Internet Society (2005). - - This document is subject to the rights, licenses and restrictions - contained in BCP 78, and except as set forth therein, the authors - retain all their rights. 
- - This document and the information contained herein are provided on an - "AS IS" basis and THE CONTRIBUTOR, THE ORGANIZATION HE/SHE REPRESENTS - OR IS SPONSORED BY (IF ANY), THE INTERNET SOCIETY AND THE INTERNET - ENGINEERING TASK FORCE DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, - INCLUDING BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE - INFORMATION HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED - WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - -Intellectual Property - - The IETF takes no position regarding the validity or scope of any - Intellectual Property Rights or other rights that might be claimed to - pertain to the implementation or use of the technology described in - this document or the extent to which any license under such rights - might or might not be available; nor does it represent that it has - made any independent effort to identify any such rights. Information - on the IETF's procedures with respect to rights in IETF Documents can - be found in BCP 78 and BCP 79. - - Copies of IPR disclosures made to the IETF Secretariat and any - assurances of licenses to be made available, or the result of an - attempt made to obtain a general license or permission for the use of - such proprietary rights by implementers or users of this - specification can be obtained from the IETF on-line IPR repository at - http://www.ietf.org/ipr. - - The IETF invites any interested party to bring to its attention any - copyrights, patents or patent applications, or other proprietary - rights that may cover technology that may be required to implement - this standard. Please address the information to the IETF at ietf- - ipr@ietf.org. - - -Acknowledgement - - Funding for the RFC Editor function is currently provided by the - Internet Society. - - - - - - -Berners-Lee, et al. 
Standards Track [Page 61] - diff --git a/gtk-doc.make b/gtk-doc.make new file mode 100644 index 0000000..1f75dfd --- /dev/null +++ b/gtk-doc.make @@ -0,0 +1,280 @@ +# -*- mode: makefile -*- + +#################################### +# Everything below here is generic # +#################################### + +if GTK_DOC_USE_LIBTOOL +GTKDOC_CC = $(LIBTOOL) --tag=CC --mode=compile $(CC) $(INCLUDES) $(GTKDOC_DEPS_CFLAGS) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +GTKDOC_LD = $(LIBTOOL) --tag=CC --mode=link $(CC) $(GTKDOC_DEPS_LIBS) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) +GTKDOC_RUN = $(LIBTOOL) --mode=execute +else +GTKDOC_CC = $(CC) $(INCLUDES) $(GTKDOC_DEPS_CFLAGS) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +GTKDOC_LD = $(CC) $(GTKDOC_DEPS_LIBS) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) +GTKDOC_RUN = +endif + +# We set GPATH here; this gives us semantics for GNU make +# which are more like other make's VPATH, when it comes to +# whether a source that is a target of one rule is then +# searched for in VPATH/GPATH. 
+# +GPATH = $(srcdir) + +TARGET_DIR=$(HTML_DIR)/$(DOC_MODULE) + +SETUP_FILES = \ + $(content_files) \ + $(DOC_MAIN_SGML_FILE) \ + $(DOC_MODULE)-sections.txt \ + $(DOC_MODULE)-overrides.txt + +EXTRA_DIST = \ + $(HTML_IMAGES) \ + $(SETUP_FILES) + +DOC_STAMPS=setup-build.stamp scan-build.stamp tmpl-build.stamp sgml-build.stamp \ + html-build.stamp pdf-build.stamp \ + tmpl.stamp sgml.stamp html.stamp pdf.stamp + +SCANOBJ_FILES = \ + $(DOC_MODULE).args \ + $(DOC_MODULE).hierarchy \ + $(DOC_MODULE).interfaces \ + $(DOC_MODULE).prerequisites \ + $(DOC_MODULE).signals + +REPORT_FILES = \ + $(DOC_MODULE)-undocumented.txt \ + $(DOC_MODULE)-undeclared.txt \ + $(DOC_MODULE)-unused.txt + +CLEANFILES = $(SCANOBJ_FILES) $(REPORT_FILES) $(DOC_STAMPS) + +if ENABLE_GTK_DOC +if GTK_DOC_BUILD_HTML +HTML_BUILD_STAMP=html-build.stamp +else +HTML_BUILD_STAMP= +endif +if GTK_DOC_BUILD_PDF +PDF_BUILD_STAMP=pdf-build.stamp +else +PDF_BUILD_STAMP= +endif + +all-local: $(HTML_BUILD_STAMP) $(PDF_BUILD_STAMP) +else +all-local: +endif + +docs: $(HTML_BUILD_STAMP) $(PDF_BUILD_STAMP) + +$(REPORT_FILES): sgml-build.stamp + +#### setup #### + +setup-build.stamp: + -@if test "$(abs_srcdir)" != "$(abs_builddir)" ; then \ + echo ' DOC Preparing build'; \ + files=`echo $(SETUP_FILES) $(expand_content_files) $(DOC_MODULE).types`; \ + if test "x$$files" != "x" ; then \ + for file in $$files ; do \ + test -f $(abs_srcdir)/$$file && \ + cp -pu $(abs_srcdir)/$$file $(abs_builddir)/ || true; \ + done; \ + fi; \ + test -d $(abs_srcdir)/tmpl && \ + { cp -rp $(abs_srcdir)/tmpl $(abs_builddir)/; \ + chmod -R u+w $(abs_builddir)/tmpl; } \ + fi + @touch setup-build.stamp + +#### scan #### + +scan-build.stamp: $(HFILE_GLOB) $(CFILE_GLOB) + @echo ' DOC Scanning header files' + @_source_dir='' ; \ + for i in $(DOC_SOURCE_DIR) ; do \ + _source_dir="$${_source_dir} --source-dir=$$i" ; \ + done ; \ + gtkdoc-scan --module=$(DOC_MODULE) --ignore-headers="$(IGNORE_HFILES)" $${_source_dir} $(SCAN_OPTIONS) $(EXTRA_HFILES) + 
@if grep -l '^..*$$' $(DOC_MODULE).types > /dev/null 2>&1 ; then \ + echo " DOC Introspecting gobjects"; \ + scanobj_options=""; \ + gtkdoc-scangobj 2>&1 --help | grep >/dev/null "\-\-verbose"; \ + if test "$(?)" = "0"; then \ + if test "x$(V)" = "x1"; then \ + scanobj_options="--verbose"; \ + fi; \ + fi; \ + CC="$(GTKDOC_CC)" LD="$(GTKDOC_LD)" RUN="$(GTKDOC_RUN)" CFLAGS="$(GTKDOC_CFLAGS) $(CFLAGS)" LDFLAGS="$(GTKDOC_LIBS) $(LDFLAGS)" \ + gtkdoc-scangobj $(SCANGOBJ_OPTIONS) $$scanobj_options --module=$(DOC_MODULE); \ + else \ + for i in $(SCANOBJ_FILES) ; do \ + test -f $$i || touch $$i ; \ + done \ + fi + @touch scan-build.stamp + +$(DOC_MODULE)-decl.txt $(SCANOBJ_FILES) $(DOC_MODULE)-sections.txt $(DOC_MODULE)-overrides.txt: scan-build.stamp + @true + +#### templates #### + +tmpl-build.stamp: setup-build.stamp $(DOC_MODULE)-decl.txt $(SCANOBJ_FILES) $(DOC_MODULE)-sections.txt $(DOC_MODULE)-overrides.txt + @echo ' DOC Rebuilding template files' + @gtkdoc-mktmpl --module=$(DOC_MODULE) $(MKTMPL_OPTIONS) + @if test "$(abs_srcdir)" != "$(abs_builddir)" ; then \ + if test -w $(abs_srcdir) ; then \ + cp -rp $(abs_builddir)/tmpl $(abs_srcdir)/; \ + fi \ + fi + @touch tmpl-build.stamp + +tmpl.stamp: tmpl-build.stamp + @true + +$(srcdir)/tmpl/*.sgml: + @true + +#### xml #### + +sgml-build.stamp: tmpl.stamp $(DOC_MODULE)-sections.txt $(srcdir)/tmpl/*.sgml $(expand_content_files) + @echo ' DOC Building XML' + @-chmod -R u+w $(srcdir) + @_source_dir='' ; \ + for i in $(DOC_SOURCE_DIR) ; do \ + _source_dir="$${_source_dir} --source-dir=$$i" ; \ + done ; \ + gtkdoc-mkdb --module=$(DOC_MODULE) --output-format=xml --expand-content-files="$(expand_content_files)" --main-sgml-file=$(DOC_MAIN_SGML_FILE) $${_source_dir} $(MKDB_OPTIONS) + @touch sgml-build.stamp + +sgml.stamp: sgml-build.stamp + @true + +#### html #### + +html-build.stamp: sgml.stamp $(DOC_MAIN_SGML_FILE) $(content_files) + @echo ' DOC Building HTML' + @rm -rf html + @mkdir html + @mkhtml_options=""; \ + gtkdoc-mkhtml 
2>&1 --help | grep >/dev/null "\-\-verbose"; \ + if test "$(?)" = "0"; then \ + if test "x$(V)" = "x1"; then \ + mkhtml_options="$$mkhtml_options --verbose"; \ + fi; \ + fi; \ + gtkdoc-mkhtml 2>&1 --help | grep >/dev/null "\-\-path"; \ + if test "$(?)" = "0"; then \ + mkhtml_options="$$mkhtml_options --path=\"$(abs_srcdir)\""; \ + fi; \ + cd html && gtkdoc-mkhtml $$mkhtml_options $(MKHTML_OPTIONS) $(DOC_MODULE) ../$(DOC_MAIN_SGML_FILE) + -@test "x$(HTML_IMAGES)" = "x" || \ + for file in $(HTML_IMAGES) ; do \ + if test -f $(abs_srcdir)/$$file ; then \ + cp $(abs_srcdir)/$$file $(abs_builddir)/html; \ + fi; \ + if test -f $(abs_builddir)/$$file ; then \ + cp $(abs_builddir)/$$file $(abs_builddir)/html; \ + fi; \ + done; + @echo ' DOC Fixing cross-references' + @gtkdoc-fixxref --module=$(DOC_MODULE) --module-dir=html --html-dir=$(HTML_DIR) $(FIXXREF_OPTIONS) + @touch html-build.stamp + +#### pdf #### + +pdf-build.stamp: sgml.stamp $(DOC_MAIN_SGML_FILE) $(content_files) + @echo ' DOC Building PDF' + @rm -f $(DOC_MODULE).pdf + @mkpdf_options=""; \ + gtkdoc-mkpdf 2>&1 --help | grep >/dev/null "\-\-verbose"; \ + if test "$(?)" = "0"; then \ + if test "x$(V)" = "x1"; then \ + mkpdf_options="$$mkpdf_options --verbose"; \ + fi; \ + fi; \ + if test "x$(HTML_IMAGES)" != "x"; then \ + for img in $(HTML_IMAGES); do \ + part=`dirname $$img`; \ + echo $$mkpdf_options | grep >/dev/null "\-\-imgdir=$$part "; \ + if test $$? 
!= 0; then \ + mkpdf_options="$$mkpdf_options --imgdir=$$part"; \ + fi; \ + done; \ + fi; \ + gtkdoc-mkpdf --path="$(abs_srcdir)" $$mkpdf_options $(DOC_MODULE) $(DOC_MAIN_SGML_FILE) $(MKPDF_OPTIONS) + @touch pdf-build.stamp + +############## + +clean-local: + @rm -f *~ *.bak + @rm -rf .libs + +distclean-local: + @rm -rf xml html $(REPORT_FILES) $(DOC_MODULE).pdf \ + $(DOC_MODULE)-decl-list.txt $(DOC_MODULE)-decl.txt + @if test "$(abs_srcdir)" != "$(abs_builddir)" ; then \ + rm -f $(SETUP_FILES) $(expand_content_files) $(DOC_MODULE).types; \ + rm -rf tmpl; \ + fi + +maintainer-clean-local: clean + @rm -rf xml html + +install-data-local: + @installfiles=`echo $(builddir)/html/*`; \ + if test "$$installfiles" = '$(builddir)/html/*'; \ + then echo 1>&2 'Nothing to install' ; \ + else \ + if test -n "$(DOC_MODULE_VERSION)"; then \ + installdir="$(DESTDIR)$(TARGET_DIR)-$(DOC_MODULE_VERSION)"; \ + else \ + installdir="$(DESTDIR)$(TARGET_DIR)"; \ + fi; \ + $(mkinstalldirs) $${installdir} ; \ + for i in $$installfiles; do \ + echo ' $(INSTALL_DATA) '$$i ; \ + $(INSTALL_DATA) $$i $${installdir}; \ + done; \ + if test -n "$(DOC_MODULE_VERSION)"; then \ + mv -f $${installdir}/$(DOC_MODULE).devhelp2 \ + $${installdir}/$(DOC_MODULE)-$(DOC_MODULE_VERSION).devhelp2; \ + fi; \ + $(GTKDOC_REBASE) --relative --dest-dir=$(DESTDIR) --html-dir=$${installdir}; \ + fi + +uninstall-local: + @if test -n "$(DOC_MODULE_VERSION)"; then \ + installdir="$(DESTDIR)$(TARGET_DIR)-$(DOC_MODULE_VERSION)"; \ + else \ + installdir="$(DESTDIR)$(TARGET_DIR)"; \ + fi; \ + rm -rf $${installdir} + +# +# Require gtk-doc when making dist +# +if ENABLE_GTK_DOC +dist-check-gtkdoc: +else +dist-check-gtkdoc: + @echo "*** gtk-doc must be installed and enabled in order to make dist" + @false +endif + +dist-hook: dist-check-gtkdoc dist-hook-local + @mkdir $(distdir)/tmpl + @mkdir $(distdir)/html + @-cp ./tmpl/*.sgml $(distdir)/tmpl + @cp ./html/* $(distdir)/html + @-cp ./$(DOC_MODULE).pdf $(distdir)/ + @-cp 
./$(DOC_MODULE).types $(distdir)/ + @-cp ./$(DOC_MODULE)-sections.txt $(distdir)/ + @cd $(distdir) && rm -f $(DISTCLEANFILES) + @$(GTKDOC_REBASE) --online --relative --html-dir=$(distdir)/html + +.PHONY : dist-hook-local docs diff --git a/install-sh b/install-sh new file mode 100755 index 0000000..a9244eb --- /dev/null +++ b/install-sh @@ -0,0 +1,527 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2011-01-19.21; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. 
+# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +nl=' +' +IFS=" "" $nl" + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit=${DOITPROG-} +if test -z "$doit"; then + doit_exec=exec +else + doit_exec=$doit +fi + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. + +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +no_target_directory= + +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. 
+ -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *' '* | *' +'* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) dst_arg=$2 + # Protect names problematic for `test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) no_target_directory=true;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for `test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call `install-sh -d' without argument. + # This can happen when creating conditional directories. 
+ exit 0 +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for `test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dst=$dstdir/`basename "$src"` + dstdir_status=0 + else + # Prefer dirname, but fall back on a substitute if dirname fails. + dstdir=` + (dirname "$dst") 2>/dev/null || + expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$dst" : 'X\(//\)[^/]' \| \ + X"$dst" : 'X\(//\)$' \| \ + X"$dst" : 'X\(/\)' \| . 
2>/dev/null || + echo X"$dst" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q' + ` + + test -d "$dstdir" + dstdir_status=$? + fi + fi + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 + + if (umask $mkdir_umask && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writeable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. 
+ ls_ld_tmpdir=`ls -ld "$tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/d" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. + + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + eval "$initialize_posix_glob" + + oIFS=$IFS + IFS=/ + $posix_glob set -f + set fnord $dstdir + shift + $posix_glob set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. 
+ (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. + (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + $posix_glob set +f && + + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. 
+ { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/libsoup.doap b/libsoup.doap deleted file mode 100644 index 930f579..0000000 --- a/libsoup.doap +++ /dev/null @@ -1,23 +0,0 @@ - - - libsoup - HTTP client/server library for GNOME - libsoup is an HTTP client/server library for GNOME. It uses GObjects and the glib main loop, to integrate well with GNOME applications. 
- - - - - - - - - Dan Winship - - danw - - - diff --git a/libsoup/Makefile.am b/libsoup/Makefile.am index 59d06ba..aa13eec 100644 --- a/libsoup/Makefile.am +++ b/libsoup/Makefile.am @@ -1,5 +1,7 @@ ## Process this file with automake to produce Makefile.in +include $(GLIB_MAKEFILE) + if OS_WIN32 LIBWS2_32 = -lws2_32 endif @@ -14,35 +16,6 @@ INCLUDES = \ $(SQLITE_CFLAGS) \ $(GNOME_KEYRING_CFLAGS) -MARSHAL_GENERATED = soup-marshal.c soup-marshal.h -MKENUMS_GENERATED = soup-enum-types.c soup-enum-types.h - -soup-marshal.h: soup-marshal.list - $(AM_V_GEN) ( $(GLIB_GENMARSHAL) --prefix=soup_marshal $(srcdir)/soup-marshal.list --header > soup-marshal.tmp \ - && mv soup-marshal.tmp soup-marshal.h ) \ - || ( rm -f soup-marshal.tmp && exit 1 ) - -soup-marshal.c: soup-marshal.h - $(AM_V_GEN) ( (echo '#include "soup-marshal.h"'; $(GLIB_GENMARSHAL) --prefix=soup_marshal $(srcdir)/soup-marshal.list --body) > soup-marshal.tmp \ - && mv soup-marshal.tmp soup-marshal.c ) \ - || ( rm -f soup-marshal.tmp && exit 1 ) - -soup-enum-types.h: $(soup_headers) - $(AM_V_GEN) ( cd $(srcdir) && $(GLIB_MKENUMS) --template soup-enum-types.h.tmpl \ - $(soup_headers) ) > soup-enum-types.h.tmp \ - && mv soup-enum-types.h.tmp soup-enum-types.h \ - || rm -f soup-enum-type.h.tmp - -soup-enum-types.c: $(libsoupinclude_HEADERS) - $(AM_V_GEN) ( cd $(srcdir) && $(GLIB_MKENUMS) --template soup-enum-types.c.tmpl \ - $(soup_headers) ) > soup-enum-types.c.tmp \ - && mv soup-enum-types.c.tmp soup-enum-types.c \ - || rm -f soup-enum-type.c.tmp - -BUILT_SOURCES = $(MARSHAL_GENERATED) $(MKENUMS_GENERATED) - -CLEANFILES = $(MARSHAL_GENERATED) $(MKENUMS_GENERATED) - libsoupincludedir = $(includedir)/libsoup-2.4/libsoup soup_headers = \ @@ -90,11 +63,6 @@ soup_headers = \ soup-value-utils.h \ soup-xmlrpc.h -if SQLLITE_SUPPORT -soup_headers += \ - soup-cookie-jar-sqlite.h -endif - libsoupinclude_HEADERS = \ $(soup_headers) \ soup-enum-types.h @@ -110,13 +78,7 @@ libsoup_2_4_la_LIBADD = \ -lz \ $(LIBWS2_32) -if 
SQLLITE_SUPPORT -libsoup_2_4_la_LIBADD += \ - $(SQLITE_LIBS) -endif - libsoup_2_4_la_SOURCES = \ - $(BUILT_SOURCES) \ soup-address.c \ soup-auth.c \ soup-auth-basic.h \ @@ -144,11 +106,15 @@ libsoup_2_4_la_SOURCES = \ soup-date.c \ soup-directory-input-stream.h \ soup-directory-input-stream.c \ + soup-enum-types.h \ + soup-enum-types.c \ soup-form.c \ soup-headers.c \ soup-http-input-stream.h \ soup-http-input-stream.c \ soup-logger.c \ + soup-marshal.h \ + soup-marshal.c \ soup-message.c \ soup-message-body.c \ soup-message-client-io.c \ @@ -182,18 +148,11 @@ libsoup_2_4_la_SOURCES = \ soup-session-private.h \ soup-session-sync.c \ soup-socket.c \ - soup-ssl.h \ - soup-ssl.c \ soup-status.c \ soup-uri.c \ soup-value-utils.c \ soup-xmlrpc.c -if SQLLITE_SUPPORT -libsoup_2_4_la_SOURCES += \ - soup-cookie-jar-sqlite.c -endif - if BUILD_LIBSOUP_GNOME if OS_WIN32 @@ -230,6 +189,14 @@ libsoup_gnome_2_4_la_SOURCES = \ endif +GLIB_GENERATED = soup-marshal.c soup-marshal.h +GLIB_GENERATED += soup-enum-types.c soup-enum-types.h +BUILT_SOURCES = $(GLIB_GENERATED) + +soup_marshal_sources = $(libsoup_2_4_la_SOURCES) $(libsoup_gnome_2_4_la_SOURCES) +soup_enum_types_sources = $(libsoupinclude_HEADERS) $(libsoupgnomeinclude_HEADERS) +soup_enum_types_MKENUMS_C_FLAGS = --fhead "\#define LIBSOUP_USE_UNSTABLE_REQUEST_API" + # # Introspection support # @@ -288,11 +255,6 @@ gir_DATA = $(INTROSPECTION_GIRS) typelibdir = $(libdir)/girepository-1.0 typelib_DATA = $(INTROSPECTION_GIRS:.gir=.typelib) -CLEANFILES += $(gir_DATA) $(typelib_DATA) +CLEANFILES = $(gir_DATA) $(typelib_DATA) endif - -EXTRA_DIST= \ - soup-marshal.list \ - soup-enum-types.h.tmpl \ - soup-enum-types.c.tmpl diff --git a/libsoup/Makefile.in b/libsoup/Makefile.in new file mode 100644 index 0000000..679d564 --- /dev/null +++ b/libsoup/Makefile.in @@ -0,0 +1,1028 @@ +# Makefile.in generated by automake 1.11.3 from Makefile.am. 
+# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + + + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +@BUILD_LIBSOUP_GNOME_TRUE@am__append_1 = libsoup-gnome-2.4.la +@HAVE_INTROSPECTION_TRUE@am__append_2 = Soup-2.4.gir +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@am__append_3 = SoupGNOME-2.4.gir +subdir = libsoup +DIST_COMMON = $(am__libsoupgnomeinclude_HEADERS_DIST) \ + $(libsoupinclude_HEADERS) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/gtk-doc.m4 \ + $(top_srcdir)/m4/introspection.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = 
$(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(girdir)" \ + "$(DESTDIR)$(typelibdir)" \ + "$(DESTDIR)$(libsoupgnomeincludedir)" \ + "$(DESTDIR)$(libsoupincludedir)" +LTLIBRARIES = $(lib_LTLIBRARIES) +am__DEPENDENCIES_1 = +libsoup_2_4_la_DEPENDENCIES = $(am__DEPENDENCIES_1) \ + $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) +am_libsoup_2_4_la_OBJECTS = soup-address.lo soup-auth.lo \ + soup-auth-basic.lo soup-auth-digest.lo soup-auth-ntlm.lo \ + soup-auth-domain.lo soup-auth-domain-basic.lo \ + soup-auth-domain-digest.lo soup-auth-manager.lo \ + soup-auth-manager-ntlm.lo soup-cache.lo soup-connection.lo \ + soup-content-decoder.lo soup-content-sniffer.lo soup-cookie.lo \ + soup-cookie-jar.lo soup-cookie-jar-text.lo soup-date.lo \ + soup-directory-input-stream.lo soup-enum-types.lo soup-form.lo \ + soup-headers.lo soup-http-input-stream.lo soup-logger.lo \ + soup-marshal.lo soup-message.lo soup-message-body.lo \ + soup-message-client-io.lo soup-message-headers.lo \ + soup-message-io.lo soup-message-queue.lo \ + soup-message-server-io.lo soup-method.lo soup-misc.lo \ + soup-multipart.lo soup-password-manager.lo soup-path-map.lo \ + soup-proxy-resolver.lo soup-proxy-resolver-default.lo \ + soup-proxy-resolver-static.lo soup-proxy-uri-resolver.lo \ + soup-request.lo soup-request-data.lo soup-request-file.lo \ + soup-request-http.lo soup-requester.lo soup-server.lo \ + soup-session.lo soup-session-async.lo soup-session-feature.lo \ + soup-session-sync.lo soup-socket.lo soup-status.lo soup-uri.lo \ + soup-value-utils.lo soup-xmlrpc.lo +libsoup_2_4_la_OBJECTS = $(am_libsoup_2_4_la_OBJECTS) +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +libsoup_2_4_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(AM_CFLAGS) $(CFLAGS) $(libsoup_2_4_la_LDFLAGS) $(LDFLAGS) -o 
\ + $@ +@BUILD_LIBSOUP_GNOME_TRUE@libsoup_gnome_2_4_la_DEPENDENCIES = \ +@BUILD_LIBSOUP_GNOME_TRUE@ libsoup-2.4.la $(am__DEPENDENCIES_1) \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(am__DEPENDENCIES_1) \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(am__DEPENDENCIES_1) +am__libsoup_gnome_2_4_la_SOURCES_DIST = soup-cookie-jar-sqlite.c \ + soup-gnome-features.c soup-proxy-resolver-gnome.h \ + soup-proxy-resolver-gnome.c soup-password-manager-gnome.h \ + soup-password-manager-gnome.c +@BUILD_LIBSOUP_GNOME_TRUE@@OS_WIN32_FALSE@am__objects_1 = soup-password-manager-gnome.lo +@BUILD_LIBSOUP_GNOME_TRUE@am_libsoup_gnome_2_4_la_OBJECTS = \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-cookie-jar-sqlite.lo \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-gnome-features.lo \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-proxy-resolver-gnome.lo \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(am__objects_1) +libsoup_gnome_2_4_la_OBJECTS = $(am_libsoup_gnome_2_4_la_OBJECTS) +libsoup_gnome_2_4_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(AM_CFLAGS) $(CFLAGS) $(libsoup_gnome_2_4_la_LDFLAGS) \ + $(LDFLAGS) -o $@ +@BUILD_LIBSOUP_GNOME_TRUE@am_libsoup_gnome_2_4_la_rpath = -rpath \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(libdir) +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) 
-o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +SOURCES = $(libsoup_2_4_la_SOURCES) $(libsoup_gnome_2_4_la_SOURCES) +DIST_SOURCES = $(libsoup_2_4_la_SOURCES) \ + $(am__libsoup_gnome_2_4_la_SOURCES_DIST) +DATA = $(gir_DATA) $(typelib_DATA) +am__libsoupgnomeinclude_HEADERS_DIST = soup-cookie-jar-sqlite.h \ + soup-gnome.h soup-gnome-features.h +HEADERS = $(libsoupgnomeinclude_HEADERS) $(libsoupinclude_HEADERS) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +APACHE_HTTPD = @APACHE_HTTPD@ +APACHE_MODULE_DIR = @APACHE_MODULE_DIR@ +APACHE_PHP_MODULE = @APACHE_PHP_MODULE@ +APACHE_PHP_MODULE_DIR = @APACHE_PHP_MODULE_DIR@ +APACHE_SSL_MODULE_DIR = @APACHE_SSL_MODULE_DIR@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL = @CURL@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GLIB_CFLAGS = @GLIB_CFLAGS@ +GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@ +GLIB_GENMARSHAL = @GLIB_GENMARSHAL@ +GLIB_LIBS = @GLIB_LIBS@ +GLIB_MAKEFILE = @GLIB_MAKEFILE@ +GLIB_MKENUMS = @GLIB_MKENUMS@ +GNOME_KEYRING_CFLAGS = @GNOME_KEYRING_CFLAGS@ +GNOME_KEYRING_LIBS = @GNOME_KEYRING_LIBS@ +GOBJECT_QUERY = @GOBJECT_QUERY@ +GREP = @GREP@ +GTKDOC_CHECK = @GTKDOC_CHECK@ +GTKDOC_DEPS_CFLAGS = @GTKDOC_DEPS_CFLAGS@ +GTKDOC_DEPS_LIBS = @GTKDOC_DEPS_LIBS@ +GTKDOC_MKPDF = @GTKDOC_MKPDF@ +GTKDOC_REBASE = @GTKDOC_REBASE@ +HAVE_GNOME = @HAVE_GNOME@ +HTML_DIR = 
@HTML_DIR@ +IF_HAVE_PHP = @IF_HAVE_PHP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +INTROSPECTION_CFLAGS = @INTROSPECTION_CFLAGS@ +INTROSPECTION_COMPILER = @INTROSPECTION_COMPILER@ +INTROSPECTION_GENERATE = @INTROSPECTION_GENERATE@ +INTROSPECTION_GIRDIR = @INTROSPECTION_GIRDIR@ +INTROSPECTION_LIBS = @INTROSPECTION_LIBS@ +INTROSPECTION_MAKEFILE = @INTROSPECTION_MAKEFILE@ +INTROSPECTION_SCANNER = @INTROSPECTION_SCANNER@ +INTROSPECTION_TYPELIBDIR = @INTROSPECTION_TYPELIBDIR@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MISSING_REGRESSION_TEST_PACKAGES = @MISSING_REGRESSION_TEST_PACKAGES@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PHP = @PHP@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SOUP_AGE = @SOUP_AGE@ +SOUP_API_VERSION = @SOUP_API_VERSION@ +SOUP_CURRENT = @SOUP_CURRENT@ +SOUP_DEBUG_FLAGS = @SOUP_DEBUG_FLAGS@ +SOUP_MAINTAINER_FLAGS = @SOUP_MAINTAINER_FLAGS@ +SOUP_REVISION = @SOUP_REVISION@ +SQLITE_CFLAGS = @SQLITE_CFLAGS@ +SQLITE_LIBS = @SQLITE_LIBS@ +STRIP = @STRIP@ +VERSION = @VERSION@ +XML_CFLAGS = @XML_CFLAGS@ +XML_LIBS = @XML_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = 
@ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +ntlm_auth = @ntlm_auth@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +@OS_WIN32_TRUE@LIBWS2_32 = -lws2_32 +INCLUDES = \ + -DG_LOG_DOMAIN=\"libsoup\" \ + -I$(top_srcdir) \ + $(SOUP_DEBUG_FLAGS) \ + $(SOUP_MAINTAINER_FLAGS) \ + $(GLIB_CFLAGS) \ + $(XML_CFLAGS) \ + $(SQLITE_CFLAGS) \ + $(GNOME_KEYRING_CFLAGS) + +libsoupincludedir = $(includedir)/libsoup-2.4/libsoup +soup_headers = \ + soup.h \ + soup-address.h \ + soup-auth.h \ + soup-auth-domain.h \ + soup-auth-domain-basic.h \ + soup-auth-domain-digest.h \ + soup-cache.h \ + soup-content-decoder.h \ + soup-content-sniffer.h \ + soup-cookie.h \ + soup-cookie-jar.h \ + soup-cookie-jar-text.h \ + soup-date.h \ + soup-form.h \ + soup-headers.h \ + soup-logger.h \ + soup-message.h \ + soup-message-body.h \ + soup-message-headers.h \ + soup-method.h \ + soup-misc.h \ + soup-multipart.h \ + 
soup-password-manager.h \ + soup-portability.h \ + soup-proxy-resolver.h \ + soup-proxy-resolver-default.h \ + soup-proxy-uri-resolver.h \ + soup-request.h \ + soup-request-data.h \ + soup-request-file.h \ + soup-request-http.h \ + soup-requester.h \ + soup-server.h \ + soup-session.h \ + soup-session-async.h \ + soup-session-feature.h \ + soup-session-sync.h \ + soup-socket.h \ + soup-status.h \ + soup-types.h \ + soup-uri.h \ + soup-value-utils.h \ + soup-xmlrpc.h + +libsoupinclude_HEADERS = \ + $(soup_headers) \ + soup-enum-types.h + +lib_LTLIBRARIES = libsoup-2.4.la $(am__append_1) +libsoup_2_4_la_LDFLAGS = \ + -version-info $(SOUP_CURRENT):$(SOUP_REVISION):$(SOUP_AGE) -no-undefined + +libsoup_2_4_la_LIBADD = \ + $(GLIB_LIBS) \ + $(XML_LIBS) \ + -lz \ + $(LIBWS2_32) + +libsoup_2_4_la_SOURCES = \ + soup-address.c \ + soup-auth.c \ + soup-auth-basic.h \ + soup-auth-basic.c \ + soup-auth-digest.h \ + soup-auth-digest.c \ + soup-auth-ntlm.h \ + soup-auth-ntlm.c \ + soup-auth-domain.c \ + soup-auth-domain-basic.c \ + soup-auth-domain-digest.c \ + soup-auth-manager.h \ + soup-auth-manager.c \ + soup-auth-manager-ntlm.h \ + soup-auth-manager-ntlm.c \ + soup-cache.c \ + soup-cache-private.h \ + soup-connection.h \ + soup-connection.c \ + soup-content-decoder.c \ + soup-content-sniffer.c \ + soup-cookie.c \ + soup-cookie-jar.c \ + soup-cookie-jar-text.c \ + soup-date.c \ + soup-directory-input-stream.h \ + soup-directory-input-stream.c \ + soup-enum-types.h \ + soup-enum-types.c \ + soup-form.c \ + soup-headers.c \ + soup-http-input-stream.h \ + soup-http-input-stream.c \ + soup-logger.c \ + soup-marshal.h \ + soup-marshal.c \ + soup-message.c \ + soup-message-body.c \ + soup-message-client-io.c \ + soup-message-headers.c \ + soup-message-io.c \ + soup-message-private.h \ + soup-message-queue.h \ + soup-message-queue.c \ + soup-message-server-io.c \ + soup-method.c \ + soup-misc.c \ + soup-misc-private.h \ + soup-multipart.c \ + soup-password-manager.c \ + 
soup-path-map.h \ + soup-path-map.c \ + soup-proxy-resolver.c \ + soup-proxy-resolver-default.c \ + soup-proxy-resolver-static.h \ + soup-proxy-resolver-static.c \ + soup-proxy-uri-resolver.c \ + soup-request.c \ + soup-request-data.c \ + soup-request-file.c \ + soup-request-http.c \ + soup-requester.c \ + soup-server.c \ + soup-session.c \ + soup-session-async.c \ + soup-session-feature.c \ + soup-session-private.h \ + soup-session-sync.c \ + soup-socket.c \ + soup-status.c \ + soup-uri.c \ + soup-value-utils.c \ + soup-xmlrpc.c + +@BUILD_LIBSOUP_GNOME_TRUE@@OS_WIN32_FALSE@soup_password_manager_gnome_files = \ +@BUILD_LIBSOUP_GNOME_TRUE@@OS_WIN32_FALSE@ soup-password-manager-gnome.h \ +@BUILD_LIBSOUP_GNOME_TRUE@@OS_WIN32_FALSE@ soup-password-manager-gnome.c + +@BUILD_LIBSOUP_GNOME_TRUE@@OS_WIN32_TRUE@soup_password_manager_gnome_files = +@BUILD_LIBSOUP_GNOME_TRUE@libsoupgnomeincludedir = $(includedir)/libsoup-gnome-2.4/libsoup +@BUILD_LIBSOUP_GNOME_TRUE@libsoupgnomeinclude_HEADERS = \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-cookie-jar-sqlite.h\ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-gnome.h \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-gnome-features.h + +@BUILD_LIBSOUP_GNOME_TRUE@libsoup_gnome_2_4_la_LDFLAGS = $(libsoup_2_4_la_LDFLAGS) +@BUILD_LIBSOUP_GNOME_TRUE@libsoup_gnome_2_4_la_LIBADD = \ +@BUILD_LIBSOUP_GNOME_TRUE@ libsoup-2.4.la \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(GLIB_LIBS) \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(SQLITE_LIBS) \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(GNOME_KEYRING_LIBS) + +@BUILD_LIBSOUP_GNOME_TRUE@libsoup_gnome_2_4_la_SOURCES = \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-cookie-jar-sqlite.c \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-gnome-features.c \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-proxy-resolver-gnome.h \ +@BUILD_LIBSOUP_GNOME_TRUE@ soup-proxy-resolver-gnome.c \ +@BUILD_LIBSOUP_GNOME_TRUE@ $(soup_password_manager_gnome_files) + +GLIB_GENERATED = soup-marshal.c soup-marshal.h soup-enum-types.c \ + soup-enum-types.h +BUILT_SOURCES = $(GLIB_GENERATED) +soup_marshal_sources = $(libsoup_2_4_la_SOURCES) 
$(libsoup_gnome_2_4_la_SOURCES) +soup_enum_types_sources = $(libsoupinclude_HEADERS) $(libsoupgnomeinclude_HEADERS) +soup_enum_types_MKENUMS_C_FLAGS = --fhead "\#define LIBSOUP_USE_UNSTABLE_REQUEST_API" +INTROSPECTION_GIRS = $(am__append_2) $(am__append_3) +INTROSPECTION_SCANNER_ARGS = --add-include-path=. +INTROSPECTION_COMPILER_ARGS = --includedir=. + +# Core library +@HAVE_INTROSPECTION_TRUE@gi_soup_files = \ +@HAVE_INTROSPECTION_TRUE@ $(filter-out soup.h soup-enum-types.% soup-marshal.% soup-proxy-resolver.h,\ +@HAVE_INTROSPECTION_TRUE@ $(soup_headers) $(filter-out %.h, $(libsoup_2_4_la_SOURCES))) + +@HAVE_INTROSPECTION_TRUE@gi_built_soup_files = soup-enum-types.h +@HAVE_INTROSPECTION_TRUE@Soup_2_4_gir_INCLUDES = Gio-2.0 +@HAVE_INTROSPECTION_TRUE@Soup_2_4_gir_CFLAGS = $(INCLUDES) +@HAVE_INTROSPECTION_TRUE@Soup_2_4_gir_LIBS = libsoup-2.4.la +@HAVE_INTROSPECTION_TRUE@Soup_2_4_gir_EXPORT_PACKAGES = libsoup-2.4 +@HAVE_INTROSPECTION_TRUE@Soup_2_4_gir_SCANNERFLAGS = --c-include "libsoup/soup.h" +@HAVE_INTROSPECTION_TRUE@Soup_2_4_gir_FILES = \ +@HAVE_INTROSPECTION_TRUE@ $(addprefix $(srcdir)/, $(gi_soup_files)) \ +@HAVE_INTROSPECTION_TRUE@ $(foreach f,$(gi_built_soup_files), \ +@HAVE_INTROSPECTION_TRUE@ $(if $(shell test -f $(addprefix $(srcdir)/,$(f)) && echo yes), \ +@HAVE_INTROSPECTION_TRUE@ $(addprefix $(srcdir)/,$(f)), \ +@HAVE_INTROSPECTION_TRUE@ $(f))) + + +# GNOME extensions +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@gi_soup_gnome_files = $(filter-out soup-gnome.h,\ +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@ $(libsoupgnomeinclude_HEADERS) \ +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@ $(filter-out %.h, $(libsoup_gnome_2_4_la_SOURCES))) + +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@SoupGNOME_2_4_gir_SCANNERFLAGS = \ +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@ --identifier-prefix=Soup \ +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@ --symbol-prefix=soup \ +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@ 
--c-include "libsoup/soup-gnome.h" \ +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@ --include-uninstalled=$(builddir)/Soup-2.4.gir + +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@SoupGNOME_2_4_gir_CFLAGS = $(INCLUDES) +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@SoupGNOME_2_4_gir_LIBS = libsoup-gnome-2.4.la libsoup-2.4.la +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@SoupGNOME_2_4_gir_FILES = $(addprefix $(srcdir)/,$(gi_soup_gnome_files)) +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@SoupGNOME_2_4_gir_EXPORT_PACKAGES = libsoup-gnome-2.4 +@HAVE_INTROSPECTION_TRUE@girdir = $(datadir)/gir-1.0 +@HAVE_INTROSPECTION_TRUE@gir_DATA = $(INTROSPECTION_GIRS) +@HAVE_INTROSPECTION_TRUE@typelibdir = $(libdir)/girepository-1.0 +@HAVE_INTROSPECTION_TRUE@typelib_DATA = $(INTROSPECTION_GIRS:.gir=.typelib) +@HAVE_INTROSPECTION_TRUE@CLEANFILES = $(gir_DATA) $(typelib_DATA) +all: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign libsoup/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign libsoup/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-libLTLIBRARIES: $(lib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ + } + +uninstall-libLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ + done + +clean-libLTLIBRARIES: + -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) + @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libsoup-2.4.la: $(libsoup_2_4_la_OBJECTS) 
$(libsoup_2_4_la_DEPENDENCIES) $(EXTRA_libsoup_2_4_la_DEPENDENCIES) + $(AM_V_CCLD)$(libsoup_2_4_la_LINK) -rpath $(libdir) $(libsoup_2_4_la_OBJECTS) $(libsoup_2_4_la_LIBADD) $(LIBS) +libsoup-gnome-2.4.la: $(libsoup_gnome_2_4_la_OBJECTS) $(libsoup_gnome_2_4_la_DEPENDENCIES) $(EXTRA_libsoup_gnome_2_4_la_DEPENDENCIES) + $(AM_V_CCLD)$(libsoup_gnome_2_4_la_LINK) $(am_libsoup_gnome_2_4_la_rpath) $(libsoup_gnome_2_4_la_OBJECTS) $(libsoup_gnome_2_4_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-address.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-basic.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-digest.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-domain-basic.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-domain-digest.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-domain.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-manager-ntlm.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-manager.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth-ntlm.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-auth.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-cache.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-connection.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-content-decoder.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-content-sniffer.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-cookie-jar-sqlite.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-cookie-jar-text.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-cookie-jar.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/soup-cookie.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-date.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-directory-input-stream.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-enum-types.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-form.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-gnome-features.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-headers.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-http-input-stream.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-logger.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-marshal.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-message-body.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-message-client-io.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-message-headers.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-message-io.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-message-queue.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-message-server-io.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-message.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-method.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-misc.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-multipart.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-password-manager-gnome.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-password-manager.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-path-map.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-proxy-resolver-default.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/soup-proxy-resolver-gnome.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-proxy-resolver-static.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-proxy-resolver.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-proxy-uri-resolver.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-request-data.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-request-file.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-request-http.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-request.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-requester.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-server.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-session-async.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-session-feature.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-session-sync.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-session.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-socket.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-status.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-uri.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-value-utils.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/soup-xmlrpc.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ 
$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-girDATA: $(gir_DATA) + @$(NORMAL_INSTALL) + test -z "$(girdir)" || $(MKDIR_P) "$(DESTDIR)$(girdir)" + @list='$(gir_DATA)'; test -n "$(girdir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(girdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(girdir)" || exit $$?; \ + done + +uninstall-girDATA: + @$(NORMAL_UNINSTALL) + @list='$(gir_DATA)'; test -n "$(girdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(girdir)'; $(am__uninstall_files_from_dir) +install-typelibDATA: $(typelib_DATA) + @$(NORMAL_INSTALL) + test -z "$(typelibdir)" || $(MKDIR_P) "$(DESTDIR)$(typelibdir)" + @list='$(typelib_DATA)'; test -n "$(typelibdir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files 
'$(DESTDIR)$(typelibdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(typelibdir)" || exit $$?; \ + done + +uninstall-typelibDATA: + @$(NORMAL_UNINSTALL) + @list='$(typelib_DATA)'; test -n "$(typelibdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(typelibdir)'; $(am__uninstall_files_from_dir) +install-libsoupgnomeincludeHEADERS: $(libsoupgnomeinclude_HEADERS) + @$(NORMAL_INSTALL) + test -z "$(libsoupgnomeincludedir)" || $(MKDIR_P) "$(DESTDIR)$(libsoupgnomeincludedir)" + @list='$(libsoupgnomeinclude_HEADERS)'; test -n "$(libsoupgnomeincludedir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libsoupgnomeincludedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(libsoupgnomeincludedir)" || exit $$?; \ + done + +uninstall-libsoupgnomeincludeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(libsoupgnomeinclude_HEADERS)'; test -n "$(libsoupgnomeincludedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(libsoupgnomeincludedir)'; $(am__uninstall_files_from_dir) +install-libsoupincludeHEADERS: $(libsoupinclude_HEADERS) + @$(NORMAL_INSTALL) + test -z "$(libsoupincludedir)" || $(MKDIR_P) "$(DESTDIR)$(libsoupincludedir)" + @list='$(libsoupinclude_HEADERS)'; test -n "$(libsoupincludedir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(libsoupincludedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(libsoupincludedir)" || exit $$?; \ + done + +uninstall-libsoupincludeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(libsoupinclude_HEADERS)'; test -n "$(libsoupincludedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + 
dir='$(DESTDIR)$(libsoupincludedir)'; $(am__uninstall_files_from_dir) + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do 
echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) check-am +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(girdir)" "$(DESTDIR)$(typelibdir)" "$(DESTDIR)$(libsoupgnomeincludedir)" "$(DESTDIR)$(libsoupincludedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + 
+clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES) +clean: clean-am + +clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-girDATA install-libsoupgnomeincludeHEADERS \ + install-libsoupincludeHEADERS install-typelibDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-libLTLIBRARIES + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-girDATA uninstall-libLTLIBRARIES \ + uninstall-libsoupgnomeincludeHEADERS \ + uninstall-libsoupincludeHEADERS uninstall-typelibDATA + +.MAKE: all check install install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libLTLIBRARIES clean-libtool ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html 
html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-girDATA \ + install-html install-html-am install-info install-info-am \ + install-libLTLIBRARIES install-libsoupgnomeincludeHEADERS \ + install-libsoupincludeHEADERS install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + install-typelibDATA installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am \ + uninstall-girDATA uninstall-libLTLIBRARIES \ + uninstall-libsoupgnomeincludeHEADERS \ + uninstall-libsoupincludeHEADERS uninstall-typelibDATA + + +include $(GLIB_MAKEFILE) + +# +# Introspection support +# +include $(INTROSPECTION_MAKEFILE) + +@HAVE_INTROSPECTION_TRUE@Soup-2.4.gir: libsoup-2.4.la +@BUILD_LIBSOUP_GNOME_TRUE@@HAVE_INTROSPECTION_TRUE@SoupGNOME-2.4.gir: libsoup-gnome-2.4.la Soup-2.4.gir + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/libsoup/TIZEN.h b/libsoup/TIZEN.h deleted file mode 100644 index eb85186..0000000 --- a/libsoup/TIZEN.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * TIZEN.h - * - * Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. 
- */ - -#ifndef TIZEN_H -#define TIZEN_H - -#define ENABLE(TIZEN_FEATURE) (defined ENABLE_##TIZEN_FEATURE && ENABLE_##TIZEN_FEATURE) - -#define ENABLE_TIZEN_FIX_PACK_ENTRY 1 -#define ENABLE_TIZEN_FIX_CONTENT_SNIFFER_PATTERN 1 -#define ENABLE_TIZEN_FIX_PAUSE_MESSAGE 1 - -#endif //#ifndef TIZEN_H diff --git a/libsoup/soup-address.c b/libsoup/soup-address.c index 0e5e8cd..1ac2d32 100644 --- a/libsoup/soup-address.c +++ b/libsoup/soup-address.c @@ -51,7 +51,7 @@ typedef struct { char *name, *physical; guint port; - GMutex *lock; + GMutex lock; GSList *async_lookups; } SoupAddressPrivate; #define SOUP_ADDRESS_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), SOUP_TYPE_ADDRESS, SoupAddressPrivate)) @@ -116,7 +116,7 @@ soup_address_init (SoupAddress *addr) { SoupAddressPrivate *priv = SOUP_ADDRESS_GET_PRIVATE (addr); - priv->lock = g_mutex_new (); + g_mutex_init (&priv->lock); } static void @@ -132,7 +132,7 @@ finalize (GObject *object) if (priv->physical) g_free (priv->physical); - g_mutex_free (priv->lock); + g_mutex_clear (&priv->lock); G_OBJECT_CLASS (soup_address_parent_class)->finalize (object); } @@ -406,7 +406,7 @@ soup_address_new_from_sockaddr (struct sockaddr *sa, int len) * * Returns a #SoupAddress corresponding to the "any" address * for @family (or %NULL if @family isn't supported), suitable for - * passing to soup_socket_server_new(). + * using as a listening #SoupSocket. * * Return value: (allow-none): the new #SoupAddress **/ @@ -819,14 +819,14 @@ resolve_sync_internal (SoupAddress *addr, GCancellable *cancellable, GError **er * priv->sockaddr and priv->name, unlock it around the * blocking op, and then re-lock it to modify @addr. 
*/ - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); if (!priv->sockaddr) { GList *addrs; - g_mutex_unlock (priv->lock); + g_mutex_unlock (&priv->lock); addrs = g_resolver_lookup_by_name (resolver, priv->name, cancellable, &my_err); - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); status = update_addrs (addr, addrs, my_err); g_resolver_free_addresses (addrs); @@ -834,18 +834,18 @@ resolve_sync_internal (SoupAddress *addr, GCancellable *cancellable, GError **er GInetAddress *gia; char *name; - g_mutex_unlock (priv->lock); + g_mutex_unlock (&priv->lock); gia = soup_address_make_inet_address (addr); name = g_resolver_lookup_by_address (resolver, gia, cancellable, &my_err); g_object_unref (gia); - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); status = update_name (addr, name, my_err); g_free (name); } else status = SOUP_STATUS_OK; - g_mutex_unlock (priv->lock); + g_mutex_unlock (&priv->lock); if (my_err) g_propagate_error (error, my_err); @@ -905,9 +905,9 @@ soup_address_is_resolved (SoupAddress *addr) g_return_val_if_fail (SOUP_IS_ADDRESS (addr), FALSE); priv = SOUP_ADDRESS_GET_PRIVATE (addr); - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); resolved = priv->sockaddr && priv->name; - g_mutex_unlock (priv->lock); + g_mutex_unlock (&priv->lock); return resolved; } @@ -1013,7 +1013,7 @@ soup_address_hash_by_ip (gconstpointer addr) * This would be used to distinguish hosts in situations where * different virtual hosts on the same IP address should be considered * the same. Eg, if "www.example.com" and "www.example.net" have the - * same IP address, then a single #SoupConnection can be used to talk + * same IP address, then a single connection can be used to talk * to either of them. 
* * See also soup_address_equal_by_name(), which compares by name diff --git a/libsoup/soup-auth-basic.c b/libsoup/soup-auth-basic.c index 4218f7e..3705e6e 100644 --- a/libsoup/soup-auth-basic.c +++ b/libsoup/soup-auth-basic.c @@ -103,10 +103,17 @@ static void authenticate (SoupAuth *auth, const char *username, const char *password) { SoupAuthBasicPrivate *priv = SOUP_AUTH_BASIC_GET_PRIVATE (auth); - char *user_pass; + char *user_pass, *user_pass_latin1; int len; user_pass = g_strdup_printf ("%s:%s", username, password); + user_pass_latin1 = g_convert (user_pass, -1, "ISO-8859-1", "UTF-8", + NULL, NULL, NULL); + if (user_pass_latin1) { + memset (user_pass, 0, strlen (user_pass)); + g_free (user_pass); + user_pass = user_pass_latin1; + } len = strlen (user_pass); if (priv->token) { diff --git a/libsoup/soup-auth-domain-basic.c b/libsoup/soup-auth-domain-basic.c index 49f8244..db3d6d5 100644 --- a/libsoup/soup-auth-domain-basic.c +++ b/libsoup/soup-auth-domain-basic.c @@ -268,7 +268,7 @@ parse_basic (SoupMessage *msg, const char *header, char *decoded, *colon; gsize len, plen; - if (strncmp (header, "Basic ", 6) != 0) + if (!header || (strncmp (header, "Basic ", 6) != 0)) return FALSE; decoded = (char *)g_base64_decode (header + 6, &len); diff --git a/libsoup/soup-auth-domain-digest.c b/libsoup/soup-auth-domain-digest.c index cee7745..203b9f2 100644 --- a/libsoup/soup-auth-domain-digest.c +++ b/libsoup/soup-auth-domain-digest.c @@ -431,7 +431,7 @@ check_password (SoupAuthDomain *domain, header = soup_message_headers_get_one (msg->request_headers, "Authorization"); - if (strncmp (header, "Digest ", 7) != 0) + if (!header || (strncmp (header, "Digest ", 7) != 0)) return FALSE; params = soup_header_parse_param_list (header + 7); diff --git a/libsoup/soup-auth-domain.c b/libsoup/soup-auth-domain.c index c8ce96b..dd47dff 100644 --- a/libsoup/soup-auth-domain.c +++ b/libsoup/soup-auth-domain.c @@ -193,7 +193,7 @@ soup_auth_domain_class_init (SoupAuthDomainClass 
*auth_domain_class) /** * SOUP_AUTH_DOMAIN_GENERIC_AUTH_CALLBACK: * - * Alias for the #SoupAuthDomain:auth-callback property. + * Alias for the #SoupAuthDomain:generic-auth-callback property. * (The #SoupAuthDomainGenericAuthCallback.) **/ g_object_class_install_property ( @@ -205,7 +205,7 @@ soup_auth_domain_class_init (SoupAuthDomainClass *auth_domain_class) /** * SOUP_AUTH_DOMAIN_GENERIC_AUTH_DATA: * - * Alias for the #SoupAuthDomain:auth-data property. + * Alias for the #SoupAuthDomain:generic-auth-data property. * (The data to pass to the #SoupAuthDomainGenericAuthCallback.) **/ g_object_class_install_property ( diff --git a/libsoup/soup-auth-manager-ntlm.c b/libsoup/soup-auth-manager-ntlm.c index 7c3f239..cf5218b 100644 --- a/libsoup/soup-auth-manager-ntlm.c +++ b/libsoup/soup-auth-manager-ntlm.c @@ -12,6 +12,7 @@ #include #include +#include #ifdef USE_NTLM_AUTH #include @@ -387,6 +388,8 @@ ntlm_authorize_pre (SoupMessage *msg, gpointer ntlm) SOUP_AUTH_MANAGER_NTLM_GET_PRIVATE (ntlm); SoupNTLMConnection *conn; const char *val; + char *challenge = NULL; + SoupURI *uri; conn = get_connection_for_msg (priv, msg); if (!conn) @@ -394,10 +397,11 @@ ntlm_authorize_pre (SoupMessage *msg, gpointer ntlm) val = soup_message_headers_get_list (msg->response_headers, "WWW-Authenticate"); - if (val) - val = strstr (val, "NTLM "); if (!val) return; + challenge = soup_auth_manager_extract_challenge (val, "NTLM"); + if (!challenge) + return; if (conn->state > SOUP_NTLM_SENT_REQUEST) { /* We already authenticated, but then got another 401. 
@@ -408,7 +412,7 @@ ntlm_authorize_pre (SoupMessage *msg, gpointer ntlm) goto done; } - if (!soup_ntlm_parse_challenge (val, &conn->nonce, &conn->domain)) { + if (!soup_ntlm_parse_challenge (challenge, &conn->nonce, &conn->domain)) { conn->state = SOUP_NTLM_FAILED; goto done; } @@ -416,17 +420,25 @@ ntlm_authorize_pre (SoupMessage *msg, gpointer ntlm) conn->auth = soup_auth_ntlm_new (conn->domain, soup_message_get_uri (msg)->host); #ifdef USE_NTLM_AUTH - conn->challenge_header = g_strdup (val + 5); + conn->challenge_header = g_strdup (challenge + 5); if (conn->state == SOUP_NTLM_SENT_SSO_REQUEST) { conn->state = SOUP_NTLM_RECEIVED_SSO_CHALLENGE; goto done; } #endif conn->state = SOUP_NTLM_RECEIVED_CHALLENGE; - soup_auth_manager_emit_authenticate (SOUP_AUTH_MANAGER (ntlm), msg, - conn->auth, FALSE); + + uri = soup_message_get_uri (msg); + if (uri->password) + soup_auth_authenticate (conn->auth, uri->user, uri->password); + else { + soup_auth_manager_emit_authenticate (SOUP_AUTH_MANAGER (ntlm), + msg, conn->auth, FALSE); + } done: + g_free (challenge); + /* Remove the WWW-Authenticate headers so the session won't try * to do Basic auth too. 
*/ @@ -441,6 +453,7 @@ ntlm_authorize_post (SoupMessage *msg, gpointer ntlm) SoupNTLMConnection *conn; const char *username = NULL, *password = NULL; char *slash, *domain = NULL; + SoupMessageFlags flags; conn = get_connection_for_msg (priv, msg); if (!conn || !conn->auth) @@ -488,6 +501,9 @@ ssofailure: conn->response_header = soup_ntlm_response (conn->nonce, username, password, NULL, domain); + + flags = soup_message_get_flags (msg); + soup_message_set_flags (msg, flags & ~SOUP_MESSAGE_NEW_CONNECTION); soup_session_requeue_message (priv->session, msg); done: @@ -725,7 +741,7 @@ typedef struct { #define NTLM_CHALLENGE_DOMAIN_STRING_OFFSET 12 #define NTLM_RESPONSE_HEADER "NTLMSSP\x00\x03\x00\x00\x00" -#define NTLM_RESPONSE_FLAGS 0x8202 +#define NTLM_RESPONSE_FLAGS 0x8201 typedef struct { guchar header[12]; @@ -751,7 +767,7 @@ ntlm_set_string (NTLMString *string, int *offset, int len) static char * soup_ntlm_request (void) { - return g_strdup ("NTLM TlRMTVNTUAABAAAABoIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAAwAAAA"); + return g_strdup ("NTLM TlRMTVNTUAABAAAABYIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAAwAAAA"); } static gboolean @@ -783,7 +799,9 @@ soup_ntlm_parse_challenge (const char *challenge, return FALSE; } - *default_domain = g_strndup ((char *)chall + domain.offset, domain.length); + *default_domain = g_convert ((char *)chall + domain.offset, + domain.length, "UTF-8", "UCS-2LE", + NULL, NULL, NULL); } if (nonce) { @@ -802,8 +820,10 @@ soup_ntlm_response (const char *nonce, const char *host, const char *domain) { - int hlen, dlen, ulen, offset; + int offset; + gsize hlen, dlen, ulen; guchar hash[21], lm_resp[24], nt_resp[24]; + char *user_conv, *host_conv, *domain_conv; NTLMResponse resp; char *out, *p; int state, save; @@ -819,13 +839,15 @@ soup_ntlm_response (const char *nonce, offset = sizeof (resp); - dlen = strlen (domain); - ntlm_set_string (&resp.domain, &offset, dlen); - ulen = strlen (user); - ntlm_set_string (&resp.user, &offset, ulen); if (!host) host 
= "UNKNOWN"; - hlen = strlen (host); + + domain_conv = g_convert (domain, -1, "UCS-2LE", "UTF-8", NULL, &dlen, NULL); + user_conv = g_convert (user, -1, "UCS-2LE", "UTF-8", NULL, &ulen, NULL); + host_conv = g_convert (host, -1, "UCS-2LE", "UTF-8", NULL, &hlen, NULL); + + ntlm_set_string (&resp.domain, &offset, dlen); + ntlm_set_string (&resp.user, &offset, ulen); ntlm_set_string (&resp.host, &offset, hlen); ntlm_set_string (&resp.lm_resp, &offset, sizeof (lm_resp)); ntlm_set_string (&resp.nt_resp, &offset, sizeof (nt_resp)); @@ -838,11 +860,11 @@ soup_ntlm_response (const char *nonce, p += g_base64_encode_step ((const guchar *) &resp, sizeof (resp), FALSE, p, &state, &save); - p += g_base64_encode_step ((const guchar *) domain, dlen, + p += g_base64_encode_step ((const guchar *) domain_conv, dlen, FALSE, p, &state, &save); - p += g_base64_encode_step ((const guchar *) user, ulen, + p += g_base64_encode_step ((const guchar *) user_conv, ulen, FALSE, p, &state, &save); - p += g_base64_encode_step ((const guchar *) host, hlen, + p += g_base64_encode_step ((const guchar *) host_conv, hlen, FALSE, p, &state, &save); p += g_base64_encode_step (lm_resp, sizeof (lm_resp), FALSE, p, &state, &save); @@ -851,6 +873,10 @@ soup_ntlm_response (const char *nonce, p += g_base64_encode_close (FALSE, p, &state, &save); *p = '\0'; + g_free (domain_conv); + g_free (user_conv); + g_free (host_conv); + return out; } diff --git a/libsoup/soup-auth-manager.c b/libsoup/soup-auth-manager.c index cc0f3c9..1aacf48 100644 --- a/libsoup/soup-auth-manager.c +++ b/libsoup/soup-auth-manager.c @@ -123,7 +123,7 @@ soup_auth_manager_class_init (SoupAuthManagerClass *auth_manager_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupAuthManagerClass, authenticate), NULL, NULL, - soup_marshal_NONE__OBJECT_OBJECT_BOOLEAN, + _soup_marshal_NONE__OBJECT_OBJECT_BOOLEAN, G_TYPE_NONE, 3, SOUP_TYPE_MESSAGE, SOUP_TYPE_AUTH, @@ -241,37 +241,84 @@ auth_header_for_message (SoupMessage *msg) } } -static char * 
-extract_challenge (const char *challenges, const char *scheme) +static GSList * +next_challenge_start (GSList *items) { - GSList *items, *i; - int schemelen = strlen (scheme); - char *item, *space, *equals; - GString *challenge; - - /* The relevant grammar: + /* The relevant grammar (from httpbis): * * WWW-Authenticate = 1#challenge * Proxy-Authenticate = 1#challenge - * challenge = auth-scheme 1#auth-param + * challenge = auth-scheme [ 1*SP ( b64token / #auth-param ) ] * auth-scheme = token - * auth-param = token "=" ( token | quoted-string ) + * auth-param = token BWS "=" BWS ( token / quoted-string ) + * b64token = 1*( ALPHA / DIGIT / + * "-" / "." / "_" / "~" / "+" / "/" ) *"=" * * The fact that quoted-strings can contain commas, equals * signs, and auth scheme names makes it tricky to "cheat" on - * the parsing. We just use soup_header_parse_list(), and then - * reassemble the pieces after we find the one we want. + * the parsing. So soup_auth_manager_extract_challenge() will + * have used soup_header_parse_list() to split the header into + * items. Given the grammar above, the possible items are: + * + * auth-scheme + * auth-scheme 1*SP b64token + * auth-scheme 1*SP auth-param + * auth-param + * + * where the first three represent the start of a new challenge and + * the last one does not. */ + for (; items; items = items->next) { + const char *item = items->data; + const char *sp = strpbrk (item, "\t\r\n "); + const char *eq = strchr (item, '='); + + if (!eq) { + /* No "=", so it can't be an auth-param */ + return items; + } + if (!sp || sp > eq) { + /* No space, or first space appears after the "=", + * so it must be an auth-param. + */ + continue; + } + while (g_ascii_isspace (*++sp)) + ; + if (sp == eq) { + /* First "=" appears immediately after the first + * space, so this must be an auth-param with + * space around the "=". 
+ */ + continue; + } + + /* "auth-scheme auth-param" or "auth-scheme b64token" */ + return items; + } + + return NULL; +} + +char * +soup_auth_manager_extract_challenge (const char *challenges, const char *scheme) +{ + GSList *items, *i, *next; + int schemelen = strlen (scheme); + char *item; + GString *challenge; + items = soup_header_parse_list (challenges); - /* First item will start with the scheme name, followed by a - * space and then the first auth-param. + /* First item will start with the scheme name, followed by + * either nothing, or else a space and then the first + * auth-param. */ - for (i = items; i; i = i->next) { + for (i = items; i; i = next_challenge_start (i->next)) { item = i->data; if (!g_ascii_strncasecmp (item, scheme, schemelen) && - g_ascii_isspace (item[schemelen])) + (!item[schemelen] || g_ascii_isspace (item[schemelen]))) break; } if (!i) { @@ -279,17 +326,10 @@ extract_challenge (const char *challenges, const char *scheme) return NULL; } - /* The challenge extends from this item until the end, or until - * the next item that has a space before an equals sign. 
- */ + next = next_challenge_start (i->next); challenge = g_string_new (item); - for (i = i->next; i; i = i->next) { + for (i = i->next; i != next; i = i->next) { item = i->data; - space = strpbrk (item, " \t"); - equals = strchr (item, '='); - if (!equals || (space && equals > space)) - break; - g_string_append (challenge, ", "); g_string_append (challenge, item); } @@ -313,7 +353,7 @@ create_auth (SoupAuthManagerPrivate *priv, SoupMessage *msg) for (i = priv->auth_types->len - 1; i >= 0; i--) { auth_class = priv->auth_types->pdata[i]; - challenge = extract_challenge (header, auth_class->scheme_name); + challenge = soup_auth_manager_extract_challenge (header, auth_class->scheme_name); if (challenge) break; } @@ -336,7 +376,7 @@ check_auth (SoupMessage *msg, SoupAuth *auth) if (!header) return FALSE; - challenge = extract_challenge (header, soup_auth_get_scheme_name (auth)); + challenge = soup_auth_manager_extract_challenge (header, soup_auth_get_scheme_name (auth)); if (!challenge) return FALSE; diff --git a/libsoup/soup-auth-manager.h b/libsoup/soup-auth-manager.h index 493960a..d82fbb1 100644 --- a/libsoup/soup-auth-manager.h +++ b/libsoup/soup-auth-manager.h @@ -32,10 +32,13 @@ typedef struct { GType soup_auth_manager_get_type (void); -void soup_auth_manager_emit_authenticate (SoupAuthManager *manager, - SoupMessage *msg, - SoupAuth *auth, - gboolean retrying); +void soup_auth_manager_emit_authenticate (SoupAuthManager *manager, + SoupMessage *msg, + SoupAuth *auth, + gboolean retrying); + +char *soup_auth_manager_extract_challenge (const char *challenges, + const char *scheme); G_END_DECLS diff --git a/libsoup/soup-auth.c b/libsoup/soup-auth.c index 0b045a5..e72e5c3 100644 --- a/libsoup/soup-auth.c +++ b/libsoup/soup-auth.c @@ -117,7 +117,7 @@ soup_auth_class_init (SoupAuthClass *auth_class) G_OBJECT_CLASS_TYPE (object_class), G_SIGNAL_RUN_FIRST, 0, NULL, NULL, - soup_marshal_NONE__STRING_STRING, + _soup_marshal_NONE__STRING_STRING, G_TYPE_NONE, 2, 
G_TYPE_STRING, G_TYPE_STRING); @@ -126,7 +126,7 @@ soup_auth_class_init (SoupAuthClass *auth_class) /** * SOUP_AUTH_SCHEME_NAME: * - * An alias for the #SoupAuth:scheme property. (The + * An alias for the #SoupAuth:scheme-name property. (The * authentication scheme name.) **/ g_object_class_install_property ( diff --git a/libsoup/soup-cache.c b/libsoup/soup-cache.c index 63cdf8a..6ee6dd9 100644 --- a/libsoup/soup-cache.c +++ b/libsoup/soup-cache.c @@ -43,8 +43,6 @@ #include "soup-session.h" #include "soup-session-feature.h" #include "soup-uri.h" -/*TIZEN patch*/ -#include "TIZEN.h" static SoupSessionFeatureInterface *soup_cache_default_feature_interface; static void soup_cache_session_feature_init (SoupSessionFeatureInterface *feature_interface, gpointer interface_data); @@ -149,6 +147,7 @@ get_cacheability (SoupCache *cache, SoupMessage *msg) { SoupCacheability cacheability; const char *cache_control, *content_type; + gboolean has_max_age = FALSE; /* 1. The request method must be cacheable */ if (msg->method == SOUP_METHOD_GET) @@ -165,7 +164,7 @@ get_cacheability (SoupCache *cache, SoupMessage *msg) return SOUP_CACHE_UNCACHEABLE; cache_control = soup_message_headers_get (msg->response_headers, "Cache-Control"); - if (cache_control) { + if (cache_control && *cache_control) { GHashTable *hash; SoupCachePrivate *priv = SOUP_CACHE_GET_PRIVATE (cache); @@ -187,6 +186,9 @@ get_cacheability (SoupCache *cache, SoupMessage *msg) return SOUP_CACHE_UNCACHEABLE; } + if (g_hash_table_lookup_extended (hash, "max-age", NULL, NULL)) + has_max_age = TRUE; + /* This does not appear in section 2.1, but I think it makes * sense to check it too? 
*/ @@ -198,6 +200,12 @@ get_cacheability (SoupCache *cache, SoupMessage *msg) soup_header_free_param_list (hash); } + /* Section 13.9 */ + if ((soup_message_get_uri (msg))->query && + !soup_message_headers_get_one (msg->response_headers, "Expires") && + !has_max_age) + return SOUP_CACHE_UNCACHEABLE; + switch (msg->status_code) { case SOUP_STATUS_PARTIAL_CONTENT: /* We don't cache partial responses, but they only @@ -960,7 +968,7 @@ msg_got_headers_cb (SoupMessage *msg, gpointer user_data) /* Check if we are already caching this resource */ entry = soup_cache_entry_lookup (cache, msg); - if (entry && entry->dirty) + if (entry && (entry->dirty || entry->being_validated)) return; /* Create a new entry, deleting any old one if present */ @@ -1252,6 +1260,17 @@ soup_cache_class_init (SoupCacheClass *cache_class) } /** + * SoupCacheType: + * @SOUP_CACHE_SINGLE_USER: a single-user cache + * @SOUP_CACHE_SHARED: a shared cache + * + * The type of cache; this affects what kinds of responses will be + * saved. 
+ * + * Since: 2.34 + */ + +/** * soup_cache_new: * @cache_dir: the directory to store the cached data, or %NULL to use the default one * @cache_type: the #SoupCacheType of the cache @@ -1531,12 +1550,24 @@ soup_cache_generate_conditional_request (SoupCache *cache, SoupMessage *original SoupMessage *msg; SoupURI *uri; SoupCacheEntry *entry; - const char *value; + const char *last_modified, *etag; g_return_val_if_fail (SOUP_IS_CACHE (cache), NULL); g_return_val_if_fail (SOUP_IS_MESSAGE (original), NULL); - /* First copy the data we need from the original message */ + /* Add the validator entries in the header from the cached data */ + entry = soup_cache_entry_lookup (cache, original); + g_return_val_if_fail (entry, NULL); + + last_modified = soup_message_headers_get_one (entry->headers, "Last-Modified"); + etag = soup_message_headers_get_one (entry->headers, "ETag"); + + if (!last_modified && !etag) + return NULL; + + entry->being_validated = TRUE; + + /* Copy the data we need from the original message */ uri = soup_message_get_uri (original); msg = soup_message_new_from_uri (original->method, uri); @@ -1544,23 +1575,15 @@ soup_cache_generate_conditional_request (SoupCache *cache, SoupMessage *original (SoupMessageHeadersForeachFunc)copy_headers, msg->request_headers); - /* Now add the validator entries in the header from the cached - data */ - entry = soup_cache_entry_lookup (cache, original); - g_return_val_if_fail (entry, NULL); - - entry->being_validated = TRUE; - - value = soup_message_headers_get (entry->headers, "Last-Modified"); - if (value) + if (last_modified) soup_message_headers_append (msg->request_headers, "If-Modified-Since", - value); - value = soup_message_headers_get (entry->headers, "ETag"); - if (value) + last_modified); + if (etag) soup_message_headers_append (msg->request_headers, "If-None-Match", - value); + etag); + return msg; } @@ -1590,12 +1613,6 @@ pack_entry (gpointer data, return; g_variant_builder_open (entries_builder, G_VARIANT_TYPE 
(SOUP_CACHE_PHEADERS_FORMAT)); -#if ENABLE(TIZEN_FIX_PACK_ENTRY) - if (!g_utf8_validate (entry->uri, -1, NULL)) { - g_variant_builder_close (entries_builder); - return; - } -#endif g_variant_builder_add (entries_builder, "s", entry->uri); g_variant_builder_add (entries_builder, "b", entry->must_revalidate); g_variant_builder_add (entries_builder, "u", entry->freshness_lifetime); diff --git a/libsoup/soup-connection.c b/libsoup/soup-connection.c index 514640f..b299e2b 100644 --- a/libsoup/soup-connection.c +++ b/libsoup/soup-connection.c @@ -26,7 +26,6 @@ #include "soup-misc.h" #include "soup-misc-private.h" #include "soup-socket.h" -#include "soup-ssl.h" #include "soup-uri.h" #include "soup-enum-types.h" @@ -35,11 +34,11 @@ typedef struct { SoupAddress *remote_addr, *tunnel_addr; SoupURI *proxy_uri; - gpointer ssl_creds; - gboolean ssl_strict; - gboolean ssl_fallback; + GTlsDatabase *tlsdb; + gboolean ssl, ssl_strict, ssl_fallback; - GMainContext *async_context; + GMainContext *async_context; + gboolean use_thread_context; SoupMessageQueueItem *cur_item; SoupConnectionState state; @@ -52,6 +51,7 @@ typedef struct { G_DEFINE_TYPE (SoupConnection, soup_connection, G_TYPE_OBJECT) enum { + EVENT, DISCONNECTED, LAST_SIGNAL }; @@ -64,10 +64,12 @@ enum { PROP_REMOTE_ADDRESS, PROP_TUNNEL_ADDRESS, PROP_PROXY_URI, + PROP_SSL, PROP_SSL_CREDS, PROP_SSL_STRICT, PROP_SSL_FALLBACK, PROP_ASYNC_CONTEXT, + PROP_USE_THREAD_CONTEXT, PROP_TIMEOUT, PROP_IDLE_TIMEOUT, PROP_STATE, @@ -106,7 +108,8 @@ finalize (GObject *object) g_object_unref (priv->tunnel_addr); if (priv->proxy_uri) soup_uri_free (priv->proxy_uri); - + if (priv->tlsdb) + g_object_unref (priv->tlsdb); if (priv->async_context) g_main_context_unref (priv->async_context); @@ -149,13 +152,23 @@ soup_connection_class_init (SoupConnectionClass *connection_class) object_class->get_property = get_property; /* signals */ + signals[EVENT] = + g_signal_new ("event", + G_OBJECT_CLASS_TYPE (object_class), + G_SIGNAL_RUN_FIRST, + 0, + 
NULL, NULL, + NULL, + G_TYPE_NONE, 2, + G_TYPE_SOCKET_CLIENT_EVENT, + G_TYPE_IO_STREAM); signals[DISCONNECTED] = g_signal_new ("disconnected", G_OBJECT_CLASS_TYPE (object_class), G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupConnectionClass, disconnected), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /* properties */ @@ -181,12 +194,20 @@ soup_connection_class_init (SoupConnectionClass *connection_class) SOUP_TYPE_URI, G_PARAM_READWRITE)); g_object_class_install_property ( - object_class, PROP_SSL_CREDS, - g_param_spec_pointer (SOUP_CONNECTION_SSL_CREDENTIALS, - "SSL credentials", - "Opaque SSL credentials for this connection", + object_class, PROP_SSL, + g_param_spec_boolean (SOUP_CONNECTION_SSL, + "SSL", + "Whether this is an SSL connection", + FALSE, G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); g_object_class_install_property ( + object_class, PROP_SSL_CREDS, + g_param_spec_object (SOUP_CONNECTION_SSL_CREDENTIALS, + "SSL credentials", + "SSL credentials for this connection", + G_TYPE_TLS_DATABASE, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); + g_object_class_install_property ( object_class, PROP_SSL_STRICT, g_param_spec_boolean (SOUP_CONNECTION_SSL_STRICT, "Strictly validate SSL certificates", @@ -207,6 +228,13 @@ soup_connection_class_init (SoupConnectionClass *connection_class) "GMainContext to dispatch this connection's async I/O in", G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); g_object_class_install_property ( + object_class, PROP_USE_THREAD_CONTEXT, + g_param_spec_boolean (SOUP_CONNECTION_USE_THREAD_CONTEXT, + "Use thread context", + "Use g_main_context_get_thread_default", + FALSE, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); + g_object_class_install_property ( object_class, PROP_TIMEOUT, g_param_spec_uint (SOUP_CONNECTION_TIMEOUT, "Timeout value", @@ -269,8 +297,13 @@ set_property (GObject *object, guint prop_id, soup_uri_free (priv->proxy_uri); priv->proxy_uri = g_value_dup_boxed (value); break; + case PROP_SSL: 
+ priv->ssl = g_value_get_boolean (value); + break; case PROP_SSL_CREDS: - priv->ssl_creds = g_value_get_pointer (value); + if (priv->tlsdb) + g_object_unref (priv->tlsdb); + priv->tlsdb = g_value_dup_object (value); break; case PROP_SSL_STRICT: priv->ssl_strict = g_value_get_boolean (value); @@ -283,6 +316,9 @@ set_property (GObject *object, guint prop_id, if (priv->async_context) g_main_context_ref (priv->async_context); break; + case PROP_USE_THREAD_CONTEXT: + priv->use_thread_context = g_value_get_boolean (value); + break; case PROP_TIMEOUT: priv->io_timeout = g_value_get_uint (value); break; @@ -314,8 +350,11 @@ get_property (GObject *object, guint prop_id, case PROP_PROXY_URI: g_value_set_boxed (value, priv->proxy_uri); break; + case PROP_SSL: + g_value_set_boolean (value, priv->ssl); + break; case PROP_SSL_CREDS: - g_value_set_pointer (value, priv->ssl_creds); + g_value_set_object (value, priv->tlsdb); break; case PROP_SSL_STRICT: g_value_set_boolean (value, priv->ssl_strict); @@ -326,6 +365,9 @@ get_property (GObject *object, guint prop_id, case PROP_ASYNC_CONTEXT: g_value_set_pointer (value, priv->async_context ? 
g_main_context_ref (priv->async_context) : NULL); break; + case PROP_USE_THREAD_CONTEXT: + g_value_set_boolean (value, priv->use_thread_context); + break; case PROP_TIMEOUT: g_value_set_uint (value, priv->io_timeout); break; @@ -377,6 +419,15 @@ stop_idle_timer (SoupConnectionPrivate *priv) } static void +current_item_restarted (SoupMessage *msg, gpointer user_data) +{ + SoupConnection *conn = user_data; + SoupConnectionPrivate *priv = SOUP_CONNECTION_GET_PRIVATE (conn); + + priv->unused_timeout = 0; +} + +static void set_current_item (SoupConnection *conn, SoupMessageQueueItem *item) { SoupConnectionPrivate *priv = SOUP_CONNECTION_GET_PRIVATE (conn); @@ -391,8 +442,14 @@ set_current_item (SoupConnection *conn, SoupMessageQueueItem *item) priv->cur_item = item; g_object_notify (G_OBJECT (conn), "message"); - if (priv->state == SOUP_CONNECTION_IDLE || - item->msg->method != SOUP_METHOD_CONNECT) + g_signal_connect (item->msg, "restarted", + G_CALLBACK (current_item_restarted), conn); + + if (item->msg->method == SOUP_METHOD_CONNECT) { + g_signal_emit (conn, signals[EVENT], 0, + G_SOCKET_CLIENT_PROXY_NEGOTIATING, + soup_socket_get_iostream (priv->socket)); + } else if (priv->state == SOUP_CONNECTION_IDLE) soup_connection_set_state (conn, SOUP_CONNECTION_IN_USE); g_object_thaw_notify (G_OBJECT (conn)); @@ -415,8 +472,14 @@ clear_current_item (SoupConnection *conn) priv->cur_item = NULL; g_object_notify (G_OBJECT (conn), "message"); + g_signal_handlers_disconnect_by_func (item->msg, G_CALLBACK (current_item_restarted), conn); + if (item->msg->method == SOUP_METHOD_CONNECT && SOUP_STATUS_IS_SUCCESSFUL (item->msg->status_code)) { + g_signal_emit (conn, signals[EVENT], 0, + G_SOCKET_CLIENT_PROXY_NEGOTIATED, + soup_socket_get_iostream (priv->socket)); + /* We're now effectively no longer proxying */ soup_uri_free (priv->proxy_uri); priv->proxy_uri = NULL; @@ -430,6 +493,33 @@ clear_current_item (SoupConnection *conn) } static void +soup_connection_event (SoupConnection 
*conn, + GSocketClientEvent event, + GIOStream *connection) +{ + SoupConnectionPrivate *priv = SOUP_CONNECTION_GET_PRIVATE (conn); + + if (!connection && priv->socket) + connection = soup_socket_get_iostream (priv->socket); + + g_signal_emit (conn, signals[EVENT], 0, + event, connection); +} + +static void +proxy_socket_event (SoupSocket *socket, + GSocketClientEvent event, + GIOStream *connection, + gpointer user_data) +{ + SoupConnection *conn = user_data; + + /* We handle COMPLETE ourselves */ + if (event != G_SOCKET_CLIENT_COMPLETE) + soup_connection_event (conn, event, connection); +} + +static void socket_disconnected (SoupSocket *sock, gpointer conn) { soup_connection_disconnect (conn); @@ -440,6 +530,8 @@ typedef struct { SoupConnectionCallback callback; gpointer callback_data; GCancellable *cancellable; + guint event_id; + gboolean tls_handshake; } SoupConnectionAsyncConnectData; static void @@ -448,10 +540,23 @@ socket_connect_finished (SoupSocket *socket, guint status, gpointer user_data) SoupConnectionAsyncConnectData *data = user_data; SoupConnectionPrivate *priv = SOUP_CONNECTION_GET_PRIVATE (data->conn); + g_signal_handler_disconnect (socket, data->event_id); + if (SOUP_STATUS_IS_SUCCESSFUL (status)) { g_signal_connect (priv->socket, "disconnected", G_CALLBACK (socket_disconnected), data->conn); + if (data->tls_handshake) { + soup_connection_event (data->conn, + G_SOCKET_CLIENT_TLS_HANDSHAKED, + NULL); + } + if (!priv->ssl || !priv->tunnel_addr) { + soup_connection_event (data->conn, + G_SOCKET_CLIENT_COMPLETE, + NULL); + } + soup_connection_set_state (data->conn, SOUP_CONNECTION_IN_USE); priv->unused_timeout = time (NULL) + SOUP_CONNECTION_UNUSED_TIMEOUT; start_idle_timer (data->conn); @@ -475,11 +580,13 @@ static void socket_connect_result (SoupSocket *sock, guint status, gpointer user_data) { SoupConnectionAsyncConnectData *data = user_data; - SoupConnectionPrivate *priv = SOUP_CONNECTION_GET_PRIVATE (data->conn); if (SOUP_STATUS_IS_SUCCESSFUL 
(status) && - priv->ssl_creds && !priv->tunnel_addr) { + data->tls_handshake) { if (soup_socket_start_ssl (sock, data->cancellable)) { + soup_connection_event (data->conn, + G_SOCKET_CLIENT_TLS_HANDSHAKING, + NULL); soup_socket_handshake_async (sock, data->cancellable, socket_connect_finished, data); return; @@ -511,16 +618,21 @@ soup_connection_connect_async (SoupConnection *conn, data->callback = callback; data->callback_data = user_data; data->cancellable = cancellable ? g_object_ref (cancellable) : NULL; + data->tls_handshake = (priv->ssl && !priv->tunnel_addr); priv->socket = soup_socket_new (SOUP_SOCKET_REMOTE_ADDRESS, priv->remote_addr, - SOUP_SOCKET_SSL_CREDENTIALS, priv->ssl_creds, + SOUP_SOCKET_SSL_CREDENTIALS, priv->tlsdb, SOUP_SOCKET_SSL_STRICT, priv->ssl_strict, SOUP_SOCKET_SSL_FALLBACK, priv->ssl_fallback, SOUP_SOCKET_ASYNC_CONTEXT, priv->async_context, + SOUP_SOCKET_USE_THREAD_CONTEXT, priv->use_thread_context, SOUP_SOCKET_TIMEOUT, priv->io_timeout, "clean-dispose", TRUE, NULL); + data->event_id = g_signal_connect (priv->socket, "event", + G_CALLBACK (proxy_socket_event), + conn); soup_socket_connect_async (priv->socket, cancellable, socket_connect_result, data); } @@ -529,7 +641,7 @@ guint soup_connection_connect_sync (SoupConnection *conn, GCancellable *cancellable) { SoupConnectionPrivate *priv; - guint status; + guint status, event_id; g_return_val_if_fail (SOUP_IS_CONNECTION (conn), SOUP_STATUS_MALFORMED); priv = SOUP_CONNECTION_GET_PRIVATE (conn); @@ -539,7 +651,7 @@ soup_connection_connect_sync (SoupConnection *conn, GCancellable *cancellable) priv->socket = soup_socket_new (SOUP_SOCKET_REMOTE_ADDRESS, priv->remote_addr, - SOUP_SOCKET_SSL_CREDENTIALS, priv->ssl_creds, + SOUP_SOCKET_SSL_CREDENTIALS, priv->tlsdb, SOUP_SOCKET_SSL_STRICT, priv->ssl_strict, SOUP_SOCKET_SSL_FALLBACK, priv->ssl_fallback, SOUP_SOCKET_FLAG_NONBLOCKING, FALSE, @@ -547,17 +659,26 @@ soup_connection_connect_sync (SoupConnection *conn, GCancellable *cancellable) 
"clean-dispose", TRUE, NULL); + event_id = g_signal_connect (priv->socket, "event", + G_CALLBACK (proxy_socket_event), conn); status = soup_socket_connect_sync (priv->socket, cancellable); if (!SOUP_STATUS_IS_SUCCESSFUL (status)) goto fail; - if (priv->ssl_creds && !priv->tunnel_addr) { + if (priv->ssl && !priv->tunnel_addr) { if (!soup_socket_start_ssl (priv->socket, cancellable)) status = SOUP_STATUS_SSL_FAILED; else { + soup_connection_event (conn, + G_SOCKET_CLIENT_TLS_HANDSHAKING, + NULL); status = soup_socket_handshake_sync (priv->socket, cancellable); - if (status == SOUP_STATUS_TLS_FAILED) { + if (status == SOUP_STATUS_OK) { + soup_connection_event (conn, + G_SOCKET_CLIENT_TLS_HANDSHAKED, + NULL); + } else if (status == SOUP_STATUS_TLS_FAILED) { priv->ssl_fallback = TRUE; status = SOUP_STATUS_TRY_AGAIN; } @@ -568,6 +689,11 @@ soup_connection_connect_sync (SoupConnection *conn, GCancellable *cancellable) g_signal_connect (priv->socket, "disconnected", G_CALLBACK (socket_disconnected), conn); + if (!priv->ssl || !priv->tunnel_addr) { + soup_connection_event (conn, + G_SOCKET_CLIENT_COMPLETE, + NULL); + } soup_connection_set_state (conn, SOUP_CONNECTION_IN_USE); priv->unused_timeout = time (NULL) + SOUP_CONNECTION_UNUSED_TIMEOUT; start_idle_timer (conn); @@ -580,6 +706,9 @@ soup_connection_connect_sync (SoupConnection *conn, GCancellable *cancellable) } } + if (priv->socket) + g_signal_handler_disconnect (priv->socket, event_id); + if (priv->proxy_uri != NULL) status = soup_status_proxify (status); return status; @@ -614,8 +743,11 @@ soup_connection_start_ssl_sync (SoupConnection *conn, cancellable)) return SOUP_STATUS_SSL_FAILED; + soup_connection_event (conn, G_SOCKET_CLIENT_TLS_HANDSHAKING, NULL); status = soup_socket_handshake_sync (priv->socket, cancellable); - if (status == SOUP_STATUS_TLS_FAILED) { + if (status == SOUP_STATUS_OK) + soup_connection_event (conn, G_SOCKET_CLIENT_TLS_HANDSHAKED, NULL); + else if (status == SOUP_STATUS_TLS_FAILED) { 
priv->ssl_fallback = TRUE; status = SOUP_STATUS_TRY_AGAIN; } @@ -629,7 +761,9 @@ start_ssl_completed (SoupSocket *socket, guint status, gpointer user_data) SoupConnectionAsyncConnectData *data = user_data; SoupConnectionPrivate *priv = SOUP_CONNECTION_GET_PRIVATE (data->conn); - if (status == SOUP_STATUS_TLS_FAILED) { + if (status == SOUP_STATUS_OK) + soup_connection_event (data->conn, G_SOCKET_CLIENT_TLS_HANDSHAKED, NULL); + else if (status == SOUP_STATUS_TLS_FAILED) { priv->ssl_fallback = TRUE; status = SOUP_STATUS_TRY_AGAIN; } @@ -657,6 +791,7 @@ soup_connection_start_ssl_async (SoupConnection *conn, SoupConnectionPrivate *priv; const char *server_name; SoupConnectionAsyncConnectData *data; + GMainContext *async_context; g_return_if_fail (SOUP_IS_CONNECTION (conn)); priv = SOUP_CONNECTION_GET_PRIVATE (conn); @@ -666,16 +801,22 @@ soup_connection_start_ssl_async (SoupConnection *conn, data->callback = callback; data->callback_data = user_data; + if (priv->use_thread_context) + async_context = g_main_context_get_thread_default (); + else + async_context = priv->async_context; + server_name = soup_address_get_name (priv->tunnel_addr ? 
priv->tunnel_addr : priv->remote_addr); if (!soup_socket_start_proxy_ssl (priv->socket, server_name, cancellable)) { - soup_add_completion (priv->async_context, + soup_add_completion (async_context, idle_start_ssl_completed, data); return; } + soup_connection_event (conn, G_SOCKET_CLIENT_TLS_HANDSHAKING, NULL); soup_socket_handshake_async (priv->socket, cancellable, start_ssl_completed, data); } @@ -745,17 +886,10 @@ soup_connection_get_state (SoupConnection *conn) SOUP_CONNECTION_DISCONNECTED); priv = SOUP_CONNECTION_GET_PRIVATE (conn); -#ifdef G_OS_UNIX - if (priv->state == SOUP_CONNECTION_IDLE) { - GPollFD pfd; + if (priv->state == SOUP_CONNECTION_IDLE && + g_socket_condition_check (soup_socket_get_gsocket (priv->socket), G_IO_IN)) + soup_connection_set_state (conn, SOUP_CONNECTION_REMOTE_DISCONNECTED); - pfd.fd = soup_socket_get_fd (priv->socket); - pfd.events = G_IO_IN; - pfd.revents = 0; - if (g_poll (&pfd, 1, 0) == 1) - soup_connection_set_state (conn, SOUP_CONNECTION_REMOTE_DISCONNECTED); - } -#endif if (priv->state == SOUP_CONNECTION_IDLE && priv->unused_timeout && priv->unused_timeout < time (NULL)) soup_connection_set_state (conn, SOUP_CONNECTION_REMOTE_DISCONNECTED); diff --git a/libsoup/soup-connection.h b/libsoup/soup-connection.h index ef304e7..72e6106 100644 --- a/libsoup/soup-connection.h +++ b/libsoup/soup-connection.h @@ -42,10 +42,12 @@ typedef void (*SoupConnectionCallback) (SoupConnection *conn, #define SOUP_CONNECTION_REMOTE_ADDRESS "remote-address" #define SOUP_CONNECTION_TUNNEL_ADDRESS "tunnel-address" #define SOUP_CONNECTION_PROXY_URI "proxy-uri" +#define SOUP_CONNECTION_SSL "ssl" #define SOUP_CONNECTION_SSL_CREDENTIALS "ssl-creds" #define SOUP_CONNECTION_SSL_STRICT "ssl-strict" #define SOUP_CONNECTION_SSL_FALLBACK "ssl-fallback" #define SOUP_CONNECTION_ASYNC_CONTEXT "async-context" +#define SOUP_CONNECTION_USE_THREAD_CONTEXT "use-thread-context" #define SOUP_CONNECTION_TIMEOUT "timeout" #define SOUP_CONNECTION_IDLE_TIMEOUT "idle-timeout" 
#define SOUP_CONNECTION_STATE "state" diff --git a/libsoup/soup-content-decoder.c b/libsoup/soup-content-decoder.c index 3ab240c..e5a7125 100644 --- a/libsoup/soup-content-decoder.c +++ b/libsoup/soup-content-decoder.c @@ -66,7 +66,7 @@ G_DEFINE_TYPE_WITH_CODE (SoupContentDecoder, soup_content_decoder, G_TYPE_OBJECT soup_content_decoder_session_feature_init)) /* This is constant for now */ -#define ACCEPT_ENCODING_HEADER "gzip" +#define ACCEPT_ENCODING_HEADER "gzip, deflate" static GConverter * gzip_decoder_creator (void) @@ -74,6 +74,12 @@ gzip_decoder_creator (void) return (GConverter *)g_zlib_decompressor_new (G_ZLIB_COMPRESSOR_FORMAT_GZIP); } +static GConverter * +zlib_decoder_creator (void) +{ + return (GConverter *)g_zlib_decompressor_new (G_ZLIB_COMPRESSOR_FORMAT_ZLIB); +} + static void soup_content_decoder_init (SoupContentDecoder *decoder) { @@ -87,6 +93,8 @@ soup_content_decoder_init (SoupContentDecoder *decoder) gzip_decoder_creator); g_hash_table_insert (decoder->priv->decoders, "x-gzip", gzip_decoder_creator); + g_hash_table_insert (decoder->priv->decoders, "deflate", + zlib_decoder_creator); } static void @@ -132,7 +140,8 @@ soup_content_decoder_got_headers_cb (SoupMessage *msg, SoupContentDecoder *decod return; /* Workaround for an apache bug (bgo 613361) */ - if (!g_ascii_strcasecmp (header, "gzip")) { + if (!g_ascii_strcasecmp (header, "gzip") || + !g_ascii_strcasecmp (header, "x-gzip")) { const char *content_type = soup_message_headers_get_content_type (msg->response_headers, NULL); if (content_type && diff --git a/libsoup/soup-content-sniffer.c b/libsoup/soup-content-sniffer.c index f551e94..4b96735 100644 --- a/libsoup/soup-content-sniffer.c +++ b/libsoup/soup-content-sniffer.c @@ -17,8 +17,6 @@ #include "soup-message-private.h" #include "soup-session-feature.h" #include "soup-uri.h" -/*TIZEN patch*/ -#include "TIZEN.h" /** * SECTION:soup-content-sniffer @@ -121,266 +119,6 @@ typedef struct { gboolean scriptable; } SoupContentSnifferPattern; 
-#if ENABLE(TIZEN_FIX_CONTENT_SNIFFER_PATTERN) -/* This table is updated by TIZEN team(steve.jun@samsung.com), based on draft-abarth-mime-sniff-06 - * (http://tools.ietf.org/html/draft-abarth-mime-sniff-06); - * See 5. Unknown Type - */ -static SoupContentSnifferPattern types_table[] = { - // ) - { TRUE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF\xFF", - (const guchar *)" \x3C\x3f\x78\x6d\x6c", - 5, - "text/xml", - TRUE }, - // The string "%PDF-", the PDF signature. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x25\x50\x44\x46\x2D", - 5, - "application/pdf", - TRUE }, - // The string "%!PS-Adobe-", the PostScript signature. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x25\x21\x50\x53\x2D\x41\x64\x6F\x62\x65\x2D", - 11, - "application/postscript", - FALSE }, - // UTF-16BE BOM - { FALSE, - (const guchar *)"\xFF\xFF\x00\x00", - (const guchar *)"\xFE\xFF\x00\x00", - 4, - "text/plain", - FALSE }, - // UTF-16LE BOM - { FALSE, - (const guchar *)"\xFF\xFF\x00\x00", - (const guchar *)"\xFF\xFE\x00\x00", - 4, - "text/plain", - FALSE }, - // UTF-8 BOM - { FALSE, - (const guchar *)"\xFF\xFF\xFF\x00", - (const guchar *)"\xEF\xBB\xBF\x00", - 4, - "text/plain", - FALSE }, - // The string "GIF87a", a GIF signature. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x47\x49\x46\x38\x37\x61", - 6, - "image/gif", - FALSE }, - // The string "GIF89a", a GIF signature. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x47\x49\x46\x38\x39\x61", - 6, - "image/gif", - FALSE }, - // The PNG signature. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A", - 8, - "image/png", - FALSE }, - // A JPEG SOI marker followed by a octet of another marker. - { FALSE, - (const guchar *)"\xFF\xFF\xFF", - (const guchar *)"\xFF\xD8\xFF", - 3, - "image/jpeg", - FALSE }, - // The string "BM", a BMP signature. 
- { FALSE, - (const guchar *)"\xFF\xFF", - (const guchar *)"\x42\x4D", - 2, - "image/bmp", - FALSE }, - // "RIFF" followed by four bytes, followed by "WEBPVP". - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x52\x49\x46\x46\x00\x00\x00\x00\x57\x45\x42\x50\x56\x50", - 14, - "image/webp", - FALSE }, - // A Windows Icon signature. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF", - (const guchar *)"\x00\x00\x01\x00", - 4, - "image/vnd.microsoft.icon", - FALSE }, - // An Ogg Vorbis audio or video signature. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x4F\x67\x67\x53\x00", - 5, - "application/ogg", - FALSE }, - // "RIFF" followed by four bytes, followed by "WAVE". - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF", - (const guchar *)"\x52\x49\x46\x46\x00\x00\x00\x00\x57\x41\x56\x45", - 12, - "audio/x-wave", - FALSE }, - // The WebM signature [TODO: Use more octets?] vidow: typo(!) - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF", - (const guchar *)"\x1A\x45\xDF\xA3", - 4, - "vidow/webm", - FALSE }, - // A RAR archive. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF\xFF\xFF\xFF", - (const guchar *)"\x52\x61\x72\x20\x1A\x07\x00", - 7, - "application/x-rar-compressed", - FALSE }, - // A ZIP archive. - { FALSE, - (const guchar *)"\xFF\xFF\xFF\xFF", - (const guchar *)"\x50\x4B\x03\x04", - 4, - "application/zip", - FALSE }, - // A GZIP archive. - { FALSE, - (const guchar *)"\xFF\xFF\xFF", - (const guchar *)"\x1F\x8B\x08", - 3, - "application/x-gzip", - FALSE } -}; -#else static SoupContentSnifferPattern types_table[] = { { FALSE, (const guchar *)"\xFF\xFF\xDF\xDF\xDF\xDF\xDF\xDF\xDF\xFF\xDF\xDF\xDF\xDF", @@ -487,7 +225,6 @@ static SoupContentSnifferPattern types_table[] = { "image/vnd.microsoft.icon", FALSE } }; -#endif /* Whether a given byte looks like it might be part of binary content. 
* Source: HTML5 spec; borrowed from the Chromium mime sniffer code, diff --git a/libsoup/soup-cookie-jar.c b/libsoup/soup-cookie-jar.c index 13bf238..1f6500f 100644 --- a/libsoup/soup-cookie-jar.c +++ b/libsoup/soup-cookie-jar.c @@ -144,7 +144,7 @@ soup_cookie_jar_class_init (SoupCookieJarClass *jar_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupCookieJarClass, changed), NULL, NULL, - soup_marshal_NONE__BOXED_BOXED, + _soup_marshal_NONE__BOXED_BOXED, G_TYPE_NONE, 2, SOUP_TYPE_COOKIE | G_SIGNAL_TYPE_STATIC_SCOPE, SOUP_TYPE_COOKIE | G_SIGNAL_TYPE_STATIC_SCOPE); @@ -252,6 +252,14 @@ soup_cookie_jar_new (void) return g_object_new (SOUP_TYPE_COOKIE_JAR, NULL); } +/** + * soup_cookie_jar_save: + * @jar: a #SoupCookieJar + * + * This function exists for backward compatibility, but does not do + * anything any more; cookie jars are saved automatically when they + * are changed. + */ void soup_cookie_jar_save (SoupCookieJar *jar) { @@ -716,7 +724,9 @@ soup_cookie_jar_delete_cookie (SoupCookieJar *jar, /** * soup_cookie_jar_get_accept_policy: * @jar: a #SoupCookieJar - * + * + * Gets @jar's #SoupCookieJarAcceptPolicy + * * Returns: the #SoupCookieJarAcceptPolicy set in the @jar * * Since: 2.30 diff --git a/libsoup/soup-cookie.c b/libsoup/soup-cookie.c index 5e50043..b10eba8 100644 --- a/libsoup/soup-cookie.c +++ b/libsoup/soup-cookie.c @@ -1069,6 +1069,18 @@ soup_cookie_applies_to_uri (SoupCookie *cookie, SoupURI *uri) return TRUE; } +/** + * soup_cookie_equal: + * @cookie1: a #SoupCookie + * @cookie2: a #SoupCookie + * + * Tests if @cookie1 and @cookie2 are equal. + * + * Note that currently, this does not check that the cookie domains + * match. This may change in the future. + * + * Return value: whether the cookies are equal. 
+ */ gboolean soup_cookie_equal (SoupCookie *cookie1, SoupCookie *cookie2) { diff --git a/libsoup/soup-directory-input-stream.c b/libsoup/soup-directory-input-stream.c index 3ba74b7..3fe946b 100644 --- a/libsoup/soup-directory-input-stream.c +++ b/libsoup/soup-directory-input-stream.c @@ -29,8 +29,9 @@ #include #include -#define INIT_STRING "OMG!" -#define EXIT_STRING "
" +#define INIT_STRING "\n\n\n" +#define ROW_FORMAT "\n" +#define EXIT_STRING "
NameSizeDate Modified%s%s%s
\n\n" G_DEFINE_TYPE (SoupDirectoryInputStream, soup_directory_input_stream, G_TYPE_INPUT_STREAM) @@ -40,31 +41,41 @@ soup_directory_input_stream_parse_info (SoupDirectoryInputStream *stream, { SoupBuffer *buffer; GString *string; - const char *s; - char *escaped, *path, *xml_string; + const char *file_name; + char *escaped, *path, *xml_string, *size, *time; + GTimeVal modified; + GDateTime *modification_time; if (!g_file_info_get_name (info)) return NULL; - s = g_file_info_get_display_name (info); - if (!s) { - s = g_file_info_get_name (info); + file_name = g_file_info_get_display_name (info); + if (!file_name) { + file_name = g_file_info_get_name (info); /* FIXME: convert somehow? */ - if (!g_utf8_validate (s, -1, NULL)) + if (!g_utf8_validate (file_name, -1, NULL)) return NULL; } string = g_string_new (""); - xml_string = g_markup_escape_text (s, -1); - escaped = g_uri_escape_string (g_file_info_get_name (info), NULL, FALSE); - path = g_strconcat (stream->uri, "/", escaped, NULL); + xml_string = g_markup_escape_text (file_name, -1); + escaped = g_uri_escape_string (file_name, NULL, FALSE); + path = g_strconcat (stream->uri, G_DIR_SEPARATOR_S, escaped, NULL); + size = g_format_size (g_file_info_get_size (info)); + g_file_info_get_modification_time (info, &modified); + modification_time = g_date_time_new_from_timeval_local (&modified); + time = g_date_time_format (modification_time, "%X %x"); + g_date_time_unref (modification_time); + + g_string_append_printf (string, ROW_FORMAT, path, xml_string, size, time); + g_string_append (string, "\n"); + buffer = soup_buffer_new (SOUP_MEMORY_TAKE, string->str, string->len); + + g_free (time); g_free (escaped); - g_string_append_printf (string, "
%s", path, xml_string); + g_free (size); g_free (path); g_free (xml_string); - g_string_append (string, ""); - - buffer = soup_buffer_new (SOUP_MEMORY_TAKE, string->str, string->len); g_string_free (string, FALSE); return buffer; diff --git a/libsoup/soup-enum-types.c b/libsoup/soup-enum-types.c new file mode 100644 index 0000000..9c1717d --- /dev/null +++ b/libsoup/soup-enum-types.c @@ -0,0 +1,516 @@ + + + +/* Generated by glib-mkenums. Do not edit */ + +#include "soup-enum-types.h" +#define LIBSOUP_USE_UNSTABLE_REQUEST_API +#include "soup.h" +#include "soup-address.h" +#include "soup-auth.h" +#include "soup-auth-domain.h" +#include "soup-auth-domain-basic.h" +#include "soup-auth-domain-digest.h" +#include "soup-cache.h" +#include "soup-content-decoder.h" +#include "soup-content-sniffer.h" +#include "soup-cookie.h" +#include "soup-cookie-jar.h" +#include "soup-cookie-jar-text.h" +#include "soup-date.h" +#include "soup-form.h" +#include "soup-headers.h" +#include "soup-logger.h" +#include "soup-message.h" +#include "soup-message-body.h" +#include "soup-message-headers.h" +#include "soup-method.h" +#include "soup-misc.h" +#include "soup-multipart.h" +#include "soup-password-manager.h" +#include "soup-portability.h" +#include "soup-proxy-resolver.h" +#include "soup-proxy-resolver-default.h" +#include "soup-proxy-uri-resolver.h" +#include "soup-request.h" +#include "soup-request-data.h" +#include "soup-request-file.h" +#include "soup-request-http.h" +#include "soup-requester.h" +#include "soup-server.h" +#include "soup-session.h" +#include "soup-session-async.h" +#include "soup-session-feature.h" +#include "soup-session-sync.h" +#include "soup-socket.h" +#include "soup-status.h" +#include "soup-types.h" +#include "soup-uri.h" +#include "soup-value-utils.h" +#include "soup-xmlrpc.h" +#include "soup-cookie-jar-sqlite.h" +#include "soup-gnome.h" +#include "soup-gnome-features.h" + +GType +soup_address_family_get_type (void) +{ + static volatile gsize 
g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_ADDRESS_FAMILY_INVALID, "SOUP_ADDRESS_FAMILY_INVALID", "invalid" }, + { SOUP_ADDRESS_FAMILY_IPV4, "SOUP_ADDRESS_FAMILY_IPV4", "ipv4" }, + { SOUP_ADDRESS_FAMILY_IPV6, "SOUP_ADDRESS_FAMILY_IPV6", "ipv6" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupAddressFamily"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_cacheability_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GFlagsValue values[] = { + { SOUP_CACHE_CACHEABLE, "SOUP_CACHE_CACHEABLE", "cacheable" }, + { SOUP_CACHE_UNCACHEABLE, "SOUP_CACHE_UNCACHEABLE", "uncacheable" }, + { SOUP_CACHE_INVALIDATES, "SOUP_CACHE_INVALIDATES", "invalidates" }, + { SOUP_CACHE_VALIDATES, "SOUP_CACHE_VALIDATES", "validates" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_flags_register_static (g_intern_static_string ("SoupCacheability"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_cache_response_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_CACHE_RESPONSE_FRESH, "SOUP_CACHE_RESPONSE_FRESH", "fresh" }, + { SOUP_CACHE_RESPONSE_NEEDS_VALIDATION, "SOUP_CACHE_RESPONSE_NEEDS_VALIDATION", "needs-validation" }, + { SOUP_CACHE_RESPONSE_STALE, "SOUP_CACHE_RESPONSE_STALE", "stale" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupCacheResponse"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return 
g_define_type_id__volatile; +} +GType +soup_cache_type_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_CACHE_SINGLE_USER, "SOUP_CACHE_SINGLE_USER", "single-user" }, + { SOUP_CACHE_SHARED, "SOUP_CACHE_SHARED", "shared" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupCacheType"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_cookie_jar_accept_policy_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_COOKIE_JAR_ACCEPT_ALWAYS, "SOUP_COOKIE_JAR_ACCEPT_ALWAYS", "always" }, + { SOUP_COOKIE_JAR_ACCEPT_NEVER, "SOUP_COOKIE_JAR_ACCEPT_NEVER", "never" }, + { SOUP_COOKIE_JAR_ACCEPT_NO_THIRD_PARTY, "SOUP_COOKIE_JAR_ACCEPT_NO_THIRD_PARTY", "no-third-party" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupCookieJarAcceptPolicy"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_date_format_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_DATE_HTTP, "SOUP_DATE_HTTP", "http" }, + { SOUP_DATE_COOKIE, "SOUP_DATE_COOKIE", "cookie" }, + { SOUP_DATE_RFC2822, "SOUP_DATE_RFC2822", "rfc2822" }, + { SOUP_DATE_ISO8601_COMPACT, "SOUP_DATE_ISO8601_COMPACT", "iso8601-compact" }, + { SOUP_DATE_ISO8601_FULL, "SOUP_DATE_ISO8601_FULL", "iso8601-full" }, + { SOUP_DATE_ISO8601, "SOUP_DATE_ISO8601", "iso8601" }, + { SOUP_DATE_ISO8601_XMLRPC, "SOUP_DATE_ISO8601_XMLRPC", "iso8601-xmlrpc" }, + { 0, NULL, NULL } + 
}; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupDateFormat"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_logger_log_level_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_LOGGER_LOG_NONE, "SOUP_LOGGER_LOG_NONE", "none" }, + { SOUP_LOGGER_LOG_MINIMAL, "SOUP_LOGGER_LOG_MINIMAL", "minimal" }, + { SOUP_LOGGER_LOG_HEADERS, "SOUP_LOGGER_LOG_HEADERS", "headers" }, + { SOUP_LOGGER_LOG_BODY, "SOUP_LOGGER_LOG_BODY", "body" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupLoggerLogLevel"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_http_version_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_HTTP_1_0, "SOUP_HTTP_1_0", "http-1-0" }, + { SOUP_HTTP_1_1, "SOUP_HTTP_1_1", "http-1-1" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupHTTPVersion"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_message_flags_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GFlagsValue values[] = { + { SOUP_MESSAGE_NO_REDIRECT, "SOUP_MESSAGE_NO_REDIRECT", "no-redirect" }, + { SOUP_MESSAGE_CAN_REBUILD, "SOUP_MESSAGE_CAN_REBUILD", "can-rebuild" }, + { SOUP_MESSAGE_OVERWRITE_CHUNKS, "SOUP_MESSAGE_OVERWRITE_CHUNKS", "overwrite-chunks" }, + { SOUP_MESSAGE_CONTENT_DECODED, "SOUP_MESSAGE_CONTENT_DECODED", 
"content-decoded" }, + { SOUP_MESSAGE_CERTIFICATE_TRUSTED, "SOUP_MESSAGE_CERTIFICATE_TRUSTED", "certificate-trusted" }, + { SOUP_MESSAGE_NEW_CONNECTION, "SOUP_MESSAGE_NEW_CONNECTION", "new-connection" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_flags_register_static (g_intern_static_string ("SoupMessageFlags"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_memory_use_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_MEMORY_STATIC, "SOUP_MEMORY_STATIC", "static" }, + { SOUP_MEMORY_TAKE, "SOUP_MEMORY_TAKE", "take" }, + { SOUP_MEMORY_COPY, "SOUP_MEMORY_COPY", "copy" }, + { SOUP_MEMORY_TEMPORARY, "SOUP_MEMORY_TEMPORARY", "temporary" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupMemoryUse"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_message_headers_type_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_MESSAGE_HEADERS_REQUEST, "SOUP_MESSAGE_HEADERS_REQUEST", "request" }, + { SOUP_MESSAGE_HEADERS_RESPONSE, "SOUP_MESSAGE_HEADERS_RESPONSE", "response" }, + { SOUP_MESSAGE_HEADERS_MULTIPART, "SOUP_MESSAGE_HEADERS_MULTIPART", "multipart" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupMessageHeadersType"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_encoding_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static 
const GEnumValue values[] = { + { SOUP_ENCODING_UNRECOGNIZED, "SOUP_ENCODING_UNRECOGNIZED", "unrecognized" }, + { SOUP_ENCODING_NONE, "SOUP_ENCODING_NONE", "none" }, + { SOUP_ENCODING_CONTENT_LENGTH, "SOUP_ENCODING_CONTENT_LENGTH", "content-length" }, + { SOUP_ENCODING_EOF, "SOUP_ENCODING_EOF", "eof" }, + { SOUP_ENCODING_CHUNKED, "SOUP_ENCODING_CHUNKED", "chunked" }, + { SOUP_ENCODING_BYTERANGES, "SOUP_ENCODING_BYTERANGES", "byteranges" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupEncoding"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_expectation_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GFlagsValue values[] = { + { SOUP_EXPECTATION_UNRECOGNIZED, "SOUP_EXPECTATION_UNRECOGNIZED", "unrecognized" }, + { SOUP_EXPECTATION_CONTINUE, "SOUP_EXPECTATION_CONTINUE", "continue" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_flags_register_static (g_intern_static_string ("SoupExpectation"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_connection_state_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_CONNECTION_NEW, "SOUP_CONNECTION_NEW", "new" }, + { SOUP_CONNECTION_CONNECTING, "SOUP_CONNECTION_CONNECTING", "connecting" }, + { SOUP_CONNECTION_IDLE, "SOUP_CONNECTION_IDLE", "idle" }, + { SOUP_CONNECTION_IN_USE, "SOUP_CONNECTION_IN_USE", "in-use" }, + { SOUP_CONNECTION_REMOTE_DISCONNECTED, "SOUP_CONNECTION_REMOTE_DISCONNECTED", "remote-disconnected" }, + { SOUP_CONNECTION_DISCONNECTED, "SOUP_CONNECTION_DISCONNECTED", "disconnected" }, + { 0, NULL, NULL } + }; + GType g_define_type_id 
= + g_enum_register_static (g_intern_static_string ("SoupConnectionState"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_requester_error_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_REQUESTER_ERROR_BAD_URI, "SOUP_REQUESTER_ERROR_BAD_URI", "bad-uri" }, + { SOUP_REQUESTER_ERROR_UNSUPPORTED_URI_SCHEME, "SOUP_REQUESTER_ERROR_UNSUPPORTED_URI_SCHEME", "unsupported-uri-scheme" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupRequesterError"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_socket_io_status_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_SOCKET_OK, "SOUP_SOCKET_OK", "ok" }, + { SOUP_SOCKET_WOULD_BLOCK, "SOUP_SOCKET_WOULD_BLOCK", "would-block" }, + { SOUP_SOCKET_EOF, "SOUP_SOCKET_EOF", "eof" }, + { SOUP_SOCKET_ERROR, "SOUP_SOCKET_ERROR", "error" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupSocketIOStatus"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_known_status_code_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_STATUS_NONE, "SOUP_STATUS_NONE", "none" }, + { SOUP_STATUS_CANCELLED, "SOUP_STATUS_CANCELLED", "cancelled" }, + { SOUP_STATUS_CANT_RESOLVE, "SOUP_STATUS_CANT_RESOLVE", "cant-resolve" }, + { SOUP_STATUS_CANT_RESOLVE_PROXY, 
"SOUP_STATUS_CANT_RESOLVE_PROXY", "cant-resolve-proxy" }, + { SOUP_STATUS_CANT_CONNECT, "SOUP_STATUS_CANT_CONNECT", "cant-connect" }, + { SOUP_STATUS_CANT_CONNECT_PROXY, "SOUP_STATUS_CANT_CONNECT_PROXY", "cant-connect-proxy" }, + { SOUP_STATUS_SSL_FAILED, "SOUP_STATUS_SSL_FAILED", "ssl-failed" }, + { SOUP_STATUS_IO_ERROR, "SOUP_STATUS_IO_ERROR", "io-error" }, + { SOUP_STATUS_MALFORMED, "SOUP_STATUS_MALFORMED", "malformed" }, + { SOUP_STATUS_TRY_AGAIN, "SOUP_STATUS_TRY_AGAIN", "try-again" }, + { SOUP_STATUS_TOO_MANY_REDIRECTS, "SOUP_STATUS_TOO_MANY_REDIRECTS", "too-many-redirects" }, + { SOUP_STATUS_TLS_FAILED, "SOUP_STATUS_TLS_FAILED", "tls-failed" }, + { SOUP_STATUS_CONTINUE, "SOUP_STATUS_CONTINUE", "continue" }, + { SOUP_STATUS_SWITCHING_PROTOCOLS, "SOUP_STATUS_SWITCHING_PROTOCOLS", "switching-protocols" }, + { SOUP_STATUS_PROCESSING, "SOUP_STATUS_PROCESSING", "processing" }, + { SOUP_STATUS_OK, "SOUP_STATUS_OK", "ok" }, + { SOUP_STATUS_CREATED, "SOUP_STATUS_CREATED", "created" }, + { SOUP_STATUS_ACCEPTED, "SOUP_STATUS_ACCEPTED", "accepted" }, + { SOUP_STATUS_NON_AUTHORITATIVE, "SOUP_STATUS_NON_AUTHORITATIVE", "non-authoritative" }, + { SOUP_STATUS_NO_CONTENT, "SOUP_STATUS_NO_CONTENT", "no-content" }, + { SOUP_STATUS_RESET_CONTENT, "SOUP_STATUS_RESET_CONTENT", "reset-content" }, + { SOUP_STATUS_PARTIAL_CONTENT, "SOUP_STATUS_PARTIAL_CONTENT", "partial-content" }, + { SOUP_STATUS_MULTI_STATUS, "SOUP_STATUS_MULTI_STATUS", "multi-status" }, + { SOUP_STATUS_MULTIPLE_CHOICES, "SOUP_STATUS_MULTIPLE_CHOICES", "multiple-choices" }, + { SOUP_STATUS_MOVED_PERMANENTLY, "SOUP_STATUS_MOVED_PERMANENTLY", "moved-permanently" }, + { SOUP_STATUS_FOUND, "SOUP_STATUS_FOUND", "found" }, + { SOUP_STATUS_MOVED_TEMPORARILY, "SOUP_STATUS_MOVED_TEMPORARILY", "moved-temporarily" }, + { SOUP_STATUS_SEE_OTHER, "SOUP_STATUS_SEE_OTHER", "see-other" }, + { SOUP_STATUS_NOT_MODIFIED, "SOUP_STATUS_NOT_MODIFIED", "not-modified" }, + { SOUP_STATUS_USE_PROXY, "SOUP_STATUS_USE_PROXY", "use-proxy" }, + 
{ SOUP_STATUS_NOT_APPEARING_IN_THIS_PROTOCOL, "SOUP_STATUS_NOT_APPEARING_IN_THIS_PROTOCOL", "not-appearing-in-this-protocol" }, + { SOUP_STATUS_TEMPORARY_REDIRECT, "SOUP_STATUS_TEMPORARY_REDIRECT", "temporary-redirect" }, + { SOUP_STATUS_BAD_REQUEST, "SOUP_STATUS_BAD_REQUEST", "bad-request" }, + { SOUP_STATUS_UNAUTHORIZED, "SOUP_STATUS_UNAUTHORIZED", "unauthorized" }, + { SOUP_STATUS_PAYMENT_REQUIRED, "SOUP_STATUS_PAYMENT_REQUIRED", "payment-required" }, + { SOUP_STATUS_FORBIDDEN, "SOUP_STATUS_FORBIDDEN", "forbidden" }, + { SOUP_STATUS_NOT_FOUND, "SOUP_STATUS_NOT_FOUND", "not-found" }, + { SOUP_STATUS_METHOD_NOT_ALLOWED, "SOUP_STATUS_METHOD_NOT_ALLOWED", "method-not-allowed" }, + { SOUP_STATUS_NOT_ACCEPTABLE, "SOUP_STATUS_NOT_ACCEPTABLE", "not-acceptable" }, + { SOUP_STATUS_PROXY_AUTHENTICATION_REQUIRED, "SOUP_STATUS_PROXY_AUTHENTICATION_REQUIRED", "proxy-authentication-required" }, + { SOUP_STATUS_PROXY_UNAUTHORIZED, "SOUP_STATUS_PROXY_UNAUTHORIZED", "proxy-unauthorized" }, + { SOUP_STATUS_REQUEST_TIMEOUT, "SOUP_STATUS_REQUEST_TIMEOUT", "request-timeout" }, + { SOUP_STATUS_CONFLICT, "SOUP_STATUS_CONFLICT", "conflict" }, + { SOUP_STATUS_GONE, "SOUP_STATUS_GONE", "gone" }, + { SOUP_STATUS_LENGTH_REQUIRED, "SOUP_STATUS_LENGTH_REQUIRED", "length-required" }, + { SOUP_STATUS_PRECONDITION_FAILED, "SOUP_STATUS_PRECONDITION_FAILED", "precondition-failed" }, + { SOUP_STATUS_REQUEST_ENTITY_TOO_LARGE, "SOUP_STATUS_REQUEST_ENTITY_TOO_LARGE", "request-entity-too-large" }, + { SOUP_STATUS_REQUEST_URI_TOO_LONG, "SOUP_STATUS_REQUEST_URI_TOO_LONG", "request-uri-too-long" }, + { SOUP_STATUS_UNSUPPORTED_MEDIA_TYPE, "SOUP_STATUS_UNSUPPORTED_MEDIA_TYPE", "unsupported-media-type" }, + { SOUP_STATUS_REQUESTED_RANGE_NOT_SATISFIABLE, "SOUP_STATUS_REQUESTED_RANGE_NOT_SATISFIABLE", "requested-range-not-satisfiable" }, + { SOUP_STATUS_INVALID_RANGE, "SOUP_STATUS_INVALID_RANGE", "invalid-range" }, + { SOUP_STATUS_EXPECTATION_FAILED, "SOUP_STATUS_EXPECTATION_FAILED", "expectation-failed" }, + 
{ SOUP_STATUS_UNPROCESSABLE_ENTITY, "SOUP_STATUS_UNPROCESSABLE_ENTITY", "unprocessable-entity" }, + { SOUP_STATUS_LOCKED, "SOUP_STATUS_LOCKED", "locked" }, + { SOUP_STATUS_FAILED_DEPENDENCY, "SOUP_STATUS_FAILED_DEPENDENCY", "failed-dependency" }, + { SOUP_STATUS_INTERNAL_SERVER_ERROR, "SOUP_STATUS_INTERNAL_SERVER_ERROR", "internal-server-error" }, + { SOUP_STATUS_NOT_IMPLEMENTED, "SOUP_STATUS_NOT_IMPLEMENTED", "not-implemented" }, + { SOUP_STATUS_BAD_GATEWAY, "SOUP_STATUS_BAD_GATEWAY", "bad-gateway" }, + { SOUP_STATUS_SERVICE_UNAVAILABLE, "SOUP_STATUS_SERVICE_UNAVAILABLE", "service-unavailable" }, + { SOUP_STATUS_GATEWAY_TIMEOUT, "SOUP_STATUS_GATEWAY_TIMEOUT", "gateway-timeout" }, + { SOUP_STATUS_HTTP_VERSION_NOT_SUPPORTED, "SOUP_STATUS_HTTP_VERSION_NOT_SUPPORTED", "http-version-not-supported" }, + { SOUP_STATUS_INSUFFICIENT_STORAGE, "SOUP_STATUS_INSUFFICIENT_STORAGE", "insufficient-storage" }, + { SOUP_STATUS_NOT_EXTENDED, "SOUP_STATUS_NOT_EXTENDED", "not-extended" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupKnownStatusCode"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_xmlrpc_error_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + static const GEnumValue values[] = { + { SOUP_XMLRPC_ERROR_ARGUMENTS, "SOUP_XMLRPC_ERROR_ARGUMENTS", "arguments" }, + { SOUP_XMLRPC_ERROR_RETVAL, "SOUP_XMLRPC_ERROR_RETVAL", "retval" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupXMLRPCError"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} +GType +soup_xmlrpc_fault_get_type (void) +{ + static volatile gsize g_define_type_id__volatile = 0; + + if (g_once_init_enter (&g_define_type_id__volatile)) + { + 
static const GEnumValue values[] = { + { SOUP_XMLRPC_FAULT_PARSE_ERROR_NOT_WELL_FORMED, "SOUP_XMLRPC_FAULT_PARSE_ERROR_NOT_WELL_FORMED", "parse-error-not-well-formed" }, + { SOUP_XMLRPC_FAULT_PARSE_ERROR_UNSUPPORTED_ENCODING, "SOUP_XMLRPC_FAULT_PARSE_ERROR_UNSUPPORTED_ENCODING", "parse-error-unsupported-encoding" }, + { SOUP_XMLRPC_FAULT_PARSE_ERROR_INVALID_CHARACTER_FOR_ENCODING, "SOUP_XMLRPC_FAULT_PARSE_ERROR_INVALID_CHARACTER_FOR_ENCODING", "parse-error-invalid-character-for-encoding" }, + { SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_XML_RPC, "SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_XML_RPC", "server-error-invalid-xml-rpc" }, + { SOUP_XMLRPC_FAULT_SERVER_ERROR_REQUESTED_METHOD_NOT_FOUND, "SOUP_XMLRPC_FAULT_SERVER_ERROR_REQUESTED_METHOD_NOT_FOUND", "server-error-requested-method-not-found" }, + { SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_METHOD_PARAMETERS, "SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_METHOD_PARAMETERS", "server-error-invalid-method-parameters" }, + { SOUP_XMLRPC_FAULT_SERVER_ERROR_INTERNAL_XML_RPC_ERROR, "SOUP_XMLRPC_FAULT_SERVER_ERROR_INTERNAL_XML_RPC_ERROR", "server-error-internal-xml-rpc-error" }, + { SOUP_XMLRPC_FAULT_APPLICATION_ERROR, "SOUP_XMLRPC_FAULT_APPLICATION_ERROR", "application-error" }, + { SOUP_XMLRPC_FAULT_SYSTEM_ERROR, "SOUP_XMLRPC_FAULT_SYSTEM_ERROR", "system-error" }, + { SOUP_XMLRPC_FAULT_TRANSPORT_ERROR, "SOUP_XMLRPC_FAULT_TRANSPORT_ERROR", "transport-error" }, + { 0, NULL, NULL } + }; + GType g_define_type_id = + g_enum_register_static (g_intern_static_string ("SoupXMLRPCFault"), values); + g_once_init_leave (&g_define_type_id__volatile, g_define_type_id); + } + + return g_define_type_id__volatile; +} + + + diff --git a/libsoup/soup-enum-types.c.tmpl b/libsoup/soup-enum-types.c.tmpl deleted file mode 100644 index 289cef0..0000000 --- a/libsoup/soup-enum-types.c.tmpl +++ /dev/null @@ -1,36 +0,0 @@ -/*** BEGIN file-header ***/ -#include "soup.h" -#define LIBSOUP_USE_UNSTABLE_REQUEST_API -#include "soup-cache.h" -#include 
"soup-requester.h" -/*** END file-header ***/ - -/*** BEGIN file-production ***/ -/* enumerations from "@filename@" */ -/*** END file-production ***/ - -/*** BEGIN value-header ***/ -GType -@enum_name@_get_type (void) -{ - static GType etype = 0; - if (G_UNLIKELY (etype == 0)) { - static const G@Type@Value values[] = { -/*** END value-header ***/ - -/*** BEGIN value-production ***/ - { @VALUENAME@, "@VALUENAME@", "@valuenick@" }, -/*** END value-production ***/ - -/*** BEGIN value-tail ***/ - { 0, NULL, NULL } - }; - etype = g_@type@_register_static (g_intern_static_string ("@EnumName@"), values); - } - return etype; -} - -/*** END value-tail ***/ - -/*** BEGIN file-tail ***/ -/*** END file-tail ***/ diff --git a/libsoup/soup-enum-types.h b/libsoup/soup-enum-types.h new file mode 100644 index 0000000..ebaf3c0 --- /dev/null +++ b/libsoup/soup-enum-types.h @@ -0,0 +1,55 @@ + + + +/* Generated by glib-mkenums. Do not edit */ + +#ifndef __SOUP_ENUM_TYPES_H__ +#define __SOUP_ENUM_TYPES_H__ + +#include + +G_BEGIN_DECLS +GType soup_address_family_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_ADDRESS_FAMILY (soup_address_family_get_type ()) +GType soup_cacheability_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_CACHEABILITY (soup_cacheability_get_type ()) +GType soup_cache_response_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_CACHE_RESPONSE (soup_cache_response_get_type ()) +GType soup_cache_type_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_CACHE_TYPE (soup_cache_type_get_type ()) +GType soup_cookie_jar_accept_policy_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_COOKIE_JAR_ACCEPT_POLICY (soup_cookie_jar_accept_policy_get_type ()) +GType soup_date_format_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_DATE_FORMAT (soup_date_format_get_type ()) +GType soup_logger_log_level_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_LOGGER_LOG_LEVEL (soup_logger_log_level_get_type ()) +GType soup_http_version_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_HTTP_VERSION 
(soup_http_version_get_type ()) +GType soup_message_flags_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_MESSAGE_FLAGS (soup_message_flags_get_type ()) +GType soup_memory_use_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_MEMORY_USE (soup_memory_use_get_type ()) +GType soup_message_headers_type_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_MESSAGE_HEADERS_TYPE (soup_message_headers_type_get_type ()) +GType soup_encoding_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_ENCODING (soup_encoding_get_type ()) +GType soup_expectation_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_EXPECTATION (soup_expectation_get_type ()) +GType soup_connection_state_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_CONNECTION_STATE (soup_connection_state_get_type ()) +GType soup_requester_error_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_REQUESTER_ERROR (soup_requester_error_get_type ()) +GType soup_socket_io_status_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_SOCKET_IO_STATUS (soup_socket_io_status_get_type ()) +GType soup_known_status_code_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_KNOWN_STATUS_CODE (soup_known_status_code_get_type ()) +GType soup_xmlrpc_error_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_XMLRPC_ERROR (soup_xmlrpc_error_get_type ()) +GType soup_xmlrpc_fault_get_type (void) G_GNUC_CONST; +#define SOUP_TYPE_XMLRPC_FAULT (soup_xmlrpc_fault_get_type ()) +G_END_DECLS + +#endif /* __SOUP_ENUM_TYPES_H__ */ + + + diff --git a/libsoup/soup-enum-types.h.tmpl b/libsoup/soup-enum-types.h.tmpl deleted file mode 100644 index e18d7e0..0000000 --- a/libsoup/soup-enum-types.h.tmpl +++ /dev/null @@ -1,24 +0,0 @@ -/*** BEGIN file-header ***/ -#ifndef __SOUP_ENUM_TYPES_H__ -#define __SOUP_ENUM_TYPES_H__ - -#include - -G_BEGIN_DECLS -/*** END file-header ***/ - -/*** BEGIN file-production ***/ - -/* enumerations from "@filename@" */ -/*** END file-production ***/ - -/*** BEGIN value-header ***/ -GType @enum_name@_get_type (void) G_GNUC_CONST; -#define 
SOUP_TYPE_@ENUMSHORT@ (@enum_name@_get_type ()) -/*** END value-header ***/ - -/*** BEGIN file-tail ***/ -G_END_DECLS - -#endif /* __SOUP_ENUM_TYPES_H__ */ -/*** END file-tail ***/ diff --git a/libsoup/soup-form.c b/libsoup/soup-form.c index 9e52793..a781d3c 100644 --- a/libsoup/soup-form.c +++ b/libsoup/soup-form.c @@ -115,10 +115,10 @@ soup_form_decode (const char *encoded_form) /** * soup_form_decode_multipart: * @msg: a #SoupMessage containing a "multipart/form-data" request body - * @file_control_name: the name of the HTML file upload control, or %NULL - * @filename: (out): return location for the name of the uploaded file - * @content_type: (out): return location for the MIME type of the uploaded file - * @file: (out): return location for the uploaded file data + * @file_control_name: (allow-none): the name of the HTML file upload control, or %NULL + * @filename: (out) (allow-none): return location for the name of the uploaded file, or %NULL + * @content_type: (out) (allow-none): return location for the MIME type of the uploaded file, or %NULL + * @file: (out) (allow-none): return location for the uploaded file data, or %NULL * * Decodes the "multipart/form-data" request in @msg; this is a * convenience method for the case when you have a single file upload @@ -130,7 +130,7 @@ soup_form_decode (const char *encoded_form) * control data will be returned (as strings, as with * soup_form_decode()) in the returned #GHashTable. * - * You may pass %NULL for @filename and/or @content_type if you do not + * You may pass %NULL for @filename, @content_type and/or @file if you do not * care about those fields. soup_form_decode_multipart() may also * return %NULL in those fields if the client did not provide that * information. 
You must free the returned filename and content-type @@ -159,6 +159,8 @@ soup_form_decode_multipart (SoupMessage *msg, const char *file_control_name, char *disposition, *name; int i; + g_return_val_if_fail (SOUP_IS_MESSAGE (msg), NULL); + multipart = soup_multipart_new_from_message (msg->request_headers, msg->request_body); if (!multipart) @@ -168,7 +170,8 @@ soup_form_decode_multipart (SoupMessage *msg, const char *file_control_name, *filename = NULL; if (content_type) *content_type = NULL; - *file = NULL; + if (file) + *file = NULL; form_data_set = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, g_free); @@ -185,7 +188,7 @@ soup_form_decode_multipart (SoupMessage *msg, const char *file_control_name, continue; } - if (!strcmp (name, file_control_name)) { + if (file_control_name && !strcmp (name, file_control_name)) { if (filename) *filename = g_strdup (g_hash_table_lookup (params, "filename")); if (content_type) diff --git a/libsoup/soup-gnome-features.c b/libsoup/soup-gnome-features.c index 22a426b..798712a 100644 --- a/libsoup/soup-gnome-features.c +++ b/libsoup/soup-gnome-features.c @@ -14,7 +14,7 @@ /** * SOUP_TYPE_PROXY_RESOLVER_GNOME: * - * This returns the #GType of a #SoupProxyResolver that can be used to + * This returns the #GType of a #SoupProxyURIResolver that can be used to * resolve HTTP proxies for GNOME applications. You can add this to * a session using soup_session_add_feature_by_type() or by using the * %SOUP_SESSION_ADD_FEATURE_BY_TYPE construct-time property. diff --git a/libsoup/soup-headers.c b/libsoup/soup-headers.c index 6baf70b..965f9da 100644 --- a/libsoup/soup-headers.c +++ b/libsoup/soup-headers.c @@ -17,8 +17,8 @@ /** * soup_headers_parse: * @str: the header string (including the Request-Line or Status-Line, - * and the trailing blank line) - * @len: length of @str up to (but not including) the terminating blank line. 
+ * but not the trailing blank line) + * @len: length of @str * @dest: #SoupMessageHeaders to store the header values in * * Parses the headers of an HTTP request or response in @str and @@ -37,15 +37,14 @@ soup_headers_parse (const char *str, int len, SoupMessageHeaders *dest) { const char *headers_start; char *headers_copy, *name, *name_end, *value, *value_end; - char *eol, *sol; + char *eol, *sol, *p; gboolean success = FALSE; g_return_val_if_fail (str != NULL, FALSE); g_return_val_if_fail (dest != NULL, FALSE); - /* Technically, the grammar does allow NUL bytes in the - * headers, but this is probably a bug, and if it's not, we - * can't deal with them anyway. + /* RFC 2616 does allow NUL bytes in the headers, but httpbis + * is changing that, and we can't deal with them anyway. */ if (memchr (str, '\0', len)) return FALSE; @@ -70,11 +69,16 @@ soup_headers_parse (const char *str, int len, SoupMessageHeaders *dest) while (*(value_end + 1)) { name = value_end + 1; name_end = strchr (name, ':'); - if (!name_end || name + strcspn (name, " \t\r\n") < name_end) { - /* Bad header; just ignore this line. Note - * that if it has continuation lines, we'll - * end up ignoring them too since they'll - * start with spaces. + + /* Reject if there is no ':', or the header name is + * empty, or it contains whitespace. + */ + if (!name_end || + name_end == name || + name + strcspn (name, " \t\r\n") < name_end) { + /* Ignore this line. Note that if it has + * continuation lines, we'll end up ignoring + * them too since they'll start with spaces. 
*/ value_end = strchr (name, '\n'); if (!value_end) @@ -127,6 +131,10 @@ soup_headers_parse (const char *str, int len, SoupMessageHeaders *dest) eol--; *eol = '\0'; + /* convert (illegal) '\r's to spaces */ + for (p = strchr (value, '\r'); p; p = strchr (p, '\r')) + *p = ' '; + soup_message_headers_append (dest, name, value); } success = TRUE; @@ -138,8 +146,8 @@ done: /** * soup_headers_parse_request: - * @str: the header string (including the trailing blank line) - * @len: length of @str up to (but not including) the terminating blank line. + * @str: the headers (up to, but not including, the trailing blank line) + * @len: length of @str * @req_headers: #SoupMessageHeaders to store the header values in * @req_method: (out) (allow-none): if non-%NULL, will be filled in with the * request method @@ -169,7 +177,7 @@ soup_headers_parse_request (const char *str, unsigned long major_version, minor_version; char *p; - g_return_val_if_fail (str && *str, SOUP_STATUS_MALFORMED); + g_return_val_if_fail (str != NULL, SOUP_STATUS_MALFORMED); /* RFC 2616 4.1 "servers SHOULD ignore any empty line(s) * received where a Request-Line is expected." @@ -325,9 +333,9 @@ soup_headers_parse_status_line (const char *status_line, /** * soup_headers_parse_response: - * @str: the header string (including the trailing blank line) - * @len: length of @str up to (but not including) the terminating blank line. 
- * @headers: #SoupMessageheaders to store the header values in + * @str: the headers (up to, but not including, the trailing blank line) + * @len: length of @str + * @headers: #SoupMessageHeaders to store the header values in * @ver: (out) (allow-none): if non-%NULL, will be filled in with the HTTP * version * @status_code: (out) (allow-none): if non-%NULL, will be filled in with @@ -352,7 +360,7 @@ soup_headers_parse_response (const char *str, { SoupHTTPVersion version; - g_return_val_if_fail (str && *str, FALSE); + g_return_val_if_fail (str != NULL, FALSE); /* Workaround for broken servers that send extra line breaks * after a response, which we then see prepended to the next diff --git a/libsoup/soup-http-input-stream.c b/libsoup/soup-http-input-stream.c index 6aa153d..c0337e9 100644 --- a/libsoup/soup-http-input-stream.c +++ b/libsoup/soup-http-input-stream.c @@ -27,13 +27,11 @@ #include #include "soup-http-input-stream.h" +#include "soup-headers.h" +#include "soup-content-sniffer.h" #include "soup-session.h" -static void soup_http_input_stream_seekable_iface_init (GSeekableIface *seekable_iface); - -G_DEFINE_TYPE_WITH_CODE (SoupHTTPInputStream, soup_http_input_stream, G_TYPE_INPUT_STREAM, - G_IMPLEMENT_INTERFACE (G_TYPE_SEEKABLE, - soup_http_input_stream_seekable_iface_init)) +G_DEFINE_TYPE (SoupHTTPInputStream, soup_http_input_stream, G_TYPE_INPUT_STREAM) typedef void (*SoupHTTPInputStreamCallback)(GInputStream *); @@ -45,19 +43,20 @@ typedef struct { goffset offset; GCancellable *cancellable; - GSource *cancel_watch; + guint cancel_id; SoupHTTPInputStreamCallback got_headers_cb; SoupHTTPInputStreamCallback got_chunk_cb; SoupHTTPInputStreamCallback finished_cb; SoupHTTPInputStreamCallback cancelled_cb; - guchar *leftover_buffer; - gsize leftover_bufsize, leftover_offset; + GQueue *leftover_queue; guchar *caller_buffer; gsize caller_bufsize, caller_nread; GAsyncReadyCallback outstanding_callback; GSimpleAsyncResult *result; + + char *sniffed_content_type; } 
SoupHTTPInputStreamPrivate; #define SOUP_HTTP_INPUT_STREAM_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), SOUP_TYPE_HTTP_INPUT_STREAM, SoupHTTPInputStreamPrivate)) @@ -89,23 +88,10 @@ static gboolean soup_http_input_stream_close_finish (GInputStream *strea GAsyncResult *result, GError **error); -static goffset soup_http_input_stream_tell (GSeekable *seekable); - -static gboolean soup_http_input_stream_can_seek (GSeekable *seekable); -static gboolean soup_http_input_stream_seek (GSeekable *seekable, - goffset offset, - GSeekType type, - GCancellable *cancellable, - GError **error); - -static gboolean soup_http_input_stream_can_truncate (GSeekable *seekable); -static gboolean soup_http_input_stream_truncate (GSeekable *seekable, - goffset offset, - GCancellable *cancellable, - GError **error); - static void soup_http_input_stream_got_headers (SoupMessage *msg, gpointer stream); +static void soup_http_input_stream_content_sniffed (SoupMessage *msg, const char *content_type, GHashTable *params, gpointer stream); static void soup_http_input_stream_got_chunk (SoupMessage *msg, SoupBuffer *chunk, gpointer stream); +static void soup_http_input_stream_restarted (SoupMessage *msg, gpointer stream); static void soup_http_input_stream_finished (SoupMessage *msg, gpointer stream); static void @@ -117,10 +103,16 @@ soup_http_input_stream_finalize (GObject *object) g_object_unref (priv->session); g_signal_handlers_disconnect_by_func (priv->msg, G_CALLBACK (soup_http_input_stream_got_headers), stream); + g_signal_handlers_disconnect_by_func (priv->msg, G_CALLBACK (soup_http_input_stream_content_sniffed), stream); g_signal_handlers_disconnect_by_func (priv->msg, G_CALLBACK (soup_http_input_stream_got_chunk), stream); + g_signal_handlers_disconnect_by_func (priv->msg, G_CALLBACK (soup_http_input_stream_restarted), stream); g_signal_handlers_disconnect_by_func (priv->msg, G_CALLBACK (soup_http_input_stream_finished), stream); g_object_unref (priv->msg); - g_free 
(priv->leftover_buffer); + + g_queue_foreach (priv->leftover_queue, (GFunc) soup_buffer_free, NULL); + g_queue_free (priv->leftover_queue); + + g_free (priv->sniffed_content_type); if (G_OBJECT_CLASS (soup_http_input_stream_parent_class)->finalize) (*G_OBJECT_CLASS (soup_http_input_stream_parent_class)->finalize)(object); @@ -145,19 +137,11 @@ soup_http_input_stream_class_init (SoupHTTPInputStreamClass *klass) } static void -soup_http_input_stream_seekable_iface_init (GSeekableIface *seekable_iface) -{ - seekable_iface->tell = soup_http_input_stream_tell; - seekable_iface->can_seek = soup_http_input_stream_can_seek; - seekable_iface->seek = soup_http_input_stream_seek; - seekable_iface->can_truncate = soup_http_input_stream_can_truncate; - seekable_iface->truncate_fn = soup_http_input_stream_truncate; -} - -static void soup_http_input_stream_init (SoupHTTPInputStream *stream) { - ; + SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); + + priv->leftover_queue = g_queue_new (); } static void @@ -167,6 +151,20 @@ soup_http_input_stream_queue_message (SoupHTTPInputStream *stream) priv->got_headers = priv->finished = FALSE; + if (soup_session_get_feature_for_message (priv->session, SOUP_TYPE_CONTENT_SNIFFER, priv->msg)) { + g_signal_connect (priv->msg, "content_sniffed", + G_CALLBACK (soup_http_input_stream_content_sniffed), stream); + } else { + g_signal_connect (priv->msg, "got_headers", + G_CALLBACK (soup_http_input_stream_got_headers), stream); + } + g_signal_connect (priv->msg, "got_chunk", + G_CALLBACK (soup_http_input_stream_got_chunk), stream); + g_signal_connect (priv->msg, "restarted", + G_CALLBACK (soup_http_input_stream_restarted), stream); + g_signal_connect (priv->msg, "finished", + G_CALLBACK (soup_http_input_stream_finished), stream); + /* Add an extra ref since soup_session_queue_message steals one */ g_object_ref (priv->msg); soup_session_queue_message (priv->session, priv->msg, NULL, NULL); @@ -198,7 +196,7 @@ 
soup_http_input_stream_queue_message (SoupHTTPInputStream *stream) * * Returns: a new #GInputStream. **/ -SoupHTTPInputStream * +GInputStream * soup_http_input_stream_new (SoupSession *session, SoupMessage *msg) { SoupHTTPInputStream *stream; @@ -213,15 +211,7 @@ soup_http_input_stream_new (SoupSession *session, SoupMessage *msg) priv->async_context = soup_session_get_async_context (session); priv->msg = g_object_ref (msg); - g_signal_connect (msg, "got_headers", - G_CALLBACK (soup_http_input_stream_got_headers), stream); - g_signal_connect (msg, "got_chunk", - G_CALLBACK (soup_http_input_stream_got_chunk), stream); - g_signal_connect (msg, "finished", - G_CALLBACK (soup_http_input_stream_finished), stream); - - soup_http_input_stream_queue_message (stream); - return stream; + return (GInputStream *)stream; } static void @@ -229,12 +219,13 @@ soup_http_input_stream_got_headers (SoupMessage *msg, gpointer stream) { SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); - /* If the status is unsuccessful, we just ignore the signal and let - * libsoup keep going (eventually either it will requeue the request - * (after handling authentication/redirection), or else the - * "finished" handler will run). + /* If the message is expected to be restarted then we read the + * whole message first and hope it does get restarted, but + * if it doesn't, then we stream the body belatedly. 
*/ - if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) + if (msg->status_code == SOUP_STATUS_UNAUTHORIZED || + msg->status_code == SOUP_STATUS_PROXY_UNAUTHORIZED || + soup_session_would_redirect (priv->session, msg)) return; priv->got_headers = TRUE; @@ -248,25 +239,39 @@ soup_http_input_stream_got_headers (SoupMessage *msg, gpointer stream) } static void +soup_http_input_stream_content_sniffed (SoupMessage *msg, const char *content_type, + GHashTable *params, gpointer stream) +{ + SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); + GString *sniffed_type; + + sniffed_type = g_string_new (content_type); + if (params) { + GHashTableIter iter; + gpointer key, value; + + g_hash_table_iter_init (&iter, params); + while (g_hash_table_iter_next (&iter, &key, &value)) { + g_string_append (sniffed_type, "; "); + soup_header_g_string_append_param (sniffed_type, key, value); + } + } + g_free (priv->sniffed_content_type); + priv->sniffed_content_type = g_string_free (sniffed_type, FALSE); + + soup_http_input_stream_got_headers (msg, stream); +} + +static void soup_http_input_stream_got_chunk (SoupMessage *msg, SoupBuffer *chunk_buffer, - gpointer stream) + gpointer stream) { SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); const gchar *chunk = chunk_buffer->data; gsize chunk_size = chunk_buffer->length; - /* We only pay attention to the chunk if it's part of a successful - * response. 
- */ - if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) - return; - - /* Sanity check */ - if (priv->caller_bufsize == 0 || priv->leftover_bufsize != 0) - g_warning ("soup_http_input_stream_got_chunk called again before previous chunk was processed"); - /* Copy what we can into priv->caller_buffer */ - if (priv->caller_bufsize > priv->caller_nread) { + if (priv->caller_bufsize > priv->caller_nread && priv->leftover_queue->length == 0) { gsize nread = MIN (chunk_size, priv->caller_bufsize - priv->caller_nread); memcpy (priv->caller_buffer + priv->caller_nread, chunk, nread); @@ -277,26 +282,33 @@ soup_http_input_stream_got_chunk (SoupMessage *msg, SoupBuffer *chunk_buffer, } if (chunk_size > 0) { - /* Copy the rest into priv->leftover_buffer. If - * there's already some data there, realloc and - * append. Otherwise just copy. - */ - if (priv->leftover_bufsize) { - priv->leftover_buffer = g_realloc (priv->leftover_buffer, - priv->leftover_bufsize + chunk_size); - memcpy (priv->leftover_buffer + priv->leftover_bufsize, - chunk, chunk_size); - priv->leftover_bufsize += chunk_size; + if (priv->leftover_queue->length > 0) { + g_queue_push_tail (priv->leftover_queue, soup_buffer_copy (chunk_buffer)); } else { - priv->leftover_bufsize = chunk_size; - priv->leftover_buffer = g_memdup (chunk, chunk_size); - priv->leftover_offset = 0; + g_queue_push_head (priv->leftover_queue, + soup_buffer_new_subbuffer (chunk_buffer, + chunk_buffer->length - chunk_size, + chunk_size)); } } - soup_session_pause_message (priv->session, msg); - if (priv->got_chunk_cb) - priv->got_chunk_cb (stream); + if (priv->got_headers) { + soup_session_pause_message (priv->session, msg); + if (priv->got_chunk_cb) + priv->got_chunk_cb (stream); + } +} + +static void +soup_http_input_stream_restarted (SoupMessage *msg, gpointer stream) +{ + SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); + GList *q; + + /* Throw away any pending read data */ + for (q = 
priv->leftover_queue->head; q; q = q->next) + soup_buffer_free (q->data); + g_queue_clear (priv->leftover_queue); } static void @@ -304,45 +316,41 @@ soup_http_input_stream_finished (SoupMessage *msg, gpointer stream) { SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); + priv->got_headers = TRUE; priv->finished = TRUE; if (priv->finished_cb) priv->finished_cb (stream); } -static gboolean -soup_http_input_stream_cancelled (GIOChannel *chan, GIOCondition condition, - gpointer stream) +static void +soup_http_input_stream_cancelled (GCancellable *cancellable, + gpointer user_data) { + SoupHTTPInputStream *stream = user_data; SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); - priv->cancel_watch = NULL; + g_signal_handler_disconnect (cancellable, priv->cancel_id); + priv->cancel_id = 0; soup_session_pause_message (priv->session, priv->msg); if (priv->cancelled_cb) - priv->cancelled_cb (stream); - - return FALSE; + priv->cancelled_cb (G_INPUT_STREAM (stream)); } static void soup_http_input_stream_prepare_for_io (GInputStream *stream, GCancellable *cancellable, guchar *buffer, - gsize count) + gsize count) { SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); - int cancel_fd; priv->cancellable = cancellable; - cancel_fd = g_cancellable_get_fd (cancellable); - if (cancel_fd != -1) { - GIOChannel *chan = g_io_channel_unix_new (cancel_fd); - priv->cancel_watch = soup_add_io_watch (priv->async_context, chan, - G_IO_IN | G_IO_ERR | G_IO_HUP, - soup_http_input_stream_cancelled, - stream); - g_io_channel_unref (chan); + if (cancellable) { + priv->cancel_id = g_signal_connect (cancellable, "cancelled", + G_CALLBACK (soup_http_input_stream_cancelled), + stream); } priv->caller_buffer = buffer; @@ -358,10 +366,9 @@ soup_http_input_stream_done_io (GInputStream *stream) { SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); - if (priv->cancel_watch) { - g_source_destroy 
(priv->cancel_watch); - priv->cancel_watch = NULL; - g_cancellable_release_fd (priv->cancellable); + if (priv->cancel_id) { + g_signal_handler_disconnect (priv->cancellable, priv->cancel_id); + priv->cancel_id = 0; } priv->cancellable = NULL; @@ -372,7 +379,7 @@ soup_http_input_stream_done_io (GInputStream *stream) static gboolean set_error_if_http_failed (SoupMessage *msg, GError **error) { - if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { + if (SOUP_STATUS_IS_TRANSPORT_ERROR (msg->status_code)) { g_set_error_literal (error, SOUP_HTTP_ERROR, msg->status_code, msg->reason_phrase); return TRUE; @@ -385,19 +392,17 @@ read_from_leftover (SoupHTTPInputStreamPrivate *priv, gpointer buffer, gsize bufsize) { gsize nread; + SoupBuffer *soup_buffer = (SoupBuffer *) g_queue_peek_head (priv->leftover_queue); + gboolean fits_in_buffer = soup_buffer->length <= bufsize; - if (priv->leftover_bufsize - priv->leftover_offset <= bufsize) { - nread = priv->leftover_bufsize - priv->leftover_offset; - memcpy (buffer, priv->leftover_buffer + priv->leftover_offset, nread); + nread = fits_in_buffer ? 
soup_buffer->length : bufsize; + memcpy (buffer, soup_buffer->data, nread); - g_free (priv->leftover_buffer); - priv->leftover_buffer = NULL; - priv->leftover_bufsize = priv->leftover_offset = 0; - } else { - nread = bufsize; - memcpy (buffer, priv->leftover_buffer + priv->leftover_offset, nread); - priv->leftover_offset += nread; - } + g_queue_pop_head (priv->leftover_queue); + if (!fits_in_buffer) + g_queue_push_head (priv->leftover_queue, + soup_buffer_new_subbuffer (soup_buffer, nread, soup_buffer->length - nread)); + soup_buffer_free (soup_buffer); priv->offset += nread; return nread; @@ -431,10 +436,6 @@ static void send_sync_finished (GInputStream *stream) { SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); - GError *error = NULL; - - if (!g_cancellable_set_error_if_cancelled (priv->cancellable, &error)) - set_error_if_http_failed (priv->msg, &error); priv->got_headers_cb = NULL; priv->finished_cb = NULL; @@ -468,6 +469,8 @@ soup_http_input_stream_send (SoupHTTPInputStream *httpstream, g_return_val_if_fail (SOUP_IS_HTTP_INPUT_STREAM (httpstream), FALSE); + soup_http_input_stream_queue_message (httpstream); + if (!g_input_stream_set_pending (istream, error)) return FALSE; @@ -490,7 +493,7 @@ soup_http_input_stream_read (GInputStream *stream, SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); /* If there is data leftover from a previous read, return it. 
*/ - if (priv->leftover_bufsize) + if (priv->leftover_queue->length) return read_from_leftover (priv, buffer, count); if (priv->finished) @@ -521,8 +524,10 @@ soup_http_input_stream_close (GInputStream *stream, { SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (stream); - if (!priv->finished) + if (!priv->finished) { + soup_session_unpause_message (priv->session, priv->msg); soup_session_cancel_message (priv->session, priv->msg, SOUP_STATUS_CANCELLED); + } return TRUE; } @@ -536,7 +541,7 @@ wrapper_callback (GObject *source_object, GAsyncResult *res, g_input_stream_clear_pending (stream); if (priv->outstanding_callback) - (*priv->outstanding_callback)(source_object, res, user_data); + (*priv->outstanding_callback) (source_object, res, user_data); priv->outstanding_callback = NULL; g_object_unref (stream); } @@ -559,10 +564,8 @@ send_async_finished (GInputStream *stream) priv->result = NULL; g_simple_async_result_set_op_res_gboolean (result, error == NULL); - if (error) { - g_simple_async_result_set_from_error (result, error); - g_error_free (error); - } + if (error) + g_simple_async_result_take_error (result, error); g_simple_async_result_complete (result); g_object_unref (result); } @@ -615,12 +618,13 @@ soup_http_input_stream_send_async (SoupHTTPInputStream *httpstream, g_return_if_fail (SOUP_IS_HTTP_INPUT_STREAM (httpstream)); + soup_http_input_stream_queue_message (httpstream); + if (!g_input_stream_set_pending (istream, &error)) { - g_simple_async_report_gerror_in_idle (G_OBJECT (httpstream), - callback, - user_data, - error); - g_error_free (error); + g_simple_async_report_take_gerror_in_idle (G_OBJECT (httpstream), + callback, + user_data, + error); return; } soup_http_input_stream_send_async_internal (istream, io_priority, cancellable, @@ -668,10 +672,9 @@ read_async_done (GInputStream *stream) priv->result = NULL; if (g_cancellable_set_error_if_cancelled (priv->cancellable, &error) || - set_error_if_http_failed (priv->msg, &error)) { 
- g_simple_async_result_set_from_error (result, error); - g_error_free (error); - } else + set_error_if_http_failed (priv->msg, &error)) + g_simple_async_result_take_error (result, error); + else g_simple_async_result_set_op_res_gssize (result, priv->caller_nread); priv->got_chunk_cb = NULL; @@ -701,7 +704,7 @@ soup_http_input_stream_read_async (GInputStream *stream, callback, user_data, soup_http_input_stream_read_async); - if (priv->leftover_bufsize) { + if (priv->leftover_queue->length) { gsize nread = read_from_leftover (priv, buffer, count); g_simple_async_result_set_op_res_gssize (result, nread); g_simple_async_result_complete_in_idle (result); @@ -754,10 +757,8 @@ soup_http_input_stream_close_async (GInputStream *stream, soup_http_input_stream_close_async); success = soup_http_input_stream_close (stream, cancellable, &error); g_simple_async_result_set_op_res_gboolean (result, success); - if (error) { - g_simple_async_result_set_from_error (result, error); - g_error_free (error); - } + if (error) + g_simple_async_result_take_error (result, error); g_simple_async_result_complete_in_idle (result); g_object_unref (result); @@ -772,100 +773,21 @@ soup_http_input_stream_close_finish (GInputStream *stream, return TRUE; } -static goffset -soup_http_input_stream_tell (GSeekable *seekable) -{ - SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (seekable); - - return priv->offset; -} - -static gboolean -soup_http_input_stream_can_seek (GSeekable *seekable) -{ - return TRUE; -} - -extern void soup_message_io_cleanup (SoupMessage *msg); - -static gboolean -soup_http_input_stream_seek (GSeekable *seekable, - goffset offset, - GSeekType type, - GCancellable *cancellable, - GError **error) +SoupMessage * +soup_http_input_stream_get_message (SoupHTTPInputStream *httpstream) { - GInputStream *stream = G_INPUT_STREAM (seekable); - SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (seekable); - char *range; - - if (type == G_SEEK_END) { - 
/* FIXME: we could send "bytes=-offset", but unless we - * know the Content-Length, we wouldn't be able to - * answer a tell() properly. We could find the - * Content-Length by doing a HEAD... - */ - - g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED, - "G_SEEK_END not currently supported"); - return FALSE; - } - - if (!g_input_stream_set_pending (stream, error)) - return FALSE; - - soup_session_cancel_message (priv->session, priv->msg, SOUP_STATUS_CANCELLED); - soup_message_io_cleanup (priv->msg); - - switch (type) { - case G_SEEK_CUR: - offset += priv->offset; - /* fall through */ - - case G_SEEK_SET: - range = g_strdup_printf ("bytes=%" G_GUINT64_FORMAT "-", (guint64)offset); - priv->offset = offset; - break; - - case G_SEEK_END: - range = NULL; /* keep compilers happy */ - g_return_val_if_reached (FALSE); - break; - - default: - g_return_val_if_reached (FALSE); - } - - soup_message_headers_remove (priv->msg->request_headers, "Range"); - soup_message_headers_append (priv->msg->request_headers, "Range", range); - g_free (range); - - soup_http_input_stream_queue_message (SOUP_HTTP_INPUT_STREAM (stream)); - - g_input_stream_clear_pending (stream); - return TRUE; + SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (httpstream); + return priv->msg ? 
g_object_ref (priv->msg) : NULL; } -static gboolean -soup_http_input_stream_can_truncate (GSeekable *seekable) +const char * +soup_http_input_stream_get_content_type (SoupHTTPInputStream *httpstream) { - return FALSE; -} + SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (httpstream); -static gboolean -soup_http_input_stream_truncate (GSeekable *seekable, - goffset offset, - GCancellable *cancellable, - GError **error) -{ - g_set_error_literal (error, G_IO_ERROR, G_IO_ERROR_NOT_SUPPORTED, - "Truncate not allowed on input stream"); - return FALSE; -} + if (priv->sniffed_content_type) + return priv->sniffed_content_type; + else + return soup_message_headers_get_content_type (priv->msg->response_headers, NULL); -SoupMessage * -soup_http_input_stream_get_message (SoupHTTPInputStream *httpstream) -{ - SoupHTTPInputStreamPrivate *priv = SOUP_HTTP_INPUT_STREAM_GET_PRIVATE (httpstream); - return priv->msg ? g_object_ref (priv->msg) : NULL; } diff --git a/libsoup/soup-http-input-stream.h b/libsoup/soup-http-input-stream.h index 4f23e93..b6c598c 100644 --- a/libsoup/soup-http-input-stream.h +++ b/libsoup/soup-http-input-stream.h @@ -54,23 +54,25 @@ struct SoupHTTPInputStreamClass { GType soup_http_input_stream_get_type (void) G_GNUC_CONST; -SoupHTTPInputStream *soup_http_input_stream_new (SoupSession *session, - SoupMessage *msg); - -gboolean soup_http_input_stream_send (SoupHTTPInputStream *httpstream, - GCancellable *cancellable, - GError **error); - -void soup_http_input_stream_send_async (SoupHTTPInputStream *httpstream, - int io_priority, - GCancellable *cancellable, - GAsyncReadyCallback callback, - gpointer user_data); -gboolean soup_http_input_stream_send_finish (SoupHTTPInputStream *httpstream, - GAsyncResult *result, - GError **error); - -SoupMessage *soup_http_input_stream_get_message (SoupHTTPInputStream *httpstream); +GInputStream *soup_http_input_stream_new (SoupSession *session, + SoupMessage *msg); + +gboolean soup_http_input_stream_send 
(SoupHTTPInputStream *httpstream, + GCancellable *cancellable, + GError **error); + +void soup_http_input_stream_send_async (SoupHTTPInputStream *httpstream, + int io_priority, + GCancellable *cancellable, + GAsyncReadyCallback callback, + gpointer user_data); +gboolean soup_http_input_stream_send_finish (SoupHTTPInputStream *httpstream, + GAsyncResult *result, + GError **error); + +SoupMessage *soup_http_input_stream_get_message (SoupHTTPInputStream *httpstream); + +const char *soup_http_input_stream_get_content_type (SoupHTTPInputStream *httpstream); G_END_DECLS diff --git a/libsoup/soup-logger.c b/libsoup/soup-logger.c index 7cdf2ed..536f327 100644 --- a/libsoup/soup-logger.c +++ b/libsoup/soup-logger.c @@ -35,8 +35,8 @@ * and then attach it to a session (or multiple sessions) with * soup_session_add_feature(). * - * By default, the debugging output is sent to %stdout, and looks - * something like: + * By default, the debugging output is sent to + * stdout, and looks something like: * * * > POST /unauth HTTP/1.1 @@ -98,7 +98,7 @@ typedef struct { /* We use a mutex so that if requests are being run in * multiple threads, we don't mix up the output. 
*/ - GMutex *lock; + GMutex lock; GQuark tag; GHashTable *ids; @@ -125,7 +125,7 @@ soup_logger_init (SoupLogger *logger) { SoupLoggerPrivate *priv = SOUP_LOGGER_GET_PRIVATE (logger); - priv->lock = g_mutex_new (); + g_mutex_init (&priv->lock); priv->tag = g_quark_from_static_string (g_strdup_printf ("SoupLogger-%p", logger)); priv->ids = g_hash_table_new (NULL, NULL); } @@ -144,7 +144,7 @@ finalize (GObject *object) if (priv->printer_dnotify) priv->printer_dnotify (priv->printer_data); - g_mutex_free (priv->lock); + g_mutex_clear (&priv->lock); G_OBJECT_CLASS (soup_logger_parent_class)->finalize (object); } @@ -310,7 +310,7 @@ soup_logger_set_response_filter (SoupLogger *logger, * @destroy: a #GDestroyNotify to free @printer_data * * Sets up an alternate log printing routine, if you don't want - * the log to go to %stdout. + * the log to go to stdout. **/ void soup_logger_set_printer (SoupLogger *logger, @@ -422,10 +422,21 @@ soup_logger_print (SoupLogger *logger, SoupLoggerLogLevel level, static void soup_logger_print_basic_auth (SoupLogger *logger, const char *value) { - char *decoded, *p; + char *decoded, *decoded_utf8, *p; gsize len; decoded = (char *)g_base64_decode (value + 6, &len); + if (decoded && !g_utf8_validate (decoded, -1, NULL)) { + decoded_utf8 = g_convert_with_fallback (decoded, -1, + "UTF-8", "ISO-8859-1", + NULL, NULL, &len, + NULL); + if (decoded_utf8) { + g_free (decoded); + decoded = decoded_utf8; + } + } + if (!decoded) decoded = g_strdup (value); p = strchr (decoded, ':'); @@ -576,7 +587,7 @@ got_informational (SoupMessage *msg, gpointer user_data) SoupLogger *logger = user_data; SoupLoggerPrivate *priv = SOUP_LOGGER_GET_PRIVATE (logger); - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); print_response (logger, msg); soup_logger_print (logger, SOUP_LOGGER_LOG_MINIMAL, ' ', ""); @@ -601,7 +612,7 @@ got_informational (SoupMessage *msg, gpointer user_data) soup_logger_print (logger, SOUP_LOGGER_LOG_MINIMAL, ' ', ""); } - g_mutex_unlock 
(priv->lock); + g_mutex_unlock (&priv->lock); } static void @@ -610,12 +621,12 @@ got_body (SoupMessage *msg, gpointer user_data) SoupLogger *logger = user_data; SoupLoggerPrivate *priv = SOUP_LOGGER_GET_PRIVATE (logger); - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); print_response (logger, msg); soup_logger_print (logger, SOUP_LOGGER_LOG_MINIMAL, ' ', ""); - g_mutex_unlock (priv->lock); + g_mutex_unlock (&priv->lock); } static void diff --git a/libsoup/soup-marshal.c b/libsoup/soup-marshal.c new file mode 100644 index 0000000..07ae739 --- /dev/null +++ b/libsoup/soup-marshal.c @@ -0,0 +1,282 @@ +#include "soup-marshal.h" + +#include + + +#ifdef G_ENABLE_DEBUG +#define g_marshal_value_peek_boolean(v) g_value_get_boolean (v) +#define g_marshal_value_peek_char(v) g_value_get_schar (v) +#define g_marshal_value_peek_uchar(v) g_value_get_uchar (v) +#define g_marshal_value_peek_int(v) g_value_get_int (v) +#define g_marshal_value_peek_uint(v) g_value_get_uint (v) +#define g_marshal_value_peek_long(v) g_value_get_long (v) +#define g_marshal_value_peek_ulong(v) g_value_get_ulong (v) +#define g_marshal_value_peek_int64(v) g_value_get_int64 (v) +#define g_marshal_value_peek_uint64(v) g_value_get_uint64 (v) +#define g_marshal_value_peek_enum(v) g_value_get_enum (v) +#define g_marshal_value_peek_flags(v) g_value_get_flags (v) +#define g_marshal_value_peek_float(v) g_value_get_float (v) +#define g_marshal_value_peek_double(v) g_value_get_double (v) +#define g_marshal_value_peek_string(v) (char*) g_value_get_string (v) +#define g_marshal_value_peek_param(v) g_value_get_param (v) +#define g_marshal_value_peek_boxed(v) g_value_get_boxed (v) +#define g_marshal_value_peek_pointer(v) g_value_get_pointer (v) +#define g_marshal_value_peek_object(v) g_value_get_object (v) +#define g_marshal_value_peek_variant(v) g_value_get_variant (v) +#else /* !G_ENABLE_DEBUG */ +/* WARNING: This code accesses GValues directly, which is UNSUPPORTED API. 
+ * Do not access GValues directly in your code. Instead, use the + * g_value_get_*() functions + */ +#define g_marshal_value_peek_boolean(v) (v)->data[0].v_int +#define g_marshal_value_peek_char(v) (v)->data[0].v_int +#define g_marshal_value_peek_uchar(v) (v)->data[0].v_uint +#define g_marshal_value_peek_int(v) (v)->data[0].v_int +#define g_marshal_value_peek_uint(v) (v)->data[0].v_uint +#define g_marshal_value_peek_long(v) (v)->data[0].v_long +#define g_marshal_value_peek_ulong(v) (v)->data[0].v_ulong +#define g_marshal_value_peek_int64(v) (v)->data[0].v_int64 +#define g_marshal_value_peek_uint64(v) (v)->data[0].v_uint64 +#define g_marshal_value_peek_enum(v) (v)->data[0].v_long +#define g_marshal_value_peek_flags(v) (v)->data[0].v_ulong +#define g_marshal_value_peek_float(v) (v)->data[0].v_float +#define g_marshal_value_peek_double(v) (v)->data[0].v_double +#define g_marshal_value_peek_string(v) (v)->data[0].v_pointer +#define g_marshal_value_peek_param(v) (v)->data[0].v_pointer +#define g_marshal_value_peek_boxed(v) (v)->data[0].v_pointer +#define g_marshal_value_peek_pointer(v) (v)->data[0].v_pointer +#define g_marshal_value_peek_object(v) (v)->data[0].v_pointer +#define g_marshal_value_peek_variant(v) (v)->data[0].v_pointer +#endif /* !G_ENABLE_DEBUG */ + + +/* NONE:BOXED (soup-marshal.list:1) */ + +/* NONE:BOXED,BOXED (soup-marshal.list:2) */ +void +_soup_marshal_VOID__BOXED_BOXED (GClosure *closure, + GValue *return_value G_GNUC_UNUSED, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint G_GNUC_UNUSED, + gpointer marshal_data) +{ + typedef void (*GMarshalFunc_VOID__BOXED_BOXED) (gpointer data1, + gpointer arg_1, + gpointer arg_2, + gpointer data2); + register GMarshalFunc_VOID__BOXED_BOXED callback; + register GCClosure *cc = (GCClosure*) closure; + register gpointer data1, data2; + + g_return_if_fail (n_param_values == 3); + + if (G_CCLOSURE_SWAP_DATA (closure)) + { + data1 = closure->data; + data2 = g_value_peek_pointer 
(param_values + 0); + } + else + { + data1 = g_value_peek_pointer (param_values + 0); + data2 = closure->data; + } + callback = (GMarshalFunc_VOID__BOXED_BOXED) (marshal_data ? marshal_data : cc->callback); + + callback (data1, + g_marshal_value_peek_boxed (param_values + 1), + g_marshal_value_peek_boxed (param_values + 2), + data2); +} + +/* NONE:NONE (soup-marshal.list:3) */ + +/* NONE:OBJECT (soup-marshal.list:4) */ + +/* NONE:OBJECT,OBJECT (soup-marshal.list:5) */ +void +_soup_marshal_VOID__OBJECT_OBJECT (GClosure *closure, + GValue *return_value G_GNUC_UNUSED, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint G_GNUC_UNUSED, + gpointer marshal_data) +{ + typedef void (*GMarshalFunc_VOID__OBJECT_OBJECT) (gpointer data1, + gpointer arg_1, + gpointer arg_2, + gpointer data2); + register GMarshalFunc_VOID__OBJECT_OBJECT callback; + register GCClosure *cc = (GCClosure*) closure; + register gpointer data1, data2; + + g_return_if_fail (n_param_values == 3); + + if (G_CCLOSURE_SWAP_DATA (closure)) + { + data1 = closure->data; + data2 = g_value_peek_pointer (param_values + 0); + } + else + { + data1 = g_value_peek_pointer (param_values + 0); + data2 = closure->data; + } + callback = (GMarshalFunc_VOID__OBJECT_OBJECT) (marshal_data ? 
marshal_data : cc->callback); + + callback (data1, + g_marshal_value_peek_object (param_values + 1), + g_marshal_value_peek_object (param_values + 2), + data2); +} + +/* NONE:OBJECT,OBJECT,BOOLEAN (soup-marshal.list:6) */ +void +_soup_marshal_VOID__OBJECT_OBJECT_BOOLEAN (GClosure *closure, + GValue *return_value G_GNUC_UNUSED, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint G_GNUC_UNUSED, + gpointer marshal_data) +{ + typedef void (*GMarshalFunc_VOID__OBJECT_OBJECT_BOOLEAN) (gpointer data1, + gpointer arg_1, + gpointer arg_2, + gboolean arg_3, + gpointer data2); + register GMarshalFunc_VOID__OBJECT_OBJECT_BOOLEAN callback; + register GCClosure *cc = (GCClosure*) closure; + register gpointer data1, data2; + + g_return_if_fail (n_param_values == 4); + + if (G_CCLOSURE_SWAP_DATA (closure)) + { + data1 = closure->data; + data2 = g_value_peek_pointer (param_values + 0); + } + else + { + data1 = g_value_peek_pointer (param_values + 0); + data2 = closure->data; + } + callback = (GMarshalFunc_VOID__OBJECT_OBJECT_BOOLEAN) (marshal_data ? 
marshal_data : cc->callback); + + callback (data1, + g_marshal_value_peek_object (param_values + 1), + g_marshal_value_peek_object (param_values + 2), + g_marshal_value_peek_boolean (param_values + 3), + data2); +} + +/* NONE:OBJECT,POINTER (soup-marshal.list:7) */ +void +_soup_marshal_VOID__OBJECT_POINTER (GClosure *closure, + GValue *return_value G_GNUC_UNUSED, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint G_GNUC_UNUSED, + gpointer marshal_data) +{ + typedef void (*GMarshalFunc_VOID__OBJECT_POINTER) (gpointer data1, + gpointer arg_1, + gpointer arg_2, + gpointer data2); + register GMarshalFunc_VOID__OBJECT_POINTER callback; + register GCClosure *cc = (GCClosure*) closure; + register gpointer data1, data2; + + g_return_if_fail (n_param_values == 3); + + if (G_CCLOSURE_SWAP_DATA (closure)) + { + data1 = closure->data; + data2 = g_value_peek_pointer (param_values + 0); + } + else + { + data1 = g_value_peek_pointer (param_values + 0); + data2 = closure->data; + } + callback = (GMarshalFunc_VOID__OBJECT_POINTER) (marshal_data ? 
marshal_data : cc->callback); + + callback (data1, + g_marshal_value_peek_object (param_values + 1), + g_marshal_value_peek_pointer (param_values + 2), + data2); +} + +/* NONE:STRING,BOXED (soup-marshal.list:8) */ +void +_soup_marshal_VOID__STRING_BOXED (GClosure *closure, + GValue *return_value G_GNUC_UNUSED, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint G_GNUC_UNUSED, + gpointer marshal_data) +{ + typedef void (*GMarshalFunc_VOID__STRING_BOXED) (gpointer data1, + gpointer arg_1, + gpointer arg_2, + gpointer data2); + register GMarshalFunc_VOID__STRING_BOXED callback; + register GCClosure *cc = (GCClosure*) closure; + register gpointer data1, data2; + + g_return_if_fail (n_param_values == 3); + + if (G_CCLOSURE_SWAP_DATA (closure)) + { + data1 = closure->data; + data2 = g_value_peek_pointer (param_values + 0); + } + else + { + data1 = g_value_peek_pointer (param_values + 0); + data2 = closure->data; + } + callback = (GMarshalFunc_VOID__STRING_BOXED) (marshal_data ? 
marshal_data : cc->callback); + + callback (data1, + g_marshal_value_peek_string (param_values + 1), + g_marshal_value_peek_boxed (param_values + 2), + data2); +} + +/* NONE:STRING,STRING (soup-marshal.list:9) */ +void +_soup_marshal_VOID__STRING_STRING (GClosure *closure, + GValue *return_value G_GNUC_UNUSED, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint G_GNUC_UNUSED, + gpointer marshal_data) +{ + typedef void (*GMarshalFunc_VOID__STRING_STRING) (gpointer data1, + gpointer arg_1, + gpointer arg_2, + gpointer data2); + register GMarshalFunc_VOID__STRING_STRING callback; + register GCClosure *cc = (GCClosure*) closure; + register gpointer data1, data2; + + g_return_if_fail (n_param_values == 3); + + if (G_CCLOSURE_SWAP_DATA (closure)) + { + data1 = closure->data; + data2 = g_value_peek_pointer (param_values + 0); + } + else + { + data1 = g_value_peek_pointer (param_values + 0); + data2 = closure->data; + } + callback = (GMarshalFunc_VOID__STRING_STRING) (marshal_data ? 
marshal_data : cc->callback); + + callback (data1, + g_marshal_value_peek_string (param_values + 1), + g_marshal_value_peek_string (param_values + 2), + data2); +} + diff --git a/libsoup/soup-marshal.h b/libsoup/soup-marshal.h new file mode 100644 index 0000000..2f61927 --- /dev/null +++ b/libsoup/soup-marshal.h @@ -0,0 +1,78 @@ + +#ifndef ___soup_marshal_MARSHAL_H__ +#define ___soup_marshal_MARSHAL_H__ + +#include + +G_BEGIN_DECLS + +/* NONE:BOXED (soup-marshal.list:1) */ +#define _soup_marshal_VOID__BOXED g_cclosure_marshal_VOID__BOXED +#define _soup_marshal_NONE__BOXED _soup_marshal_VOID__BOXED + +/* NONE:BOXED,BOXED (soup-marshal.list:2) */ +extern void _soup_marshal_VOID__BOXED_BOXED (GClosure *closure, + GValue *return_value, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint, + gpointer marshal_data); +#define _soup_marshal_NONE__BOXED_BOXED _soup_marshal_VOID__BOXED_BOXED + +/* NONE:NONE (soup-marshal.list:3) */ +#define _soup_marshal_VOID__VOID g_cclosure_marshal_VOID__VOID +#define _soup_marshal_NONE__NONE _soup_marshal_VOID__VOID + +/* NONE:OBJECT (soup-marshal.list:4) */ +#define _soup_marshal_VOID__OBJECT g_cclosure_marshal_VOID__OBJECT +#define _soup_marshal_NONE__OBJECT _soup_marshal_VOID__OBJECT + +/* NONE:OBJECT,OBJECT (soup-marshal.list:5) */ +extern void _soup_marshal_VOID__OBJECT_OBJECT (GClosure *closure, + GValue *return_value, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint, + gpointer marshal_data); +#define _soup_marshal_NONE__OBJECT_OBJECT _soup_marshal_VOID__OBJECT_OBJECT + +/* NONE:OBJECT,OBJECT,BOOLEAN (soup-marshal.list:6) */ +extern void _soup_marshal_VOID__OBJECT_OBJECT_BOOLEAN (GClosure *closure, + GValue *return_value, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint, + gpointer marshal_data); +#define _soup_marshal_NONE__OBJECT_OBJECT_BOOLEAN _soup_marshal_VOID__OBJECT_OBJECT_BOOLEAN + +/* NONE:OBJECT,POINTER (soup-marshal.list:7) */ 
+extern void _soup_marshal_VOID__OBJECT_POINTER (GClosure *closure, + GValue *return_value, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint, + gpointer marshal_data); +#define _soup_marshal_NONE__OBJECT_POINTER _soup_marshal_VOID__OBJECT_POINTER + +/* NONE:STRING,BOXED (soup-marshal.list:8) */ +extern void _soup_marshal_VOID__STRING_BOXED (GClosure *closure, + GValue *return_value, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint, + gpointer marshal_data); +#define _soup_marshal_NONE__STRING_BOXED _soup_marshal_VOID__STRING_BOXED + +/* NONE:STRING,STRING (soup-marshal.list:9) */ +extern void _soup_marshal_VOID__STRING_STRING (GClosure *closure, + GValue *return_value, + guint n_param_values, + const GValue *param_values, + gpointer invocation_hint, + gpointer marshal_data); +#define _soup_marshal_NONE__STRING_STRING _soup_marshal_VOID__STRING_STRING + +G_END_DECLS + +#endif /* ___soup_marshal_MARSHAL_H__ */ + diff --git a/libsoup/soup-marshal.list b/libsoup/soup-marshal.list deleted file mode 100644 index 7714813..0000000 --- a/libsoup/soup-marshal.list +++ /dev/null @@ -1,10 +0,0 @@ -NONE:BOXED -NONE:INT -NONE:NONE -NONE:OBJECT -NONE:OBJECT,OBJECT -NONE:OBJECT,POINTER -NONE:BOXED,BOXED -NONE:OBJECT,OBJECT,BOOLEAN -NONE:STRING,BOXED -NONE:STRING,STRING diff --git a/libsoup/soup-message-body.c b/libsoup/soup-message-body.c index a1d78f8..daf9d1e 100644 --- a/libsoup/soup-message-body.c +++ b/libsoup/soup-message-body.c @@ -121,8 +121,8 @@ soup_buffer_new (SoupMemoryUse use, gconstpointer data, gsize length) * * Return value: the new #SoupBuffer. * - * Since: 2.32 * Rename to: soup_buffer_new + * Since: 2.32 **/ SoupBuffer * soup_buffer_new_take (guchar *data, gsize length) @@ -384,25 +384,25 @@ soup_message_body_new (void) * @body may be discarded when they are no longer needed. 
* * In particular, if you set this flag to %FALSE on an "incoming" - * message body (that is, the %response_body of a client-side message, - * or %request_body of a server-side message), this will cause each - * chunk of the body to be discarded after its corresponding - * #SoupMessage::got_chunk signal is emitted. (This is equivalent to - * setting the deprecated %SOUP_MESSAGE_OVERWRITE_CHUNKS flag on the - * message.) - * - * If you set this flag to %FALSE on the %response_body of a - * server-side message, it will cause each chunk of the body to be + * message body (that is, the #SoupMessage:response_body of a + * client-side message, or #SoupMessage:request_body of a server-side + * message), this will cause each chunk of the body to be discarded + * after its corresponding #SoupMessage::got_chunk signal is emitted. + * (This is equivalent to setting the deprecated + * %SOUP_MESSAGE_OVERWRITE_CHUNKS flag on the message.) + * + * If you set this flag to %FALSE on the #SoupMessage:response_body of + * a server-side message, it will cause each chunk of the body to be * discarded after its corresponding #SoupMessage::wrote_chunk signal * is emitted. * - * If you set the flag to %FALSE on the %request_body of a client-side - * message, it will block the accumulation of chunks into @body's - * %data field, but it will not normally cause the chunks to be - * discarded after being written like in the server-side - * %response_body case, because the request body needs to be kept - * around in case the request needs to be sent a second time due to - * redirection or authentication. 
However, if you set the + * If you set the flag to %FALSE on the #SoupMessage:request_body of a + * client-side message, it will block the accumulation of chunks into + * @body's %data field, but it will not normally cause the chunks to + * be discarded after being written like in the server-side + * #SoupMessage:response_body case, because the request body needs to + * be kept around in case the request needs to be sent a second time + * due to redirection or authentication. However, if you set the * %SOUP_MESSAGE_CAN_REBUILD flag on the message, then the chunks will * be discarded, and you will be responsible for recreating the * request body after the #SoupMessage::restarted signal is emitted. @@ -483,12 +483,12 @@ soup_message_body_append (SoupMessageBody *body, SoupMemoryUse use, * * Appends @length bytes from @data to @body. * - * This function is exactly equivalent to soup_message_body_apppend() + * This function is exactly equivalent to soup_message_body_append() * with %SOUP_MEMORY_TAKE as second argument; it exists mainly for * convenience and simplifying language bindings. * - * Since: 2.32 * Rename to: soup_message_body_append + * Since: 2.32 **/ void soup_message_body_append_take (SoupMessageBody *body, diff --git a/libsoup/soup-message-headers.c b/libsoup/soup-message-headers.c index 195a3b0..1d98bb9 100644 --- a/libsoup/soup-message-headers.c +++ b/libsoup/soup-message-headers.c @@ -22,6 +22,12 @@ **/ /** + * SoupMessageHeaders: + * + * The HTTP message headers associated with a request or response. 
+ */ + +/** * SoupMessageHeadersType: * @SOUP_MESSAGE_HEADERS_REQUEST: request headers * @SOUP_MESSAGE_HEADERS_RESPONSE: response headers @@ -522,7 +528,7 @@ soup_message_headers_foreach (SoupMessageHeaders *hdrs, } -static GStaticMutex header_pool_mutex = G_STATIC_MUTEX_INIT; +G_LOCK_DEFINE_STATIC (header_pool); static GHashTable *header_pool, *header_setters; static void transfer_encoding_setter (SoupMessageHeaders *, const char *); @@ -549,7 +555,7 @@ intern_header_name (const char *name, SoupHeaderSetter *setter) { const char *interned; - g_static_mutex_lock (&header_pool_mutex); + G_LOCK (header_pool); if (!header_pool) { header_pool = g_hash_table_new (soup_str_case_hash, soup_str_case_equal); @@ -572,7 +578,7 @@ intern_header_name (const char *name, SoupHeaderSetter *setter) if (setter) *setter = g_hash_table_lookup (header_setters, interned); - g_static_mutex_unlock (&header_pool_mutex); + G_UNLOCK (header_pool); return interned; } @@ -839,17 +845,17 @@ soup_message_headers_set_expectations (SoupMessageHeaders *hdrs, * Represents a byte range as used in the Range header. * * If @end is non-negative, then @start and @end represent the bounds - * of of the range, counting from %0. (Eg, the first 500 bytes would be - * represented as @start = %0 and @end = %499.) + * of of the range, counting from 0. (Eg, the first 500 bytes would be + * represented as @start = 0 and @end = 499.) * - * If @end is %-1 and @start is non-negative, then this represents a + * If @end is -1 and @start is non-negative, then this represents a * range starting at @start and ending with the last byte of the * requested resource body. (Eg, all but the first 500 bytes would be - * @start = %500, and @end = %-1.) + * @start = 500, and @end = -1.) * - * If @end is %-1 and @start is negative, then it represents a "suffix + * If @end is -1 and @start is negative, then it represents a "suffix * range", referring to the last -@start bytes of the resource body. 
- * (Eg, the last 500 bytes would be @start = %-500 and @end = %-1.) + * (Eg, the last 500 bytes would be @start = -500 and @end = -1.) * * Since: 2.26 **/ diff --git a/libsoup/soup-message-io.c b/libsoup/soup-message-io.c index 213a46b..cf2a2e3 100644 --- a/libsoup/soup-message-io.c +++ b/libsoup/soup-message-io.c @@ -18,7 +18,6 @@ #include "soup-message-queue.h" #include "soup-misc.h" #include "soup-socket.h" -#include "soup-ssl.h" typedef enum { SOUP_MESSAGE_IO_CLIENT, @@ -68,7 +67,7 @@ typedef struct { goffset write_length; goffset written; - guint read_tag, write_tag, tls_signal_id; + guint read_tag, write_tag; GSource *unpause_source; SoupMessageGetHeadersFn get_headers_cb; @@ -102,8 +101,6 @@ soup_message_io_cleanup (SoupMessage *msg) return; priv->io_data = NULL; - if (io->tls_signal_id) - g_signal_handler_disconnect (io->sock, io->tls_signal_id); if (io->sock) g_object_unref (io->sock); if (io->item) @@ -146,8 +143,6 @@ soup_message_io_stop (SoupMessage *msg) if (io->read_state < SOUP_MESSAGE_IO_STATE_FINISHING) soup_socket_disconnect (io->sock); - else if (io->item && io->item->conn) - soup_connection_set_state (io->item->conn, SOUP_CONNECTION_IDLE); } #define SOUP_MESSAGE_IO_EOL "\r\n" @@ -321,12 +316,12 @@ read_metadata (SoupMessage *msg, gboolean to_blank) if (got_lf) { if (!to_blank) break; - if (nread == 1 && + if (nread == 1 && io->read_meta_buf->len >= 2 && !strncmp ((char *)io->read_meta_buf->data + io->read_meta_buf->len - 2, "\n\n", 2)) break; - else if (nread == 2 && + else if (nread == 2 && io->read_meta_buf->len >= 3 && !strncmp ((char *)io->read_meta_buf->data + io->read_meta_buf->len - 3, "\n\r\n", 3)) @@ -343,6 +338,7 @@ content_decode_one (SoupBuffer *buf, GConverter *converter, GError **error) gsize outbuf_length, outbuf_used, outbuf_cur, input_used, input_cur; char *outbuf; GConverterResult result; + gboolean dummy_zlib_header_used = FALSE; outbuf_length = MAX (buf->length * 2, 1024); outbuf = g_malloc (outbuf_length); @@ -362,6 +358,39 
@@ content_decode_one (SoupBuffer *buf, GConverter *converter, GError **error) g_clear_error (error); outbuf_length *= 2; outbuf = g_realloc (outbuf, outbuf_length); + } else if (input_cur == 0 && + !dummy_zlib_header_used && + G_IS_ZLIB_DECOMPRESSOR (converter) && + g_error_matches (*error, G_IO_ERROR, G_IO_ERROR_INVALID_DATA)) { + + GZlibCompressorFormat format; + g_object_get (G_OBJECT (converter), "format", &format, NULL); + + if (format == G_ZLIB_COMPRESSOR_FORMAT_ZLIB) { + /* Some servers (especially Apache with mod_deflate) + * return RAW compressed data without the zlib headers + * when the client claims to support deflate. For + * those cases use a dummy header (stolen from + * Mozilla's nsHTTPCompressConv.cpp) and try to + * continue uncompressing data. + */ + static char dummy_zlib_header[2] = { 0x78, 0x9C }; + + g_converter_reset (converter); + result = g_converter_convert (converter, + dummy_zlib_header, sizeof(dummy_zlib_header), + outbuf + outbuf_cur, outbuf_length - outbuf_cur, + 0, &input_used, &outbuf_used, NULL); + dummy_zlib_header_used = TRUE; + if (result == G_CONVERTER_CONVERTED) { + g_clear_error (error); + continue; + } + } + + g_free (outbuf); + return NULL; + } else if (*error) { /* GZlibDecompressor can't ever return * G_IO_ERROR_PARTIAL_INPUT unless we pass it @@ -1050,25 +1079,6 @@ io_read (SoupSocket *sock, SoupMessage *msg) goto read_more; } -static void -socket_tls_certificate_changed (GObject *sock, GParamSpec *pspec, - gpointer msg) -{ - GTlsCertificate *certificate; - GTlsCertificateFlags errors; - - g_object_get (sock, - SOUP_SOCKET_TLS_CERTIFICATE, &certificate, - SOUP_SOCKET_TLS_ERRORS, &errors, - NULL); - g_object_set (msg, - SOUP_MESSAGE_TLS_CERTIFICATE, certificate, - SOUP_MESSAGE_TLS_ERRORS, errors, - NULL); - if (certificate) - g_object_unref (certificate); -} - static SoupMessageIOData * new_iostate (SoupMessage *msg, SoupSocket *sock, SoupMessageIOMode mode, SoupMessageGetHeadersFn get_headers_cb, @@ -1100,11 +1110,6 @@ 
new_iostate (SoupMessage *msg, SoupSocket *sock, SoupMessageIOMode mode, io->read_state = SOUP_MESSAGE_IO_STATE_NOT_STARTED; io->write_state = SOUP_MESSAGE_IO_STATE_NOT_STARTED; - if (soup_socket_is_ssl (io->sock)) { - io->tls_signal_id = g_signal_connect (io->sock, "notify::tls-certificate", - G_CALLBACK (socket_tls_certificate_changed), msg); - } - if (priv->io_data) soup_message_io_cleanup (msg); priv->io_data = io; @@ -1216,15 +1221,23 @@ soup_message_io_unpause (SoupMessage *msg) { SoupMessagePrivate *priv = SOUP_MESSAGE_GET_PRIVATE (msg); SoupMessageIOData *io = priv->io_data; - gboolean non_blocking; + gboolean non_blocking, use_thread_context; GMainContext *async_context; g_return_if_fail (io != NULL); g_object_get (io->sock, SOUP_SOCKET_FLAG_NONBLOCKING, &non_blocking, - SOUP_SOCKET_ASYNC_CONTEXT, &async_context, + SOUP_SOCKET_USE_THREAD_CONTEXT, &use_thread_context, NULL); + if (use_thread_context) + async_context = g_main_context_ref_thread_default (); + else { + g_object_get (io->sock, + SOUP_SOCKET_ASYNC_CONTEXT, &async_context, + NULL); + } + if (non_blocking) { if (!io->unpause_source) { io->unpause_source = soup_add_completion ( diff --git a/libsoup/soup-message-private.h b/libsoup/soup-message-private.h index ce866dc..5625354 100644 --- a/libsoup/soup-message-private.h +++ b/libsoup/soup-message-private.h @@ -96,4 +96,12 @@ gboolean soup_message_io_in_progress (SoupMessage *msg); gboolean soup_message_disables_feature (SoupMessage *msg, gpointer feature); + +void soup_message_set_https_status (SoupMessage *msg, + SoupConnection *conn); + +void soup_message_network_event (SoupMessage *msg, + GSocketClientEvent event, + GIOStream *connection); + #endif /* SOUP_MESSAGE_PRIVATE_H */ diff --git a/libsoup/soup-message-queue.c b/libsoup/soup-message-queue.c index 58fea58..7b1e5dd 100644 --- a/libsoup/soup-message-queue.c +++ b/libsoup/soup-message-queue.c @@ -13,10 +13,7 @@ #include "soup-message-queue.h" #include "soup-uri.h" -/** - * 
SECTION:soup-message-queue - * - * This is an internal structure used by #SoupSession and its +/* This is an internal structure used by #SoupSession and its * subclasses to keep track of the status of messages currently being * processed. * @@ -32,7 +29,7 @@ struct _SoupMessageQueue { SoupSession *session; - GMutex *mutex; + GMutex mutex; SoupMessageQueueItem *head, *tail; }; @@ -43,7 +40,7 @@ soup_message_queue_new (SoupSession *session) queue = g_slice_new0 (SoupMessageQueue); queue->session = session; - queue->mutex = g_mutex_new (); + g_mutex_init (&queue->mutex); return queue; } @@ -52,7 +49,7 @@ soup_message_queue_destroy (SoupMessageQueue *queue) { g_return_if_fail (queue->head == NULL); - g_mutex_free (queue->mutex); + g_mutex_clear (&queue->mutex); g_slice_free (SoupMessageQueue, queue); } @@ -75,8 +72,7 @@ queue_message_restarted (SoupMessage *msg, gpointer user_data) SOUP_STATUS_IS_REDIRECTION (msg->status_code))) { if (soup_connection_get_state (item->conn) == SOUP_CONNECTION_IN_USE) soup_connection_set_state (item->conn, SOUP_CONNECTION_IDLE); - g_object_unref (item->conn); - item->conn = NULL; + soup_message_queue_item_set_connection (item, NULL); } soup_message_cleanup_response (msg); @@ -106,6 +102,7 @@ soup_message_queue_append (SoupMessageQueue *queue, SoupMessage *msg, item = g_slice_new0 (SoupMessageQueueItem); item->session = queue->session; + item->async_context = soup_session_get_async_context (item->session); item->queue = queue; item->msg = g_object_ref (msg); item->callback = callback; @@ -121,7 +118,7 @@ soup_message_queue_append (SoupMessageQueue *queue, SoupMessage *msg, */ item->ref_count = 1; - g_mutex_lock (queue->mutex); + g_mutex_lock (&queue->mutex); if (queue->head) { queue->tail->next = item; item->prev = queue->tail; @@ -129,7 +126,7 @@ soup_message_queue_append (SoupMessageQueue *queue, SoupMessage *msg, } else queue->head = queue->tail = item; - g_mutex_unlock (queue->mutex); + g_mutex_unlock (&queue->mutex); return item; } 
@@ -156,13 +153,13 @@ soup_message_queue_item_ref (SoupMessageQueueItem *item) void soup_message_queue_item_unref (SoupMessageQueueItem *item) { - g_mutex_lock (item->queue->mutex); + g_mutex_lock (&item->queue->mutex); /* Decrement the ref_count; if it's still non-zero OR if the * item is still in the queue, then return. */ if (--item->ref_count || !item->removed) { - g_mutex_unlock (item->queue->mutex); + g_mutex_unlock (&item->queue->mutex); return; } @@ -176,7 +173,7 @@ soup_message_queue_item_unref (SoupMessageQueueItem *item) else item->queue->tail = item->prev; - g_mutex_unlock (item->queue->mutex); + g_mutex_unlock (&item->queue->mutex); /* And free it */ g_signal_handlers_disconnect_by_func (item->msg, @@ -187,11 +184,39 @@ soup_message_queue_item_unref (SoupMessageQueueItem *item) g_object_unref (item->proxy_addr); if (item->proxy_uri) soup_uri_free (item->proxy_uri); - if (item->conn) - g_object_unref (item->conn); + soup_message_queue_item_set_connection (item, NULL); g_slice_free (SoupMessageQueueItem, item); } +static void +proxy_connection_event (SoupConnection *conn, + GSocketClientEvent event, + GIOStream *connection, + gpointer user_data) +{ + SoupMessageQueueItem *item = user_data; + + soup_message_network_event (item->msg, event, connection); +} + +void +soup_message_queue_item_set_connection (SoupMessageQueueItem *item, + SoupConnection *conn) +{ + if (item->conn) { + g_signal_handlers_disconnect_by_func (item->conn, proxy_connection_event, item); + g_object_unref (item->conn); + } + + item->conn = conn; + + if (item->conn) { + g_object_ref (item->conn); + g_signal_connect (item->conn, "event", + G_CALLBACK (proxy_connection_event), item); + } +} + /** * soup_message_queue_lookup: * @queue: a #SoupMessageQueue @@ -208,7 +233,7 @@ soup_message_queue_lookup (SoupMessageQueue *queue, SoupMessage *msg) { SoupMessageQueueItem *item; - g_mutex_lock (queue->mutex); + g_mutex_lock (&queue->mutex); item = queue->tail; while (item && (item->removed || 
item->msg != msg)) @@ -217,7 +242,7 @@ soup_message_queue_lookup (SoupMessageQueue *queue, SoupMessage *msg) if (item) item->ref_count++; - g_mutex_unlock (queue->mutex); + g_mutex_unlock (&queue->mutex); return item; } @@ -238,7 +263,7 @@ soup_message_queue_first (SoupMessageQueue *queue) { SoupMessageQueueItem *item; - g_mutex_lock (queue->mutex); + g_mutex_lock (&queue->mutex); item = queue->head; while (item && item->removed) @@ -247,7 +272,7 @@ soup_message_queue_first (SoupMessageQueue *queue) if (item) item->ref_count++; - g_mutex_unlock (queue->mutex); + g_mutex_unlock (&queue->mutex); return item; } @@ -268,7 +293,7 @@ soup_message_queue_next (SoupMessageQueue *queue, SoupMessageQueueItem *item) { SoupMessageQueueItem *next; - g_mutex_lock (queue->mutex); + g_mutex_lock (&queue->mutex); next = item->next; while (next && next->removed) @@ -276,7 +301,7 @@ soup_message_queue_next (SoupMessageQueue *queue, SoupMessageQueueItem *item) if (next) next->ref_count++; - g_mutex_unlock (queue->mutex); + g_mutex_unlock (&queue->mutex); soup_message_queue_item_unref (item); return next; } @@ -294,7 +319,7 @@ soup_message_queue_remove (SoupMessageQueue *queue, SoupMessageQueueItem *item) { g_return_if_fail (!item->removed); - g_mutex_lock (queue->mutex); + g_mutex_lock (&queue->mutex); item->removed = TRUE; - g_mutex_unlock (queue->mutex); + g_mutex_unlock (&queue->mutex); } diff --git a/libsoup/soup-message-queue.h b/libsoup/soup-message-queue.h index 08cc6df..a1ae663 100644 --- a/libsoup/soup-message-queue.h +++ b/libsoup/soup-message-queue.h @@ -39,13 +39,15 @@ struct _SoupMessageQueueItem { SoupMessage *msg; SoupSessionCallback callback; gpointer callback_data; + GMainContext *async_context; GCancellable *cancellable; SoupAddress *proxy_addr; SoupURI *proxy_uri; SoupConnection *conn; - guint redirection_count; + guint paused : 1; + guint redirection_count : 31; SoupMessageQueueItemState state; @@ -72,11 +74,12 @@ SoupMessageQueueItem *soup_message_queue_next 
(SoupMessageQueue *queue void soup_message_queue_remove (SoupMessageQueue *queue, SoupMessageQueueItem *item); -void soup_message_queue_item_ref (SoupMessageQueueItem *item); -void soup_message_queue_item_unref (SoupMessageQueueItem *item); - void soup_message_queue_destroy (SoupMessageQueue *queue); +void soup_message_queue_item_ref (SoupMessageQueueItem *item); +void soup_message_queue_item_unref (SoupMessageQueueItem *item); +void soup_message_queue_item_set_connection (SoupMessageQueueItem *item, + SoupConnection *conn); G_END_DECLS diff --git a/libsoup/soup-message-server-io.c b/libsoup/soup-message-server-io.c index 573fcaf..8b07ebf 100644 --- a/libsoup/soup-message-server-io.c +++ b/libsoup/soup-message-server-io.c @@ -82,20 +82,28 @@ parse_request_headers (SoupMessage *msg, char *headers, guint headers_len, } else if (priv->http_version == SOUP_HTTP_1_0) { /* No Host header, no AbsoluteUri */ SoupAddress *addr = soup_socket_get_local_address (sock); - const char *host = soup_address_get_physical (addr); - url = g_strdup_printf ("%s://%s:%d%s", - soup_socket_is_ssl (sock) ? "https" : "http", - host, soup_address_get_port (addr), - req_path); - uri = soup_uri_new (url); - g_free (url); + uri = soup_uri_new (NULL); + soup_uri_set_scheme (uri, soup_socket_is_ssl (sock) ? 
+ SOUP_URI_SCHEME_HTTPS : + SOUP_URI_SCHEME_HTTP); + soup_uri_set_host (uri, soup_address_get_physical (addr)); + soup_uri_set_port (uri, soup_address_get_port (addr)); + soup_uri_set_path (uri, req_path); } else uri = NULL; g_free (req_path); - if (!uri) + + if (!SOUP_URI_VALID_FOR_HTTP (uri)) { + /* certainly not "a valid host on the server" (RFC2616 5.2.3) + * SOUP_URI_VALID_FOR_HTTP also guards against uri == NULL + */ + if (uri) + soup_uri_free (uri); return SOUP_STATUS_BAD_REQUEST; + } + soup_message_set_uri (msg, uri); soup_uri_free (uri); diff --git a/libsoup/soup-message.c b/libsoup/soup-message.c index 9aa1209..c70a954 100644 --- a/libsoup/soup-message.c +++ b/libsoup/soup-message.c @@ -10,11 +10,13 @@ #include "soup-address.h" #include "soup-auth.h" +#include "soup-connection.h" #include "soup-enum-types.h" #include "soup-marshal.h" #include "soup-message.h" #include "soup-message-private.h" #include "soup-misc.h" +#include "soup-socket.h" #include "soup-uri.h" /** @@ -64,11 +66,12 @@ * trying to do. * * As described in the #SoupMessageBody documentation, the - * @request_body and @response_body %data fields will not necessarily - * be filled in at all times. When they are filled in, they will be - * terminated with a '\0' byte (which is not included in the %length), - * so you can use them as ordinary C strings (assuming that you know - * that the body doesn't have any other '\0' bytes). + * @request_body and @response_body data fields + * will not necessarily be filled in at all times. When they are + * filled in, they will be terminated with a '\0' byte (which is not + * included in the length), so you can use them as + * ordinary C strings (assuming that you know that the body doesn't + * have any other '\0' bytes). 
* * For a client-side #SoupMessage, @request_body's %data is usually * filled in right before libsoup writes the request to the network, @@ -104,6 +107,8 @@ enum { RESTARTED, FINISHED, + NETWORK_EVENT, + LAST_SIGNAL }; @@ -222,7 +227,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, wrote_informational), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -240,7 +245,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, wrote_headers), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -262,7 +267,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, wrote_chunk), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -285,7 +290,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, 0, /* FIXME after next ABI break */ NULL, NULL, - soup_marshal_NONE__BOXED, + _soup_marshal_NONE__BOXED, G_TYPE_NONE, 1, SOUP_TYPE_BUFFER); @@ -306,7 +311,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, wrote_body), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -329,7 +334,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, got_informational), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -352,7 +357,7 @@ soup_message_class_init (SoupMessageClass *message_class) * (If you need to requeue a message--eg, after handling * authentication or redirection--it is usually better to * requeue it from a #SoupMessage::got_body handler rather - * than a #SoupMessage::got_header handler, so that the + * than a #SoupMessage::got_headers handler, 
so that the * existing HTTP connection can be reused.) **/ signals[GOT_HEADERS] = @@ -361,7 +366,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, got_headers), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -384,7 +389,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, got_chunk), NULL, NULL, - soup_marshal_NONE__BOXED, + _soup_marshal_NONE__BOXED, G_TYPE_NONE, 1, /* Use %G_SIGNAL_TYPE_STATIC_SCOPE so that * the %SOUP_MEMORY_TEMPORARY buffers used @@ -413,7 +418,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, got_body), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -422,24 +427,25 @@ soup_message_class_init (SoupMessageClass *message_class) * @type: the content type that we got from sniffing * @params: (element-type utf8 utf8): a #GHashTable with the parameters * - * This signal is emitted after %got-headers, and before the - * first %got-chunk. If content sniffing is disabled, or no - * content sniffing will be performed, due to the sniffer - * deciding to trust the Content-Type sent by the server, this - * signal is emitted immediately after %got_headers, and @type - * is %NULL. + * This signal is emitted after #SoupMessage::got-headers, and + * before the first #SoupMessage::got-chunk. If content + * sniffing is disabled, or no content sniffing will be + * performed, due to the sniffer deciding to trust the + * Content-Type sent by the server, this signal is emitted + * immediately after #SoupMessage::got-headers, and @type is + * %NULL. * * If the #SoupContentSniffer feature is enabled, and the - * sniffer decided to perform sniffing, the first %got_chunk - * emission may be delayed, so that the sniffer has enough - * data to correctly sniff the content. 
It notified the - * library user that the content has been sniffed, and allows - * it to change the header contents in the message, if - * desired. + * sniffer decided to perform sniffing, the first + * #SoupMessage::got-chunk emission may be delayed, so that the + * sniffer has enough data to correctly sniff the content. It + * notified the library user that the content has been + * sniffed, and allows it to change the header contents in the + * message, if desired. * * After this signal is emitted, the data that was spooled so * that sniffing could be done is delivered on the first - * emission of %got_chunk. + * emission of #SoupMessage::got-chunk. * * Since: 2.27.3 **/ @@ -449,7 +455,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, 0, NULL, NULL, - soup_marshal_NONE__STRING_BOXED, + _soup_marshal_NONE__STRING_BOXED, G_TYPE_NONE, 2, G_TYPE_STRING, G_TYPE_HASH_TABLE); @@ -469,7 +475,7 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, restarted), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -486,9 +492,41 @@ soup_message_class_init (SoupMessageClass *message_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupMessageClass, finished), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); + /** + * SoupMessage::network-event: + * @msg: the message + * @event: the network event + * @connection: the current state of the network connection + + * Emitted to indicate that some network-related event + * related to @msg has occurred. This essentially proxies the + * #GSocketClient::event signal, but only for events that + * occur while @msg "owns" the connection; if @msg is sent on + * an existing persistent connection, then this signal will + * not be emitted. (If you want to force the message to be + * sent on a new connection, set the + * %SOUP_MESSAGE_NEW_CONNECTION flag on it.) 
+ * + * See #GSocketClient::event for more information on what + * the different values of @event correspond to, and what + * @connection will be in each case. + * + * Since: 2.38 + **/ + signals[NETWORK_EVENT] = + g_signal_new ("network_event", + G_OBJECT_CLASS_TYPE (object_class), + G_SIGNAL_RUN_FIRST, + 0, + NULL, NULL, + NULL, + G_TYPE_NONE, 2, + G_TYPE_SOCKET_CLIENT_EVENT, + G_TYPE_IO_STREAM); + /* properties */ /** * SOUP_MESSAGE_METHOD: @@ -746,7 +784,9 @@ set_property (GObject *object, guint prop_id, if (priv->tls_certificate) g_object_unref (priv->tls_certificate); priv->tls_certificate = g_value_dup_object (value); - if (priv->tls_certificate && !priv->tls_errors) + if (priv->tls_errors) + priv->msg_flags &= ~SOUP_MESSAGE_CERTIFICATE_TRUSTED; + else if (priv->tls_certificate) priv->msg_flags |= SOUP_MESSAGE_CERTIFICATE_TRUSTED; break; case PROP_TLS_ERRORS: @@ -1006,8 +1046,8 @@ soup_message_wrote_body (SoupMessage *msg) * soup_message_got_informational: * @msg: a #SoupMessage * - * Emits the %got_informational signal, indicating that the IO layer - * read a complete informational (1xx) response for @msg. + * Emits the #SoupMessage::got_informational signal, indicating that + * the IO layer read a complete informational (1xx) response for @msg. **/ void soup_message_got_informational (SoupMessage *msg) @@ -1019,8 +1059,8 @@ soup_message_got_informational (SoupMessage *msg) * soup_message_got_headers: * @msg: a #SoupMessage * - * Emits the %got_headers signal, indicating that the IO layer - * finished reading the (non-informational) headers for @msg. + * Emits the #SoupMessage::got_headers signal, indicating that the IO + * layer finished reading the (non-informational) headers for @msg. 
**/ void soup_message_got_headers (SoupMessage *msg) @@ -1033,8 +1073,8 @@ soup_message_got_headers (SoupMessage *msg) * @msg: a #SoupMessage * @chunk: the newly-read chunk * - * Emits the %got_chunk signal, indicating that the IO layer finished - * reading a chunk of @msg's body. + * Emits the #SoupMessage::got_chunk signal, indicating that the IO + * layer finished reading a chunk of @msg's body. **/ void soup_message_got_chunk (SoupMessage *msg, SoupBuffer *chunk) @@ -1061,8 +1101,8 @@ got_body (SoupMessage *req) * soup_message_got_body: * @msg: a #SoupMessage * - * Emits the %got_body signal, indicating that the IO layer finished - * reading the body for @msg. + * Emits the #SoupMessage::got_body signal, indicating that the IO + * layer finished reading the body for @msg. **/ void soup_message_got_body (SoupMessage *msg) @@ -1080,7 +1120,7 @@ soup_message_got_body (SoupMessage *msg) * finished sniffing the content type for @msg. If content sniffing * will not be performed, due to the sniffer deciding to trust the * Content-Type sent by the server, this signal is emitted immediately - * after %got_headers, with %NULL as @content_type. + * after #SoupMessage::got_headers, with %NULL as @content_type. 
**/ void soup_message_content_sniffed (SoupMessage *msg, const char *content_type, GHashTable *params) @@ -1119,6 +1159,15 @@ soup_message_finished (SoupMessage *msg) g_signal_emit (msg, signals[FINISHED], 0); } +void +soup_message_network_event (SoupMessage *msg, + GSocketClientEvent event, + GIOStream *connection) +{ + g_signal_emit (msg, signals[NETWORK_EVENT], 0, + event, connection); +} + static void header_handler_free (gpointer header_name, GClosure *closure) { @@ -1390,7 +1439,6 @@ soup_message_cleanup_response (SoupMessage *req) priv->decoders = g_slist_delete_link (priv->decoders, priv->decoders); } priv->msg_flags &= ~SOUP_MESSAGE_CONTENT_DECODED; - priv->msg_flags &= ~SOUP_MESSAGE_CERTIFICATE_TRUSTED; req->status_code = SOUP_STATUS_NONE; if (req->reason_phrase) { @@ -1399,12 +1447,6 @@ soup_message_cleanup_response (SoupMessage *req) } priv->http_version = priv->orig_http_version; - if (priv->tls_certificate) { - g_object_unref (priv->tls_certificate); - priv->tls_certificate = NULL; - } - priv->tls_errors = 0; - g_object_notify (G_OBJECT (req), SOUP_MESSAGE_STATUS_CODE); g_object_notify (G_OBJECT (req), SOUP_MESSAGE_REASON_PHRASE); g_object_notify (G_OBJECT (req), SOUP_MESSAGE_HTTP_VERSION); @@ -1422,14 +1464,19 @@ soup_message_cleanup_response (SoupMessage *req) * soup_message_body_set_accumulate() for more details. * @SOUP_MESSAGE_OVERWRITE_CHUNKS: Deprecated: equivalent to calling * soup_message_body_set_accumulate() on the incoming message body - * (ie, %response_body for a client-side request), passing %FALSE. + * (ie, #SoupMessage:response_body for a client-side request), + * passing %FALSE. * @SOUP_MESSAGE_CONTENT_DECODED: Set by #SoupContentDecoder to * indicate that it has removed the Content-Encoding on a message (and * so headers such as Content-Length may no longer accurately describe * the body). 
- * @SOUP_MESSAGE_CERTIFICATE_TRUSTED: if %TRUE after an https response + * @SOUP_MESSAGE_CERTIFICATE_TRUSTED: if set after an https response * has been received, indicates that the server's SSL certificate is * trusted according to the session's CA. + * @SOUP_MESSAGE_NEW_CONNECTION: The message should be sent on a + * newly-created connection, not reusing an existing persistent + * connection. Note that messages with non-idempotent + * #SoupMessage:methods behave this way by default. * * Various flags that can be set on a #SoupMessage to alter its * behavior. @@ -1730,25 +1777,27 @@ soup_message_set_status_full (SoupMessage *msg, * call @allocator, which should return a #SoupBuffer. (See * #SoupChunkAllocator for additional details.) Libsoup will then read * data from the network into that buffer, and update the buffer's - * %length to indicate how much data it read. + * length to indicate how much data it read. * * Generally, a custom chunk allocator would be used in conjunction * with soup_message_body_set_accumulate() %FALSE and * #SoupMessage::got_chunk, as part of a strategy to avoid unnecessary * copying of data. However, you cannot assume that every call to the - * allocator will be followed by a call to your %got_chunk handler; if - * an I/O error occurs, then the buffer will be unreffed without ever - * having been used. If your buffer-allocation strategy requires - * special cleanup, use soup_buffer_new_with_owner() rather than doing - * the cleanup from the %got_chunk handler. + * allocator will be followed by a call to your + * #SoupMessage::got_chunk handler; if an I/O error occurs, then the + * buffer will be unreffed without ever having been used. If your + * buffer-allocation strategy requires special cleanup, use + * soup_buffer_new_with_owner() rather than doing the cleanup from the + * #SoupMessage::got_chunk handler. 
* * The other thing to remember when using non-accumulating message - * bodies is that the buffer passed to the %got_chunk handler will be - * unreffed after the handler returns, just as it would be in the - * non-custom-allocated case. If you want to hand the chunk data off - * to some other part of your program to use later, you'll need to ref - * the #SoupBuffer (or its owner, in the soup_buffer_new_with_owner() - * case) to ensure that the data remains valid. + * bodies is that the buffer passed to the #SoupMessage::got_chunk + * handler will be unreffed after the handler returns, just as it + * would be in the non-custom-allocated case. If you want to hand the + * chunk data off to some other part of your program to use later, + * you'll need to ref the #SoupBuffer (or its owner, in the + * soup_buffer_new_with_owner() case) to ensure that the data remains + * valid. **/ void soup_message_set_chunk_allocator (SoupMessage *msg, @@ -1778,7 +1827,7 @@ soup_message_set_chunk_allocator (SoupMessage *msg, * This disables the actions of #SoupSessionFeatures with the * given @feature_type (or a subclass of that type) on @msg, so that * @msg is processed as though the feature(s) hadn't been added to the - * session. Eg, passing #SOUP_TYPE_PROXY_RESOLVER for @feature_type + * session. Eg, passing #SOUP_TYPE_PROXY_URI_RESOLVER for @feature_type * will disable proxy handling and cause @msg to be sent directly to * the indicated origin server, regardless of system proxy * configuration. 
@@ -1823,6 +1872,8 @@ soup_message_disables_feature (SoupMessage *msg, gpointer feature) /** * soup_message_get_first_party: * @msg: a #SoupMessage + * + * Gets @msg's first-party #SoupURI * * Returns: (transfer none): the @msg's first party #SoupURI * @@ -1872,6 +1923,34 @@ soup_message_set_first_party (SoupMessage *msg, g_object_notify (G_OBJECT (msg), SOUP_MESSAGE_FIRST_PARTY); } +void +soup_message_set_https_status (SoupMessage *msg, SoupConnection *conn) +{ + SoupSocket *sock; + + sock = conn ? soup_connection_get_socket (conn) : NULL; + if (sock && soup_socket_is_ssl (sock)) { + GTlsCertificate *certificate; + GTlsCertificateFlags errors; + + g_object_get (sock, + SOUP_SOCKET_TLS_CERTIFICATE, &certificate, + SOUP_SOCKET_TLS_ERRORS, &errors, + NULL); + g_object_set (msg, + SOUP_MESSAGE_TLS_CERTIFICATE, certificate, + SOUP_MESSAGE_TLS_ERRORS, errors, + NULL); + if (certificate) + g_object_unref (certificate); + } else { + g_object_set (msg, + SOUP_MESSAGE_TLS_CERTIFICATE, NULL, + SOUP_MESSAGE_TLS_ERRORS, 0, + NULL); + } +} + /** * soup_message_get_https_status: * @msg: a #SoupMessage @@ -1903,3 +1982,38 @@ soup_message_get_https_status (SoupMessage *msg, *errors = priv->tls_errors; return priv->tls_certificate != NULL; } + +/** + * soup_message_set_redirect: + * @msg: a #SoupMessage + * @status_code: a 3xx status code + * @redirect_uri: the URI to redirect @msg to + * + * Sets @msg's status_code to @status_code and adds a Location header + * pointing to @redirect_uri. Use this from a #SoupServer when you + * want to redirect the client to another URI. + * + * @redirect_uri can be a relative URI, in which case it is + * interpreted relative to @msg's current URI. In particular, if + * @redirect_uri is just a path, it will replace the path + * and query of @msg's URI. 
+ * + * Since: 2.38 + */ +void +soup_message_set_redirect (SoupMessage *msg, guint status_code, + const char *redirect_uri) +{ + SoupURI *location; + char *location_str; + + location = soup_uri_new_with_base (soup_message_get_uri (msg), redirect_uri); + g_return_if_fail (location != NULL); + + soup_message_set_status (msg, status_code); + location_str = soup_uri_to_string (location, FALSE); + soup_message_headers_replace (msg->response_headers, "Location", + location_str); + g_free (location_str); + soup_uri_free (location); +} diff --git a/libsoup/soup-message.h b/libsoup/soup-message.h index 8505f00..d3c7e3c 100644 --- a/libsoup/soup-message.h +++ b/libsoup/soup-message.h @@ -118,7 +118,8 @@ typedef enum { SOUP_MESSAGE_OVERWRITE_CHUNKS = (1 << 3), #endif SOUP_MESSAGE_CONTENT_DECODED = (1 << 4), - SOUP_MESSAGE_CERTIFICATE_TRUSTED = (1 << 5) + SOUP_MESSAGE_CERTIFICATE_TRUSTED = (1 << 5), + SOUP_MESSAGE_NEW_CONNECTION = (1 << 6) } SoupMessageFlags; void soup_message_set_flags (SoupMessage *msg, @@ -155,6 +156,10 @@ void soup_message_set_status_full (SoupMessage *msg, guint status_code, const char *reason_phrase); +void soup_message_set_redirect (SoupMessage *msg, + guint status_code, + const char *redirect_uri); + /* I/O */ typedef SoupBuffer * (*SoupChunkAllocator) (SoupMessage *msg, gsize max_len, diff --git a/libsoup/soup-misc-private.h b/libsoup/soup-misc-private.h index 8407101..e935168 100644 --- a/libsoup/soup-misc-private.h +++ b/libsoup/soup-misc-private.h @@ -18,4 +18,8 @@ void soup_socket_handshake_async (SoupSocket *sock, SoupSocketCallback callback, gpointer user_data); +GSocket *soup_socket_get_gsocket (SoupSocket *sock); +GIOStream *soup_socket_get_iostream (SoupSocket *sock); + + #endif /* SOUP_URI_PRIVATE_H */ diff --git a/libsoup/soup-misc.c b/libsoup/soup-misc.c index 3136645..5c09526 100644 --- a/libsoup/soup-misc.c +++ b/libsoup/soup-misc.c @@ -16,6 +16,8 @@ * **/ +const gboolean soup_ssl_supported = TRUE; + /** * soup_str_case_hash: * @key: 
ASCII string to hash diff --git a/libsoup/soup-misc.h b/libsoup/soup-misc.h index 45c8883..0807b5f 100644 --- a/libsoup/soup-misc.h +++ b/libsoup/soup-misc.h @@ -54,16 +54,7 @@ extern const char soup_char_attributes[]; extern const gboolean soup_ssl_supported; -#define SOUP_SSL_ERROR soup_ssl_error_quark() - -GQuark soup_ssl_error_quark (void); - -typedef enum { - SOUP_SSL_ERROR_HANDSHAKE_NEEDS_READ, - SOUP_SSL_ERROR_HANDSHAKE_NEEDS_WRITE, - SOUP_SSL_ERROR_CERTIFICATE, - SOUP_SSL_ERROR_HANDSHAKE_FAILED -} SoupSSLError; +/* Part of a debugging API */ typedef enum { SOUP_CONNECTION_NEW, diff --git a/libsoup/soup-proxy-resolver.c b/libsoup/soup-proxy-resolver.c index c356ab1..837a1f7 100644 --- a/libsoup/soup-proxy-resolver.c +++ b/libsoup/soup-proxy-resolver.c @@ -111,6 +111,7 @@ uri_from_address (SoupAddress *addr) soup_uri_set_scheme (proxy_uri, SOUP_URI_SCHEME_HTTP); soup_uri_set_host (proxy_uri, soup_address_get_name (addr)); soup_uri_set_port (proxy_uri, soup_address_get_port (addr)); + soup_uri_set_path (proxy_uri, ""); return proxy_uri; } diff --git a/libsoup/soup-request-file.c b/libsoup/soup-request-file.c index d49d374..a26d994 100644 --- a/libsoup/soup-request-file.c +++ b/libsoup/soup-request-file.c @@ -73,8 +73,7 @@ soup_request_file_check_uri (SoupRequest *request, return FALSE; /* but it must be "file:///..." or "file://localhost/..." 
*/ - if (uri->scheme == SOUP_URI_SCHEME_FILE && - *uri->host && + if (*uri->host && g_ascii_strcasecmp (uri->host, "localhost") != 0) return FALSE; @@ -86,26 +85,30 @@ soup_request_file_ensure_file (SoupRequestFile *file, GCancellable *cancellable, GError **error) { - SoupURI *uri; + SoupURI *uri, *copied_uri = NULL; + char *uri_str; if (file->priv->gfile) return TRUE; uri = soup_request_get_uri (SOUP_REQUEST (file)); - if (uri->scheme == SOUP_URI_SCHEME_FILE) { - gchar *decoded_uri = soup_uri_decode (uri->path); - if (decoded_uri) { - file->priv->gfile = g_file_new_for_path (decoded_uri); - g_free (decoded_uri); - } - - return TRUE; + /* gio mishandles URIs with query components: + * https://bugzilla.gnome.org/show_bug.cgi?id=670755 + */ + if (uri->query) { + uri = copied_uri = soup_uri_copy (uri); + soup_uri_set_query (copied_uri, NULL); } - g_set_error (error, SOUP_REQUESTER_ERROR, SOUP_REQUESTER_ERROR_UNSUPPORTED_URI_SCHEME, - _("Unsupported URI scheme '%s'"), uri->scheme); - return FALSE; + uri_str = soup_uri_to_string (uri, FALSE); + file->priv->gfile = g_file_new_for_uri (uri_str); + + g_free (uri_str); + if (copied_uri) + soup_uri_free (copied_uri); + + return TRUE; } static GInputStream * @@ -171,12 +174,10 @@ soup_request_file_send_async_thread (GSimpleAsyncResult *res, stream = soup_request_file_send (request, cancellable, &error); - if (stream == NULL) { - g_simple_async_result_set_from_error (res, error); - g_error_free (error); - } else { + if (stream == NULL) + g_simple_async_result_take_error (res, error); + else g_simple_async_result_set_op_res_gpointer (res, stream, g_object_unref); - } } static void @@ -250,6 +251,16 @@ soup_request_file_class_init (SoupRequestFileClass *request_file_class) request_class->get_content_type = soup_request_file_get_content_type; } +/** + * soup_request_file_get_file: + * @file: a #SoupRequestFile + * + * Gets a #GFile corresponding to @file's URI + * + * Return value: (transfer full): a #GFile corresponding to @file 
+ * + * Since: 2.34 + */ GFile * soup_request_file_get_file (SoupRequestFile *file) { diff --git a/libsoup/soup-request-http.c b/libsoup/soup-request-http.c index 90a5c48..89547e1 100644 --- a/libsoup/soup-request-http.c +++ b/libsoup/soup-request-http.c @@ -32,7 +32,6 @@ #include "soup-request-http.h" #include "soup-cache.h" #include "soup-cache-private.h" -#include "soup-content-sniffer.h" #include "soup-http-input-stream.h" #include "soup-message.h" #include "soup-session.h" @@ -42,6 +41,7 @@ G_DEFINE_TYPE (SoupRequestHTTP, soup_request_http, SOUP_TYPE_REQUEST) struct _SoupRequestHTTPPrivate { SoupMessage *msg; + char *content_type; }; static void @@ -72,6 +72,8 @@ soup_request_http_finalize (GObject *object) if (http->priv->msg) g_object_unref (http->priv->msg); + g_free (http->priv->content_type); + G_OBJECT_CLASS (soup_request_http_parent_class)->finalize (object); } @@ -80,137 +82,118 @@ soup_request_http_send (SoupRequest *request, GCancellable *cancellable, GError **error) { - SoupHTTPInputStream *httpstream; + GInputStream *httpstream; SoupRequestHTTP *http = SOUP_REQUEST_HTTP (request); httpstream = soup_http_input_stream_new (soup_request_get_session (request), http->priv->msg); - if (!soup_http_input_stream_send (httpstream, cancellable, error)) { + if (!soup_http_input_stream_send (SOUP_HTTP_INPUT_STREAM (httpstream), + cancellable, error)) { g_object_unref (httpstream); return NULL; } - return (GInputStream *)httpstream; + http->priv->content_type = g_strdup (soup_http_input_stream_get_content_type (SOUP_HTTP_INPUT_STREAM (httpstream))); + return httpstream; } +typedef struct { + SoupRequestHTTP *http; + GCancellable *cancellable; + GSimpleAsyncResult *simple; + + SoupMessage *original; + GInputStream *stream; +} SendAsyncData; + static void -sent_async (GObject *source, GAsyncResult *result, gpointer user_data) +free_send_async_data (SendAsyncData *sadata) +{ + g_object_unref (sadata->http); + g_object_unref (sadata->simple); + + if 
(sadata->cancellable) + g_object_unref (sadata->cancellable); + if (sadata->stream) + g_object_unref (sadata->stream); + if (sadata->original) + g_object_unref (sadata->original); + + g_slice_free (SendAsyncData, sadata); +} + +static void +http_input_stream_ready_cb (GObject *source, GAsyncResult *result, gpointer user_data) { SoupHTTPInputStream *httpstream = SOUP_HTTP_INPUT_STREAM (source); - GSimpleAsyncResult *simple = user_data; + SendAsyncData *sadata = user_data; GError *error = NULL; if (soup_http_input_stream_send_finish (httpstream, result, &error)) { - g_simple_async_result_set_op_res_gpointer (simple, httpstream, g_object_unref); + sadata->http->priv->content_type = g_strdup (soup_http_input_stream_get_content_type (httpstream)); + g_simple_async_result_set_op_res_gpointer (sadata->simple, httpstream, g_object_unref); } else { - g_simple_async_result_set_from_error (simple, error); - g_error_free (error); + g_simple_async_result_take_error (sadata->simple, error); g_object_unref (httpstream); } - g_simple_async_result_complete (simple); - g_object_unref (simple); + g_simple_async_result_complete (sadata->simple); + free_send_async_data (sadata); } -typedef struct { - SoupRequestHTTP *req; - SoupMessage *original; - GCancellable *cancellable; - GAsyncReadyCallback callback; - gpointer user_data; -} ConditionalHelper; - static void conditional_get_ready_cb (SoupSession *session, SoupMessage *msg, gpointer user_data) { - ConditionalHelper *helper = (ConditionalHelper *)user_data; - GSimpleAsyncResult *simple; - SoupHTTPInputStream *httpstream; - - simple = g_simple_async_result_new (G_OBJECT (helper->req), - helper->callback, helper->user_data, - conditional_get_ready_cb); + SendAsyncData *sadata = user_data; + GInputStream *stream; if (msg->status_code == SOUP_STATUS_NOT_MODIFIED) { SoupCache *cache = (SoupCache *)soup_session_get_feature (session, SOUP_TYPE_CACHE); - httpstream = (SoupHTTPInputStream *)soup_cache_send_response (cache, helper->original); 
- if (httpstream) { - g_simple_async_result_set_op_res_gpointer (simple, httpstream, g_object_unref); - - soup_message_got_headers (helper->original); + stream = soup_cache_send_response (cache, sadata->original); + if (stream) { + g_simple_async_result_set_op_res_gpointer (sadata->simple, stream, g_object_unref); - if (soup_session_get_feature_for_message (session, SOUP_TYPE_CONTENT_SNIFFER, helper->original)) { - const char *content_type = - soup_message_headers_get_content_type (helper->original->response_headers, NULL); - soup_message_content_sniffed (helper->original, content_type, NULL); - } + soup_message_got_headers (sadata->original); - g_simple_async_result_complete (simple); + /* FIXME: this is wrong; the cache won't have + * the sniffed type. + */ + sadata->http->priv->content_type = g_strdup (soup_message_headers_get_content_type (sadata->original->response_headers, NULL)); - soup_message_finished (helper->original); + g_simple_async_result_complete (sadata->simple); - g_object_unref (simple); - } else { - /* Ask again for the resource, somehow the cache cannot locate it */ - httpstream = soup_http_input_stream_new (session, helper->original); - soup_http_input_stream_send_async (httpstream, G_PRIORITY_DEFAULT, - helper->cancellable, sent_async, simple); + soup_message_finished (sadata->original); + free_send_async_data (sadata); + return; } - } else { - /* It is in the cache but it was modified remotely */ - httpstream = soup_http_input_stream_new (session, helper->original); - soup_http_input_stream_send_async (httpstream, G_PRIORITY_DEFAULT, - helper->cancellable, sent_async, simple); } - g_object_unref (helper->req); - g_object_unref (helper->original); - g_slice_free (ConditionalHelper, helper); + /* The resource was modified, or else it mysteriously disappeared + * from our cache. Either way we need to reload it now. 
+ */ + stream = soup_http_input_stream_new (session, sadata->original); + soup_http_input_stream_send_async (SOUP_HTTP_INPUT_STREAM (stream), G_PRIORITY_DEFAULT, + sadata->cancellable, http_input_stream_ready_cb, sadata); } -typedef struct { - SoupRequestHTTP *http; - GAsyncReadyCallback callback; - gpointer user_data; - SoupHTTPInputStream *httpstream; -} SendAsyncHelper; - -static void soup_request_http_send_async (SoupRequest *request, - GCancellable *cancellable, - GAsyncReadyCallback callback, - gpointer user_data); - static gboolean -send_async_cb (gpointer data) +idle_return_from_cache_cb (gpointer data) { - GSimpleAsyncResult *simple; - SoupSession *session; - SendAsyncHelper *helper = (SendAsyncHelper *)data; - - session = soup_request_get_session (SOUP_REQUEST (helper->http)); - simple = g_simple_async_result_new (G_OBJECT (helper->http), - helper->callback, helper->user_data, - soup_request_http_send_async); + SendAsyncData *sadata = data; - g_simple_async_result_set_op_res_gpointer (simple, helper->httpstream, g_object_unref); + g_simple_async_result_set_op_res_gpointer (sadata->simple, + g_object_ref (sadata->stream), g_object_unref); /* Issue signals */ - soup_message_got_headers (helper->http->priv->msg); + soup_message_got_headers (sadata->http->priv->msg); - if (soup_session_get_feature_for_message (session, SOUP_TYPE_CONTENT_SNIFFER, helper->http->priv->msg)) { - const char *content_type = soup_message_headers_get_content_type (helper->http->priv->msg->response_headers, NULL); - soup_message_content_sniffed (helper->http->priv->msg, content_type, NULL); - } - - g_simple_async_result_complete (simple); - - soup_message_finished (helper->http->priv->msg); + sadata->http->priv->content_type = g_strdup (soup_message_headers_get_content_type (sadata->http->priv->msg->response_headers, NULL)); - g_object_unref (simple); + g_simple_async_result_complete (sadata->simple); - g_object_unref (helper->http); - g_slice_free (SendAsyncHelper, helper); + 
soup_message_finished (sadata->http->priv->msg); + free_send_async_data (sadata); return FALSE; } @@ -221,11 +204,17 @@ soup_request_http_send_async (SoupRequest *request, gpointer user_data) { SoupRequestHTTP *http = SOUP_REQUEST_HTTP (request); - SoupHTTPInputStream *httpstream; - GSimpleAsyncResult *simple; + SendAsyncData *sadata; + GInputStream *stream; SoupSession *session; SoupCache *cache; + sadata = g_slice_new0 (SendAsyncData); + sadata->http = g_object_ref (http); + sadata->cancellable = cancellable ? g_object_ref (cancellable) : NULL; + sadata->simple = g_simple_async_result_new (G_OBJECT (request), callback, user_data, + soup_request_http_send_async); + session = soup_request_get_session (request); cache = (SoupCache *)soup_session_get_feature (session, SOUP_TYPE_CACHE); @@ -234,65 +223,50 @@ soup_request_http_send_async (SoupRequest *request, response = soup_cache_has_response (cache, http->priv->msg); if (response == SOUP_CACHE_RESPONSE_FRESH) { - SoupHTTPInputStream *httpstream; + stream = soup_cache_send_response (cache, http->priv->msg); - httpstream = (SoupHTTPInputStream *) - soup_cache_send_response (cache, http->priv->msg); - - /* Cached resource file could have been deleted outside - */ - if (httpstream) { + /* Cached resource file could have been deleted outside */ + if (stream) { /* Do return the stream asynchronously as in * the other cases. 
It's not enough to use * g_simple_async_result_complete_in_idle as * the signals must be also emitted * asynchronously */ - SendAsyncHelper *helper = g_slice_new (SendAsyncHelper); - helper->http = g_object_ref (http); - helper->callback = callback; - helper->user_data = user_data; - helper->httpstream = httpstream; - soup_add_timeout (soup_session_get_async_context (session), - 0, send_async_cb, helper); + sadata->stream = stream; + soup_add_completion (soup_session_get_async_context (session), + idle_return_from_cache_cb, sadata); return; } } else if (response == SOUP_CACHE_RESPONSE_NEEDS_VALIDATION) { SoupMessage *conditional_msg; - ConditionalHelper *helper; conditional_msg = soup_cache_generate_conditional_request (cache, http->priv->msg); - helper = g_slice_new0 (ConditionalHelper); - helper->req = g_object_ref (http); - helper->original = g_object_ref (http->priv->msg); - helper->cancellable = cancellable; - helper->callback = callback; - helper->user_data = user_data; - soup_session_queue_message (session, conditional_msg, - conditional_get_ready_cb, - helper); - return; + if (conditional_msg) { + sadata->original = g_object_ref (http->priv->msg); + soup_session_queue_message (session, conditional_msg, + conditional_get_ready_cb, + sadata); + return; + } } } - simple = g_simple_async_result_new (G_OBJECT (http), - callback, user_data, - soup_request_http_send_async); - httpstream = soup_http_input_stream_new (soup_request_get_session (request), - http->priv->msg); - soup_http_input_stream_send_async (httpstream, G_PRIORITY_DEFAULT, - cancellable, sent_async, simple); + stream = soup_http_input_stream_new (session, http->priv->msg); + soup_http_input_stream_send_async (SOUP_HTTP_INPUT_STREAM (stream), + G_PRIORITY_DEFAULT, cancellable, + http_input_stream_ready_cb, sadata); } static GInputStream * -soup_request_http_send_finish (SoupRequest *request, - GAsyncResult *result, - GError **error) +soup_request_http_send_finish (SoupRequest *request, + 
GAsyncResult *result, + GError **error) { GSimpleAsyncResult *simple; - g_return_val_if_fail (g_simple_async_result_is_valid (result, G_OBJECT (request), soup_request_http_send_async) || g_simple_async_result_is_valid (result, G_OBJECT (request), conditional_get_ready_cb), NULL); + g_return_val_if_fail (g_simple_async_result_is_valid (result, G_OBJECT (request), soup_request_http_send_async), NULL); simple = G_SIMPLE_ASYNC_RESULT (result); if (g_simple_async_result_propagate_error (simple, error)) @@ -313,7 +287,7 @@ soup_request_http_get_content_type (SoupRequest *request) { SoupRequestHTTP *http = SOUP_REQUEST_HTTP (request); - return soup_message_headers_get_content_type (http->priv->msg->response_headers, NULL); + return http->priv->content_type; } static const char *http_schemes[] = { "http", "https", NULL }; diff --git a/libsoup/soup-request.c b/libsoup/soup-request.c index ac85615..06fe7ff 100644 --- a/libsoup/soup-request.c +++ b/libsoup/soup-request.c @@ -38,13 +38,14 @@ * SECTION:soup-request * @short_description: Protocol-independent streaming request interface * - * FIXME + * A #SoupRequest is created by #SoupRequester, and represents a + * request to retrieve a particular URI. */ /** * SoupRequest: * - * FIXME + * A request to retrieve a particular URI. 
* * Since: 2.34 */ @@ -148,7 +149,7 @@ soup_request_initable_init (GInitable *initable, ok = SOUP_REQUEST_GET_CLASS (initable)-> check_uri (request, request->priv->uri, error); - if (!ok && error) { + if (!ok && error && !*error) { char *uri_string = soup_uri_to_string (request->priv->uri, FALSE); g_set_error (error, SOUP_REQUESTER_ERROR, SOUP_REQUESTER_ERROR_BAD_URI, _("Invalid '%s' URI: %s"), @@ -194,6 +195,20 @@ soup_request_default_send_finish (SoupRequest *request, return soup_request_send (request, NULL, error); } +/** + * soup_request_send: + * @request: a #SoupRequest + * @cancellable: a #GCancellable or %NULL + * @error: return location for a #GError, or %NULL + * + * Synchronously requests the URI pointed to by @request, and returns + * a #GInputStream that can be used to read its contents. + * + * Return value: (transfer full): a #GInputStream that can be used to + * read from the URI pointed to by @request. + * + * Since: 2.34 + */ GInputStream * soup_request_send (SoupRequest *request, GCancellable *cancellable, @@ -203,16 +218,41 @@ soup_request_send (SoupRequest *request, send (request, cancellable, error); } +/** + * soup_request_send_async: + * @request: a #SoupRequest + * @cancellable: a #GCancellable or %NULL + * @callback: a #GAsyncReadyCallback + * @user_data: user data passed to @callback + * + * Begins an asynchronously request for the URI pointed to by + * @request. 
+ * + * Since: 2.34 + */ void -soup_request_send_async (SoupRequest *request, - GCancellable *cancellable, - GAsyncReadyCallback callback, - gpointer user_data) +soup_request_send_async (SoupRequest *request, + GCancellable *cancellable, + GAsyncReadyCallback callback, + gpointer user_data) { SOUP_REQUEST_GET_CLASS (request)-> send_async (request, cancellable, callback, user_data); } +/** + * soup_request_send_finish: + * @request: a #SoupRequest + * @result: the #GAsyncResult + * @error: return location for a #GError, or %NULL + * + * Gets the result of a soup_request_send_async(). + * + * Return value: (transfer full): a #GInputStream that can be used to + * read from the URI pointed to by @request. + * + * Since: 2.34 + */ GInputStream * soup_request_send_finish (SoupRequest *request, GAsyncResult *result, @@ -259,24 +299,68 @@ soup_request_initable_interface_init (GInitableIface *initable_interface) initable_interface->init = soup_request_initable_init; } +/** + * soup_request_get_uri: + * @request: a #SoupRequest + * + * Gets @request's URI + * + * Return value: (transfer none): @request's URI + * + * Since: 2.34 + */ SoupURI * soup_request_get_uri (SoupRequest *request) { return request->priv->uri; } +/** + * soup_request_get_session: + * @request: a #SoupRequest + * + * Gets @request's #SoupSession + * + * Return value: (transfer none): @request's #SoupSession + * + * Since: 2.34 + */ SoupSession * soup_request_get_session (SoupRequest *request) { return request->priv->session; } +/** + * soup_request_get_content_length: + * @request: a #SoupRequest + * + * Gets the length of the data represented by @request. + * + * Return value: the length of the data represented by @request, + * or -1 if not known. 
+ * + * Since: 2.34 + */ goffset soup_request_get_content_length (SoupRequest *request) { return SOUP_REQUEST_GET_CLASS (request)->get_content_length (request); } +/** + * soup_request_get_content_type: + * @request: a #SoupRequest + * + * Gets the type of the data represented by @request. As in the + * HTTP Content-Type header, this may include parameters after + * the MIME type. + * + * Return value: the type of the data represented by @request, + * or %NULL if not known. + * + * Since: 2.34 + */ const char * soup_request_get_content_type (SoupRequest *request) { diff --git a/libsoup/soup-requester.c b/libsoup/soup-requester.c index 3375a7a..d71d447 100644 --- a/libsoup/soup-requester.c +++ b/libsoup/soup-requester.c @@ -178,12 +178,35 @@ soup_requester_session_feature_init (SoupSessionFeatureInterface *feature_interf feature_interface->has_feature = has_feature; } +/** + * soup_requester_new: + * + * Creates a new #SoupRequester object, which can be added to + * a #SoupSession with soup_session_add_feature(). + * + * Return value: the new #SoupRequester + * + * Since: 2.34 + */ SoupRequester * soup_requester_new (void) { return g_object_new (SOUP_TYPE_REQUESTER, NULL); } +/** + * soup_requester_request: + * @requester: a #SoupRequester + * @uri_string: a URI, in string form + * @error: return location for a #GError, or %NULL + * + * Creates a #SoupRequest for retrieving @uri_string. + * + * Return value: (transfer full): a new #SoupRequest, or + * %NULL on error. + * + * Since: 2.34 + */ SoupRequest * soup_requester_request (SoupRequester *requester, const char *uri_string, GError **error) @@ -203,6 +226,19 @@ soup_requester_request (SoupRequester *requester, const char *uri_string, return req; } +/** + * soup_requester_request_uri: + * @requester: a #SoupRequester + * @uri: a #SoupURI representing the URI to retrieve + * @error: return location for a #GError, or %NULL + * + * Creates a #SoupRequest for retrieving @uri. 
+ * + * Return value: (transfer full): a new #SoupRequest, or + * %NULL on error. + * + * Since: 2.34 + */ SoupRequest * soup_requester_request_uri (SoupRequester *requester, SoupURI *uri, GError **error) @@ -225,6 +261,25 @@ soup_requester_request_uri (SoupRequester *requester, SoupURI *uri, NULL); } +/** + * SOUP_REQUESTER_ERROR: + * + * A #GError domain for #SoupRequester errors. Used with + * #SoupRequesterError. + * + * Since: 2.34 + */ +/** + * SoupRequesterError: + * @SOUP_REQUESTER_ERROR_BAD_URI: the URI could not be parsed + * @SOUP_REQUESTER_ERROR_UNSUPPORTED_URI_SCHEME: the URI scheme is not + * supported by this #SoupRequester + * + * A #SoupRequester error. + * + * Since: 2.34 + */ + GQuark soup_requester_error_quark (void) { diff --git a/libsoup/soup-server.c b/libsoup/soup-server.c index 92bcc50..96704ce 100644 --- a/libsoup/soup-server.c +++ b/libsoup/soup-server.c @@ -24,7 +24,6 @@ #include "soup-marshal.h" #include "soup-path-map.h" #include "soup-socket.h" -#include "soup-ssl.h" /** * SECTION:soup-server @@ -99,7 +98,7 @@ typedef struct { guint port; char *ssl_cert_file, *ssl_key_file; - SoupSSLCredentials *ssl_creds; + GTlsCertificate *ssl_cert; char *server_header; @@ -127,6 +126,7 @@ enum { PROP_INTERFACE, PROP_SSL_CERT_FILE, PROP_SSL_KEY_FILE, + PROP_TLS_CERTIFICATE, PROP_ASYNC_CONTEXT, PROP_RAW_PATHS, PROP_SERVER_HEADER, @@ -169,8 +169,8 @@ finalize (GObject *object) g_free (priv->ssl_cert_file); g_free (priv->ssl_key_file); - if (priv->ssl_creds) - soup_ssl_free_server_credentials (priv->ssl_creds); + if (priv->ssl_cert) + g_object_unref (priv->ssl_cert); g_free (priv->server_header); @@ -247,7 +247,7 @@ soup_server_class_init (SoupServerClass *server_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupServerClass, request_started), NULL, NULL, - soup_marshal_NONE__OBJECT_POINTER, + _soup_marshal_NONE__OBJECT_POINTER, G_TYPE_NONE, 2, SOUP_TYPE_MESSAGE, SOUP_TYPE_CLIENT_CONTEXT); @@ -272,7 +272,7 @@ soup_server_class_init (SoupServerClass 
*server_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupServerClass, request_read), NULL, NULL, - soup_marshal_NONE__OBJECT_POINTER, + _soup_marshal_NONE__OBJECT_POINTER, G_TYPE_NONE, 2, SOUP_TYPE_MESSAGE, SOUP_TYPE_CLIENT_CONTEXT); @@ -292,7 +292,7 @@ soup_server_class_init (SoupServerClass *server_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupServerClass, request_finished), NULL, NULL, - soup_marshal_NONE__OBJECT_POINTER, + _soup_marshal_NONE__OBJECT_POINTER, G_TYPE_NONE, 2, SOUP_TYPE_MESSAGE, SOUP_TYPE_CLIENT_CONTEXT); @@ -321,7 +321,7 @@ soup_server_class_init (SoupServerClass *server_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupServerClass, request_aborted), NULL, NULL, - soup_marshal_NONE__OBJECT_POINTER, + _soup_marshal_NONE__OBJECT_POINTER, G_TYPE_NONE, 2, SOUP_TYPE_MESSAGE, SOUP_TYPE_CLIENT_CONTEXT); @@ -356,9 +356,18 @@ soup_server_class_init (SoupServerClass *server_class) /** * SOUP_SERVER_SSL_CERT_FILE: * - * Alias for the #SoupServer:ssl-cert-file property. (The file - * containing the SSL certificate for the server.) - **/ + * Alias for the #SoupServer:ssl-cert-file property, qv. + */ + /** + * SoupServer:ssl-cert-file: + * + * Path to a file containing a PEM-encoded certificate. If + * this and #SoupServer:ssl-key-file are both set, then the + * server will speak https rather than plain http. + * + * Alternatively, you can use #SoupServer:tls-certificate + * to provide an arbitrary #GTlsCertificate. + */ g_object_class_install_property ( object_class, PROP_SSL_CERT_FILE, g_param_spec_string (SOUP_SERVER_SSL_CERT_FILE, @@ -369,9 +378,20 @@ soup_server_class_init (SoupServerClass *server_class) /** * SOUP_SERVER_SSL_KEY_FILE: * - * Alias for the #SoupServer:ssl-key-file property. (The file - * containing the SSL certificate key for the server.) - **/ + * Alias for the #SoupServer:ssl-key-file property, qv. + */ + /** + * SoupServer:ssl-key-file: + * + * Path to a file containing a PEM-encoded private key. 
If + * this and #SoupServer:ssl-key-file are both set, then the + * server will speak https rather than plain http. Note that + * you are allowed to set them to the same value, if you have + * a single file containing both the certificate and the key. + * + * Alternatively, you can use #SoupServer:tls-certificate + * to provide an arbitrary #GTlsCertificate. + */ g_object_class_install_property ( object_class, PROP_SSL_KEY_FILE, g_param_spec_string (SOUP_SERVER_SSL_KEY_FILE, @@ -380,6 +400,29 @@ soup_server_class_init (SoupServerClass *server_class) NULL, G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); /** + * SOUP_SERVER_TLS_CERTIFICATE: + * + * Alias for the #SoupServer:tls-certificate property, qv. + */ + /** + * SoupServer:tls-certificate: + * + * A #GTlsCertificate that has a #GTlsCertificate:private-key + * set. If this is set, then the server will speak https + * rather than plain http. + * + * Alternatively, you can use #SoupServer:ssl-cert-file and + * #SoupServer:ssl-key-file properties, to have #SoupServer + * read in a a certificate from a file. + */ + g_object_class_install_property ( + object_class, PROP_TLS_CERTIFICATE, + g_param_spec_object (SOUP_SERVER_TLS_CERTIFICATE, + "TLS certificate", + "GTlsCertificate to use for https", + G_TYPE_TLS_CERTIFICATE, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); + /** * SOUP_SERVER_ASYNC_CONTEXT: * * Alias for the #SoupServer:async-context property. (The @@ -429,7 +472,7 @@ soup_server_class_init (SoupServerClass *server_class) * holes. * * As with #SoupSession:user_agent, if you set a - * %server_header property that has trailing whitespace, + * #SoupServer:server_header property that has trailing whitespace, * #SoupServer will append its own product token (eg, * "libsoup/2.3.2") to the end of the * header for you. 
@@ -469,10 +512,15 @@ constructor (GType type, } if (priv->ssl_cert_file && priv->ssl_key_file) { - priv->ssl_creds = soup_ssl_get_server_credentials ( - priv->ssl_cert_file, - priv->ssl_key_file); - if (!priv->ssl_creds) { + GError *error = NULL; + + if (priv->ssl_cert) + g_object_unref (priv->ssl_cert); + priv->ssl_cert = g_tls_certificate_new_from_files (priv->ssl_cert_file, priv->ssl_key_file, &error); + if (!priv->ssl_cert) { + g_warning ("Could not read SSL certificate from '%s': %s", + priv->ssl_cert_file, error->message); + g_error_free (error); g_object_unref (server); return NULL; } @@ -480,7 +528,7 @@ constructor (GType type, priv->listen_sock = soup_socket_new (SOUP_SOCKET_LOCAL_ADDRESS, priv->iface, - SOUP_SOCKET_SSL_CREDENTIALS, priv->ssl_creds, + SOUP_SOCKET_SSL_CREDENTIALS, priv->ssl_cert, SOUP_SOCKET_ASYNC_CONTEXT, priv->async_context, NULL); if (!soup_socket_listen (priv->listen_sock)) { @@ -525,6 +573,11 @@ set_property (GObject *object, guint prop_id, priv->ssl_key_file = g_strdup (g_value_get_string (value)); break; + case PROP_TLS_CERTIFICATE: + if (priv->ssl_cert) + g_object_unref (priv->ssl_cert); + priv->ssl_cert = g_value_dup_object (value); + break; case PROP_ASYNC_CONTEXT: priv->async_context = g_value_get_pointer (value); if (priv->async_context) @@ -573,6 +626,9 @@ get_property (GObject *object, guint prop_id, case PROP_SSL_KEY_FILE: g_value_set_string (value, priv->ssl_key_file); break; + case PROP_TLS_CERTIFICATE: + g_value_set_object (value, priv->ssl_cert); + break; case PROP_ASYNC_CONTEXT: g_value_set_pointer (value, priv->async_context ? g_main_context_ref (priv->async_context) : NULL); break; @@ -1206,8 +1262,9 @@ soup_client_context_get_auth_user (SoupClientContext *client) * @user_data: the data passed to @soup_server_add_handler * * A callback used to handle requests to a #SoupServer. 
The callback - * will be invoked after receiving the request body; @msg's %method, - * %request_headers, and %request_body fields will be filled in. + * will be invoked after receiving the request body; @msg's + * #SoupMessage:method, #SoupMessage:request_headers, and + * #SoupMessage:request_body fields will be filled in. * * @path and @query contain the likewise-named components of the * Request-URI, subject to certain assumptions. By default, @@ -1243,7 +1300,7 @@ soup_client_context_get_auth_user (SoupClientContext *client) * * To send the response body a bit at a time using "chunked" encoding, * first call soup_message_headers_set_encoding() to set - * %SOUP_ENCODING_CHUNKED on the %response_headers. Then call + * %SOUP_ENCODING_CHUNKED on the #SoupMessage:response_headers. Then call * soup_message_body_append() (or soup_message_body_append_buffer()) * to append each chunk as it becomes ready, and * soup_server_unpause_message() to make sure it's running. (The diff --git a/libsoup/soup-server.h b/libsoup/soup-server.h index 4ea17ad..e1c9bbf 100644 --- a/libsoup/soup-server.h +++ b/libsoup/soup-server.h @@ -56,13 +56,14 @@ typedef void (*SoupServerCallback) (SoupServer *server, SoupClientContext *client, gpointer user_data); -#define SOUP_SERVER_PORT "port" -#define SOUP_SERVER_INTERFACE "interface" -#define SOUP_SERVER_SSL_CERT_FILE "ssl-cert-file" -#define SOUP_SERVER_SSL_KEY_FILE "ssl-key-file" -#define SOUP_SERVER_ASYNC_CONTEXT "async-context" -#define SOUP_SERVER_RAW_PATHS "raw-paths" -#define SOUP_SERVER_SERVER_HEADER "server-header" +#define SOUP_SERVER_PORT "port" +#define SOUP_SERVER_INTERFACE "interface" +#define SOUP_SERVER_SSL_CERT_FILE "ssl-cert-file" +#define SOUP_SERVER_SSL_KEY_FILE "ssl-key-file" +#define SOUP_SERVER_TLS_CERTIFICATE "tls-certificate" +#define SOUP_SERVER_ASYNC_CONTEXT "async-context" +#define SOUP_SERVER_RAW_PATHS "raw-paths" +#define SOUP_SERVER_SERVER_HEADER "server-header" SoupServer *soup_server_new (const char *optname1, 
...) G_GNUC_NULL_TERMINATED; diff --git a/libsoup/soup-session-async.c b/libsoup/soup-session-async.c index 661883b..eaadd58 100644 --- a/libsoup/soup-session-async.c +++ b/libsoup/soup-session-async.c @@ -39,6 +39,7 @@ static void queue_message (SoupSession *session, SoupMessage *req, static guint send_message (SoupSession *session, SoupMessage *req); static void cancel_message (SoupSession *session, SoupMessage *msg, guint status_code); +static void kick (SoupSession *session); static void auth_required (SoupSession *session, SoupMessage *msg, SoupAuth *auth, gboolean retrying); @@ -46,24 +47,38 @@ static void auth_required (SoupSession *session, SoupMessage *msg, G_DEFINE_TYPE (SoupSessionAsync, soup_session_async, SOUP_TYPE_SESSION) typedef struct { - GSource *idle_run_queue_source; + GHashTable *idle_run_queue_sources; + } SoupSessionAsyncPrivate; #define SOUP_SESSION_ASYNC_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), SOUP_TYPE_SESSION_ASYNC, SoupSessionAsyncPrivate)) static void +destroy_unref_source (gpointer source) +{ + g_source_destroy (source); + g_source_unref (source); +} + +static void soup_session_async_init (SoupSessionAsync *sa) { + SoupSessionAsyncPrivate *priv = SOUP_SESSION_ASYNC_GET_PRIVATE (sa); + + priv->idle_run_queue_sources = + g_hash_table_new_full (NULL, NULL, NULL, destroy_unref_source); } static void -finalize (GObject *object) +dispose (GObject *object) { SoupSessionAsyncPrivate *priv = SOUP_SESSION_ASYNC_GET_PRIVATE (object); - if (priv->idle_run_queue_source) - g_source_destroy (priv->idle_run_queue_source); + if (priv->idle_run_queue_sources) { + g_hash_table_destroy (priv->idle_run_queue_sources); + priv->idle_run_queue_sources = NULL; + } - G_OBJECT_CLASS (soup_session_async_parent_class)->finalize (object); + G_OBJECT_CLASS (soup_session_async_parent_class)->dispose (object); } static void @@ -80,8 +95,9 @@ soup_session_async_class_init (SoupSessionAsyncClass *soup_session_async_class) session_class->send_message = 
send_message; session_class->cancel_message = cancel_message; session_class->auth_required = auth_required; + session_class->kick = kick; - object_class->finalize = finalize; + object_class->dispose = dispose; } @@ -232,6 +248,8 @@ tunnel_complete (SoupMessageQueueItem *item) soup_message_finished (item->msg); if (item->related->msg->status_code) item->related->state = SOUP_MESSAGE_FINISHING; + else + soup_message_set_https_status (item->related->msg, item->conn); do_idle_run_queue (session); soup_message_queue_item_unref (item->related); @@ -270,6 +288,7 @@ tunnel_message_completed (SoupMessage *msg, gpointer user_data) if (item->state == SOUP_MESSAGE_RESTARTING) { soup_message_restarted (msg); if (item->conn) { + item->state = SOUP_MESSAGE_RUNNING; soup_session_send_queue_item (session, item, tunnel_message_completed); return; } @@ -284,8 +303,7 @@ tunnel_message_completed (SoupMessage *msg, gpointer user_data) soup_connection_disconnect (item->conn); if (msg->status_code == SOUP_STATUS_TRY_AGAIN) { item->related->state = SOUP_MESSAGE_AWAITING_CONNECTION; - g_object_unref (item->related->conn); - item->related->conn = NULL; + soup_message_queue_item_set_connection (item->related, NULL); } else soup_message_set_status (item->related->msg, msg->status_code); @@ -312,12 +330,13 @@ got_connection (SoupConnection *conn, guint status, gpointer user_data) return; } + soup_message_set_https_status (item->msg, conn); + if (status != SOUP_STATUS_OK) { soup_connection_disconnect (conn); if (status == SOUP_STATUS_TRY_AGAIN) { - g_object_unref (item->conn); - item->conn = NULL; + soup_message_queue_item_set_connection (item, NULL); item->state = SOUP_MESSAGE_AWAITING_CONNECTION; } else { soup_session_set_item_status (session, item, status); @@ -358,7 +377,13 @@ process_queue_item (SoupMessageQueueItem *item, SoupSession *session = item->session; SoupProxyURIResolver *proxy_resolver; + if (item->async_context != soup_session_get_async_context (session)) + return; + do { + if 
(item->paused) + return; + switch (item->state) { case SOUP_MESSAGE_STARTING: proxy_resolver = (SoupProxyURIResolver *)soup_session_get_feature_for_message (session, SOUP_TYPE_PROXY_URI_RESOLVER, item->msg); @@ -461,7 +486,11 @@ idle_run_queue (gpointer sa) { SoupSessionAsyncPrivate *priv = SOUP_SESSION_ASYNC_GET_PRIVATE (sa); - priv->idle_run_queue_source = NULL; + if (!priv->idle_run_queue_sources) + return FALSE; + + g_hash_table_remove (priv->idle_run_queue_sources, + soup_session_get_async_context (sa)); run_queue (sa); return FALSE; } @@ -471,10 +500,16 @@ do_idle_run_queue (SoupSession *session) { SoupSessionAsyncPrivate *priv = SOUP_SESSION_ASYNC_GET_PRIVATE (session); - if (!priv->idle_run_queue_source) { - priv->idle_run_queue_source = soup_add_completion ( - soup_session_get_async_context (session), - idle_run_queue, session); + if (!priv->idle_run_queue_sources) + return; + + if (!g_hash_table_lookup (priv->idle_run_queue_sources, + soup_session_get_async_context (session))) { + GMainContext *async_context = soup_session_get_async_context (session); + GSource *source = soup_add_completion (async_context, idle_run_queue, session); + + g_hash_table_insert (priv->idle_run_queue_sources, + async_context, g_source_ref (source)); } } @@ -575,3 +610,9 @@ auth_required (SoupSession *session, SoupMessage *msg, auth_required (session, msg, auth, retrying); } } + +static void +kick (SoupSession *session) +{ + do_idle_run_queue (session); +} diff --git a/libsoup/soup-session-feature.c b/libsoup/soup-session-feature.c index 97061ad..f572a3a 100644 --- a/libsoup/soup-session-feature.c +++ b/libsoup/soup-session-feature.c @@ -42,6 +42,9 @@ * @request_queued: Proxies the session's #SoupSession::request_queued signal * @request_started: Proxies the session's #SoupSession::request_started signal * @request_unqueued: Proxies the session's #SoupSession::request_unqueued signal + * @add_feature: adds a sub-feature to the main feature + * @remove_feature: removes a 
sub-feature from the main feature + * @has_feature: tests if the feature includes a sub-feature * * The interface implemented by #SoupSessionFeatures. * diff --git a/libsoup/soup-session-sync.c b/libsoup/soup-session-sync.c index 373b1bd..1a919c7 100644 --- a/libsoup/soup-session-sync.c +++ b/libsoup/soup-session-sync.c @@ -48,8 +48,8 @@ **/ typedef struct { - GMutex *lock; - GCond *cond; + GMutex lock; + GCond cond; } SoupSessionSyncPrivate; #define SOUP_SESSION_SYNC_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), SOUP_TYPE_SESSION_SYNC, SoupSessionSyncPrivate)) @@ -61,6 +61,7 @@ static void cancel_message (SoupSession *session, SoupMessage *msg, static void auth_required (SoupSession *session, SoupMessage *msg, SoupAuth *auth, gboolean retrying); static void flush_queue (SoupSession *session); +static void kick (SoupSession *session); G_DEFINE_TYPE (SoupSessionSync, soup_session_sync, SOUP_TYPE_SESSION) @@ -69,8 +70,8 @@ soup_session_sync_init (SoupSessionSync *ss) { SoupSessionSyncPrivate *priv = SOUP_SESSION_SYNC_GET_PRIVATE (ss); - priv->lock = g_mutex_new (); - priv->cond = g_cond_new (); + g_mutex_init (&priv->lock); + g_cond_init (&priv->cond); } static void @@ -78,8 +79,8 @@ finalize (GObject *object) { SoupSessionSyncPrivate *priv = SOUP_SESSION_SYNC_GET_PRIVATE (object); - g_mutex_free (priv->lock); - g_cond_free (priv->cond); + g_mutex_clear (&priv->lock); + g_cond_clear (&priv->cond); G_OBJECT_CLASS (soup_session_sync_parent_class)->finalize (object); } @@ -98,6 +99,7 @@ soup_session_sync_class_init (SoupSessionSyncClass *session_sync_class) session_class->cancel_message = cancel_message; session_class->auth_required = auth_required; session_class->flush_queue = flush_queue; + session_class->kick = kick; object_class->finalize = finalize; } @@ -153,9 +155,9 @@ tunnel_connect (SoupSession *session, SoupMessageQueueItem *related) soup_session_send_queue_item (session, item, NULL); status = item->msg->status_code; if (item->state == 
SOUP_MESSAGE_RESTARTING && - soup_connection_get_state (conn) != SOUP_CONNECTION_DISCONNECTED) { - item->state = SOUP_MESSAGE_STARTING; + soup_message_io_in_progress (item->msg)) { soup_message_restarted (item->msg); + item->state = SOUP_MESSAGE_RUNNING; } else { if (item->state == SOUP_MESSAGE_RESTARTING) status = SOUP_STATUS_TRY_AGAIN; @@ -169,6 +171,7 @@ tunnel_connect (SoupSession *session, SoupMessageQueueItem *related) if (SOUP_STATUS_IS_SUCCESSFUL (status)) { if (!soup_connection_start_ssl_sync (conn, related->cancellable)) status = SOUP_STATUS_SSL_FAILED; + soup_message_set_https_status (related->msg, conn); } if (!SOUP_STATUS_IS_SUCCESSFUL (status)) @@ -206,18 +209,18 @@ try_again: status = soup_connection_connect_sync (item->conn, item->cancellable); if (status == SOUP_STATUS_TRY_AGAIN) { soup_connection_disconnect (item->conn); - g_object_unref (item->conn); - item->conn = NULL; + soup_message_queue_item_set_connection (item, NULL); goto try_again; } + soup_message_set_https_status (msg, item->conn); + if (!SOUP_STATUS_IS_SUCCESSFUL (status)) { if (!msg->status_code) soup_session_set_item_status (session, item, status); item->state = SOUP_MESSAGE_FINISHING; soup_connection_disconnect (item->conn); - g_object_unref (item->conn); - item->conn = NULL; + soup_message_queue_item_set_connection (item, NULL); return; } @@ -225,8 +228,7 @@ try_again: status = tunnel_connect (session, item); if (!SOUP_STATUS_IS_SUCCESSFUL (status)) { soup_connection_disconnect (item->conn); - g_object_unref (item->conn); - item->conn = NULL; + soup_message_queue_item_set_connection (item, NULL); if (status == SOUP_STATUS_TRY_AGAIN) goto try_again; soup_session_set_item_status (session, item, status); @@ -249,6 +251,13 @@ process_queue_item (SoupMessageQueueItem *item) item->state = SOUP_MESSAGE_STARTING; do { + if (item->paused) { + g_mutex_lock (&priv->lock); + while (item->paused) + g_cond_wait (&priv->cond, &priv->lock); + g_mutex_unlock (&priv->lock); + } + switch 
(item->state) { case SOUP_MESSAGE_STARTING: proxy_resolver = (SoupProxyURIResolver *)soup_session_get_feature_for_message (session, SOUP_TYPE_PROXY_URI_RESOLVER, msg); @@ -283,13 +292,13 @@ process_queue_item (SoupMessageQueueItem *item) break; case SOUP_MESSAGE_AWAITING_CONNECTION: - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); do { get_connection (item); if (item->state == SOUP_MESSAGE_AWAITING_CONNECTION) - g_cond_wait (priv->cond, priv->lock); + g_cond_wait (&priv->cond, &priv->lock); } while (item->state == SOUP_MESSAGE_AWAITING_CONNECTION); - g_mutex_unlock (priv->lock); + g_mutex_unlock (&priv->lock); break; case SOUP_MESSAGE_READY: @@ -308,7 +317,7 @@ process_queue_item (SoupMessageQueueItem *item) item->state = SOUP_MESSAGE_FINISHED; soup_message_finished (item->msg); soup_session_unqueue_item (session, item); - g_cond_broadcast (priv->cond); + g_cond_broadcast (&priv->cond); break; default: @@ -354,6 +363,7 @@ queue_message (SoupSession *session, SoupMessage *msg, SoupSessionCallback callback, gpointer user_data) { SoupMessageQueueItem *item; + GThread *thread; SOUP_SESSION_CLASS (soup_session_sync_parent_class)-> queue_message (g_object_ref (session), msg, callback, user_data); @@ -361,7 +371,9 @@ queue_message (SoupSession *session, SoupMessage *msg, item = soup_message_queue_lookup (soup_session_get_queue (session), msg); g_return_if_fail (item != NULL); - g_thread_create (queue_message_thread, item, FALSE, NULL); + thread = g_thread_new ("SoupSessionSync:queue_message", + queue_message_thread, item); + g_thread_unref (thread); } static guint @@ -386,10 +398,10 @@ cancel_message (SoupSession *session, SoupMessage *msg, guint status_code) { SoupSessionSyncPrivate *priv = SOUP_SESSION_SYNC_GET_PRIVATE (session); - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); SOUP_SESSION_CLASS (soup_session_sync_parent_class)->cancel_message (session, msg, status_code); - g_cond_broadcast (priv->cond); - g_mutex_unlock (priv->lock); + 
g_cond_broadcast (&priv->cond); + g_mutex_unlock (&priv->lock); } static void @@ -437,7 +449,7 @@ flush_queue (SoupSession *session) * try to cancel those requests as well, since we'd likely * just end up looping forever.) */ - g_mutex_lock (priv->lock); + g_mutex_lock (&priv->lock); do { done = TRUE; for (item = soup_message_queue_first (queue); @@ -448,9 +460,19 @@ flush_queue (SoupSession *session) } if (!done) - g_cond_wait (priv->cond, priv->lock); + g_cond_wait (&priv->cond, &priv->lock); } while (!done); - g_mutex_unlock (priv->lock); + g_mutex_unlock (&priv->lock); g_hash_table_destroy (current); } + +static void +kick (SoupSession *session) +{ + SoupSessionSyncPrivate *priv = SOUP_SESSION_SYNC_GET_PRIVATE (session); + + g_mutex_lock (&priv->lock); + g_cond_broadcast (&priv->cond); + g_mutex_unlock (&priv->lock); +} diff --git a/libsoup/soup-session.c b/libsoup/soup-session.c index fd568ef..2797fda 100644 --- a/libsoup/soup-session.c +++ b/libsoup/soup-session.c @@ -13,7 +13,6 @@ #include #include -#include "soup-address.h" #include "soup-auth.h" #include "soup-auth-basic.h" #include "soup-auth-digest.h" @@ -29,10 +28,9 @@ #include "soup-session-feature.h" #include "soup-session-private.h" #include "soup-socket.h" -#include "soup-ssl.h" #include "soup-uri.h" -/*TIZEN patch*/ -#include "TIZEN.h" + +#define HOST_KEEP_ALIVE 5 * 60 * 1000 /* 5 min in msecs */ /** * SECTION:soup-session @@ -70,11 +68,16 @@ typedef struct { guint num_messages; gboolean ssl_fallback; + + GSource *keep_alive_src; + SoupSession *session; } SoupSessionHost; +static guint soup_host_uri_hash (gconstpointer key); +gboolean soup_host_uri_equal (gconstpointer v1, gconstpointer v2); typedef struct { + GTlsDatabase *tlsdb; char *ssl_ca_file; - SoupSSLCredentials *ssl_creds; gboolean ssl_strict; SoupMessageQueue *queue; @@ -86,7 +89,7 @@ typedef struct { GSList *features; GHashTable *features_cache; - GHashTable *hosts; /* char* -> SoupSessionHost */ + GHashTable *http_hosts, *https_hosts; 
/* char* -> SoupSessionHost */ GHashTable *conns; /* SoupConnection -> SoupSessionHost */ guint num_conns; guint max_conns, max_conns_per_host; @@ -96,11 +99,14 @@ typedef struct { * new SoupSessionHost, or adding/removing a connection. * Must not emit signals or destroy objects while holding it. */ - GMutex *host_lock; + GMutex host_lock; GMainContext *async_context; + gboolean use_thread_context; GResolver *resolver; + + char **http_aliases, **https_aliases; } SoupSessionPrivate; #define SOUP_SESSION_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), SOUP_TYPE_SESSION, SoupSessionPrivate)) @@ -148,8 +154,11 @@ enum { PROP_MAX_CONNS_PER_HOST, PROP_USE_NTLM, PROP_SSL_CA_FILE, + PROP_SSL_USE_SYSTEM_CA_FILE, + PROP_TLS_DATABASE, PROP_SSL_STRICT, PROP_ASYNC_CONTEXT, + PROP_USE_THREAD_CONTEXT, PROP_TIMEOUT, PROP_USER_AGENT, PROP_ACCEPT_LANGUAGE, @@ -158,6 +167,8 @@ enum { PROP_ADD_FEATURE, PROP_ADD_FEATURE_BY_TYPE, PROP_REMOVE_FEATURE_BY_TYPE, + PROP_HTTP_ALIASES, + PROP_HTTPS_ALIASES, LAST_PROP }; @@ -175,10 +186,13 @@ soup_session_init (SoupSession *session) priv->queue = soup_message_queue_new (session); - priv->host_lock = g_mutex_new (); - priv->hosts = g_hash_table_new_full (soup_uri_host_hash, - soup_uri_host_equal, - NULL, (GDestroyNotify)free_host); + g_mutex_init (&priv->host_lock); + priv->http_hosts = g_hash_table_new_full (soup_host_uri_hash, + soup_host_uri_equal, + NULL, (GDestroyNotify)free_host); + priv->https_hosts = g_hash_table_new_full (soup_host_uri_hash, + soup_host_uri_equal, + NULL, (GDestroyNotify)free_host); priv->conns = g_hash_table_new (NULL, NULL); priv->max_conns = SOUP_SESSION_MAX_CONNS_DEFAULT; @@ -202,6 +216,10 @@ soup_session_init (SoupSession *session) priv->resolver = g_resolver_get_default (); priv->ssl_strict = TRUE; + + priv->http_aliases = g_new (char *, 2); + priv->http_aliases[0] = (char *)g_intern_string ("*"); + priv->http_aliases[1] = NULL; } static void @@ -226,17 +244,17 @@ finalize (GObject *object) 
soup_message_queue_destroy (priv->queue); - g_mutex_free (priv->host_lock); - g_hash_table_destroy (priv->hosts); + g_mutex_clear (&priv->host_lock); + g_hash_table_destroy (priv->http_hosts); + g_hash_table_destroy (priv->https_hosts); g_hash_table_destroy (priv->conns); g_free (priv->user_agent); g_free (priv->accept_language); - if (priv->ssl_ca_file) - g_free (priv->ssl_ca_file); - if (priv->ssl_creds) - soup_ssl_free_client_credentials (priv->ssl_creds); + if (priv->tlsdb) + g_object_unref (priv->tlsdb); + g_free (priv->ssl_ca_file); if (priv->async_context) g_main_context_unref (priv->async_context); @@ -245,6 +263,9 @@ finalize (GObject *object) g_object_unref (priv->resolver); + g_free (priv->http_aliases); + g_free (priv->https_aliases); + G_OBJECT_CLASS (soup_session_parent_class)->finalize (object); } @@ -319,7 +340,7 @@ soup_session_class_init (SoupSessionClass *session_class) G_SIGNAL_RUN_FIRST, 0, /* FIXME? */ NULL, NULL, - soup_marshal_NONE__OBJECT, + _soup_marshal_NONE__OBJECT, G_TYPE_NONE, 1, SOUP_TYPE_MESSAGE); @@ -339,7 +360,7 @@ soup_session_class_init (SoupSessionClass *session_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupSessionClass, request_started), NULL, NULL, - soup_marshal_NONE__OBJECT_OBJECT, + _soup_marshal_NONE__OBJECT_OBJECT, G_TYPE_NONE, 2, SOUP_TYPE_MESSAGE, SOUP_TYPE_SOCKET); @@ -362,7 +383,7 @@ soup_session_class_init (SoupSessionClass *session_class) G_SIGNAL_RUN_FIRST, 0, /* FIXME? 
*/ NULL, NULL, - soup_marshal_NONE__OBJECT, + _soup_marshal_NONE__OBJECT, G_TYPE_NONE, 1, SOUP_TYPE_MESSAGE); @@ -395,32 +416,54 @@ soup_session_class_init (SoupSessionClass *session_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupSessionClass, authenticate), NULL, NULL, - soup_marshal_NONE__OBJECT_OBJECT_BOOLEAN, + _soup_marshal_NONE__OBJECT_OBJECT_BOOLEAN, G_TYPE_NONE, 3, SOUP_TYPE_MESSAGE, SOUP_TYPE_AUTH, G_TYPE_BOOLEAN); + /** + * SoupSession::connection-created: + * @session: the #SoupSession + * @connection: the connection + * + * Emitted when a new connection is created. This is an + * internal signal intended only to be used for debugging + * purposes, and may go away in the future. + * + * Since: 2.30 + */ signals[CONNECTION_CREATED] = g_signal_new ("connection-created", G_OBJECT_CLASS_TYPE (object_class), G_SIGNAL_RUN_FIRST, 0, NULL, NULL, - soup_marshal_NONE__OBJECT, + _soup_marshal_NONE__OBJECT, G_TYPE_NONE, 1, /* SoupConnection is private, so we can't use * SOUP_TYPE_CONNECTION here. */ G_TYPE_OBJECT); + /** + * SoupSession::tunneling: + * @session: the #SoupSession + * @connection: the connection + * + * Emitted when an SSL tunnel is being created on a proxy + * connection. This is an internal signal intended only to be + * used for debugging purposes, and may go away in the future. + * + * Since: 2.30 + */ signals[TUNNELING] = g_signal_new ("tunneling", G_OBJECT_CLASS_TYPE (object_class), G_SIGNAL_RUN_FIRST, 0, NULL, NULL, - soup_marshal_NONE__OBJECT, + _soup_marshal_NONE__OBJECT, G_TYPE_NONE, 1, /* SoupConnection is private, so we can't use * SOUP_TYPE_CONNECTION here. @@ -517,10 +560,21 @@ soup_session_class_init (SoupSessionClass *session_class) FALSE, G_PARAM_READWRITE)); /** + * #SoupSession:ssl-ca-file: + * + * File containing SSL CA certificates. + * + * Deprecated: use #SoupSession:ssl-use-system-ca-file or + * #SoupSession:tls-database instead + **/ + /** * SOUP_SESSION_SSL_CA_FILE: * * Alias for the #SoupSession:ssl-ca-file property. 
(File - * containing SSL CA certificates.) + * containing SSL CA certificates.). + * + * Deprecated: use #SoupSession:ssl-use-system-ca-file or + * #SoupSession:tls-database instead **/ g_object_class_install_property ( object_class, PROP_SSL_CA_FILE, @@ -530,16 +584,89 @@ soup_session_class_init (SoupSessionClass *session_class) NULL, G_PARAM_READWRITE)); /** + * SOUP_SESSION_USE_SYSTEM_CA_FILE: + * + * Alias for the #SoupSession:ssl-use-system-ca-file property, + * qv. + * + * Since: 2.38 + **/ + /** + * #SoupSession:ssl-use-system-ca-file: + * + * Setting this to %TRUE overrides #SoupSession:ssl-ca-file + * and #SoupSession:tls-database, and uses the default system + * CA database (which, despite the name, may not actually be a + * file). + * + * See #SoupSession:ssl-strict for more information on how + * https certificate validation is handled. + * + * Since: 2.38 + **/ + g_object_class_install_property ( + object_class, PROP_SSL_USE_SYSTEM_CA_FILE, + g_param_spec_boolean (SOUP_SESSION_SSL_USE_SYSTEM_CA_FILE, + "Use system CA file", + "Use the system certificate database", + TRUE, + G_PARAM_READWRITE)); + /** + * SOUP_SESSION_TLS_DATABASE: + * + * Alias for the #SoupSession:tls-database property, qv. + * + * Since: 2.38 + **/ + /** + * #SoupSession:tls-database: + * + * Overrides #SoupSession:ssl-ca-file and + * #SoupSession:ssl-use-system-ca-file, and uses the provided + * #GTlsDatabase. + * + * See #SoupSession:ssl-strict for more information on how + * https certificate validation is handled. + * + * Since: 2.38 + **/ + g_object_class_install_property ( + object_class, PROP_TLS_DATABASE, + g_param_spec_object (SOUP_SESSION_TLS_DATABASE, + "TLS Database", + "TLS database to use", + G_TYPE_TLS_DATABASE, + G_PARAM_READWRITE)); + /** * SOUP_SESSION_SSL_STRICT: * - * Alias for the #SoupSession:ignore-ssl-cert-errors - * property. By default, when validating certificates against - * a CA file, Soup will consider invalid certificates as a - * connection error. 
Setting this property to %TRUE makes soup - * ignore the errors, and make the connection. + * Alias for the #SoupSession:ssl-strict property, qv. * * Since: 2.30 **/ + /** + * SoupSession:ssl-strict: + * + * Normally, if #SoupSession:ssl-ca-file (or + * #SoupSession:tlsdb or #SoupSession:ssl-use-system-ca-file) + * is set, then libsoup will reject any certificate that is + * invalid (ie, expired) or that is not signed by one of the + * given CA certificates, and the #SoupMessage will fail with + * the status %SOUP_STATUS_SSL_FAILED. + * + * If you set #SoupSession:ssl-strict to %FALSE, then all + * certificates will be accepted, and you will need to call + * soup_message_get_https_status() to distinguish valid from + * invalid certificates. (This can be used, eg, if you want to + * accept invalid certificates after giving some sort of + * warning.) + * + * If the session has no CA file or TLS database, then all + * certificates are always accepted, and this property has no + * effect. + * + * Since: 2.30 + */ g_object_class_install_property ( object_class, PROP_SSL_STRICT, g_param_spec_boolean (SOUP_SESSION_SSL_STRICT, @@ -552,7 +679,7 @@ soup_session_class_init (SoupSessionClass *session_class) * * Alias for the #SoupSession:async-context property. (The * session's #GMainContext.) - **/ + */ g_object_class_install_property ( object_class, PROP_ASYNC_CONTEXT, g_param_spec_pointer (SOUP_SESSION_ASYNC_CONTEXT, @@ -560,6 +687,33 @@ soup_session_class_init (SoupSessionClass *session_class) "The GMainContext to dispatch async I/O in", G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); /** + * SOUP_SESSION_USE_THREAD_CONTEXT: + * + * Alias for the #SoupSession:use-thread-context property, qv. 
+ * + * Since: 2.38 + */ + /** + * SoupSession:use-thread-context: + * + * If set, asynchronous operations in this session will run in + * whatever the thread-default #GMainContext is at the time + * they are started, rather than always occurring in a context + * fixed at the session's construction time. "Bookkeeping" + * tasks (like expiring idle connections) will happen in the + * context that was thread-default at the time the session was + * created. + * + * Since: 2.38 + */ + g_object_class_install_property ( + object_class, PROP_USE_THREAD_CONTEXT, + g_param_spec_boolean (SOUP_SESSION_USE_THREAD_CONTEXT, + "Use thread-default GMainContext", + "Whether to use thread-default main contexts", + FALSE, + G_PARAM_READWRITE)); + /** * SOUP_SESSION_TIMEOUT: * * Alias for the #SoupSession:timeout property. (The timeout @@ -595,7 +749,7 @@ soup_session_class_init (SoupSessionClass *session_class) * followed by a version string. You may also put comments, * enclosed in parentheses, between or after the tokens. * - * If you set a %user_agent property that has trailing + * If you set a #SoupSession:user_agent property that has trailing * whitespace, #SoupSession will append its own product token * (eg, "libsoup/2.3.2") to the end of the * header for you. @@ -736,18 +890,69 @@ soup_session_class_init (SoupSessionClass *session_class) "Remove features of the given type from the session", SOUP_TYPE_SESSION_FEATURE, G_PARAM_READWRITE)); -} - -static gboolean -safe_str_equal (const char *a, const char *b) -{ - if (!a && !b) - return TRUE; - - if ((a && !b) || (b && !a)) - return FALSE; - - return strcmp (a, b) == 0; + /** + * SoupSession:http-aliases: + * + * A %NULL-terminated array of URI schemes that should be + * considered to be aliases for "http". Eg, if this included + * "dav", then a URI of + * dav://example.com/path would be treated + * identically to http://example.com/path. + * If the value is %NULL, then only "http" is recognized as + * meaning "http". 
+ * + * For backward-compatibility reasons, the default value for + * this property is an array containing the single element + * "*", a special value which means that + * any scheme except "https" is considered to be an alias for + * "http". + * + * See also #SoupSession:https-aliases. + * + * Since: 2.38 + */ + /** + * SOUP_SESSION_HTTP_ALIASES: + * + * Alias for the #SoupSession:http-aliases property. (URI + * schemes that will be considered aliases for "http".) + * + * Since: 2.38 + */ + g_object_class_install_property ( + object_class, PROP_HTTP_ALIASES, + g_param_spec_boxed (SOUP_SESSION_HTTP_ALIASES, + "http aliases", + "URI schemes that are considered aliases for 'http'", + G_TYPE_STRV, + G_PARAM_READWRITE)); + /** + * SoupSession:https-aliases: + * + * A %NULL-terminated array of URI schemes that should be + * considered to be aliases for "https". See + * #SoupSession:http-aliases for more information. + * + * The default value is %NULL, meaning that no URI schemes + * are considered aliases for "https". + * + * Since: 2.38 + */ + /** + * SOUP_SESSION_HTTPS_ALIASES: + * + * Alias for the #SoupSession:https-aliases property. (URI + * schemes that will be considered aliases for "https".) 
+ * + * Since: 2.38 + **/ + g_object_class_install_property ( + object_class, PROP_HTTPS_ALIASES, + g_param_spec_boxed (SOUP_SESSION_HTTPS_ALIASES, + "https aliases", + "URI schemes that are considered aliases for 'https'", + G_TYPE_STRV, + G_PARAM_READWRITE)); } /* Converts a language in POSIX format and to be RFC2616 compliant */ @@ -834,14 +1039,64 @@ accept_languages_from_system (void) } static void +load_ssl_ca_file (SoupSessionPrivate *priv) +{ + GError *error = NULL; + + if (g_path_is_absolute (priv->ssl_ca_file)) { + priv->tlsdb = g_tls_file_database_new (priv->ssl_ca_file, + &error); + } else { + char *path, *cwd; + + cwd = g_get_current_dir (); + path = g_build_filename (cwd, priv->ssl_ca_file, NULL); + priv->tlsdb = g_tls_file_database_new (path, &error); + g_free (path); + } + if (priv->tlsdb) + return; + + if (!g_error_matches (error, G_TLS_ERROR, G_TLS_ERROR_UNAVAILABLE)) { + g_warning ("Could not set SSL credentials from '%s': %s", + priv->ssl_ca_file, error->message); + + priv->tlsdb = g_tls_file_database_new ("/dev/null", NULL); + } + g_error_free (error); +} + +/* priv->http_aliases and priv->https_aliases are stored as arrays of + * *interned* strings, so we can't just use g_strdupv() to set them. 
+ */ +static void +set_aliases (char ***variable, char **value) +{ + int len, i; + + if (*variable) + g_free (*variable); + + if (!value) { + *variable = NULL; + return; + } + + len = g_strv_length (value); + *variable = g_new (char *, len + 1); + for (i = 0; i < len; i++) + (*variable)[i] = (char *)g_intern_string (value[i]); + (*variable)[i] = NULL; +} + +static void set_property (GObject *object, guint prop_id, const GValue *value, GParamSpec *pspec) { SoupSession *session = SOUP_SESSION (object); SoupSessionPrivate *priv = SOUP_SESSION_GET_PRIVATE (session); SoupURI *uri; - gboolean ca_file_changed = FALSE; - const char *new_ca_file, *user_agent; + const char *user_agent; SoupSessionFeature *feature; switch (prop_id) { @@ -875,19 +1130,35 @@ set_property (GObject *object, guint prop_id, g_warning ("Trying to set use-ntlm on session with no auth-manager"); break; case PROP_SSL_CA_FILE: - new_ca_file = g_value_get_string (value); - - if (!safe_str_equal (priv->ssl_ca_file, new_ca_file)) - ca_file_changed = TRUE; + if (priv->tlsdb) { + g_object_unref (priv->tlsdb); + priv->tlsdb = NULL; + } + g_free (priv->ssl_ca_file); + priv->ssl_ca_file = g_value_dup_string (value); + if (priv->ssl_ca_file) + load_ssl_ca_file (priv); + break; + case PROP_SSL_USE_SYSTEM_CA_FILE: + if (priv->tlsdb) { + g_object_unref (priv->tlsdb); + priv->tlsdb = NULL; + } g_free (priv->ssl_ca_file); - priv->ssl_ca_file = g_strdup (new_ca_file); + priv->ssl_ca_file = NULL; - if (ca_file_changed && priv->ssl_creds) { - soup_ssl_free_client_credentials (priv->ssl_creds); - priv->ssl_creds = NULL; + priv->tlsdb = g_tls_backend_get_default_database (g_tls_backend_get_default ()); + break; + case PROP_TLS_DATABASE: + if (priv->tlsdb) { + g_object_unref (priv->tlsdb); + priv->tlsdb = NULL; } + g_free (priv->ssl_ca_file); + priv->ssl_ca_file = NULL; + priv->tlsdb = g_value_dup_object (value); break; case PROP_SSL_STRICT: priv->ssl_strict = g_value_get_boolean (value); @@ -897,6 +1168,16 @@ set_property 
(GObject *object, guint prop_id, if (priv->async_context) g_main_context_ref (priv->async_context); break; + case PROP_USE_THREAD_CONTEXT: + priv->use_thread_context = g_value_get_boolean (value); + if (priv->use_thread_context) { + if (priv->async_context) + g_main_context_unref (priv->async_context); + priv->async_context = g_main_context_get_thread_default (); + if (priv->async_context) + g_main_context_ref (priv->async_context); + } + break; case PROP_TIMEOUT: priv->io_timeout = g_value_get_uint (value); break; @@ -943,6 +1224,12 @@ set_property (GObject *object, guint prop_id, case PROP_REMOVE_FEATURE_BY_TYPE: soup_session_remove_feature_by_type (session, g_value_get_gtype (value)); break; + case PROP_HTTP_ALIASES: + set_aliases (&priv->http_aliases, g_value_get_boxed (value)); + break; + case PROP_HTTPS_ALIASES: + set_aliases (&priv->https_aliases, g_value_get_boxed (value)); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; @@ -956,6 +1243,7 @@ get_property (GObject *object, guint prop_id, SoupSession *session = SOUP_SESSION (object); SoupSessionPrivate *priv = SOUP_SESSION_GET_PRIVATE (session); SoupSessionFeature *feature; + GTlsDatabase *tlsdb; switch (prop_id) { case PROP_PROXY_URI: @@ -983,12 +1271,23 @@ get_property (GObject *object, guint prop_id, case PROP_SSL_CA_FILE: g_value_set_string (value, priv->ssl_ca_file); break; + case PROP_SSL_USE_SYSTEM_CA_FILE: + tlsdb = g_tls_backend_get_default_database (g_tls_backend_get_default ()); + g_value_set_boolean (value, priv->tlsdb == tlsdb); + g_object_unref (tlsdb); + break; + case PROP_TLS_DATABASE: + g_value_set_object (value, priv->tlsdb); + break; case PROP_SSL_STRICT: g_value_set_boolean (value, priv->ssl_strict); break; case PROP_ASYNC_CONTEXT: g_value_set_pointer (value, priv->async_context ? 
g_main_context_ref (priv->async_context) : NULL); break; + case PROP_USE_THREAD_CONTEXT: + g_value_set_boolean (value, priv->use_thread_context); + break; case PROP_TIMEOUT: g_value_set_uint (value, priv->io_timeout); break; @@ -1004,12 +1303,60 @@ get_property (GObject *object, guint prop_id, case PROP_IDLE_TIMEOUT: g_value_set_uint (value, priv->idle_timeout); break; + case PROP_HTTP_ALIASES: + g_value_set_boxed (value, priv->http_aliases); + break; + case PROP_HTTPS_ALIASES: + g_value_set_boxed (value, priv->https_aliases); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } +static gboolean +uri_is_http (SoupSessionPrivate *priv, SoupURI *uri) +{ + int i; + + if (uri->scheme == SOUP_URI_SCHEME_HTTP) + return TRUE; + else if (uri->scheme == SOUP_URI_SCHEME_HTTPS) + return FALSE; + else if (!priv->http_aliases) + return FALSE; + + for (i = 0; priv->http_aliases[i]; i++) { + if (uri->scheme == priv->http_aliases[i]) + return TRUE; + } + + if (!priv->http_aliases[1] && !strcmp (priv->http_aliases[0], "*")) + return TRUE; + else + return FALSE; +} + +static gboolean +uri_is_https (SoupSessionPrivate *priv, SoupURI *uri) +{ + int i; + + if (uri->scheme == SOUP_URI_SCHEME_HTTPS) + return TRUE; + else if (uri->scheme == SOUP_URI_SCHEME_HTTP) + return FALSE; + else if (!priv->https_aliases) + return FALSE; + + for (i = 0; priv->https_aliases[i]; i++) { + if (uri->scheme == priv->https_aliases[i]) + return TRUE; + } + + return FALSE; +} /** * soup_session_get_async_context: @@ -1019,6 +1366,9 @@ get_property (GObject *object, guint prop_id, * context, so you will need to ref it yourself if you want it to * outlive its session. * + * If #SoupSession:use-thread-context is true, this will return the + * current thread-default main context. 
+ * * Return value: (transfer none): @session's #GMainContext, which may * be %NULL **/ @@ -1030,11 +1380,40 @@ soup_session_get_async_context (SoupSession *session) g_return_val_if_fail (SOUP_IS_SESSION (session), NULL); priv = SOUP_SESSION_GET_PRIVATE (session); - return priv->async_context; + if (priv->use_thread_context) + return g_main_context_get_thread_default (); + else + return priv->async_context; } /* Hosts */ +static guint +soup_host_uri_hash (gconstpointer key) +{ + const SoupURI *uri = key; + + g_return_val_if_fail (uri != NULL && uri->host != NULL, 0); + + return uri->port + soup_str_case_hash (uri->host); +} + +gboolean +soup_host_uri_equal (gconstpointer v1, gconstpointer v2) +{ + const SoupURI *one = v1; + const SoupURI *two = v2; + + g_return_val_if_fail (one != NULL && two != NULL, one == two); + g_return_val_if_fail (one->host != NULL && two->host != NULL, one->host == two->host); + + if (one->port != two->port) + return FALSE; + + return g_ascii_strcasecmp (one->host, two->host) == 0; +} + + static SoupSessionHost * soup_session_host_new (SoupSession *session, SoupURI *uri) { @@ -1042,7 +1421,19 @@ soup_session_host_new (SoupSession *session, SoupURI *uri) host = g_slice_new0 (SoupSessionHost); host->uri = soup_uri_copy_host (uri); + if (host->uri->scheme != SOUP_URI_SCHEME_HTTP && + host->uri->scheme != SOUP_URI_SCHEME_HTTPS) { + SoupSessionPrivate *priv = SOUP_SESSION_GET_PRIVATE (session); + + if (uri_is_https (priv, host->uri)) + host->uri->scheme = SOUP_URI_SCHEME_HTTPS; + else + host->uri->scheme = SOUP_URI_SCHEME_HTTP; + } + host->addr = soup_address_new (host->uri->host, host->uri->port); + host->keep_alive_src = NULL; + host->session = session; return host; } @@ -1054,12 +1445,19 @@ get_host_for_uri (SoupSession *session, SoupURI *uri) SoupSessionPrivate *priv = SOUP_SESSION_GET_PRIVATE (session); SoupSessionHost *host; - host = g_hash_table_lookup (priv->hosts, uri); + if (uri_is_https (priv, uri)) + host = g_hash_table_lookup 
(priv->https_hosts, uri); + else + host = g_hash_table_lookup (priv->http_hosts, uri); if (host) return host; host = soup_session_host_new (session, uri); - g_hash_table_insert (priv->hosts, host->uri, host); + + if (uri_is_https (priv, uri)) + g_hash_table_insert (priv->https_hosts, host->uri, host); + else + g_hash_table_insert (priv->http_hosts, host->uri, host); return host; } @@ -1084,10 +1482,15 @@ free_host (SoupSessionHost *host) soup_connection_disconnect (conn); } + if (host->keep_alive_src) { + g_source_destroy (host->keep_alive_src); + g_source_unref (host->keep_alive_src); + } + soup_uri_free (host->uri); g_object_unref (host->addr); g_slice_free (SoupSessionHost, host); -} +} static void auth_required (SoupSession *session, SoupMessage *msg, @@ -1105,36 +1508,145 @@ auth_manager_authenticate (SoupAuthManager *manager, SoupMessage *msg, session, msg, auth, retrying); } +/* At some point it might be possible to mark additional methods + * safe or idempotent... + */ #define SOUP_METHOD_IS_SAFE(method) (method == SOUP_METHOD_GET || \ method == SOUP_METHOD_HEAD || \ method == SOUP_METHOD_OPTIONS || \ method == SOUP_METHOD_PROPFIND) -static void -redirect_handler (SoupMessage *msg, gpointer user_data) +#define SOUP_METHOD_IS_IDEMPOTENT(method) (method == SOUP_METHOD_GET || \ + method == SOUP_METHOD_HEAD || \ + method == SOUP_METHOD_OPTIONS || \ + method == SOUP_METHOD_PROPFIND || \ + method == SOUP_METHOD_PUT || \ + method == SOUP_METHOD_DELETE) + + +#define SOUP_SESSION_WOULD_REDIRECT_AS_GET(session, msg) \ + ((msg)->status_code == SOUP_STATUS_SEE_OTHER || \ + ((msg)->status_code == SOUP_STATUS_FOUND && \ + !SOUP_METHOD_IS_SAFE ((msg)->method)) || \ + ((msg)->status_code == SOUP_STATUS_MOVED_PERMANENTLY && \ + (msg)->method == SOUP_METHOD_POST)) + +#define SOUP_SESSION_WOULD_REDIRECT_AS_SAFE(session, msg) \ + (((msg)->status_code == SOUP_STATUS_MOVED_PERMANENTLY || \ + (msg)->status_code == SOUP_STATUS_TEMPORARY_REDIRECT || \ + (msg)->status_code == 
SOUP_STATUS_FOUND) && \ + SOUP_METHOD_IS_SAFE ((msg)->method)) + +static inline SoupURI * +redirection_uri (SoupMessage *msg) { - SoupMessageQueueItem *item = user_data; - SoupSession *session = item->session; const char *new_loc; SoupURI *new_uri; new_loc = soup_message_headers_get_one (msg->response_headers, "Location"); - g_return_if_fail (new_loc != NULL); + if (!new_loc) + return NULL; + new_uri = soup_uri_new_with_base (soup_message_get_uri (msg), new_loc); + if (!new_uri || !new_uri->host) { + if (new_uri) + soup_uri_free (new_uri); + return NULL; + } + + return new_uri; +} + +/** + * soup_session_would_redirect: + * @session: a #SoupSession + * @msg: a #SoupMessage that has response headers + * + * Checks if @msg contains a response that would cause @session to + * redirect it to a new URL (ignoring @msg's %SOUP_MESSAGE_NO_REDIRECT + * flag, and the number of times it has already been redirected). + * + * Return value: whether @msg would be redirected + * + * Since: 2.38 + */ +gboolean +soup_session_would_redirect (SoupSession *session, SoupMessage *msg) +{ + SoupSessionPrivate *priv = SOUP_SESSION_GET_PRIVATE (session); + SoupURI *new_uri; + + /* It must have an appropriate status code and method */ + if (!SOUP_SESSION_WOULD_REDIRECT_AS_GET (session, msg) && + !SOUP_SESSION_WOULD_REDIRECT_AS_SAFE (session, msg)) + return FALSE; + + /* and a Location header that parses to an http URI */ + if (!soup_message_headers_get_one (msg->response_headers, "Location")) + return FALSE; + new_uri = redirection_uri (msg); + if (!new_uri) + return FALSE; + if (!new_uri->host || !*new_uri->host || + (!uri_is_http (priv, new_uri) && !uri_is_https (priv, new_uri))) { + soup_uri_free (new_uri); + return FALSE; + } + + soup_uri_free (new_uri); + return TRUE; +} + +/** + * soup_session_redirect_message: + * @session: the session + * @msg: a #SoupMessage that has received a 3xx response + * + * Updates @msg's URI according to its status code and "Location" + * header, and 
requeues it on @session. Use this when you have set + * %SOUP_MESSAGE_NO_REDIRECT on a message, but have decided to allow a + * particular redirection to occur, or if you want to allow a + * redirection that #SoupSession will not perform automatically (eg, + * redirecting a non-safe method such as DELETE). + * + * If @msg's status code indicates that it should be retried as a GET + * request, then @msg will be modified accordingly. + * + * If @msg has already been redirected too many times, this will + * cause it to fail with %SOUP_STATUS_TOO_MANY_REDIRECTS. + * + * Return value: %TRUE if a redirection was applied, %FALSE if not + * (eg, because there was no Location header, or it could not be + * parsed). + * + * Since: 2.38 + */ +gboolean +soup_session_redirect_message (SoupSession *session, SoupMessage *msg) +{ + SoupMessageQueueItem *item; + SoupURI *new_uri; + new_uri = redirection_uri (msg); + if (!new_uri) + return FALSE; + + item = soup_message_queue_lookup (soup_session_get_queue (session), msg); + if (!item) { + soup_uri_free (new_uri); + return FALSE; + } if (item->redirection_count >= SOUP_SESSION_MAX_REDIRECTION_COUNT) { + soup_uri_free (new_uri); soup_session_cancel_message (session, msg, SOUP_STATUS_TOO_MANY_REDIRECTS); - return; + soup_message_queue_item_unref (item); + return FALSE; } item->redirection_count++; + soup_message_queue_item_unref (item); - if (msg->status_code == SOUP_STATUS_SEE_OTHER || - (msg->status_code == SOUP_STATUS_FOUND && - !SOUP_METHOD_IS_SAFE (msg->method)) || - (msg->status_code == SOUP_STATUS_MOVED_PERMANENTLY && - msg->method == SOUP_METHOD_POST)) { + if (SOUP_SESSION_WOULD_REDIRECT_AS_GET (session, msg)) { if (msg->method != SOUP_METHOD_HEAD) { - /* Redirect using a GET */ g_object_set (msg, SOUP_MESSAGE_METHOD, SOUP_METHOD_GET, NULL); @@ -1143,50 +1655,39 @@ redirect_handler (SoupMessage *msg, gpointer user_data) SOUP_MEMORY_STATIC, NULL, 0); soup_message_headers_set_encoding (msg->request_headers, SOUP_ENCODING_NONE); 
- } else if (msg->status_code == SOUP_STATUS_MOVED_PERMANENTLY || - msg->status_code == SOUP_STATUS_TEMPORARY_REDIRECT || - msg->status_code == SOUP_STATUS_FOUND) { - /* Don't redirect non-safe methods */ - if (!SOUP_METHOD_IS_SAFE (msg->method)) - return; - } else { - /* Three possibilities: - * - * 1) This was a non-3xx response that happened to - * have a "Location" header - * 2) It's a non-redirecty 3xx response (300, 304, - * 305, 306) - * 3) It's some newly-defined 3xx response (308+) - * - * We ignore all of these cases. In the first two, - * redirecting would be explicitly wrong, and in the - * last case, we have no clue if the 3xx response is - * supposed to be redirecty or non-redirecty. Plus, - * 2616 says unrecognized status codes should be - * treated as the equivalent to the x00 code, and we - * don't redirect on 300, so therefore we shouldn't - * redirect on 308+ either. - */ - return; } - /* Location is supposed to be an absolute URI, but some sites - * are lame, so we use soup_uri_new_with_base(). - */ - new_uri = soup_uri_new_with_base (soup_message_get_uri (msg), new_loc); - if (!new_uri || !new_uri->host) { + soup_message_set_uri (msg, new_uri); + soup_uri_free (new_uri); + + soup_session_requeue_message (session, msg); + return TRUE; +} + +static void +redirect_handler (SoupMessage *msg, gpointer user_data) +{ + SoupMessageQueueItem *item = user_data; + SoupSession *session = item->session; + + if (!soup_session_would_redirect (session, msg)) { + SoupURI *new_uri = redirection_uri (msg); + gboolean invalid = !new_uri || !new_uri->host; + if (new_uri) soup_uri_free (new_uri); - soup_message_set_status_full (msg, - SOUP_STATUS_MALFORMED, - "Invalid Redirect URL"); + if (invalid) { + /* Really we should just leave the status as-is, + * but that would be an API break. 
+ */ + soup_message_set_status_full (msg, + SOUP_STATUS_MALFORMED, + "Invalid Redirect URL"); + } return; } - soup_message_set_uri (msg, new_uri); - soup_uri_free (new_uri); - - soup_session_requeue_message (session, msg); + soup_session_redirect_message (session, msg); } void @@ -1224,7 +1725,8 @@ soup_session_send_queue_item (SoupSession *session, g_signal_emit (session, signals[REQUEST_STARTED], 0, item->msg, soup_connection_get_socket (item->conn)); - soup_connection_send_request (item->conn, item, completion_cb, item); + if (item->state == SOUP_MESSAGE_RUNNING) + soup_connection_send_request (item->conn, item, completion_cb, item); } gboolean @@ -1237,7 +1739,7 @@ soup_session_cleanup_connections (SoupSession *session, gpointer conn, host; SoupConnectionState state; - g_mutex_lock (priv->host_lock); + g_mutex_lock (&priv->host_lock); g_hash_table_iter_init (&iter, priv->conns); while (g_hash_table_iter_next (&iter, &conn, &host)) { state = soup_connection_get_state (conn); @@ -1245,7 +1747,7 @@ soup_session_cleanup_connections (SoupSession *session, (prune_idle && state == SOUP_CONNECTION_IDLE)) conns = g_slist_prepend (conns, g_object_ref (conn)); } - g_mutex_unlock (priv->host_lock); + g_mutex_unlock (&priv->host_lock); if (!conns) return FALSE; @@ -1260,6 +1762,25 @@ soup_session_cleanup_connections (SoupSession *session, return TRUE; } +static gboolean +free_unused_host (gpointer user_data) +{ + SoupSessionHost *host = (SoupSessionHost *) user_data; + SoupSessionPrivate *priv = SOUP_SESSION_GET_PRIVATE (host->session); + + g_mutex_lock (&priv->host_lock); + /* This will free the host in addition to removing it from the + * hash table + */ + if (host->uri->scheme == SOUP_URI_SCHEME_HTTPS) + g_hash_table_remove (priv->https_hosts, host->uri); + else + g_hash_table_remove (priv->http_hosts, host->uri); + g_mutex_unlock (&priv->host_lock); + + return FALSE; +} + static void connection_disconnected (SoupConnection *conn, gpointer user_data) { @@ -1267,7 +1788,7 
@@ connection_disconnected (SoupConnection *conn, gpointer user_data) SoupSessionPrivate *priv = SOUP_SESSION_GET_PRIVATE (session); SoupSessionHost *host; - g_mutex_lock (priv->host_lock); + g_mutex_lock (&priv->host_lock); host = g_hash_table_lookup (priv->conns, conn); if (host) { @@ -1275,6 +1796,19 @@ connection_disconnected (SoupConnection *conn, gpointer user_data) host->connections = g_slist_remove (host->connections, conn); host->num_conns--; + /* Free the SoupHost (and its SoupAddress) if there + * has not been any new connection to the host during + * the last HOST_KEEP_ALIVE msecs. + */ + if (host->num_conns == 0) { + g_assert (host->keep_alive_src == NULL); + host->keep_alive_src = soup_add_timeout (priv->async_context, + HOST_KEEP_ALIVE, + free_unused_host, + host); + host->keep_alive_src = g_source_ref (host->keep_alive_src); + } + if (soup_connection_get_ssl_fallback (conn)) host->ssl_fallback = TRUE; } @@ -1282,7 +1816,7 @@ connection_disconnected (SoupConnection *conn, gpointer user_data) g_signal_handlers_disconnect_by_func (conn, connection_disconnected, session); priv->num_conns--; - g_mutex_unlock (priv->host_lock); + g_mutex_unlock (&priv->host_lock); g_object_unref (conn); } @@ -1311,8 +1845,9 @@ soup_session_make_connect_message (SoupSession *session, */ queue_message (session, msg, NULL, NULL); item = soup_message_queue_lookup (priv->queue, msg); - item->conn = g_object_ref (conn); + soup_message_queue_item_set_connection (item, conn); g_object_unref (msg); + item->state = SOUP_MESSAGE_RUNNING; g_signal_emit (session, signals[TUNNELING], 0, conn); return item; @@ -1327,24 +1862,29 @@ soup_session_get_connection (SoupSession *session, SoupConnection *conn; SoupSessionHost *host; SoupAddress *remote_addr, *tunnel_addr; - SoupSSLCredentials *ssl_creds; GSList *conns; int num_pending = 0; SoupURI *uri; + gboolean need_new_connection; if (item->conn) { g_return_val_if_fail (soup_connection_get_state (item->conn) != SOUP_CONNECTION_DISCONNECTED, 
FALSE); return TRUE; } - g_mutex_lock (priv->host_lock); + need_new_connection = + (soup_message_get_flags (item->msg) & SOUP_MESSAGE_NEW_CONNECTION) || + !SOUP_METHOD_IS_IDEMPOTENT (item->msg->method); + + g_mutex_lock (&priv->host_lock); host = get_host_for_message (session, item->msg); for (conns = host->connections; conns; conns = conns->next) { - if (soup_connection_get_state (conns->data) == SOUP_CONNECTION_IDLE) { + if (!need_new_connection && soup_connection_get_state (conns->data) == SOUP_CONNECTION_IDLE) { soup_connection_set_state (conns->data, SOUP_CONNECTION_IN_USE); - g_mutex_unlock (priv->host_lock); - item->conn = g_object_ref (conns->data); + g_mutex_unlock (&priv->host_lock); + soup_message_queue_item_set_connection (item, conns->data); + soup_message_set_https_status (item->msg, item->conn); return TRUE; } else if (soup_connection_get_state (conns->data) == SOUP_CONNECTION_CONNECTING) num_pending++; @@ -1354,18 +1894,20 @@ soup_session_get_connection (SoupSession *session, * is somewhat arbitrary... 
*/ if (num_pending > host->num_messages / 2) { - g_mutex_unlock (priv->host_lock); + g_mutex_unlock (&priv->host_lock); return FALSE; } if (host->num_conns >= priv->max_conns_per_host) { - g_mutex_unlock (priv->host_lock); + if (need_new_connection) + *try_pruning = TRUE; + g_mutex_unlock (&priv->host_lock); return FALSE; } if (priv->num_conns >= priv->max_conns) { *try_pruning = TRUE; - g_mutex_unlock (priv->host_lock); + g_mutex_unlock (&priv->host_lock); return FALSE; } @@ -1378,23 +1920,18 @@ soup_session_get_connection (SoupSession *session, } uri = soup_message_get_uri (item->msg); - if (uri->scheme == SOUP_URI_SCHEME_HTTPS) { - if (!priv->ssl_creds) - priv->ssl_creds = soup_ssl_get_client_credentials (priv->ssl_ca_file); - ssl_creds = priv->ssl_creds; - - if (item->proxy_addr) - tunnel_addr = host->addr; - } else - ssl_creds = NULL; + if (uri_is_https (priv, uri) && item->proxy_addr) + tunnel_addr = host->addr; conn = soup_connection_new ( SOUP_CONNECTION_REMOTE_ADDRESS, remote_addr, SOUP_CONNECTION_TUNNEL_ADDRESS, tunnel_addr, SOUP_CONNECTION_PROXY_URI, item->proxy_uri, - SOUP_CONNECTION_SSL_CREDENTIALS, ssl_creds, - SOUP_CONNECTION_SSL_STRICT, priv->ssl_strict, + SOUP_CONNECTION_SSL, uri_is_https (priv, uri), + SOUP_CONNECTION_SSL_CREDENTIALS, priv->tlsdb, + SOUP_CONNECTION_SSL_STRICT, (priv->tlsdb != NULL) && priv->ssl_strict, SOUP_CONNECTION_ASYNC_CONTEXT, priv->async_context, + SOUP_CONNECTION_USE_THREAD_CONTEXT, priv->use_thread_context, SOUP_CONNECTION_TIMEOUT, priv->io_timeout, SOUP_CONNECTION_IDLE_TIMEOUT, priv->idle_timeout, SOUP_CONNECTION_SSL_FALLBACK, host->ssl_fallback, @@ -1411,8 +1948,14 @@ soup_session_get_connection (SoupSession *session, host->num_conns++; host->connections = g_slist_prepend (host->connections, conn); - g_mutex_unlock (priv->host_lock); - item->conn = g_object_ref (conn); + if (host->keep_alive_src) { + g_source_destroy (host->keep_alive_src); + g_source_unref (host->keep_alive_src); + host->keep_alive_src = NULL; + } + + 
g_mutex_unlock (&priv->host_lock); + soup_message_queue_item_set_connection (item, conn); return TRUE; } @@ -1432,8 +1975,9 @@ soup_session_unqueue_item (SoupSession *session, SoupSessionHost *host; if (item->conn) { - g_object_unref (item->conn); - item->conn = NULL; + if (soup_connection_get_state (item->conn) == SOUP_CONNECTION_IN_USE) + soup_connection_set_state (item->conn, SOUP_CONNECTION_IDLE); + soup_message_queue_item_set_connection (item, NULL); } if (item->state != SOUP_MESSAGE_FINISHED) { @@ -1443,10 +1987,10 @@ soup_session_unqueue_item (SoupSession *session, soup_message_queue_remove (priv->queue, item); - g_mutex_lock (priv->host_lock); + g_mutex_lock (&priv->host_lock); host = get_host_for_message (session, item->msg); host->num_messages--; - g_mutex_unlock (priv->host_lock); + g_mutex_unlock (&priv->host_lock); /* g_signal_handlers_disconnect_by_func doesn't work if you * have a metamarshal, meaning it doesn't work with @@ -1514,10 +2058,10 @@ queue_message (SoupSession *session, SoupMessage *msg, item = soup_message_queue_append (priv->queue, msg, callback, user_data); - g_mutex_lock (priv->host_lock); + g_mutex_lock (&priv->host_lock); host = get_host_for_message (session, item->msg); host->num_messages++; - g_mutex_unlock (priv->host_lock); + g_mutex_unlock (&priv->host_lock); if (!(soup_message_get_flags (msg) & SOUP_MESSAGE_NO_REDIRECT)) { soup_message_add_header_handler ( @@ -1631,15 +2175,20 @@ void soup_session_pause_message (SoupSession *session, SoupMessage *msg) { + SoupSessionPrivate *priv; + SoupMessageQueueItem *item; + g_return_if_fail (SOUP_IS_SESSION (session)); g_return_if_fail (SOUP_IS_MESSAGE (msg)); -#if ENABLE(TIZEN_FIX_PAUSE_MESSAGE) - if(soup_message_io_in_progress (msg)) + priv = SOUP_SESSION_GET_PRIVATE (session); + item = soup_message_queue_lookup (priv->queue, msg); + g_return_if_fail (item != NULL); + + item->paused = TRUE; + if (item->state == SOUP_MESSAGE_RUNNING) soup_message_io_pause (msg); -#else - 
soup_message_io_pause (msg); -#endif + soup_message_queue_item_unref (item); } /** @@ -1658,15 +2207,22 @@ void soup_session_unpause_message (SoupSession *session, SoupMessage *msg) { + SoupSessionPrivate *priv; + SoupMessageQueueItem *item; + g_return_if_fail (SOUP_IS_SESSION (session)); g_return_if_fail (SOUP_IS_MESSAGE (msg)); -#if ENABLE(TIZEN_FIX_PAUSE_MESSAGE) - if(soup_message_io_in_progress (msg)) + priv = SOUP_SESSION_GET_PRIVATE (session); + item = soup_message_queue_lookup (priv->queue, msg); + g_return_if_fail (item != NULL); + + item->paused = FALSE; + if (item->state == SOUP_MESSAGE_RUNNING) soup_message_io_unpause (msg); -#else - soup_message_io_unpause (msg); -#endif + soup_message_queue_item_unref (item); + + SOUP_SESSION_GET_CLASS (session)->kick (session); } @@ -1697,10 +2253,11 @@ cancel_message (SoupSession *session, SoupMessage *msg, guint status_code) * may call this at any time after handing @msg off to @session; if * @session has started sending the request but has not yet received * the complete response, then it will close the request's connection. - * Note that with non-idempotent requests (eg, %POST, %PUT, %DELETE) - * it is possible that you might cancel the request after the server - * acts on it, but before it returns a response, leaving the remote - * resource in an unknown state. + * Note that with non-idempotent requests (eg, + * POST, PUT, + * DELETE) it is possible that you might cancel the + * request after the server acts on it, but before it returns a + * response, leaving the remote resource in an unknown state. * * If the message is cancelled while its response body is being read, * then the response body in @msg will be left partially-filled-in. 
@@ -1776,11 +2333,11 @@ soup_session_abort (SoupSession *session) SOUP_SESSION_GET_CLASS (session)->flush_queue (session); /* Close all connections */ - g_mutex_lock (priv->host_lock); + g_mutex_lock (&priv->host_lock); conns = NULL; g_hash_table_foreach (priv->conns, gather_conns, &conns); - g_mutex_unlock (priv->host_lock); + g_mutex_unlock (&priv->host_lock); for (c = conns; c; c = c->next) { soup_connection_disconnect (c->data); g_object_unref (c->data); @@ -1789,6 +2346,28 @@ soup_session_abort (SoupSession *session) g_slist_free (conns); } +static void +prefetch_uri(SoupSession *session, SoupURI *uri, + GCancellable *cancellable, + SoupAddressCallback callback, gpointer user_data) +{ + SoupSessionPrivate *priv; + SoupSessionHost *host; + SoupAddress *addr; + + priv = SOUP_SESSION_GET_PRIVATE (session); + + g_mutex_lock (&priv->host_lock); + host = get_host_for_uri (session, uri); + addr = g_object_ref (host->addr); + g_mutex_unlock (&priv->host_lock); + + soup_address_resolve_async (addr, + soup_session_get_async_context (session), + cancellable, callback, user_data); + g_object_unref (addr); +} + /** * soup_session_prepare_for_uri: * @session: a #SoupSession @@ -1799,35 +2378,68 @@ soup_session_abort (SoupSession *session) * proxy address, etc.) in order to work more quickly once the URI is * actually requested. * -* This method acts asynchronously, in @session's %async_context. -* If you are using #SoupSessionSync and do not have a main loop running, -* then you can't use this method. +* This method acts asynchronously, in @session's +* #SoupSession:async_context. If you are using #SoupSessionSync and do +* not have a main loop running, then you can't use this method. 
* * Since: 2.30 +* +* Deprecated: 2.38: use soup_session_prefetch_dns() instead **/ void soup_session_prepare_for_uri (SoupSession *session, SoupURI *uri) { - SoupSessionPrivate *priv; - SoupSessionHost *host; - SoupAddress *addr; - g_return_if_fail (SOUP_IS_SESSION (session)); g_return_if_fail (uri != NULL); if (!uri->host) return; - priv = SOUP_SESSION_GET_PRIVATE (session); + prefetch_uri (session, uri, NULL, NULL, NULL); +} - g_mutex_lock (priv->host_lock); - host = get_host_for_uri (session, uri); - addr = g_object_ref (host->addr); - g_mutex_unlock (priv->host_lock); +/** +* soup_session_prefetch_dns: +* @session: a #SoupSession +* @hostname: a hostname to be resolved +* @cancellable: (allow-none): a #GCancellable object, or %NULL +* @callback: (scope async) (allow-none): callback to call with the +* result, or %NULL +* @user_data: data for @callback +* +* Tells @session that an URI from the given @hostname may be requested +* shortly, and so the session can try to prepare by resolving the +* domain name in advance, in order to work more quickly once the URI +* is actually requested. +* +* If @cancellable is non-%NULL, it can be used to cancel the +* resolution. @callback will still be invoked in this case, with a +* status of %SOUP_STATUS_CANCELLED. +* +* This method acts asynchronously, in @session's +* #SoupSession:async_context. If you are using #SoupSessionSync and do +* not have a main loop running, then you can't use this method. 
+* +* Since: 2.38 +**/ +void +soup_session_prefetch_dns (SoupSession *session, const char *hostname, + GCancellable *cancellable, + SoupAddressCallback callback, gpointer user_data) +{ + SoupURI *uri; - soup_address_resolve_async (addr, priv->async_context, - NULL, NULL, NULL); - g_object_unref (addr); + g_return_if_fail (SOUP_IS_SESSION (session)); + g_return_if_fail (hostname != NULL); + + /* FIXME: Prefetching should work for both HTTP and HTTPS */ + uri = soup_uri_new (NULL); + soup_uri_set_scheme (uri, SOUP_URI_SCHEME_HTTP); + soup_uri_set_host (uri, hostname); + soup_uri_set_path (uri, ""); + + prefetch_uri (session, uri, cancellable, callback, user_data); + soup_uri_free (uri); } /** @@ -1968,7 +2580,7 @@ soup_session_remove_feature_by_type (SoupSession *session, GType feature_type) * @feature_type: the #GType of the class of features to get * * Generates a list of @session's features of type @feature_type. (If - * you want to see all features, you can pass %G_TYPE_SESSION_FEATURE + * you want to see all features, you can pass %SOUP_TYPE_SESSION_FEATURE * for @feature_type.) 
* * Return value: (transfer container) (element-type Soup.SessionFeature): diff --git a/libsoup/soup-session.h b/libsoup/soup-session.h index 4b6661f..8748a76 100644 --- a/libsoup/soup-session.h +++ b/libsoup/soup-session.h @@ -7,6 +7,7 @@ #define SOUP_SESSION_H 1 #include +#include #include G_BEGIN_DECLS @@ -51,8 +52,9 @@ typedef struct { void (*flush_queue) (SoupSession *session); + void (*kick) (SoupSession *session); + /* Padding for future expansion */ - void (*_libsoup_reserved3) (void); void (*_libsoup_reserved4) (void); } SoupSessionClass; @@ -63,8 +65,11 @@ GType soup_session_get_type (void); #define SOUP_SESSION_MAX_CONNS_PER_HOST "max-conns-per-host" #define SOUP_SESSION_USE_NTLM "use-ntlm" #define SOUP_SESSION_SSL_CA_FILE "ssl-ca-file" +#define SOUP_SESSION_SSL_USE_SYSTEM_CA_FILE "ssl-use-system-ca-file" +#define SOUP_SESSION_TLS_DATABASE "tls-database" #define SOUP_SESSION_SSL_STRICT "ssl-strict" #define SOUP_SESSION_ASYNC_CONTEXT "async-context" +#define SOUP_SESSION_USE_THREAD_CONTEXT "use-thread-context" #define SOUP_SESSION_TIMEOUT "timeout" #define SOUP_SESSION_USER_AGENT "user-agent" #define SOUP_SESSION_ACCEPT_LANGUAGE "accept-language" @@ -73,6 +78,8 @@ GType soup_session_get_type (void); #define SOUP_SESSION_ADD_FEATURE "add-feature" #define SOUP_SESSION_ADD_FEATURE_BY_TYPE "add-feature-by-type" #define SOUP_SESSION_REMOVE_FEATURE_BY_TYPE "remove-feature-by-type" +#define SOUP_SESSION_HTTP_ALIASES "http-aliases" +#define SOUP_SESSION_HTTPS_ALIASES "https-aliases" GMainContext *soup_session_get_async_context(SoupSession *session); @@ -99,6 +106,17 @@ void soup_session_abort (SoupSession *session); void soup_session_prepare_for_uri (SoupSession *session, SoupURI *uri); +void soup_session_prefetch_dns (SoupSession *session, + const char *hostname, + GCancellable *cancellable, + SoupAddressCallback callback, + gpointer user_data); + +gboolean soup_session_would_redirect (SoupSession *session, + SoupMessage *msg); +gboolean 
soup_session_redirect_message (SoupSession *session, + SoupMessage *msg); + void soup_session_add_feature (SoupSession *session, SoupSessionFeature *feature); void soup_session_add_feature_by_type (SoupSession *session, diff --git a/libsoup/soup-socket.c b/libsoup/soup-socket.c index 5f511cd..42f1fed 100644 --- a/libsoup/soup-socket.c +++ b/libsoup/soup-socket.c @@ -21,7 +21,6 @@ #include "soup-marshal.h" #include "soup-misc.h" #include "soup-misc-private.h" -#include "soup-ssl.h" /** * SECTION:soup-socket @@ -40,6 +39,7 @@ enum { WRITABLE, DISCONNECTED, NEW_CONNECTION, + EVENT, LAST_SIGNAL }; @@ -56,6 +56,7 @@ enum { PROP_SSL_STRICT, PROP_SSL_FALLBACK, PROP_ASYNC_CONTEXT, + PROP_USE_THREAD_CONTEXT, PROP_TIMEOUT, PROP_TRUSTED_CERTIFICATE, PROP_CLEAN_DISPOSE, @@ -75,10 +76,11 @@ typedef struct { guint non_blocking:1; guint is_server:1; + guint ssl:1; guint ssl_strict:1; guint ssl_fallback:1; - guint ssl_ca_in_creds:1; guint clean_dispose:1; + guint use_thread_context:1; gpointer ssl_creds; GMainContext *async_context; @@ -86,7 +88,7 @@ typedef struct { GSource *read_src, *write_src; GByteArray *read_buf; - GMutex *iolock, *addrlock; + GMutex iolock, addrlock; guint timeout; GCancellable *connect_cancel; @@ -108,8 +110,8 @@ soup_socket_init (SoupSocket *sock) SoupSocketPrivate *priv = SOUP_SOCKET_GET_PRIVATE (sock); priv->non_blocking = TRUE; - priv->addrlock = g_mutex_new (); - priv->iolock = g_mutex_new (); + g_mutex_init (&priv->addrlock); + g_mutex_init (&priv->iolock); } static void @@ -173,8 +175,8 @@ finalize (GObject *object) if (priv->read_buf) g_byte_array_free (priv->read_buf, TRUE); - g_mutex_free (priv->addrlock); - g_mutex_free (priv->iolock); + g_mutex_clear (&priv->addrlock); + g_mutex_clear (&priv->iolock); G_OBJECT_CLASS (soup_socket_parent_class)->finalize (object); } @@ -207,7 +209,7 @@ soup_socket_class_init (SoupSocketClass *socket_class) G_SIGNAL_RUN_LAST, G_STRUCT_OFFSET (SoupSocketClass, readable), NULL, NULL, - soup_marshal_NONE__NONE, + 
_soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -223,7 +225,7 @@ soup_socket_class_init (SoupSocketClass *socket_class) G_SIGNAL_RUN_LAST, G_STRUCT_OFFSET (SoupSocketClass, writable), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -239,7 +241,7 @@ soup_socket_class_init (SoupSocketClass *socket_class) G_SIGNAL_RUN_LAST, G_STRUCT_OFFSET (SoupSocketClass, disconnected), NULL, NULL, - soup_marshal_NONE__NONE, + _soup_marshal_NONE__NONE, G_TYPE_NONE, 0); /** @@ -259,9 +261,31 @@ soup_socket_class_init (SoupSocketClass *socket_class) G_SIGNAL_RUN_FIRST, G_STRUCT_OFFSET (SoupSocketClass, new_connection), NULL, NULL, - soup_marshal_NONE__OBJECT, + _soup_marshal_NONE__OBJECT, G_TYPE_NONE, 1, SOUP_TYPE_SOCKET); + /** + * SoupSocket::event: + * @sock: the socket + * @event: the event that occurred + * @connection: the current connection state + * + * Emitted when a network-related event occurs. See + * #GSocketClient::event for more details. + * + * Since: 2.38 + **/ + signals[EVENT] = + g_signal_new ("event", + G_OBJECT_CLASS_TYPE (object_class), + G_SIGNAL_RUN_LAST, + 0, + NULL, NULL, + NULL, + G_TYPE_NONE, 2, + G_TYPE_SOCKET_CLIENT_EVENT, + G_TYPE_IO_STREAM); + /* properties */ /** @@ -343,9 +367,13 @@ soup_socket_class_init (SoupSocketClass *socket_class) /** * SOUP_SOCKET_SSL_CREDENTIALS: * - * Alias for the #SoupSocket:ssl-credentials property. + * Alias for the #SoupSocket:ssl-creds property. * (SSL credential information.) **/ + /* For historical reasons, there's only a single property + * here, which is a GTlsDatabase for client sockets, and + * a GTlsCertificate for server sockets. Whee! 
+ */ g_object_class_install_property ( object_class, PROP_SSL_CREDENTIALS, g_param_spec_pointer (SOUP_SOCKET_SSL_CREDENTIALS, @@ -403,6 +431,29 @@ soup_socket_class_init (SoupSocketClass *socket_class) G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); /** + * SOUP_SOCKET_USE_THREAD_CONTEXT: + * + * Alias for the #SoupSocket:use-thread-context property. (Use + * g_main_context_get_thread_default()) + * + * Since: 2.36.1 + */ + /** + * SoupSocket:use-thread-context: + * + * Use g_main_context_get_thread_default(). + * + * Since: 2.36.1 + */ + g_object_class_install_property ( + object_class, PROP_USE_THREAD_CONTEXT, + g_param_spec_boolean (SOUP_SOCKET_USE_THREAD_CONTEXT, + "Use thread context", + "Use g_main_context_get_thread_default", + FALSE, + G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY)); + + /** * SOUP_SOCKET_TIMEOUT: * * Alias for the #SoupSocket:timeout property. (The timeout @@ -506,6 +557,9 @@ set_property (GObject *object, guint prop_id, if (priv->async_context) g_main_context_ref (priv->async_context); break; + case PROP_USE_THREAD_CONTEXT: + priv->use_thread_context = g_value_get_boolean (value); + break; case PROP_TIMEOUT: priv->timeout = g_value_get_uint (value); if (priv->conn) @@ -554,6 +608,9 @@ get_property (GObject *object, guint prop_id, case PROP_ASYNC_CONTEXT: g_value_set_pointer (value, priv->async_context ? g_main_context_ref (priv->async_context) : NULL); break; + case PROP_USE_THREAD_CONTEXT: + g_value_set_boolean (value, priv->use_thread_context); + break; case PROP_TIMEOUT: g_value_set_uint (value, priv->timeout); break; @@ -596,6 +653,19 @@ soup_socket_new (const char *optname1, ...) 
return sock; } +static void +proxy_socket_client_event (GSocketClient *client, + GSocketClientEvent event, + GSocketConnectable *connectable, + GIOStream *connection, + gpointer user_data) +{ + SoupSocket *sock = user_data; + + g_signal_emit (sock, signals[EVENT], 0, + event, connection); +} + static guint socket_connected (SoupSocket *sock, GSocketConnection *conn, GError *error) { @@ -645,7 +715,7 @@ async_connected (GObject *client, GAsyncResult *result, gpointer data) GSocketConnection *conn; guint status; - if (priv->async_context) + if (priv->async_context && !priv->use_thread_context) g_main_context_pop_thread_default (priv->async_context); conn = g_socket_client_connect_finish (G_SOCKET_CLIENT (client), @@ -691,10 +761,12 @@ soup_socket_connect_async (SoupSocket *sock, GCancellable *cancellable, priv->connect_cancel = cancellable ? g_object_ref (cancellable) : g_cancellable_new (); - if (priv->async_context) + if (priv->async_context && !priv->use_thread_context) g_main_context_push_thread_default (priv->async_context); client = g_socket_client_new (); + g_signal_connect (client, "event", + G_CALLBACK (proxy_socket_client_event), sock); if (priv->timeout) g_socket_client_set_timeout (client, priv->timeout); g_socket_client_connect_async (client, @@ -738,6 +810,8 @@ soup_socket_connect_sync (SoupSocket *sock, GCancellable *cancellable) priv->connect_cancel = cancellable; client = g_socket_client_new (); + g_signal_connect (client, "event", + G_CALLBACK (proxy_socket_client_event), sock); if (priv->timeout) g_socket_client_set_timeout (client, priv->timeout); conn = g_socket_client_connect (client, @@ -756,19 +830,42 @@ soup_socket_get_fd (SoupSocket *sock) return g_socket_get_fd (SOUP_SOCKET_GET_PRIVATE (sock)->gsock); } +GSocket * +soup_socket_get_gsocket (SoupSocket *sock) +{ + g_return_val_if_fail (SOUP_IS_SOCKET (sock), NULL); + + return SOUP_SOCKET_GET_PRIVATE (sock)->gsock; +} + +GIOStream * +soup_socket_get_iostream (SoupSocket *sock) +{ + 
g_return_val_if_fail (SOUP_IS_SOCKET (sock), NULL); + + return SOUP_SOCKET_GET_PRIVATE (sock)->conn; +} + static GSource * soup_socket_create_watch (SoupSocketPrivate *priv, GIOCondition cond, GPollableSourceFunc callback, gpointer user_data, GCancellable *cancellable) { GSource *watch; + GMainContext *async_context; if (cond == G_IO_IN) watch = g_pollable_input_stream_create_source (priv->istream, cancellable); else watch = g_pollable_output_stream_create_source (priv->ostream, cancellable); g_source_set_callback (watch, (GSourceFunc)callback, user_data, NULL); - g_source_attach (watch, priv->async_context); + + if (priv->use_thread_context) + async_context = g_main_context_get_thread_default (); + else + async_context = priv->async_context; + + g_source_attach (watch, async_context); g_source_unref (watch); return watch; @@ -790,8 +887,10 @@ listen_watch (GObject *pollable, gpointer data) new_priv->gsock = new_gsock; if (priv->async_context) new_priv->async_context = g_main_context_ref (priv->async_context); + new_priv->use_thread_context = priv->use_thread_context; new_priv->non_blocking = priv->non_blocking; new_priv->is_server = TRUE; + new_priv->ssl = priv->ssl; if (priv->ssl_creds) new_priv->ssl_creds = priv->ssl_creds; finish_socket_setup (new_priv); @@ -815,7 +914,7 @@ listen_watch (GObject *pollable, gpointer data) * listening) * * Makes @sock start listening on its local address. When connections - * come in, @sock will emit %new_connection. + * come in, @sock will emit #SoupSocket::new_connection. * * Return value: whether or not @sock is now listening. 
**/ @@ -882,8 +981,6 @@ soup_socket_peer_certificate_changed (GObject *conn, GParamSpec *pspec, SoupSocketPrivate *priv = SOUP_SOCKET_GET_PRIVATE (sock); priv->tls_errors = g_tls_connection_get_peer_certificate_errors (G_TLS_CONNECTION (priv->conn)); - if (priv->ssl_ca_in_creds) - priv->tls_errors &= ~G_TLS_CERTIFICATE_UNKNOWN_CA; g_object_notify (sock, "tls-certificate"); g_object_notify (sock, "tls-errors"); @@ -893,15 +990,7 @@ static gboolean soup_socket_accept_certificate (GTlsConnection *conn, GTlsCertificate *cert, GTlsCertificateFlags errors, gpointer sock) { - SoupSocketPrivate *priv = SOUP_SOCKET_GET_PRIVATE (sock); - - if (soup_ssl_credentials_verify_certificate (priv->ssl_creds, - cert, errors)) { - priv->ssl_ca_in_creds = TRUE; - return TRUE; - } - - return !priv->ssl_strict; + return TRUE; } /** @@ -941,8 +1030,8 @@ soup_socket_start_proxy_ssl (SoupSocket *sock, const char *ssl_host, if (G_IS_TLS_CONNECTION (priv->conn)) return TRUE; - if (!priv->ssl_creds) - return FALSE; + + priv->ssl = TRUE; if (!priv->is_server) { GTlsClientConnection *conn; @@ -953,7 +1042,7 @@ soup_socket_start_proxy_ssl (SoupSocket *sock, const char *ssl_host, NULL, NULL, "base-io-stream", priv->conn, "server-identity", identity, - "use-system-certdb", FALSE, + "database", priv->ssl_creds, "require-close-notify", FALSE, "use-ssl3", priv->ssl_fallback, NULL); @@ -965,16 +1054,18 @@ soup_socket_start_proxy_ssl (SoupSocket *sock, const char *ssl_host, g_object_unref (priv->conn); priv->conn = G_IO_STREAM (conn); - g_signal_connect (conn, "accept-certificate", - G_CALLBACK (soup_socket_accept_certificate), - sock); + if (!priv->ssl_strict) { + g_signal_connect (conn, "accept-certificate", + G_CALLBACK (soup_socket_accept_certificate), + sock); + } } else { GTlsServerConnection *conn; conn = g_initable_new (g_tls_backend_get_server_connection_type (backend), NULL, NULL, "base-io-stream", priv->conn, - "certificate", soup_ssl_credentials_get_certificate (priv->ssl_creds), + 
"certificate", priv->ssl_creds, "use-system-certdb", FALSE, "require-close-notify", FALSE, NULL); @@ -985,7 +1076,6 @@ soup_socket_start_proxy_ssl (SoupSocket *sock, const char *ssl_host, priv->conn = G_IO_STREAM (conn); } - priv->ssl_ca_in_creds = FALSE; g_signal_connect (priv->conn, "notify::peer-certificate", G_CALLBACK (soup_socket_peer_certificate_changed), sock); @@ -1001,6 +1091,7 @@ soup_socket_handshake_sync (SoupSocket *sock, SoupSocketPrivate *priv = SOUP_SOCKET_GET_PRIVATE (sock); GError *error = NULL; + priv->ssl = TRUE; if (g_tls_connection_handshake (G_TLS_CONNECTION (priv->conn), cancellable, &error)) return SOUP_STATUS_OK; @@ -1022,7 +1113,7 @@ handshake_async_ready (GObject *source, GAsyncResult *result, gpointer user_data GError *error = NULL; guint status; - if (priv->async_context) + if (priv->async_context && !priv->use_thread_context) g_main_context_pop_thread_default (priv->async_context); if (g_tls_connection_handshake_finish (G_TLS_CONNECTION (priv->conn), @@ -1049,12 +1140,14 @@ soup_socket_handshake_async (SoupSocket *sock, SoupSocketPrivate *priv = SOUP_SOCKET_GET_PRIVATE (sock); SoupSocketAsyncConnectData *data; + priv->ssl = TRUE; + data = g_slice_new (SoupSocketAsyncConnectData); data->sock = g_object_ref (sock); data->callback = callback; data->user_data = user_data; - if (priv->async_context) + if (priv->async_context && !priv->use_thread_context) g_main_context_push_thread_default (priv->async_context); g_tls_connection_handshake_async (G_TLS_CONNECTION (priv->conn), G_PRIORITY_DEFAULT, @@ -1066,19 +1159,16 @@ soup_socket_handshake_async (SoupSocket *sock, * soup_socket_is_ssl: * @sock: a #SoupSocket * - * Tests if @sock is set up to do SSL. Note that this simply means - * that the %SOUP_SOCKET_SSL_CREDENTIALS property has been set; it - * does not mean that soup_socket_start_ssl() has been called. + * Tests if @sock is doing (or has attempted to do) SSL. 
* * Return value: %TRUE if @sock has SSL credentials set **/ gboolean soup_socket_is_ssl (SoupSocket *sock) { - g_return_if_fail (SOUP_IS_SOCKET (sock)); SoupSocketPrivate *priv = SOUP_SOCKET_GET_PRIVATE (sock); - return priv->ssl_creds != NULL; + return priv->ssl; } /** @@ -1100,12 +1190,12 @@ soup_socket_disconnect (SoupSocket *sock) if (priv->connect_cancel) { g_cancellable_cancel (priv->connect_cancel); return; - } else if (g_mutex_trylock (priv->iolock)) { + } else if (g_mutex_trylock (&priv->iolock)) { if (priv->conn) disconnect_internal (sock); else already_disconnected = TRUE; - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); } else { /* Another thread is currently doing IO, so * we can't close the socket. So just shutdown @@ -1170,7 +1260,7 @@ soup_socket_get_local_address (SoupSocket *sock) g_return_val_if_fail (SOUP_IS_SOCKET (sock), NULL); priv = SOUP_SOCKET_GET_PRIVATE (sock); - g_mutex_lock (priv->addrlock); + g_mutex_lock (&priv->addrlock); if (!priv->local_addr) { GSocketAddress *addr; struct sockaddr_storage sa; @@ -1182,7 +1272,7 @@ soup_socket_get_local_address (SoupSocket *sock) priv->local_addr = soup_address_new_from_sockaddr ((struct sockaddr *)&sa, sa_len); g_object_unref (addr); } - g_mutex_unlock (priv->addrlock); + g_mutex_unlock (&priv->addrlock); return priv->local_addr; } @@ -1203,7 +1293,7 @@ soup_socket_get_remote_address (SoupSocket *sock) g_return_val_if_fail (SOUP_IS_SOCKET (sock), NULL); priv = SOUP_SOCKET_GET_PRIVATE (sock); - g_mutex_lock (priv->addrlock); + g_mutex_lock (&priv->addrlock); if (!priv->remote_addr) { GSocketAddress *addr; struct sockaddr_storage sa; @@ -1215,7 +1305,7 @@ soup_socket_get_remote_address (SoupSocket *sock) priv->remote_addr = soup_address_new_from_sockaddr ((struct sockaddr *)&sa, sa_len); g_object_unref (addr); } - g_mutex_unlock (priv->addrlock); + g_mutex_unlock (&priv->addrlock); return priv->remote_addr; } @@ -1272,9 +1362,6 @@ read_from_network (SoupSocket *sock, gpointer 
buffer, gsize len, cancellable); } return SOUP_SOCKET_WOULD_BLOCK; - } else if (g_error_matches (my_err, G_TLS_ERROR, G_TLS_ERROR_HANDSHAKE)) { - my_err->domain = SOUP_SSL_ERROR; - my_err->code = SOUP_SSL_ERROR_CERTIFICATE; } g_propagate_error (error, my_err); @@ -1351,12 +1438,12 @@ soup_socket_read (SoupSocket *sock, gpointer buffer, gsize len, priv = SOUP_SOCKET_GET_PRIVATE (sock); - g_mutex_lock (priv->iolock); + g_mutex_lock (&priv->iolock); if (priv->read_buf) status = read_from_buf (sock, buffer, len, nread); else status = read_from_network (sock, buffer, len, nread, cancellable, error); - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); return status; } @@ -1407,7 +1494,7 @@ soup_socket_read_until (SoupSocket *sock, gpointer buffer, gsize len, priv = SOUP_SOCKET_GET_PRIVATE (sock); - g_mutex_lock (priv->iolock); + g_mutex_lock (&priv->iolock); *got_boundary = FALSE; @@ -1424,7 +1511,7 @@ soup_socket_read_until (SoupSocket *sock, gpointer buffer, gsize len, read_buf->len = prev_len + *nread; if (status != SOUP_SOCKET_OK) { - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); return status; } } @@ -1446,7 +1533,7 @@ soup_socket_read_until (SoupSocket *sock, gpointer buffer, gsize len, match_len = p - read_buf->data; status = read_from_buf (sock, buffer, MIN (len, match_len), nread); - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); return status; } @@ -1501,14 +1588,14 @@ soup_socket_write (SoupSocket *sock, gconstpointer buffer, priv = SOUP_SOCKET_GET_PRIVATE (sock); - g_mutex_lock (priv->iolock); + g_mutex_lock (&priv->iolock); if (!priv->conn) { - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); return SOUP_SOCKET_EOF; } if (priv->write_src) { - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); return SOUP_SOCKET_WOULD_BLOCK; } @@ -1523,14 +1610,14 @@ soup_socket_write (SoupSocket *sock, gconstpointer buffer, } if (my_nwrote > 0) { - g_mutex_unlock (priv->iolock); + 
g_mutex_unlock (&priv->iolock); g_clear_error (&my_err); *nwrote = my_nwrote; return SOUP_SOCKET_OK; } if (g_error_matches (my_err, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK)) { - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); g_clear_error (&my_err); priv->write_src = @@ -1538,12 +1625,9 @@ soup_socket_write (SoupSocket *sock, gconstpointer buffer, G_IO_OUT, socket_write_watch, sock, cancellable); return SOUP_SOCKET_WOULD_BLOCK; - } else if (g_error_matches (my_err, G_TLS_ERROR, G_TLS_ERROR_HANDSHAKE)) { - my_err->domain = SOUP_SSL_ERROR; - my_err->code = SOUP_SSL_ERROR_CERTIFICATE; } - g_mutex_unlock (priv->iolock); + g_mutex_unlock (&priv->iolock); g_propagate_error (error, my_err); return SOUP_SOCKET_ERROR; } diff --git a/libsoup/soup-socket.h b/libsoup/soup-socket.h index 4d1550f..dc6b59c 100644 --- a/libsoup/soup-socket.h +++ b/libsoup/soup-socket.h @@ -48,6 +48,7 @@ typedef struct { #define SOUP_SOCKET_SSL_FALLBACK "ssl-fallback" #define SOUP_SOCKET_TRUSTED_CERTIFICATE "trusted-certificate" #define SOUP_SOCKET_ASYNC_CONTEXT "async-context" +#define SOUP_SOCKET_USE_THREAD_CONTEXT "use-thread-context" #define SOUP_SOCKET_TIMEOUT "timeout" #define SOUP_SOCKET_TLS_CERTIFICATE "tls-certificate" #define SOUP_SOCKET_TLS_ERRORS "tls-errors" diff --git a/libsoup/soup-ssl.c b/libsoup/soup-ssl.c deleted file mode 100644 index 74d87f2..0000000 --- a/libsoup/soup-ssl.c +++ /dev/null @@ -1,145 +0,0 @@ -/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ -/* - * soup-ssl.c: temporary ssl integration - * - * Copyright (C) 2010 Red Hat, Inc. 
- */ - -#ifdef HAVE_CONFIG_H -#include -#endif - -#include - -#include "soup-ssl.h" -#include "soup-misc.h" - -const gboolean soup_ssl_supported = TRUE; - -struct SoupSSLCredentials { - GList *ca_list; - GTlsCertificateFlags validation_flags; - GTlsCertificate *certificate; -}; - -SoupSSLCredentials * -soup_ssl_get_client_credentials (const char *ca_file) -{ - SoupSSLCredentials *creds; - - creds = g_slice_new0 (SoupSSLCredentials); - - if (ca_file) { - GError *error = NULL; - - creds->ca_list = g_tls_certificate_list_new_from_file (ca_file, &error); - if (error) { - if (!g_error_matches (error, G_TLS_ERROR, G_TLS_ERROR_UNAVAILABLE)) { - g_warning ("Could not set SSL credentials from '%s': %s", - ca_file, error->message); - } - g_error_free (error); - } - creds->validation_flags = G_TLS_CERTIFICATE_VALIDATE_ALL; - } - - return creds; -} - -gboolean -soup_ssl_credentials_verify_certificate (SoupSSLCredentials *creds, - GTlsCertificate *cert, - GTlsCertificateFlags errors) -{ - errors = errors & creds->validation_flags; - - if (errors & G_TLS_CERTIFICATE_UNKNOWN_CA) { - GList *ca; - - for (ca = creds->ca_list; ca; ca = ca->next) { - if ((g_tls_certificate_verify (cert, NULL, ca->data) & G_TLS_CERTIFICATE_UNKNOWN_CA) == 0) { - errors &= ~G_TLS_CERTIFICATE_UNKNOWN_CA; - break; - } - } - } - - return errors == 0; -} - -void -soup_ssl_free_client_credentials (SoupSSLCredentials *client_creds) -{ - GList *c; - - for (c = client_creds->ca_list; c; c = c->next) - g_object_unref (c->data); - g_list_free (client_creds->ca_list); - g_slice_free (SoupSSLCredentials, client_creds); -} - -SoupSSLCredentials * -soup_ssl_get_server_credentials (const char *cert_file, const char *key_file) -{ - SoupSSLCredentials *creds; - GError *error = NULL; - - creds = g_slice_new0 (SoupSSLCredentials); - - creds->certificate = g_tls_certificate_new_from_files (cert_file, key_file, &error); - if (!creds->certificate) { - g_warning ("Could not read SSL certificate from '%s': %s", - cert_file, 
error->message); - g_error_free (error); - g_slice_free (SoupSSLCredentials, creds); - return NULL; - } - - return creds; -} - -GTlsCertificate * -soup_ssl_credentials_get_certificate (SoupSSLCredentials *creds) -{ - return creds->certificate; -} - -void -soup_ssl_free_server_credentials (SoupSSLCredentials *server_creds) -{ - g_object_unref (server_creds->certificate); - g_slice_free (SoupSSLCredentials, server_creds); -} - -/** - * SOUP_SSL_ERROR: - * - * A #GError domain representing an SSL error. Used with #SoupSSLError. - **/ -/** - * soup_ssl_error_quark: - * - * The quark used as %SOUP_SSL_ERROR - * - * Return value: The quark used as %SOUP_SSL_ERROR - **/ -GQuark -soup_ssl_error_quark (void) -{ - static GQuark error; - if (!error) - error = g_quark_from_static_string ("soup_ssl_error_quark"); - return error; -} - -/** - * SoupSSLError: - * @SOUP_SSL_ERROR_HANDSHAKE_NEEDS_READ: Internal error. Never exposed - * outside of libsoup. - * @SOUP_SSL_ERROR_HANDSHAKE_NEEDS_WRITE: Internal error. Never exposed - * outside of libsoup. - * @SOUP_SSL_ERROR_CERTIFICATE: Indicates an error validating an SSL - * certificate - * - * SSL-related I/O errors. - **/ diff --git a/libsoup/soup-ssl.h b/libsoup/soup-ssl.h deleted file mode 100644 index 5858199..0000000 --- a/libsoup/soup-ssl.h +++ /dev/null @@ -1,29 +0,0 @@ -/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ -/* - * Copyright (C) 2000-2003, Ximian, Inc. 
- */ - -#ifndef SOUP_SSL_H -#define SOUP_SSL_H 1 - -#include "soup-types.h" - -typedef enum { - SOUP_SSL_TYPE_CLIENT = 0, - SOUP_SSL_TYPE_SERVER -} SoupSSLType; - -typedef struct SoupSSLCredentials SoupSSLCredentials; - -SoupSSLCredentials *soup_ssl_get_client_credentials (const char *ca_file); -void soup_ssl_free_client_credentials (SoupSSLCredentials *creds); -gboolean soup_ssl_credentials_verify_certificate (SoupSSLCredentials *creds, - GTlsCertificate *cert, - GTlsCertificateFlags errors); - -SoupSSLCredentials *soup_ssl_get_server_credentials (const char *cert_file, - const char *key_file); -void soup_ssl_free_server_credentials (SoupSSLCredentials *creds); -GTlsCertificate *soup_ssl_credentials_get_certificate (SoupSSLCredentials *creds); - -#endif /* SOUP_SSL_H */ diff --git a/libsoup/soup-status.c b/libsoup/soup-status.c index 11fe51b..2590a3d 100644 --- a/libsoup/soup-status.c +++ b/libsoup/soup-status.c @@ -257,13 +257,13 @@ static const struct { * * There is no reason for you to ever use this * function. If you wanted the textual description for the - * %status_code of a given #SoupMessage, you should just look at the - * message's %reason_phrase. However, you should only do that for use - * in debugging messages; HTTP reason phrases are not localized, and - * are not generally very descriptive anyway, and so they should never - * be presented to the user directly. Instead, you should create you - * own error messages based on the status code, and on what you were - * trying to do. + * #SoupMessage:status_code of a given #SoupMessage, you should just + * look at the message's #SoupMessage:reason_phrase. However, you + * should only do that for use in debugging messages; HTTP reason + * phrases are not localized, and are not generally very descriptive + * anyway, and so they should never be presented to the user directly. + * Instead, you should create you own error messages based on the + * status code, and on what you were trying to do. 
* * Return value: the (terse, English) description of @status_code **/ diff --git a/libsoup/soup-uri.c b/libsoup/soup-uri.c index b5c247d..24cd6b0 100644 --- a/libsoup/soup-uri.c +++ b/libsoup/soup-uri.c @@ -81,6 +81,21 @@ **/ /** + * SOUP_URI_IS_VALID: + * @uri: a #SoupURI + * + * Tests whether @uri is a valid #SoupURI; that is, that it is non-%NULL + * and its @scheme and @path members are also non-%NULL. + * + * This macro does not check whether http and https URIs have a non-%NULL + * @host member. + * + * Return value: %TRUE if @uri is valid for use. + * + * Since: 2.38 + **/ + +/** * SOUP_URI_VALID_FOR_HTTP: * @uri: a #SoupURI * @@ -142,12 +157,25 @@ soup_scheme_default_port (const char *scheme) SoupURI * soup_uri_new_with_base (SoupURI *base, const char *uri_string) { - SoupURI *uri; + SoupURI *uri, fixed_base; const char *end, *hash, *colon, *at, *path, *question; const char *p, *hostend; gboolean remove_dot_segments = TRUE; int len; + g_return_val_if_fail (uri_string != NULL, NULL); + + /* Allow a %NULL path in @base, for compatibility */ + if (base && base->scheme && !base->path) { + g_warn_if_fail (SOUP_URI_IS_VALID (base)); + + memcpy (&fixed_base, base, sizeof (SoupURI)); + fixed_base.path = ""; + base = &fixed_base; + } + + g_return_val_if_fail (base == NULL || SOUP_URI_IS_VALID (base), NULL); + /* First some cleanup steps (which are supposed to all be no-ops, * but...). Skip initial whitespace, strip out internal tabs and * line breaks, and ignore trailing whitespace. @@ -186,7 +214,7 @@ soup_uri_new_with_base (SoupURI *base, const char *uri_string) /* Find scheme: initial [a-z+.-]* substring until ":" */ p = uri_string; - while (p < end && (g_ascii_isalnum (*p) || + while (p < end && (g_ascii_isalpha (*p) || *p == '.' 
|| *p == '+' || *p == '-')) p++; @@ -195,8 +223,10 @@ soup_uri_new_with_base (SoupURI *base, const char *uri_string) uri_string = p + 1; } - if (uri_string == end && !base && !uri->fragment) + if (uri_string == end && !base && !uri->fragment) { + uri->path = g_strdup (""); return uri; + } /* Check for authority */ if (strncmp (uri_string, "//", 2) == 0) { @@ -376,7 +406,7 @@ soup_uri_new_with_base (SoupURI *base, const char *uri_string) /** * soup_uri_new: - * @uri_string: a URI + * @uri_string: (allow-none): a URI * * Parses an absolute URI. * @@ -385,7 +415,8 @@ soup_uri_new_with_base (SoupURI *base, const char *uri_string) * call at least soup_uri_set_scheme() and soup_uri_set_path(), since * those fields are required.) * - * Return value: a #SoupURI, or %NULL. + * Return value: a #SoupURI, or %NULL if the given string was found to be + * invalid. **/ SoupURI * soup_uri_new (const char *uri_string) @@ -398,7 +429,7 @@ soup_uri_new (const char *uri_string) uri = soup_uri_new_with_base (NULL, uri_string); if (!uri) return NULL; - if (!uri->scheme) { + if (!SOUP_URI_IS_VALID (uri)) { soup_uri_free (uri); return NULL; } @@ -427,10 +458,7 @@ soup_uri_to_string (SoupURI *uri, gboolean just_path_and_query) char *return_result; g_return_val_if_fail (uri != NULL, NULL); - - /* IF YOU CHANGE ANYTHING IN THIS FUNCTION, RUN - * tests/uri-parsing AFTERWARD. 
- */ + g_warn_if_fail (SOUP_URI_IS_VALID (uri)); str = g_string_sized_new (20); @@ -452,10 +480,16 @@ soup_uri_to_string (SoupURI *uri, gboolean just_path_and_query) g_string_append_printf (str, ":%u", uri->port); if (!uri->path && (uri->query || uri->fragment)) g_string_append_c (str, '/'); + else if ((!uri->path || !*uri->path) && + (uri->scheme == SOUP_URI_SCHEME_HTTP || + uri->scheme == SOUP_URI_SCHEME_HTTPS)) + g_string_append_c (str, '/'); } if (uri->path && *uri->path) g_string_append (str, uri->path); + else if (just_path_and_query) + g_string_append_c (str, '/'); if (uri->query) { g_string_append_c (str, '?'); @@ -486,6 +520,7 @@ soup_uri_copy (SoupURI *uri) SoupURI *dup; g_return_val_if_fail (uri != NULL, NULL); + g_warn_if_fail (SOUP_URI_IS_VALID (uri)); dup = g_slice_new0 (SoupURI); dup->scheme = uri->scheme; @@ -522,6 +557,11 @@ parts_equal (const char *one, const char *two, gboolean insensitive) gboolean soup_uri_equal (SoupURI *uri1, SoupURI *uri2) { + g_return_val_if_fail (uri1 != NULL, FALSE); + g_return_val_if_fail (uri2 != NULL, FALSE); + g_warn_if_fail (SOUP_URI_IS_VALID (uri1)); + g_warn_if_fail (SOUP_URI_IS_VALID (uri2)); + if (uri1->scheme != uri2->scheme || uri1->port != uri2->port || !parts_equal (uri1->user, uri2->user, FALSE) || @@ -589,6 +629,8 @@ soup_uri_encode (const char *part, const char *escape_extra) GString *str; char *encoded; + g_return_val_if_fail (part != NULL, NULL); + str = g_string_new (NULL); append_uri_encoded (str, part, escape_extra); encoded = str->str; @@ -606,6 +648,8 @@ uri_decoded_copy (const char *part, int length) unsigned char *s, *d; char *decoded = g_strndup (part, length); + g_return_val_if_fail (part != NULL, NULL); + s = d = (unsigned char *)decoded; do { if (*s == '%') { @@ -638,6 +682,8 @@ uri_decoded_copy (const char *part, int length) char * soup_uri_decode (const char *part) { + g_return_val_if_fail (part != NULL, NULL); + return uri_decoded_copy (part, strlen (part)); } @@ -673,7 +719,7 @@ 
uri_normalized_copy (const char *part, int length, *d++ = *s; } } else { - if (*s == ' ') + if (!g_ascii_isgraph (*s)) need_fixup = TRUE; *d++ = *s; } @@ -681,16 +727,16 @@ uri_normalized_copy (const char *part, int length, if (need_fixup) { GString *fixed; - char *sp, *p; fixed = g_string_new (NULL); - p = normalized; - while ((sp = strchr (p, ' '))) { - g_string_append_len (fixed, p, sp - p); - g_string_append (fixed, "%20"); - p = sp + 1; + s = (guchar *)normalized; + while (*s) { + if (g_ascii_isgraph (*s)) + g_string_append_c (fixed, *s); + else + g_string_append_printf (fixed, "%%%02X", (int)*s); + s++; } - g_string_append (fixed, p); g_free (normalized); normalized = g_string_free (fixed, FALSE); } @@ -725,6 +771,8 @@ uri_normalized_copy (const char *part, int length, char * soup_uri_normalize (const char *part, const char *unescape_extra) { + g_return_val_if_fail (part != NULL, NULL); + return uri_normalized_copy (part, strlen (part), unescape_extra); } @@ -734,7 +782,7 @@ soup_uri_normalize (const char *part, const char *unescape_extra) * @uri: a #SoupURI * * Tests if @uri uses the default port for its scheme. (Eg, 80 for - * http.) (This only works for http and https; libsoup does not know + * http.) (This only works for http, https and ftp; libsoup does not know * the default ports of other protocols.) 
* * Return value: %TRUE or %FALSE @@ -742,9 +790,8 @@ soup_uri_normalize (const char *part, const char *unescape_extra) gboolean soup_uri_uses_default_port (SoupURI *uri) { - g_return_val_if_fail (uri->scheme == SOUP_URI_SCHEME_HTTP || - uri->scheme == SOUP_URI_SCHEME_HTTPS || - uri->scheme == SOUP_URI_SCHEME_FTP, FALSE); + g_return_val_if_fail (uri != NULL, FALSE); + g_warn_if_fail (SOUP_URI_IS_VALID (uri)); return uri->port == soup_scheme_default_port (uri->scheme); } @@ -776,6 +823,8 @@ soup_uri_uses_default_port (SoupURI *uri) const char * soup_uri_get_scheme (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, NULL); + return uri->scheme; } @@ -790,6 +839,9 @@ soup_uri_get_scheme (SoupURI *uri) void soup_uri_set_scheme (SoupURI *uri, const char *scheme) { + g_return_if_fail (uri != NULL); + g_return_if_fail (scheme != NULL); + uri->scheme = soup_uri_parse_scheme (scheme, strlen (scheme)); uri->port = soup_scheme_default_port (uri->scheme); } @@ -807,19 +859,23 @@ soup_uri_set_scheme (SoupURI *uri, const char *scheme) const char * soup_uri_get_user (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, NULL); + return uri->user; } /** * soup_uri_set_user: * @uri: a #SoupURI - * @user: the username, or %NULL + * @user: (allow-none): the username, or %NULL * * Sets @uri's user to @user. **/ void soup_uri_set_user (SoupURI *uri, const char *user) { + g_return_if_fail (uri != NULL); + g_free (uri->user); uri->user = g_strdup (user); } @@ -837,19 +893,23 @@ soup_uri_set_user (SoupURI *uri, const char *user) const char * soup_uri_get_password (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, NULL); + return uri->password; } /** * soup_uri_set_password: * @uri: a #SoupURI - * @password: the password, or %NULL + * @password: (allow-none): the password, or %NULL * * Sets @uri's password to @password. 
**/ void soup_uri_set_password (SoupURI *uri, const char *password) { + g_return_if_fail (uri != NULL); + g_free (uri->password); uri->password = g_strdup (password); } @@ -867,23 +927,29 @@ soup_uri_set_password (SoupURI *uri, const char *password) const char * soup_uri_get_host (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, NULL); + return uri->host; } /** * soup_uri_set_host: * @uri: a #SoupURI - * @host: the hostname or IP address, or %NULL + * @host: (allow-none): the hostname or IP address, or %NULL * * Sets @uri's host to @host. * * If @host is an IPv6 IP address, it should not include the brackets * required by the URI syntax; they will be added automatically when * converting @uri to a string. + * + * http and https URIs should not have a %NULL @host. **/ void soup_uri_set_host (SoupURI *uri, const char *host) { + g_return_if_fail (uri != NULL); + g_free (uri->host); uri->host = g_strdup (host); } @@ -901,6 +967,8 @@ soup_uri_set_host (SoupURI *uri, const char *host) guint soup_uri_get_port (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, 0); + return uri->port; } @@ -915,6 +983,8 @@ soup_uri_get_port (SoupURI *uri) void soup_uri_set_port (SoupURI *uri, guint port) { + g_return_if_fail (uri != NULL); + uri->port = port; } @@ -931,19 +1001,29 @@ soup_uri_set_port (SoupURI *uri, guint port) const char * soup_uri_get_path (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, NULL); + return uri->path; } /** * soup_uri_set_path: * @uri: a #SoupURI - * @path: the path + * @path: the non-%NULL path * * Sets @uri's path to @path. **/ void soup_uri_set_path (SoupURI *uri, const char *path) { + g_return_if_fail (uri != NULL); + + /* We allow a NULL path for compatibility, but warn about it. 
*/ + if (!path) { + g_warn_if_fail (path != NULL); + path = ""; + } + g_free (uri->path); uri->path = g_strdup (path); } @@ -961,19 +1041,23 @@ soup_uri_set_path (SoupURI *uri, const char *path) const char * soup_uri_get_query (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, NULL); + return uri->query; } /** * soup_uri_set_query: * @uri: a #SoupURI - * @query: the query + * @query: (allow-none): the query * * Sets @uri's query to @query. **/ void soup_uri_set_query (SoupURI *uri, const char *query) { + g_return_if_fail (uri != NULL); + g_free (uri->query); uri->query = g_strdup (query); } @@ -990,6 +1074,8 @@ soup_uri_set_query (SoupURI *uri, const char *query) void soup_uri_set_query_from_form (SoupURI *uri, GHashTable *form) { + g_return_if_fail (uri != NULL); + g_free (uri->query); uri->query = soup_form_encode_hash (form); } @@ -1012,6 +1098,8 @@ soup_uri_set_query_from_fields (SoupURI *uri, { va_list args; + g_return_if_fail (uri != NULL); + g_free (uri->query); va_start (args, first_field); uri->query = soup_form_encode_valist (first_field, args); @@ -1031,30 +1119,34 @@ soup_uri_set_query_from_fields (SoupURI *uri, const char * soup_uri_get_fragment (SoupURI *uri) { + g_return_val_if_fail (uri != NULL, NULL); + return uri->fragment; } /** * soup_uri_set_fragment: * @uri: a #SoupURI - * @fragment: the fragment + * @fragment: (allow-none): the fragment * * Sets @uri's fragment to @fragment. 
**/ void soup_uri_set_fragment (SoupURI *uri, const char *fragment) { + g_return_if_fail (uri != NULL); + g_free (uri->fragment); uri->fragment = g_strdup (fragment); } /** * soup_uri_copy_host: - * @uri: a #SoupUri + * @uri: a #SoupURI * * Makes a copy of @uri, considering only the protocol, host, and port * - * Return value: the new #SoupUri + * Return value: the new #SoupURI * * Since: 2.26.3 **/ @@ -1064,21 +1156,20 @@ soup_uri_copy_host (SoupURI *uri) SoupURI *dup; g_return_val_if_fail (uri != NULL, NULL); + g_warn_if_fail (SOUP_URI_IS_VALID (uri)); dup = soup_uri_new (NULL); dup->scheme = uri->scheme; dup->host = g_strdup (uri->host); dup->port = uri->port; - if (dup->scheme == SOUP_URI_SCHEME_HTTP || - dup->scheme == SOUP_URI_SCHEME_HTTPS) - dup->path = g_strdup (""); + dup->path = g_strdup (""); return dup; } /** * soup_uri_host_hash: - * @key: (type Soup.URI): a #SoupURI + * @key: (type Soup.URI): a #SoupURI with a non-%NULL @host member * * Hashes @key, considering only the scheme, host, and port. * @@ -1092,6 +1183,7 @@ soup_uri_host_hash (gconstpointer key) const SoupURI *uri = key; g_return_val_if_fail (uri != NULL && uri->host != NULL, 0); + g_warn_if_fail (SOUP_URI_IS_VALID (uri)); return GPOINTER_TO_UINT (uri->scheme) + uri->port + soup_str_case_hash (uri->host); @@ -1099,8 +1191,8 @@ soup_uri_host_hash (gconstpointer key) /** * soup_uri_host_equal: - * @v1: (type Soup.URI): a #SoupURI - * @v2: (type Soup.URI): a #SoupURI + * @v1: (type Soup.URI): a #SoupURI with a non-%NULL @host member + * @v2: (type Soup.URI): a #SoupURI with a non-%NULL @host member * * Compares @v1 and @v2, considering only the scheme, host, and port. 
* @@ -1117,6 +1209,8 @@ soup_uri_host_equal (gconstpointer v1, gconstpointer v2) g_return_val_if_fail (one != NULL && two != NULL, one == two); g_return_val_if_fail (one->host != NULL && two->host != NULL, one->host == two->host); + g_warn_if_fail (SOUP_URI_IS_VALID (one)); + g_warn_if_fail (SOUP_URI_IS_VALID (two)); if (one->scheme != two->scheme) return FALSE; diff --git a/libsoup/soup-uri.h b/libsoup/soup-uri.h index a5c54a6..b851dbe 100644 --- a/libsoup/soup-uri.h +++ b/libsoup/soup-uri.h @@ -97,6 +97,7 @@ guint soup_uri_host_hash (gconstpointer key); gboolean soup_uri_host_equal (gconstpointer v1, gconstpointer v2); +#define SOUP_URI_IS_VALID(uri) ((uri) && (uri)->scheme && (uri)->path) #define SOUP_URI_VALID_FOR_HTTP(uri) ((uri) && ((uri)->scheme == SOUP_URI_SCHEME_HTTP || (uri)->scheme == SOUP_URI_SCHEME_HTTPS) && (uri)->host && (uri)->path) G_END_DECLS diff --git a/libsoup/soup-value-utils.c b/libsoup/soup-value-utils.c index 351b3da..41f8c92 100644 --- a/libsoup/soup-value-utils.c +++ b/libsoup/soup-value-utils.c @@ -249,6 +249,10 @@ soup_value_hash_lookup_vals (GHashTable *hash, const char *first_key, ...) } +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif + /** * soup_value_array_from_args: * @args: arguments to create a #GValueArray from @@ -455,6 +459,9 @@ soup_value_array_get_nth (GValueArray *array, guint index_, GType type, ...) 
return TRUE; } +#ifdef G_GNUC_END_IGNORE_DEPRECATIONS +G_GNUC_END_IGNORE_DEPRECATIONS +#endif static GByteArray * soup_byte_array_copy (GByteArray *ba) diff --git a/libsoup/soup-xmlrpc.c b/libsoup/soup-xmlrpc.c index 71bf270..06c9bca 100644 --- a/libsoup/soup-xmlrpc.c +++ b/libsoup/soup-xmlrpc.c @@ -102,7 +102,13 @@ insert_value (xmlNode *parent, GValue *value) g_hash_table_foreach (hash, insert_member, &struct_node); if (!struct_node) return FALSE; +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif } else if (type == G_TYPE_VALUE_ARRAY) { +#ifdef G_GNUC_END_IGNORE_DEPRECATIONS +G_GNUC_END_IGNORE_DEPRECATIONS +#endif GValueArray *va = g_value_get_boxed (value); xmlNode *node; int i; @@ -128,12 +134,13 @@ insert_value (xmlNode *parent, GValue *value) * @n_params: length of @params * * This creates an XML-RPC methodCall and returns it as a string. - * This is the low-level method that soup_xmlrpc_request_new() and - * soup_xmlrpc_call() are built on. + * This is the low-level method that soup_xmlrpc_request_new() is + * built on. * * @params is an array of #GValue representing the parameters to * @method. (It is *not* a #GValueArray, although if you have a - * #GValueArray, you can just pass its %values and %n_values fields.) + * #GValueArray, you can just pass its valuesf and + * n_values fields.) 
* * The correspondence between glib types and XML-RPC types is: * @@ -200,7 +207,13 @@ soup_xmlrpc_request_newv (const char *uri, const char *method_name, va_list args body = soup_xmlrpc_build_method_call (method_name, params->values, params->n_values); +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif g_value_array_free (params); +#ifdef G_GNUC_END_IGNORE_DEPRECATIONS +G_GNUC_END_IGNORE_DEPRECATIONS +#endif if (!body) return NULL; @@ -519,6 +532,9 @@ parse_value (xmlNode *xmlvalue, GValue *value) if (!data || strcmp ((const char *)data->name, "data") != 0) return FALSE; +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif array = g_value_array_new (1); for (xval = find_real_node (data->children); xval; @@ -535,6 +551,9 @@ parse_value (xmlNode *xmlvalue, GValue *value) } g_value_init (value, G_TYPE_VALUE_ARRAY); g_value_take_boxed (value, array); +#ifdef G_GNUC_END_IGNORE_DEPRECATIONS +G_GNUC_END_IGNORE_DEPRECATIONS +#endif } else return FALSE; @@ -583,6 +602,9 @@ soup_xmlrpc_parse_method_call (const char *method_call, int length, if (!node || strcmp ((const char *)node->name, "params") != 0) goto fail; +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif *params = g_value_array_new (1); param = find_real_node (node->children); while (param && !strcmp ((const char *)param->name, "param")) { @@ -597,6 +619,9 @@ soup_xmlrpc_parse_method_call (const char *method_call, int length, param = find_real_node (param->next); } +#ifdef G_GNUC_END_IGNORE_DEPRECATIONS +G_GNUC_END_IGNORE_DEPRECATIONS +#endif success = TRUE; *method_name = g_strdup ((char *)xmlMethodName); @@ -644,7 +669,13 @@ soup_xmlrpc_extract_method_call (const char *method_call, int length, success = soup_value_array_to_args (params, args); va_end (args); +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif g_value_array_free (params); +#ifdef G_GNUC_END_IGNORE_DEPRECATIONS 
+G_GNUC_END_IGNORE_DEPRECATIONS +#endif return success; } @@ -782,6 +813,43 @@ soup_xmlrpc_error_quark (void) return error; } +/** + * SOUP_XMLRPC_FAULT: + * + * A #GError domain representing an XML-RPC fault code. Used with + * #SoupXMLRPCFault (although servers may also return fault codes not + * in that enumeration). + */ + +/** + * SoupXMLRPCFault: + * @SOUP_XMLRPC_FAULT_PARSE_ERROR_NOT_WELL_FORMED: request was not + * well-formed + * @SOUP_XMLRPC_FAULT_PARSE_ERROR_UNSUPPORTED_ENCODING: request was in + * an unsupported encoding + * @SOUP_XMLRPC_FAULT_PARSE_ERROR_INVALID_CHARACTER_FOR_ENCODING: + * request contained an invalid character + * @SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_XML_RPC: request was not + * valid XML-RPC + * @SOUP_XMLRPC_FAULT_SERVER_ERROR_REQUESTED_METHOD_NOT_FOUND: method + * not found + * @SOUP_XMLRPC_FAULT_SERVER_ERROR_INVALID_METHOD_PARAMETERS: invalid + * parameters + * @SOUP_XMLRPC_FAULT_SERVER_ERROR_INTERNAL_XML_RPC_ERROR: internal + * error + * @SOUP_XMLRPC_FAULT_APPLICATION_ERROR: start of reserved range for + * application error codes + * @SOUP_XMLRPC_FAULT_SYSTEM_ERROR: start of reserved range for + * system error codes + * @SOUP_XMLRPC_FAULT_TRANSPORT_ERROR: start of reserved range for + * transport error codes + * + * Pre-defined XML-RPC fault codes from http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php. + * These are an extension, not part of the XML-RPC spec; you can't + * assume servers will use them. + */ + GQuark soup_xmlrpc_fault_quark (void) { diff --git a/libsoup/soup-xmlrpc.h b/libsoup/soup-xmlrpc.h index 380a31e..d25e380 100644 --- a/libsoup/soup-xmlrpc.h +++ b/libsoup/soup-xmlrpc.h @@ -61,10 +61,6 @@ typedef enum { #define SOUP_XMLRPC_FAULT soup_xmlrpc_fault_quark() GQuark soup_xmlrpc_fault_quark (void); -/* From http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php. - * These are an extension, not part of the XML-RPC spec; you can't - * assume servers will use them. 
- */ typedef enum { SOUP_XMLRPC_FAULT_PARSE_ERROR_NOT_WELL_FORMED = -32700, SOUP_XMLRPC_FAULT_PARSE_ERROR_UNSUPPORTED_ENCODING = -32701, diff --git a/ltmain.sh b/ltmain.sh new file mode 100755 index 0000000..78cf017 --- /dev/null +++ b/ltmain.sh @@ -0,0 +1,9636 @@ + +# libtool (GNU libtool) 2.4 +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, +# 2007, 2008, 2009, 2010 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, +# or obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +# Usage: $progname [OPTION]... [MODE-ARG]... +# +# Provide generalized library-building support services. 
+# +# --config show all configuration variables +# --debug enable verbose shell tracing +# -n, --dry-run display commands without modifying any files +# --features display basic configuration information and exit +# --mode=MODE use operation mode MODE +# --preserve-dup-deps don't remove duplicate dependency libraries +# --quiet, --silent don't print informational messages +# --no-quiet, --no-silent +# print informational messages (default) +# --tag=TAG use configuration variables from tag TAG +# -v, --verbose print more informational messages than default +# --no-verbose don't print the extra informational messages +# --version print version information +# -h, --help, --help-all print short, long, or detailed help message +# +# MODE must be one of the following: +# +# clean remove files from the build directory +# compile compile a source file into a libtool object +# execute automatically set library path, then run a program +# finish complete the installation of libtool libraries +# install install libraries or executables +# link create a library or an executable +# uninstall remove libraries from an installed directory +# +# MODE-ARGS vary depending on the MODE. When passed as first option, +# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that. +# Try `$progname --help --mode=MODE' for a more detailed description of MODE. +# +# When reporting a bug, please describe a test case to reproduce it and +# include the following information: +# +# host-triplet: $host +# shell: $SHELL +# compiler: $LTCC +# compiler flags: $LTCFLAGS +# linker: $LD (gnu? $with_gnu_ld) +# $progname: (GNU libtool) 2.4 +# automake: $automake_version +# autoconf: $autoconf_version +# +# Report bugs to . +# GNU libtool home page: . +# General help using GNU software: . 
+ +PROGRAM=libtool +PACKAGE=libtool +VERSION=2.4 +TIMESTAMP="" +package_revision=1.3293 + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# NLS nuisances: We save the old values to restore during execute mode. +lt_user_locale= +lt_safe_locale= +for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test \"\${$lt_var+set}\" = set; then + save_$lt_var=\$$lt_var + $lt_var=C + export $lt_var + lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" + lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" + fi" +done +LC_ALL=C +LANGUAGE=C +export LANGUAGE LC_ALL + +$lt_unset CDPATH + + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + + + +: ${CP="cp -f"} +test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} +: ${EGREP="/bin/grep -E"} +: ${FGREP="/bin/grep -F"} +: ${GREP="/bin/grep"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SED="/bin/sed"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} +: ${Xsed="$SED -e 1s/^X//"} + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? 
= 77 is used to indicate a skipped test to automake. + +exit_status=$EXIT_SUCCESS + +# Make sure IFS has a sensible default +lt_nl=' +' +IFS=" $lt_nl" + +dirname="s,/[^/]*$,," +basename="s,^.*/,," + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi +} # func_dirname may be replaced by extended shell implementation + + +# func_basename file +func_basename () +{ + func_basename_result=`$ECHO "${1}" | $SED "$basename"` +} # func_basename may be replaced by extended shell implementation + + +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + # Extract subdirectory from the argument. + func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi + func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` +} # func_dirname_and_basename may be replaced by extended shell implementation + + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. 
+# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# func_strip_suffix prefix name +func_stripname () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname may be replaced by extended shell implementation + + +# These SED scripts presuppose an absolute path with a trailing slash. +pathcar='s,^/\([^/]*\).*$,\1,' +pathcdr='s,^/[^/]*,,' +removedotparts=':dotsl + s@/\./@/@g + t dotsl + s,/\.$,/,' +collapseslashes='s@/\{1,\}@/@g' +finalslash='s,/*$,/,' + +# func_normal_abspath PATH +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +# value returned in "$func_normal_abspath_result" +func_normal_abspath () +{ + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. 
+ func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"` + while :; do + # Processed it all yet? + if test "$func_normal_abspath_tpath" = / ; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result" ; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + +# func_relative_path SRCDIR DSTDIR +# generates a relative path from SRCDIR to DSTDIR, with a trailing +# slash if non-empty, suitable for immediately appending a filename +# without needing to append a separator. 
+# value returned in "$func_relative_path_result" +func_relative_path () +{ + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=${func_dirname_result} + if test "x$func_relative_path_tlibdir" = x ; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test "x$func_stripname_result" != x ; then + func_relative_path_result=${func_relative_path_result}/${func_stripname_result} + fi + + # Normalisation. If bindir is libdir, return empty string, + # else relative path ending with a slash; either way, target + # file name can be directly appended. + if test ! 
-z "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result/" + func_relative_path_result=$func_stripname_result + fi +} + +# The name of this program: +func_dirname_and_basename "$progpath" +progname=$func_basename_result + +# Make sure we have an absolute path for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=$func_dirname_result + progdir=`cd "$progdir" && pwd` + progpath="$progdir/$progname" + ;; + *) + save_IFS="$IFS" + IFS=: + for progdir in $PATH; do + IFS="$save_IFS" + test -x "$progdir/$progname" && break + done + IFS="$save_IFS" + test -n "$progdir" || progdir=`pwd` + progpath="$progdir/$progname" + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed="${SED}"' -e 1s/^X//' +sed_quote_subst='s/\([`"$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' + +# Sed substitution that converts a w32 file name or path +# which contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-`\' parameter expansions in output of double_quote_subst that were +# `\'-ed in input to the same. If an odd number of `\' preceded a '$' +# in input to double_quote_subst, that '$' was protected from expansion. +# Since each input `\' is now two `\'s, look for any number of runs of +# four `\'s followed by two `\'s and then a '$'. `\' that '$'. 
+bs='\\' +bs2='\\\\' +bs4='\\\\\\\\' +dollar='\$' +sed_double_backslash="\ + s/$bs4/&\\ +/g + s/^$bs2$dollar/$bs&/ + s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g + s/\n//g" + +# Standard options: +opt_dry_run=false +opt_help=false +opt_quiet=false +opt_verbose=false +opt_warning=: + +# func_echo arg... +# Echo program name prefixed message, along with the current mode +# name if it has been set yet. +func_echo () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }$*" +} + +# func_verbose arg... +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $opt_verbose && func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + +# func_error arg... +# Echo program name prefixed message to standard error. +func_error () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 +} + +# func_warning arg... +# Echo program name prefixed warning message to standard error. +func_warning () +{ + $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 + + # bash bug again: + : +} + +# func_fatal_error arg... +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + +# func_fatal_help arg... +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + func_error ${1+"$@"} + func_fatal_error "$help" +} +help="Try \`$progname --help' for more information." ## default + + +# func_grep expression filename +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_mkdir_p directory-path +# Make sure the entire path to DIRECTORY-PATH is available. 
+func_mkdir_p () +{ + my_directory_path="$1" + my_dir_list= + + if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then + + # Protect directory names starting with `-' + case $my_directory_path in + -*) my_directory_path="./$my_directory_path" ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$my_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + my_dir_list="$my_directory_path:$my_dir_list" + + # If the last portion added has no slash in it, the list is done + case $my_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"` + done + my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'` + + save_mkdir_p_IFS="$IFS"; IFS=':' + for my_dir in $my_dir_list; do + IFS="$save_mkdir_p_IFS" + # mkdir can fail with a `File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$my_dir" 2>/dev/null || : + done + IFS="$save_mkdir_p_IFS" + + # Bail out if we (or some other process) failed to create a directory. + test -d "$my_directory_path" || \ + func_fatal_error "Failed to create \`$1'" + fi +} + + +# func_mktempdir [string] +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, STRING is the basename for that directory. +func_mktempdir () +{ + my_template="${TMPDIR-/tmp}/${1-$progname}" + + if test "$opt_dry_run" = ":"; then + # Return a directory name, but don't create it in dry-run mode + my_tmpdir="${my_template}-$$" + else + + # If mktemp works, use that first and foremost + my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` + + if test ! 
-d "$my_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + my_tmpdir="${my_template}-${RANDOM-0}$$" + + save_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$my_tmpdir" + umask $save_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$my_tmpdir" || \ + func_fatal_error "cannot create temporary directory \`$my_tmpdir'" + fi + + $ECHO "$my_tmpdir" +} + + +# func_quote_for_eval arg +# Aesthetically quote ARG to be evaled later. +# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT +# is double-quoted, suitable for a subsequent eval, whereas +# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters +# which are still active within double quotes backslashified. +func_quote_for_eval () +{ + case $1 in + *[\\\`\"\$]*) + func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;; + *) + func_quote_for_eval_unquoted_result="$1" ;; + esac + + case $func_quote_for_eval_unquoted_result in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and and variable + # expansion for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" + ;; + *) + func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" + esac +} + + +# func_quote_for_expand arg +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + case $1 in + *[\\\`\"]*) + my_arg=`$ECHO "$1" | $SED \ + -e "$double_quote_subst" -e "$sed_double_backslash"` ;; + *) + my_arg="$1" ;; + esac + + case $my_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. 
+ # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + my_arg="\"$my_arg\"" + ;; + esac + + func_quote_for_expand_result="$my_arg" +} + + +# func_show_eval cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$my_cmd" + my_status=$? + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + + +# func_show_eval_locale cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$lt_user_locale + $my_cmd" + my_status=$? + eval "$lt_safe_locale" + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + +# func_tr_sh +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_version +# Echo version message to standard output and exit. 
+func_version () +{ + $opt_debug + + $SED -n '/(C)/!b go + :more + /\./!{ + N + s/\n# / / + b more + } + :go + /^# '$PROGRAM' (GNU /,/# warranty; / { + s/^# // + s/^# *$// + s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ + p + }' < "$progpath" + exit $? +} + +# func_usage +# Echo short help message to standard output and exit. +func_usage () +{ + $opt_debug + + $SED -n '/^# Usage:/,/^# *.*--help/ { + s/^# // + s/^# *$// + s/\$progname/'$progname'/ + p + }' < "$progpath" + echo + $ECHO "run \`$progname --help | more' for full usage" + exit $? +} + +# func_help [NOEXIT] +# Echo long help message to standard output and exit, +# unless 'noexit' is passed as argument. +func_help () +{ + $opt_debug + + $SED -n '/^# Usage:/,/# Report bugs to/ { + :print + s/^# // + s/^# *$// + s*\$progname*'$progname'* + s*\$host*'"$host"'* + s*\$SHELL*'"$SHELL"'* + s*\$LTCC*'"$LTCC"'* + s*\$LTCFLAGS*'"$LTCFLAGS"'* + s*\$LD*'"$LD"'* + s/\$with_gnu_ld/'"$with_gnu_ld"'/ + s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ + s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ + p + d + } + /^# .* home page:/b print + /^# General help using/b print + ' < "$progpath" + ret=$? + if test -z "$1"; then + exit $ret + fi +} + +# func_missing_arg argname +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $opt_debug + + func_error "missing argument for $1." + exit_cmd=exit +} + + +# func_split_short_opt shortopt +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. 
+func_split_short_opt () +{ + my_sed_short_opt='1s/^\(..\).*$/\1/;q' + my_sed_short_rest='1s/^..\(.*\)$/\1/;q' + + func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` + func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` +} # func_split_short_opt may be replaced by extended shell implementation + + +# func_split_long_opt longopt +# Set func_split_long_opt_name and func_split_long_opt_arg shell +# variables after splitting LONGOPT at the `=' sign. +func_split_long_opt () +{ + my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' + my_sed_long_arg='1s/^--[^=]*=//' + + func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` + func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` +} # func_split_long_opt may be replaced by extended shell implementation + +exit_cmd=: + + + + + +magic="%%%MAGIC variable%%%" +magic_exe="%%%MAGIC EXE variable%%%" + +# Global variables. +nonopt= +preserve_args= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "${1}=\$${1}\${2}" +} # func_append may be replaced by extended shell implementation + +# func_append_quoted var value +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +func_append_quoted () +{ + func_quote_for_eval "${2}" + eval "${1}=\$${1}\\ \$func_quote_for_eval_result" +} # func_append_quoted may be replaced by extended shell implementation + + +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=`expr "${@}"` +} # func_arith may be replaced by extended shell implementation + + +# func_len string +# STRING may not start with a hyphen. 
+func_len () +{ + func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` +} # func_len may be replaced by extended shell implementation + + +# func_lo2o object +func_lo2o () +{ + func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +} # func_lo2o may be replaced by extended shell implementation + + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +} # func_xform may be replaced by extended shell implementation + + +# func_fatal_configuration arg... +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func_error ${1+"$@"} + func_error "See the $PACKAGE documentation for more information." + func_fatal_error "Fatal configuration error." +} + + +# func_config +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + +# func_features +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test "$build_libtool_libs" = yes; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test "$build_old_libs" = yes; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + +# func_enable_tag tagname +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. 
+func_enable_tag () +{ + # Global variable: + tagname="$1" + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf="/$re_begincf/,/$re_endcf/p" + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + +# func_check_version_match +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. 
This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# Shorthand for --mode=foo, only valid as the first argument +case $1 in +clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; +compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; +execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; +finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; +install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; +link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; +uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; +esac + + + +# Option defaults: +opt_debug=: +opt_dry_run=false +opt_config=false +opt_preserve_dup_deps=false +opt_features=false +opt_finish=false +opt_help=false +opt_help_all=false +opt_silent=: +opt_verbose=: +opt_silent=false +opt_verbose=false + + +# Parse options once, thoroughly. This comes as soon as possible in the +# script to make things like `--version' happen as quickly as we can. 
+{ + # this just eases exit handling + while test $# -gt 0; do + opt="$1" + shift + case $opt in + --debug|-x) opt_debug='set -x' + func_echo "enabling shell trace mode" + $opt_debug + ;; + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + --config) + opt_config=: +func_config + ;; + --dlopen|-dlopen) + optarg="$1" + opt_dlopen="${opt_dlopen+$opt_dlopen +}$optarg" + shift + ;; + --preserve-dup-deps) + opt_preserve_dup_deps=: + ;; + --features) + opt_features=: +func_features + ;; + --finish) + opt_finish=: +set dummy --mode finish ${1+"$@"}; shift + ;; + --help) + opt_help=: + ;; + --help-all) + opt_help_all=: +opt_help=': help-all' + ;; + --mode) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_mode="$optarg" +case $optarg in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $opt" + exit_cmd=exit + break + ;; +esac + shift + ;; + --no-silent|--no-quiet) + opt_silent=false +func_append preserve_args " $opt" + ;; + --no-verbose) + opt_verbose=false +func_append preserve_args " $opt" + ;; + --silent|--quiet) + opt_silent=: +func_append preserve_args " $opt" + opt_verbose=false + ;; + --verbose|-v) + opt_verbose=: +func_append preserve_args " $opt" +opt_silent=false + ;; + --tag) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_tag="$optarg" +func_append preserve_args " $opt $optarg" +func_enable_tag "$optarg" + shift + ;; + + -\?|-h) func_usage ;; + --help) func_help ;; + --version) func_version ;; + + # Separate optargs to long options: + --*=*) + func_split_long_opt "$opt" + set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-n*|-v*) + func_split_short_opt "$opt" + set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) break ;; + -*) func_fatal_help "unrecognized option \`$opt'" ;; + 
*) set dummy "$opt" ${1+"$@"}; shift; break ;; + esac + done + + # Validate options: + + # save first non-option argument + if test "$#" -gt 0; then + nonopt="$opt" + shift + fi + + # preserve --debug + test "$opt_debug" = : || func_append preserve_args " --debug" + + case $host in + *cygwin* | *mingw* | *pw32* | *cegcc*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then + func_fatal_configuration "not configured to build any kind of library" + fi + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test "$opt_mode" != execute; then + func_error "unrecognized option \`-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$progname --help --mode=$opt_mode' for more information." + } + + + # Bail if the options were screwed + $exit_cmd $EXIT_FAILURE +} + + + + +## ----------- ## +## Main. ## +## ----------- ## + +# func_lalib_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null \ + | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. 
Works if `file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case "$lalib_p_line" in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test "$lalib_p" = yes +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + func_lalib_p "$1" +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! 
+func_execute_cmds () +{ + $opt_debug + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$save_ifs + eval cmd=\"$cmd\" + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# `FILE.' does not work on cygwin managed mounts. +func_source () +{ + $opt_debug + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case "$lt_sysroot:$1" in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result="=$func_stripname_result" + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' 
+func_infer_tag () +{ + $opt_debug + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
+ if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with \`--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=${1} + if test "$build_libtool_libs" = yes; then + write_lobj=\'${2}\' + else + write_lobj=none + fi + + if test "$build_old_libs" = yes; then + write_oldobj=\'${3}\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$lt_sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. 
+func_convert_core_path_wine_to_w32 () +{ + $opt_debug + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result="" + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result" ; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result" + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $opt_debug + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" 
-ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $opt_debug + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $opt_debug + if test -z "$2" && test -n "$1" ; then + func_error "Could not determine host file name corresponding to" + func_error " \`$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result="$1" + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $opt_debug + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " \`$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. 
+ if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result="$3" + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $opt_debug + case $4 in + $1 ) func_to_host_path_result="$3$func_to_host_path_result" + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via `$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $opt_debug + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. +func_to_tool_file () +{ + $opt_debug + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. 
+func_convert_file_noop () +{ + func_to_host_file_result="$1" +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result="$func_convert_core_msys_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. +func_convert_file_nix_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result="$func_convert_core_file_wine_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. 
+func_convert_file_msys_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via `$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. +# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. 
If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $opt_debug + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd="func_convert_path_${func_stripname_result}" + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $opt_debug + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result="$1" +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. +func_convert_path_msys_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result="$func_convert_core_msys_to_w32_result" + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. 
Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. +func_convert_path_nix_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result="$func_convert_core_path_wine_to_w32_result" + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. 
+func_convert_path_msys_to_cygwin () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result="$func_cygpath_result" + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result="$func_cygpath_result" + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_mode_compile arg... +func_mode_compile () +{ + $opt_debug + # Get the compilation command and the source file. + base_compile= + srcfile="$nonopt" # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". 
Instead, add this to base_compile + lastarg="$arg" + arg_mode=normal + ;; + + target ) + libobj="$arg" + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify \`-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs="$IFS"; IFS=',' + for arg in $args; do + IFS="$save_ifs" + func_append_quoted lastarg "$arg" + done + IFS="$save_ifs" + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg="$srcfile" + srcfile="$arg" + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with \`-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj="$func_basename_result" + } + ;; + esac + + # Recognize several different file suffixes. 
+ # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? | *.for | *.java | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from \`$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name \`$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname="$func_basename_result" + xdir="$func_dirname_result" + lobj=${xdir}$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. 
+ if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." 
+ + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test "$build_libtool_libs" = yes; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test "$pic_mode" != no; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. 
+ if test "$suppress_opt" = yes; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test "$build_old_libs" = yes; then + if test "$pic_mode" != yes; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test "$compiler_c_o" = yes; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test "$need_locks" != no; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test "$opt_mode" = compile && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. 
+ func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a \`.o' file suitable for static linking + -static only build a \`.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to \`-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." 
+ ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. 
+ +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass 
linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode \`$opt_mode'" + ;; + esac + + echo + $ECHO "Try \`$progname --help' for more information about other modes." 
+} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test "$opt_help" = :; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | sed -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + sed '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $opt_debug + # The first argument is the command name. + cmd="$nonopt" + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "\`$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "\`$file' was not linked with \`-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir="$func_dirname_result" + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." 
+ dir="$func_dirname_result" + ;; + + *) + func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file="$progdir/$program" + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if test "X$opt_dry_run" = Xfalse; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = execute && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... 
+func_mode_finish () +{ + $opt_debug + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "\`$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument \`$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and \`=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
+ $opt_silent && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the \`-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the \`$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the \`$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test "$opt_mode" = finish && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $opt_debug + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). 
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac; then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test "x$prev" = x-m && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
+ func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the \`$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir="$func_dirname_result" + destname="$func_basename_result" + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "\`$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "\`$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. 
+ func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir="$func_dirname_result" + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking \`$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. 
+ set dummy $library_names; shift + if test -n "$1"; then + realname="$1" + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme="$stripme" + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme="" + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name="$func_basename_result" + instname="$dir/$name"i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. 
+ case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to \`$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script \`$wrapper'" + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. 
+ libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "\`$lib' has not been installed in \`$libdir'" + finalize=no + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + $opt_dry_run || { + if test "$finalize" = yes; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file="$func_basename_result" + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_silent || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink \`$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file="$outputname" + else + func_warning "cannot relink \`$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name="$func_basename_result" + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' 
+ + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run \`$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = install && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. +func_generate_dlsyms () +{ + $opt_debug + my_outputname="$1" + my_originator="$2" + my_pic_p="${3-no}" + my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms="${my_outputname}S.c" + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist="$output_objdir/${my_outputname}.nm" + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. 
*/ +/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + func_verbose "generating symbol list for \`$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $opt_dry_run || { + $RM $export_symbols + eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from \`$dlprefile'" + func_basename "$dlprefile" + name="$func_basename_result" + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + 
dlprefile_dlbasename="" + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname" ; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename="$func_basename_result" + else + # no lafile. user explicitly requested -dlpreopen . + $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename" ; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. 
+ if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[]; +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{\ + { \"$my_originator\", (void *) 0 }," + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. 
+ *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + if test "X$my_pic_p" != Xno; then + pic_flag_for_symtable=" $pic_flag" + fi + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' + + # Transform the symbol file into the correct name. + symfileobj="$output_objdir/${my_outputname}S.$objext" + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for \`$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. 
+ compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. +func_win32_libid () +{ + $opt_debug + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s,.*,import, + p + q + } + }'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. 
+# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $opt_debug + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $opt_debug + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive which possess that section. Heuristic: eliminate + # all those which have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) 
This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. 
+# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $opt_debug + if func_cygming_gnu_implib_p "$1" ; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1" ; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result="" + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $opt_debug + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + if test "$lock_old_archive_extraction" = yes; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test "$lock_old_archive_extraction" = yes; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $opt_debug + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + + for my_xlib in $my_oldlibs; do + # Extract the objects. 
+ case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib="$func_basename_result" + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir="$my_gentop/$my_xlib_u" + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`basename "$darwin_archive"` + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ 
+ cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result="$my_oldobjs" +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory in which it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ which is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options which match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. 
+ lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. +func_exec_program () +{ + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. 
+ thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! 
-d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. + if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. 
+ \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +/* declarations of non-ANSI functions */ +#if defined(__MINGW32__) +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined(__CYGWIN__) +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined (other platforms) ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined(_MSC_VER) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +# ifndef _INTPTR_T_DEFINED +# define _INTPTR_T_DEFINED +# define intptr_t int +# endif +#elif defined(__MINGW32__) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined(__CYGWIN__) +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined (other platforms) ... 
*/ +#endif + +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) + +#if defined(LT_DEBUGWRAPPER) +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char 
*file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + int tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable 
(concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp (str, pat) == 0) + *str = '\0'; + } + return str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) 
+{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + int len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + int orig_value_len = strlen (orig_value); + int add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const 
char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + int len = strlen (new_value); + while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[len-1] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. 
+ - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + 
new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -e 's/\([\\"]\)/\\\1/g' \ + -e 's/^/ fputs ("/' -e 's/$/\\n", f);/' + + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $opt_debug + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $opt_debug + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # which system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll which has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. 
+ allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=no + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module="${wl}-single_module" + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. 
+ test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir="$arg" + prev= + continue + ;; + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + test -f "$arg" \ + || func_fatal_error "symbol file \`$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. 
+ + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
+ func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file \`$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. 
+ func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "\`-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between \`-L' and \`$1'" + else + func_fatal_error "need path for \`-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of \`$dir'" + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. 
+ test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module="${wl}-multi_module" + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "\`-no-install' is ignored for $host" + func_warning "assuming \`-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. 
+ continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time 
optimization + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-flto*|-fwhopr*|-fuse-linker-plugin) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. 
+ non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the \`$prevarg' option requires an argument" + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname="$func_basename_result" + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + func_dirname "$output" "/" "" + output_objdir="$func_dirname_result$objdir" + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. 
-la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps ; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... 
+ if test "$linkmode,$pass" = "lib,link"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs="$tmp_deplibs" + fi + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test "$linkmode,$pass" = "lib,dlpreopen"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs="$dlprefiles" + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + func_warning "\`-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + 
name=$func_stripname_result + if test "$linkmode" = lib; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + func_dirname "$lib" "" "." 
+ ladir="$func_dirname_result" + lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + *.ltframework) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "\`-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. 
+ case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. + case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + else + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" 
+ deplibs="$deplib $deplibs" + fi + ;; + esac + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + + if test "$found" = yes || test -f "$lib"; then : + else + func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" + fi + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "\`$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." 
+ ladir="$func_dirname_result" + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + # It is a libtool convenience library, so add in its objects. 
+ func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + elif test "$linkmode" != prog && test "$linkmode" != lib; then + func_fatal_error "\`$lib' is not a convenience library" + fi + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + if test -n "$old_library" && + { test "$prefer_static_libs" = yes || + test "$prefer_static_libs,$installed" = "built,no"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib="$l" + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + + # This library was specified with -dlopen. + if test "$pass" = dlopen; then + if test -z "$libdir"; then + func_fatal_error "cannot -dlopen a convenience library: \`$lib'" + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. 
+ case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of \`$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir="$ladir" + fi + ;; + esac + func_basename "$lib" + laname="$func_basename_result" + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library \`$lib' was moved." + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$lt_sysroot$libdir" + absdir="$lt_sysroot$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir" && test "$linkmode" = prog; then + func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" + fi + case "$host" in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. 
+ # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. 
+ elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? + if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { { test "$prefer_static_libs" = no || + test "$prefer_static_libs,$installed" = "built,yes"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. 
+ case "$temp_rpath:" in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test "$installed" = no; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! 
+ dlopenmodule="" + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule="$dlpremoduletest" + break + fi + done + if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then + echo + if test "$linkmode" = prog; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname="$1" + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? 
+ if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc*) + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + func_basename "$soroot" + soname="$func_basename_result" + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from \`$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for \`$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$opt_mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we can not + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null ; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo 
"*** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + elif test -n "$old_library"; then + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$dir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && + test "$hardcode_minus_L" != yes && + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$opt_mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. 
+ if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. 
+ + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system can not link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? 
+ + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path="$deplib" ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of \`$dir'" + absdir="$dir" + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl" ; then + depdepl="$absdir/$objdir/$depdepl" + darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" + func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" + path= + fi + fi + ;; + *) + path="-L$absdir/$objdir" + ;; + esac + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "\`$deplib' seems to be moved" + + path="-L$absdir" + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test "$pass" = link; then + if test "$linkmode" = "prog"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + 
deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. 
+ case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + fi + if test "$linkmode" = prog || test "$linkmode" = lib; then + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "\`-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "\`-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs="$output" + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. 
+ case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test "$module" = no && \ + func_fatal_help "libtool library \`$output' must begin with \`lib'" + + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test "$dlself" != no && \ + func_warning "\`-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test "$#" -gt 1 && \ + func_warning "ignoring multiple \`-rpath's for a libtool library" + + install_libdir="$1" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. + # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "\`-release' is ignored for convenience libraries" + else + + # Parse the version information argument. 
+ save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + shift + IFS="$save_ifs" + + test -n "$7" && \ + func_fatal_help "too many parameters to \`-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$1" + number_minor="$2" + number_revision="$3" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + darwin|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|qnx|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_minor" + lt_irix_increment=no + ;; + esac + ;; + no) + current="$1" + revision="$2" + age="$3" + ;; + esac + + # Check that each of the things are valid numbers. 
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT \`$current' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION \`$revision' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE \`$age' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE \`$age' is greater than the current interface number \`$current'" + func_fatal_error "\`$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... 
+ func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current" + ;; + + irix | nonstopux) + if test "X$lt_irix_increment" = "Xno"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + func_append verstring ":${current}.0" + ;; + + qnx) + major=".$current" + versuffix=".$current" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. 
+ func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + + *) + func_fatal_configuration "unknown library version type \`$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + func_warning "undefined symbols not allowed in $host shared libraries" + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + + fi + + func_generate_dlsyms "$libname" "$libname" "yes" + func_append libobjs " $symfileobj" + test "X$libobjs" = "X " && libobjs= + + if test "$opt_mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. 
+ if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. + temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. 
+ ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. 
+ # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? + potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test "X$deplibs_check_method" = "Xnone"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." 
+ fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! + deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs="$new_libs" + + # All the library-specific variables (install_libdir is set above). 
+ library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$opt_mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + if test -n "$hardcode_libdir_flag_spec_ld"; then + eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" + else + eval dep_rpath=\"$hardcode_libdir_flag_spec\" + fi + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. 
+ rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. + eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname="$1" + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols="$output_objdir/$libname.uexp" + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + if test "x`$SED 1q $export_symbols`" != xEXPORTS; then + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. 
+ orig_export_symbols="$export_symbols" + export_symbols= + always_export_symbols=yes + fi + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd1 in $cmds; do + IFS="$save_ifs" + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test "$try_normal_branch" = yes \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=${output_objdir}/${output_la}.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test "$compiler_needs_object" = yes && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. 
+ whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. + + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. 
+ # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then + output=${output_objdir}/${output_la}.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then + output=${output_objdir}/${output_la}.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test "$compiler_needs_object" = yes; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-${k}.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. 
+ for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test "X$objlist" = X || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-${k}.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\${concat_cmds}$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + if ${skipped_export-false}; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + fi + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. 
+ save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + if ${skipped_export-false}; then + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + fi + + libobjs=$output + # Restore the value of output. 
+ output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. + eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? 
+ + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "\`-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object \`$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec and hope we can get by with + # turning comma into space.. 
+ wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + else + gentop="$output_objdir/${obj}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + func_execute_cmds "$reload_cmds" 'exit $?' 
+ fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "\`-release' is ignored for programs" + + test "$preload" = yes \ + && test "$dlopen_support" = unknown \ + && test "$dlopen_self" = unknown \ + && test "$dlopen_self_static" = unknown && \ + func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test "$tagname" = CXX ; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " ${wl}-bind_at_load" + func_append finalize_command " ${wl}-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. 
+ if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" "no" + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=yes + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=no + ;; + *cygwin* | *mingw* ) + if test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + *) + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + esac + if test "$wrappers_required" = no; then + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. 
+ if test -f "$output_objdir/${outputname}S.${objext}"; then + func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' + fi + + exit $exit_status + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. + $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' 
+ fi + + exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "\`$output' will be relinked during installation" + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. 
+ if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource="$output_path/$objdir/lt-$output_name.c" + cwrapper="$output_path/$output_name.exe" + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. 
+ $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host" ; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save $symfileobj" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + if test "$preload" = yes && test -f "$symfileobj"; then + func_append oldobjs " $symfileobj" + fi + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. 
We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase="$func_basename_result" + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." 
+ save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. 
+ relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + # Only create the output if not a dry run. + $opt_dry_run || { + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. 
that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles="$newdlprefiles" + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test "x$bindir" != x ; + then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result$dlname + else + # Otherwise fall back on heuristic. + tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# Please DO NOT delete this file! 
+# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that can not go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +{ test "$opt_mode" = link || test "$opt_mode" = relink; } && + func_mode_link ${1+"$@"} + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $opt_debug + RM="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=yes ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." 
+ dir="$func_dirname_result" + if test "X$dir" = X.; then + odir="$objdir" + else + odir="$dir/$objdir" + fi + func_basename "$file" + name="$func_basename_result" + test "$opt_mode" = uninstall && odir="$dir" + + # Remember odir for removal later, being careful to avoid duplicates + if test "$opt_mode" = clean; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case "$opt_mode" in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. + func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. 
+ if test -n "$pic_object" && + test "$pic_object" != none; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && + test "$non_pic_object" != none; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$opt_mode" = clean ; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + func_append rmfiles " $odir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && + func_mode_uninstall ${1+"$@"} + +test -z "$opt_mode" && { + help="$generic_help" + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode \`$opt_mode'" + +if test -n 
"$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. + +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: +# vi:sw=2 + diff --git a/m4/gtk-doc.m4 b/m4/gtk-doc.m4 new file mode 100644 index 0000000..0ada151 --- /dev/null +++ b/m4/gtk-doc.m4 @@ -0,0 +1,67 @@ +dnl -*- mode: autoconf -*- + +# serial 1 + +dnl Usage: +dnl GTK_DOC_CHECK([minimum-gtk-doc-version]) +AC_DEFUN([GTK_DOC_CHECK], +[ + AC_REQUIRE([PKG_PROG_PKG_CONFIG]) + AC_BEFORE([AC_PROG_LIBTOOL],[$0])dnl setup libtool first + AC_BEFORE([AM_PROG_LIBTOOL],[$0])dnl setup libtool first + + dnl check for tools we added during development + AC_PATH_PROG([GTKDOC_CHECK],[gtkdoc-check]) + AC_PATH_PROGS([GTKDOC_REBASE],[gtkdoc-rebase],[true]) + AC_PATH_PROG([GTKDOC_MKPDF],[gtkdoc-mkpdf]) + + dnl for overriding the documentation installation directory + AC_ARG_WITH([html-dir], + AS_HELP_STRING([--with-html-dir=PATH], [path to installed docs]),, + 
[with_html_dir='${datadir}/gtk-doc/html']) + HTML_DIR="$with_html_dir" + AC_SUBST([HTML_DIR]) + + dnl enable/disable documentation building + AC_ARG_ENABLE([gtk-doc], + AS_HELP_STRING([--enable-gtk-doc], + [use gtk-doc to build documentation [[default=no]]]),, + [enable_gtk_doc=no]) + + if test x$enable_gtk_doc = xyes; then + ifelse([$1],[], + [PKG_CHECK_EXISTS([gtk-doc],, + AC_MSG_ERROR([gtk-doc not installed and --enable-gtk-doc requested]))], + [PKG_CHECK_EXISTS([gtk-doc >= $1],, + AC_MSG_ERROR([You need to have gtk-doc >= $1 installed to build $PACKAGE_NAME]))]) + dnl don't check for glib if we build glib + if test "x$PACKAGE_NAME" != "xglib"; then + dnl don't fail if someone does not have glib + PKG_CHECK_MODULES(GTKDOC_DEPS, glib-2.0 >= 2.10.0 gobject-2.0 >= 2.10.0,,) + fi + fi + + AC_MSG_CHECKING([whether to build gtk-doc documentation]) + AC_MSG_RESULT($enable_gtk_doc) + + dnl enable/disable output formats + AC_ARG_ENABLE([gtk-doc-html], + AS_HELP_STRING([--enable-gtk-doc-html], + [build documentation in html format [[default=yes]]]),, + [enable_gtk_doc_html=yes]) + AC_ARG_ENABLE([gtk-doc-pdf], + AS_HELP_STRING([--enable-gtk-doc-pdf], + [build documentation in pdf format [[default=no]]]),, + [enable_gtk_doc_pdf=no]) + + if test -z "$GTKDOC_MKPDF"; then + enable_gtk_doc_pdf=no + fi + + + AM_CONDITIONAL([ENABLE_GTK_DOC], [test x$enable_gtk_doc = xyes]) + AM_CONDITIONAL([GTK_DOC_BUILD_HTML], [test x$enable_gtk_doc_html = xyes]) + AM_CONDITIONAL([GTK_DOC_BUILD_PDF], [test x$enable_gtk_doc_pdf = xyes]) + AM_CONDITIONAL([GTK_DOC_USE_LIBTOOL], [test -n "$LIBTOOL"]) + AM_CONDITIONAL([GTK_DOC_USE_REBASE], [test -n "$GTKDOC_REBASE"]) +]) diff --git a/m4/libgcrypt.m4 b/m4/libgcrypt.m4 deleted file mode 100644 index 854eaaa..0000000 --- a/m4/libgcrypt.m4 +++ /dev/null @@ -1,108 +0,0 @@ -dnl Autoconf macros for libgcrypt -dnl Copyright (C) 2002, 2004 Free Software Foundation, Inc. 
-dnl -dnl This file is free software; as a special exception the author gives -dnl unlimited permission to copy and/or distribute it, with or without -dnl modifications, as long as this notice is preserved. -dnl -dnl This file is distributed in the hope that it will be useful, but -dnl WITHOUT ANY WARRANTY, to the extent permitted by law; without even the -dnl implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - - -dnl AM_PATH_LIBGCRYPT([MINIMUM-VERSION, -dnl [ACTION-IF-FOUND [, ACTION-IF-NOT-FOUND ]]]) -dnl Test for libgcrypt and define LIBGCRYPT_CFLAGS and LIBGCRYPT_LIBS. -dnl MINIMUN-VERSION is a string with the version number optionalliy prefixed -dnl with the API version to also check the API compatibility. Example: -dnl a MINIMUN-VERSION of 1:1.2.5 won't pass the test unless the installed -dnl version of libgcrypt is at least 1.2.5 *and* the API number is 1. Using -dnl this features allows to prevent build against newer versions of libgcrypt -dnl with a changed API. 
-dnl -AC_DEFUN([AM_PATH_LIBGCRYPT], -[ AC_ARG_WITH(libgcrypt-prefix, - AC_HELP_STRING([--with-libgcrypt-prefix=PFX], - [prefix where LIBGCRYPT is installed (optional)]), - libgcrypt_config_prefix="$withval", libgcrypt_config_prefix="") - if test x$libgcrypt_config_prefix != x ; then - if test x${LIBGCRYPT_CONFIG+set} != xset ; then - LIBGCRYPT_CONFIG=$libgcrypt_config_prefix/bin/libgcrypt-config - fi - fi - - AC_PATH_PROG(LIBGCRYPT_CONFIG, libgcrypt-config, no) - tmp=ifelse([$1], ,1:1.2.0,$1) - if echo "$tmp" | grep ':' >/dev/null 2>/dev/null ; then - req_libgcrypt_api=`echo "$tmp" | sed 's/\(.*\):\(.*\)/\1/'` - min_libgcrypt_version=`echo "$tmp" | sed 's/\(.*\):\(.*\)/\2/'` - else - req_libgcrypt_api=0 - min_libgcrypt_version="$tmp" - fi - - AC_MSG_CHECKING(for LIBGCRYPT - version >= $min_libgcrypt_version) - ok=no - if test "$LIBGCRYPT_CONFIG" != "no" ; then - req_major=`echo $min_libgcrypt_version | \ - sed 's/\([[0-9]]*\)\.\([[0-9]]*\)\.\([[0-9]]*\)/\1/'` - req_minor=`echo $min_libgcrypt_version | \ - sed 's/\([[0-9]]*\)\.\([[0-9]]*\)\.\([[0-9]]*\)/\2/'` - req_micro=`echo $min_libgcrypt_version | \ - sed 's/\([[0-9]]*\)\.\([[0-9]]*\)\.\([[0-9]]*\)/\3/'` - libgcrypt_config_version=`$LIBGCRYPT_CONFIG --version` - major=`echo $libgcrypt_config_version | \ - sed 's/\([[0-9]]*\)\.\([[0-9]]*\)\.\([[0-9]]*\).*/\1/'` - minor=`echo $libgcrypt_config_version | \ - sed 's/\([[0-9]]*\)\.\([[0-9]]*\)\.\([[0-9]]*\).*/\2/'` - micro=`echo $libgcrypt_config_version | \ - sed 's/\([[0-9]]*\)\.\([[0-9]]*\)\.\([[0-9]]*\).*/\3/'` - if test "$major" -gt "$req_major"; then - ok=yes - else - if test "$major" -eq "$req_major"; then - if test "$minor" -gt "$req_minor"; then - ok=yes - else - if test "$minor" -eq "$req_minor"; then - if test "$micro" -ge "$req_micro"; then - ok=yes - fi - fi - fi - fi - fi - fi - if test $ok = yes; then - AC_MSG_RESULT([yes ($libgcrypt_config_version)]) - else - AC_MSG_RESULT(no) - fi - if test $ok = yes; then - # If we have a recent libgcrypt, we should 
also check that the - # API is compatible - if test "$req_libgcrypt_api" -gt 0 ; then - tmp=`$LIBGCRYPT_CONFIG --api-version 2>/dev/null || echo 0` - if test "$tmp" -gt 0 ; then - AC_MSG_CHECKING([LIBGCRYPT API version]) - if test "$req_libgcrypt_api" -eq "$tmp" ; then - AC_MSG_RESULT([okay]) - else - ok=no - AC_MSG_RESULT([does not match. want=$req_libgcrypt_api got=$tmp]) - fi - fi - fi - fi - if test $ok = yes; then - LIBGCRYPT_CFLAGS=`$LIBGCRYPT_CONFIG --cflags` - LIBGCRYPT_LIBS=`$LIBGCRYPT_CONFIG --libs` - ifelse([$2], , :, [$2]) - else - LIBGCRYPT_CFLAGS="" - LIBGCRYPT_LIBS="" - ifelse([$3], , :, [$3]) - fi - AC_SUBST(LIBGCRYPT_CFLAGS) - AC_SUBST(LIBGCRYPT_LIBS) -]) diff --git a/m4/libtool.m4 b/m4/libtool.m4 new file mode 100644 index 0000000..88de383 --- /dev/null +++ b/m4/libtool.m4 @@ -0,0 +1,7835 @@ +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, +# Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +m4_define([_LT_COPYING], [dnl +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, +# Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. 
+# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +]) + +# serial 57 LT_INIT + + +# LT_PREREQ(VERSION) +# ------------------ +# Complain and exit if this libtool version is less that VERSION. +m4_defun([LT_PREREQ], +[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, + [m4_default([$3], + [m4_fatal([Libtool version $1 or higher is required], + 63)])], + [$2])]) + + +# _LT_CHECK_BUILDDIR +# ------------------ +# Complain if the absolute build directory name contains unusual characters +m4_defun([_LT_CHECK_BUILDDIR], +[case `pwd` in + *\ * | *\ *) + AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +esac +]) + + +# LT_INIT([OPTIONS]) +# ------------------ +AC_DEFUN([LT_INIT], +[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT +AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +AC_BEFORE([$0], [LT_LANG])dnl +AC_BEFORE([$0], [LT_OUTPUT])dnl +AC_BEFORE([$0], [LTDL_INIT])dnl +m4_require([_LT_CHECK_BUILDDIR])dnl + +dnl Autoconf doesn't catch unexpanded LT_ macros by default: +m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +dnl 
unless we require an AC_DEFUNed macro: +AC_REQUIRE([LTOPTIONS_VERSION])dnl +AC_REQUIRE([LTSUGAR_VERSION])dnl +AC_REQUIRE([LTVERSION_VERSION])dnl +AC_REQUIRE([LTOBSOLETE_VERSION])dnl +m4_require([_LT_PROG_LTMAIN])dnl + +_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) + +dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +_LT_SETUP + +# Only expand once: +m4_define([LT_INIT]) +])# LT_INIT + +# Old names: +AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +dnl AC_DEFUN([AM_PROG_LIBTOOL], []) + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +m4_defun([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +]) + + +# _LT_FILEUTILS_DEFAULTS +# ---------------------- +# It is okay to use these file commands and assume they have been set +# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. 
+m4_defun([_LT_FILEUTILS_DEFAULTS], +[: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} +])# _LT_FILEUTILS_DEFAULTS + + +# _LT_SETUP +# --------- +m4_defun([_LT_SETUP], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl + +_LT_DECL([], [host_alias], [0], [The host system])dnl +_LT_DECL([], [host], [0])dnl +_LT_DECL([], [host_os], [0])dnl +dnl +_LT_DECL([], [build_alias], [0], [The build system])dnl +_LT_DECL([], [build], [0])dnl +_LT_DECL([], [build_os], [0])dnl +dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +dnl +AC_REQUIRE([AC_PROG_LN_S])dnl +test -z "$LN_S" && LN_S="ln -s" +_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +dnl +AC_REQUIRE([LT_CMD_MAX_LEN])dnl +_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl +dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl +m4_require([_LT_CMD_RELOAD])dnl +m4_require([_LT_CHECK_MAGIC_METHOD])dnl +m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl +m4_require([_LT_CMD_OLD_ARCHIVE])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_WITH_SYSROOT])dnl + +_LT_CONFIG_LIBTOOL_INIT([ +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +]) +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +_LT_CHECK_OBJDIR + +m4_require([_LT_TAG_COMPILER])dnl + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. 
+ if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + _LT_PATH_MAGIC + fi + ;; +esac + +# Use C for the default configuration in the libtool script +LT_SUPPORTED_TAG([CC]) +_LT_LANG_C_CONFIG +_LT_LANG_DEFAULT_CONFIG +_LT_CONFIG_COMMANDS +])# _LT_SETUP + + +# _LT_PREPARE_SED_QUOTE_VARS +# -------------------------- +# Define a few sed substitution that help us do robust quoting. +m4_defun([_LT_PREPARE_SED_QUOTE_VARS], +[# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([["`\\]]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' +]) + +# _LT_PROG_LTMAIN +# --------------- +# Note that this code is called both from `configure', and `config.status' +# now that we use AC_CONFIG_COMMANDS to generate libtool. 
Notably, +# `config.status' has no value for ac_aux_dir unless we are using Automake, +# so we pass a copy along to make sure it has a sensible value anyway. +m4_defun([_LT_PROG_LTMAIN], +[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +ltmain="$ac_aux_dir/ltmain.sh" +])# _LT_PROG_LTMAIN + + +## ------------------------------------- ## +## Accumulate code for creating libtool. ## +## ------------------------------------- ## + +# So that we can recreate a full libtool script including additional +# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +# in macros and then make a single call at the end using the `libtool' +# label. + + +# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +# ---------------------------------------- +# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL_INIT], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_INIT], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_INIT]) + + +# _LT_CONFIG_LIBTOOL([COMMANDS]) +# ------------------------------ +# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) + + +# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +# ----------------------------------------------------- +m4_defun([_LT_CONFIG_SAVE_COMMANDS], +[_LT_CONFIG_LIBTOOL([$1]) +_LT_CONFIG_LIBTOOL_INIT([$2]) +]) + + +# _LT_FORMAT_COMMENT([COMMENT]) +# ----------------------------- +# Add leading comment marks to the start of each line, and a trailing +# full-stop to the whole comment if one is not present already. 
+m4_define([_LT_FORMAT_COMMENT], +[m4_ifval([$1], [ +m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +)]) + + + +## ------------------------ ## +## FIXME: Eliminate VARNAME ## +## ------------------------ ## + + +# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +# ------------------------------------------------------------------- +# CONFIGNAME is the name given to the value in the libtool script. +# VARNAME is the (base) name used in the configure script. +# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +# VARNAME. Any other value will be used directly. +m4_define([_LT_DECL], +[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) + m4_ifval([$4], + [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) + lt_dict_add_subkey([lt_decl_dict], [$2], + [tagged?], [m4_ifval([$5], [yes], [no])])]) +]) + + +# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +# -------------------------------------------------------- +m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) + + +# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_tag_varnames], +[_lt_decl_filter([tagged?], [yes], $@)]) + + +# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +# --------------------------------------------------------- +m4_define([_lt_decl_filter], +[m4_case([$#], + [0], [m4_fatal([$0: too few arguments: $#])], + [1], [m4_fatal([$0: too few arguments: $#: $1])], + [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], + [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], + [lt_dict_filter([lt_decl_dict], $@)])[]dnl +]) + + +# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +# 
-------------------------------------------------- +m4_define([lt_decl_quote_varnames], +[_lt_decl_filter([value], [1], $@)]) + + +# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_dquote_varnames], +[_lt_decl_filter([value], [2], $@)]) + + +# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_varnames_tagged], +[m4_assert([$# <= 2])dnl +_$0(m4_quote(m4_default([$1], [[, ]])), + m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), + m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) +m4_define([_lt_decl_varnames_tagged], +[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) + + +# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_all_varnames], +[_$0(m4_quote(m4_default([$1], [[, ]])), + m4_if([$2], [], + m4_quote(lt_decl_varnames), + m4_quote(m4_shift($@))))[]dnl +]) +m4_define([_lt_decl_all_varnames], +[lt_join($@, lt_decl_varnames_tagged([$1], + lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +]) + + +# _LT_CONFIG_STATUS_DECLARE([VARNAME]) +# ------------------------------------ +# Quote a variable value, and forward it to `config.status' so that its +# declaration there will have the same value as in `configure'. VARNAME +# must have a single quote delimited value for this to work. +m4_define([_LT_CONFIG_STATUS_DECLARE], +[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) + + +# _LT_CONFIG_STATUS_DECLARATIONS +# ------------------------------ +# We delimit libtool config variables with single quotes, so when +# we write them to config.status, we have to be sure to quote all +# embedded single quotes properly. 
In configure, this macro expands +# each variable declared with _LT_DECL (and _LT_TAGDECL) into: +# +# ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' +m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], +[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), + [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAGS +# ---------------- +# Output comment and list of tags supported by the script +m4_defun([_LT_LIBTOOL_TAGS], +[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl +available_tags="_LT_TAGS"dnl +]) + + +# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) +# ----------------------------------- +# Extract the dictionary values for VARNAME (optionally with TAG) and +# expand to a commented shell variable setting: +# +# # Some comment about what VAR is for. +# visible_name=$lt_internal_name +m4_define([_LT_LIBTOOL_DECLARE], +[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], + [description])))[]dnl +m4_pushdef([_libtool_name], + m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl +m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), + [0], [_libtool_name=[$]$1], + [1], [_libtool_name=$lt_[]$1], + [2], [_libtool_name=$lt_[]$1], + [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl +m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl +]) + + +# _LT_LIBTOOL_CONFIG_VARS +# ----------------------- +# Produce commented declarations of non-tagged libtool config variables +# suitable for insertion in the LIBTOOL CONFIG section of the `libtool' +# script. Tagged libtool config variables (even for the LIBTOOL CONFIG +# section) are produced by _LT_LIBTOOL_TAG_VARS. 
+m4_defun([_LT_LIBTOOL_CONFIG_VARS], +[m4_foreach([_lt_var], + m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAG_VARS(TAG) +# ------------------------- +m4_define([_LT_LIBTOOL_TAG_VARS], +[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) + + +# _LT_TAGVAR(VARNAME, [TAGNAME]) +# ------------------------------ +m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) + + +# _LT_CONFIG_COMMANDS +# ------------------- +# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of +# variables for single and double quote escaping we saved from calls +# to _LT_DECL, we can put quote escaped variables declarations +# into `config.status', and then the shell code to quote escape them in +# for loops in `config.status'. Finally, any additional code accumulated +# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. +m4_defun([_LT_CONFIG_COMMANDS], +[AC_PROVIDE_IFELSE([LT_OUTPUT], + dnl If the libtool generation code has been placed in $CONFIG_LT, + dnl instead of duplicating it all over again into config.status, + dnl then we will have config.status run $CONFIG_LT later, so it + dnl needs to know what name is stored there: + [AC_CONFIG_COMMANDS([libtool], + [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], + dnl If the libtool generation code is destined for config.status, + dnl expand the accumulated commands and init code now: + [AC_CONFIG_COMMANDS([libtool], + [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) +])#_LT_CONFIG_COMMANDS + + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], +[ + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +_LT_CONFIG_STATUS_DECLARATIONS +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$[]1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_quote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_dquote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +_LT_OUTPUT_LIBTOOL_INIT +]) + +# _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) +# ------------------------------------ +# Generate a child script FILE with all initialization necessary to +# reuse the environment learned by the parent script, and make the +# file executable. If COMMENT is supplied, it is inserted after the +# `#!' sequence but before initialization text begins. After this +# macro, additional text can be appended to FILE to form the body of +# the child script. The macro ends with non-zero status if the +# file could not be fully written (such as if the disk is full). 
+m4_ifdef([AS_INIT_GENERATED], +[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], +[m4_defun([_LT_GENERATED_FILE_INIT], +[m4_require([AS_PREPARE])]dnl +[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl +[lt_write_fail=0 +cat >$1 <<_ASEOF || lt_write_fail=1 +#! $SHELL +# Generated by $as_me. +$2 +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$1 <<\_ASEOF || lt_write_fail=1 +AS_SHELL_SANITIZE +_AS_PREPARE +exec AS_MESSAGE_FD>&1 +_ASEOF +test $lt_write_fail = 0 && chmod +x $1[]dnl +m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT + +# LT_OUTPUT +# --------- +# This macro allows early generation of the libtool script (before +# AC_OUTPUT is called), incase it is used in configure for compilation +# tests. +AC_DEFUN([LT_OUTPUT], +[: ${CONFIG_LT=./config.lt} +AC_MSG_NOTICE([creating $CONFIG_LT]) +_LT_GENERATED_FILE_INIT(["$CONFIG_LT"], +[# Run this file to recreate a libtool stub with the current configuration.]) + +cat >>"$CONFIG_LT" <<\_LTEOF +lt_cl_silent=false +exec AS_MESSAGE_LOG_FD>>config.log +{ + echo + AS_BOX([Running $as_me.]) +} >&AS_MESSAGE_LOG_FD + +lt_cl_help="\ +\`$as_me' creates a local libtool stub from the current configuration, +for use in further configure time tests before the real libtool is +generated. + +Usage: $[0] [[OPTIONS]] + + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + +Report bugs to ." + +lt_cl_version="\ +m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl +m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) +configured by $[0], generated by m4_PACKAGE_STRING. + +Copyright (C) 2010 Free Software Foundation, Inc. +This config.lt script is free software; the Free Software Foundation +gives unlimited permision to copy, distribute and modify it." 
+ +while test $[#] != 0 +do + case $[1] in + --version | --v* | -V ) + echo "$lt_cl_version"; exit 0 ;; + --help | --h* | -h ) + echo "$lt_cl_help"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --quiet | --q* | --silent | --s* | -q ) + lt_cl_silent=: ;; + + -*) AC_MSG_ERROR([unrecognized option: $[1] +Try \`$[0] --help' for more information.]) ;; + + *) AC_MSG_ERROR([unrecognized argument: $[1] +Try \`$[0] --help' for more information.]) ;; + esac + shift +done + +if $lt_cl_silent; then + exec AS_MESSAGE_FD>/dev/null +fi +_LTEOF + +cat >>"$CONFIG_LT" <<_LTEOF +_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AC_MSG_NOTICE([creating $ofile]) +_LT_OUTPUT_LIBTOOL_COMMANDS +AS_EXIT(0) +_LTEOF +chmod +x "$CONFIG_LT" + +# configure is writing to config.log, but config.lt does its own redirection, +# appending to config.log, which fails on DOS, as config.log is still kept +# open by configure. Here we exec the FD to /dev/null, effectively closing +# config.log, so it can be properly (re)opened and appended to by config.lt. +lt_cl_success=: +test "$silent" = yes && + lt_config_lt_args="$lt_config_lt_args --quiet" +exec AS_MESSAGE_LOG_FD>/dev/null +$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false +exec AS_MESSAGE_LOG_FD>>config.log +$lt_cl_success || AS_EXIT(1) +])# LT_OUTPUT + + +# _LT_CONFIG(TAG) +# --------------- +# If TAG is the built-in tag, create an initial libtool script with a +# default configuration from the untagged config vars. Otherwise add code +# to config.status for appending the configuration named by TAG from the +# matching tagged config vars. +m4_defun([_LT_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_CONFIG_SAVE_COMMANDS([ + m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl + m4_if(_LT_TAG, [C], [ + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. 
+ if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +_LT_COPYING +_LT_LIBTOOL_TAGS + +# ### BEGIN LIBTOOL CONFIG +_LT_LIBTOOL_CONFIG_VARS +_LT_LIBTOOL_TAG_VARS +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + _LT_PROG_LTMAIN + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + _LT_PROG_REPLACE_SHELLFNS + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +], +[cat <<_LT_EOF >> "$ofile" + +dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +dnl in a comment (ie after a #). 
+# ### BEGIN LIBTOOL TAG CONFIG: $1 +_LT_LIBTOOL_TAG_VARS(_LT_TAG) +# ### END LIBTOOL TAG CONFIG: $1 +_LT_EOF +])dnl /m4_if +], +[m4_if([$1], [], [ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile'], []) +])dnl /_LT_CONFIG_SAVE_COMMANDS +])# _LT_CONFIG + + +# LT_SUPPORTED_TAG(TAG) +# --------------------- +# Trace this macro to discover what tags are supported by the libtool +# --tag option, using: +# autoconf --trace 'LT_SUPPORTED_TAG:$1' +AC_DEFUN([LT_SUPPORTED_TAG], []) + + +# C support is built-in for now +m4_define([_LT_LANG_C_enabled], []) +m4_define([_LT_TAGS], []) + + +# LT_LANG(LANG) +# ------------- +# Enable libtool support for the given language if not already enabled. +AC_DEFUN([LT_LANG], +[AC_BEFORE([$0], [LT_OUTPUT])dnl +m4_case([$1], + [C], [_LT_LANG(C)], + [C++], [_LT_LANG(CXX)], + [Java], [_LT_LANG(GCJ)], + [Fortran 77], [_LT_LANG(F77)], + [Fortran], [_LT_LANG(FC)], + [Windows Resource], [_LT_LANG(RC)], + [m4_ifdef([_LT_LANG_]$1[_CONFIG], + [_LT_LANG($1)], + [m4_fatal([$0: unsupported language: "$1"])])])dnl +])# LT_LANG + + +# _LT_LANG(LANGNAME) +# ------------------ +m4_defun([_LT_LANG], +[m4_ifdef([_LT_LANG_]$1[_enabled], [], + [LT_SUPPORTED_TAG([$1])dnl + m4_append([_LT_TAGS], [$1 ])dnl + m4_define([_LT_LANG_]$1[_enabled], [])dnl + _LT_LANG_$1_CONFIG($1)])dnl +])# _LT_LANG + + +# _LT_LANG_DEFAULT_CONFIG +# ----------------------- +m4_defun([_LT_LANG_DEFAULT_CONFIG], +[AC_PROVIDE_IFELSE([AC_PROG_CXX], + [LT_LANG(CXX)], + [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) + +AC_PROVIDE_IFELSE([AC_PROG_F77], + [LT_LANG(F77)], + [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) + +AC_PROVIDE_IFELSE([AC_PROG_FC], + [LT_LANG(FC)], + [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) + +dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +dnl pulling things in needlessly. 
+AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([LT_PROG_GCJ], + [LT_LANG(GCJ)], + [m4_ifdef([AC_PROG_GCJ], + [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([A][M_PROG_GCJ], + [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([LT_PROG_GCJ], + [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) + +AC_PROVIDE_IFELSE([LT_PROG_RC], + [LT_LANG(RC)], + [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +])# _LT_LANG_DEFAULT_CONFIG + +# Obsolete macros: +AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +dnl AC_DEFUN([AC_LIBTOOL_F77], []) +dnl AC_DEFUN([AC_LIBTOOL_FC], []) +dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) +dnl AC_DEFUN([AC_LIBTOOL_RC], []) + + +# _LT_TAG_COMPILER +# ---------------- +m4_defun([_LT_TAG_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_TAG_COMPILER + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. 
+m4_defun([_LT_COMPILER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. +m4_defun([_LT_LINKER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* +])# _LT_LINKER_BOILERPLATE + +# _LT_REQUIRED_DARWIN_CHECKS +# ------------------------- +m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + AC_CHECK_TOOL([LIPO], [lipo], [:]) + AC_CHECK_TOOL([OTOOL], [otool], [:]) + AC_CHECK_TOOL([OTOOL64], [otool64], [:]) + _LT_DECL([], [DSYMUTIL], [1], + [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) + _LT_DECL([], [NMEDIT], [1], + [Tool to change global to local symbols on Mac OS X]) + _LT_DECL([], [LIPO], [1], + [Tool to manipulate fat objects and archives on Mac OS X]) + _LT_DECL([], [OTOOL], [1], + [ldd/readelf like tool for Mach-O binaries on Mac OS X]) + _LT_DECL([], [OTOOL64], [1], + [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. 
+ rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi]) + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS="$save_LDFLAGS" + ]) + AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], + [lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD + echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD + $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD + echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD + $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -f conftest && test ! 
-s conftest.err && test $_lt_result = 0 && $GREP forced_load conftest 2>&1 >/dev/null; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + ]) + case $host_os in + rhapsody* | darwin1.[[012]]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[[012]]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac +]) + + +# _LT_DARWIN_LINKER_FEATURES +# -------------------------- +# Checks for linker and compiler features on darwin +m4_defun([_LT_DARWIN_LINKER_FEATURES], +[ + m4_require([_LT_REQUIRED_DARWIN_CHECKS]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_automatic, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in 
$convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='' + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + m4_if([$1], [CXX], +[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then + _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name 
\$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi +],[]) + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +]) + +# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) +# ---------------------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +# Store the results from the different compilers for each TAGNAME. +# Allow to override them for all tags through lt_cv_aix_libpath. +m4_defun([_LT_SYS_MODULE_PATH_AIX], +[m4_require([_LT_DECL_SED])dnl +if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], + [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ + lt_aix_libpath_sed='[ + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }]' + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi],[]) + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" + fi + ]) + aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) +fi +])# _LT_SYS_MODULE_PATH_AIX + + +# _LT_SHELL_INIT(ARG) +# ------------------- +m4_define([_LT_SHELL_INIT], +[m4_divert_text([M4SH-INIT], [$1 +])])# _LT_SHELL_INIT + + + +# _LT_PROG_ECHO_BACKSLASH +# ----------------------- +# Find how we can fake an echo command that does not interpret backslash. 
+# In particular, with Autoconf 2.60 or later we add some code to the start +# of the generated configure script which will find a shell with a builtin +# printf (which we can use as an echo command). +m4_defun([_LT_PROG_ECHO_BACKSLASH], +[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +AC_MSG_CHECKING([how to print strings]) +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$[]1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + +case "$ECHO" in + printf*) AC_MSG_RESULT([printf]) ;; + print*) AC_MSG_RESULT([print -r]) ;; + *) AC_MSG_RESULT([cat]) ;; +esac + +m4_ifdef([_AS_DETECT_SUGGESTED], +[_AS_DETECT_SUGGESTED([ + test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test "X`printf %s $ECHO`" = "X$ECHO" \ + || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) + +_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) +])# _LT_PROG_ECHO_BACKSLASH + + +# _LT_WITH_SYSROOT +# ---------------- +AC_DEFUN([_LT_WITH_SYSROOT], +[AC_MSG_CHECKING([for sysroot]) +AC_ARG_WITH([sysroot], +[ --with-sysroot[=DIR] Search for dependent libraries within DIR + (or the compiler's sysroot if 
not specified).], +[], [with_sysroot=no]) + +dnl lt_sysroot will always be passed unquoted. We quote it here +dnl in case the user passed a directory name. +lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + AC_MSG_RESULT([${with_sysroot}]) + AC_MSG_ERROR([The sysroot must be an absolute path.]) + ;; +esac + + AC_MSG_RESULT([${lt_sysroot:-no}]) +_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl +[dependent libraries, and in which our libraries should be installed.])]) + +# _LT_ENABLE_LOCK +# --------------- +m4_defun([_LT_ENABLE_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AS_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. 
+ echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. 
+ SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +sparc*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) LD="${LD-ld} -m elf64_sparc" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" +])# _LT_ENABLE_LOCK + + +# _LT_PROG_AR +# ----------- +m4_defun([_LT_PROG_AR], +[AC_CHECK_TOOLS(AR, [ar], false) +: ${AR=ar} +: ${AR_FLAGS=cru} +_LT_DECL([], [AR], [1], [The archiver]) +_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) + +AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], + [lt_cv_ar_at_file=no + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. 
+ rm -f conftest.$ac_objext libconftest.a + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + ]) + ]) + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi +_LT_DECL([], [archiver_list_spec], [1], + [How to feed a file listing to the archiver]) +])# _LT_PROG_AR + + +# _LT_CMD_OLD_ARCHIVE +# ------------------- +m4_defun([_LT_CMD_OLD_ARCHIVE], +[_LT_PROG_AR + +AC_CHECK_TOOL(STRIP, strip, :) +test -z "$STRIP" && STRIP=: +_LT_DECL([], [STRIP], [1], [A symbol stripping program]) + +AC_CHECK_TOOL(RANLIB, ranlib, :) +test -z "$RANLIB" && RANLIB=: +_LT_DECL([], [RANLIB], [1], + [Commands used to install an old-style archive]) + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac +_LT_DECL([], [old_postinstall_cmds], [2]) +_LT_DECL([], [old_postuninstall_cmds], [2]) +_LT_TAGDECL([], [old_archive_cmds], [2], + [Commands used to build an old-style archive]) +_LT_DECL([], [lock_old_archive_extraction], [0], + [Whether to use a lock for old archive extraction]) +])# _LT_CMD_OLD_ARCHIVE + + +# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([_LT_COMPILER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], 
[$2], + [$2=no + m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $RM conftest* +]) + +if test x"[$]$2" = xyes; then + m4_if([$5], , :, [$5]) +else + m4_if([$6], , :, [$6]) +fi +])# _LT_COMPILER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) + + +# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------- +# Check whether the given linker option works +AC_DEFUN([_LT_LINKER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + m4_if([$4], , :, [$4]) +else + m4_if([$5], , :, [$5]) +fi +])# _LT_LINKER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) + + +# LT_CMD_MAX_LEN +#--------------- +AC_DEFUN([LT_CMD_MAX_LEN], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). 
+ lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. 
+ # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`func_fallback_echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac +]) +if test -n $lt_cv_sys_max_cmd_len ; then + AC_MSG_RESULT($lt_cv_sys_max_cmd_len) +else + AC_MSG_RESULT(none) +fi +max_cmd_len=$lt_cv_sys_max_cmd_len +_LT_DECL([], [max_cmd_len], [0], + [What is the maximum length of a command?]) +])# LT_CMD_MAX_LEN + +# Old name: +AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) + + +# _LT_HEADER_DLFCN +# ---------------- +m4_defun([_LT_HEADER_DLFCN], +[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl +])# _LT_HEADER_DLFCN + + +# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, +# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) +# ---------------------------------------------------------------- +m4_defun([_LT_TRY_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "$cross_compiling" = yes; then : + [$4] +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +[#line $LINENO "configure" +#include 
"confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +}] +_LT_EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_TRY_DLOPEN_SELF + + +# LT_SYS_DLOPEN_SELF +# ------------------ +AC_DEFUN([LT_SYS_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_TRY_DLOPEN_SELF( + 
lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +_LT_DECL([dlopen_support], [enable_dlopen], [0], + [Whether dlopen is supported]) +_LT_DECL([dlopen_self], [enable_dlopen_self], [0], + [Whether dlopen of programs is supported]) +_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], + [Whether dlopen of statically linked programs is supported]) +])# LT_SYS_DLOPEN_SELF + +# Old name: +AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) + + +# _LT_COMPILER_C_O([TAGNAME]) +# --------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler. +# This macro does not hard code the compiler like AC_PROG_CC_C_O. 
+m4_defun([_LT_COMPILER_C_O], +[m4_require([_LT_DECL_SED])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
+ $RM -r conftest + $RM conftest* +]) +_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], + [Does compiler simultaneously support -c and -o options?]) +])# _LT_COMPILER_C_O + + +# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +# ---------------------------------- +# Check to see if we can do hard links to lock some files if needed +m4_defun([_LT_COMPILER_FILE_LOCKS], +[m4_require([_LT_ENABLE_LOCK])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_COMPILER_C_O([$1]) + +hard_links="nottested" +if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +])# _LT_COMPILER_FILE_LOCKS + + +# _LT_CHECK_OBJDIR +# ---------------- +m4_defun([_LT_CHECK_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +_LT_DECL([], [objdir], [0], + [The name of the directory that contains temporary libtool files])dnl +m4_pattern_allow([LT_OBJDIR])dnl +AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", + [Define to the sub-directory in which libtool stores uninstalled libraries.]) +])# _LT_CHECK_OBJDIR + + +# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +# -------------------------------------- +# Check hardcoding attributes. 
+m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || + test -n "$_LT_TAGVAR(runpath_var, $1)" || + test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ _LT_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || + test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +_LT_TAGDECL([], [hardcode_action], [0], + [How to hardcode a shared library path into an executable]) +])# _LT_LINKER_HARDCODE_LIBPATH + + +# _LT_CMD_STRIPLIB +# ---------------- +m4_defun([_LT_CMD_STRIPLIB], +[m4_require([_LT_DECL_EGREP]) +striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) +_LT_DECL([], [striplib], [1]) +])# _LT_CMD_STRIPLIB + + +# _LT_SYS_DYNAMIC_LINKER([TAG]) +# ----------------------------- +# PORTME Fill in your ld.so characteristics +m4_defun([_LT_SYS_DYNAMIC_LINKER], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_OBJDUMP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +AC_MSG_CHECKING([dynamic linker characteristics]) +m4_if([$1], + [], [ +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in + mingw* 
| cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;; + *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[[lt_foo]]++; } + if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. 
+ case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([[A-Za-z]]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[[4-9]]*) + version_type=linux + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... 
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd1*) + dynamic_linker=no + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[123]]*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other 
versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[[3-9]]*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be Linux ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], + [lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ + LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], + [lt_cv_shlibpath_overrides_runpath=yes])]) + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + ]) + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Add ABI-specific directories to the system library path. + sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" + + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. 
Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if 
test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + +_LT_DECL([], [variables_saved_for_relink], [1], + [Variables whose values should be saved in libtool wrapper scripts and + restored at link time]) +_LT_DECL([], [need_lib_prefix], [0], + [Do we need the "lib" prefix for modules?]) +_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +_LT_DECL([], [version_type], [0], [Library versioning type]) +_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +_LT_DECL([], [shlibpath_overrides_runpath], [0], + [Is shlibpath searched before the hard-coded library search path?]) +_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +_LT_DECL([], [library_names_spec], [1], + [[List of archive names. First name is the real one, the rest are links. 
+ The last name is the one that the linker finds with -lNAME]]) +_LT_DECL([], [soname_spec], [1], + [[The coded name of the library, if different from the real name]]) +_LT_DECL([], [install_override_mode], [1], + [Permission mode override for installation of shared libraries]) +_LT_DECL([], [postinstall_cmds], [2], + [Command to use after installation of a shared archive]) +_LT_DECL([], [postuninstall_cmds], [2], + [Command to use after uninstallation of a shared archive]) +_LT_DECL([], [finish_cmds], [2], + [Commands used to finish a libtool library installation in a directory]) +_LT_DECL([], [finish_eval], [1], + [[As "finish_cmds", except a single script fragment to be evaled but + not shown]]) +_LT_DECL([], [hardcode_into_libs], [0], + [Whether we should hardcode library paths into libraries]) +_LT_DECL([], [sys_lib_search_path_spec], [2], + [Compile-time system search path for libraries]) +_LT_DECL([], [sys_lib_dlsearch_path_spec], [2], + [Run-time system search path for libraries]) +])# _LT_SYS_DYNAMIC_LINKER + + +# _LT_PATH_TOOL_PREFIX(TOOL) +# -------------------------- +# find a file program which can recognize shared library +AC_DEFUN([_LT_PATH_TOOL_PREFIX], +[m4_require([_LT_DECL_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="m4_if([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
+ if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac]) +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +_LT_DECL([], [MAGIC_CMD], [0], + [Used to examine libraries when file_magic_cmd begins with "file"])dnl +])# _LT_PATH_TOOL_PREFIX + +# Old name: +AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) + + +# _LT_PATH_MAGIC +# -------------- +# find a file program which can recognize a shared library +m4_defun([_LT_PATH_MAGIC], +[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# _LT_PATH_MAGIC + + +# LT_PATH_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([LT_PATH_LD], +[AC_REQUIRE([AC_PROG_CC])dnl 
+AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PROG_ECHO_BACKSLASH])dnl + +AC_ARG_WITH([gnu-ld], + [AS_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no])dnl + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. 
+ case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? 
shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[[3-9]]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be Linux ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + 
lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + +_LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) +_LT_DECL([], [file_magic_cmd], [1], + [Command to use when deplibs_check_method = "file_magic"]) +_LT_DECL([], [file_magic_glob], [1], + [How to find potential files when deplibs_check_method = "file_magic"]) +_LT_DECL([], [want_nocaseglob], [1], + [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) +])# _LT_CHECK_MAGIC_METHOD + + +# LT_PATH_NM +# ---------- +# find the pathname to a BSD- or MS-compatible name lister +AC_DEFUN([LT_PATH_NM], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the 
test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi]) +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. 
+ else + AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + AC_SUBST([DUMPBIN]) + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm +AC_SUBST([NM]) +_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl + +AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], + [lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) + cat conftest.out >&AS_MESSAGE_LOG_FD + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest*]) +])# LT_PATH_NM + +# Old names: +AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_NM], []) +dnl AC_DEFUN([AC_PROG_NM], []) + +# _LT_CHECK_SHAREDLIB_FROM_LINKLIB +# -------------------------------- +# how to determine the name of the shared library +# associated with a specific link library. 
+# -- PORTME fill in with the dynamic library characteristics +m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], +[m4_require([_LT_DECL_EGREP]) +m4_require([_LT_DECL_OBJDUMP]) +m4_require([_LT_DECL_DLLTOOL]) +AC_CACHE_CHECK([how to associate runtime and link libraries], +lt_cv_sharedlib_from_linklib_cmd, +[lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh + # decide which to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd="$ECHO" + ;; +esac +]) +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + +_LT_DECL([], [sharedlib_from_linklib_cmd], [1], + [Command to associate shared and link libraries]) +])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB + + +# _LT_PATH_MANIFEST_TOOL +# ---------------------- +# locate the manifest tool +m4_defun([_LT_PATH_MANIFEST_TOOL], +[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], + [lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD + $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out + cat conftest.err >&AS_MESSAGE_LOG_FD + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest*]) +if test "x$lt_cv_path_mainfest_tool" != xyes; then + MANIFEST_TOOL=: +fi +_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl +])# _LT_PATH_MANIFEST_TOOL + + +# LT_LIB_M +# -------- +# check for math library +AC_DEFUN([LT_LIB_M], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM="-lm") + ;; +esac +AC_SUBST([LIBM]) +])# LT_LIB_M + +# Old name: +AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_CHECK_LIBM], []) + + +# _LT_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------- +m4_defun([_LT_COMPILER_NO_RTTI], +[m4_require([_LT_TAG_COMPILER])dnl + +_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; + *) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; + esac + + _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], + [Compiler flag to turn off builtin functions]) +])# _LT_COMPILER_NO_RTTI + + +# _LT_CMD_GLOBAL_SYMBOLS +# ---------------------- +m4_defun([_LT_CMD_GLOBAL_SYMBOLS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_PROG_AWK])dnl 
+AC_REQUIRE([LT_PATH_NM])dnl +AC_REQUIRE([LT_PATH_LD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_TAG_COMPILER])dnl + +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. 
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK ['"\ +" {last_section=section; section=\$ 3};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx]" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if AC_TRY_EVAL(ac_compile); then + # Now try to grab the symbols. + nlist=conftest.nm + if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT@&t@_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT@&t@_DLSYM_CONST +#else +# define LT@&t@_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. 
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT@&t@_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[[]] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi + +# Response file support. 
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + +_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) +_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], + [Transform the output of nm in a proper C declaration]) +_LT_DECL([global_symbol_to_c_name_address], + [lt_cv_sys_global_symbol_to_c_name_address], [1], + [Transform the output of nm in a C name address pair]) +_LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) +_LT_DECL([], [nm_file_list_spec], [1], + [Specify filename containing input files for $NM]) +]) # _LT_CMD_GLOBAL_SYMBOLS + + +# _LT_COMPILER_PIC([TAGNAME]) +# --------------------------- +m4_defun([_LT_COMPILER_PIC], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_TAGVAR(lt_prog_compiler_static, $1)= + +m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix[[4-9]]*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. 
+ ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. 
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Xcompiler -fPIC' + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + hpux9* | hpux10* | hpux11*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. 
+ ecc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; + nagfor*) + # NAG Fortran compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. 
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ F* | *Sun*Fortran*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='' + ;; + *Sun\ C*) + # Sun C 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + esac + ;; + esac + ;; + + newsos6) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. 
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + rdos*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; +esac + +AC_CACHE_CHECK([for $compiler option to produce PIC], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +_LT_TAGVAR(lt_prog_compiler_pic, 
$1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], + [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], + [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + +_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], + [How to pass a linker flag through the compiler]) +# +# Check to make sure the static flag actually works. +# +wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + [Compiler flag to prevent dynamic linking]) +])# _LT_COMPILER_PIC + + +# _LT_LINKER_SHLIBS([TAGNAME]) +# ---------------------------- +# See if the linker supports building shared libraries. 
+m4_defun([_LT_LINKER_SHLIBS], +[AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global defined + # symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + ;; + esac + ;; + 
*) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(archive_cmds, $1)= + _LT_TAGVAR(archive_expsym_cmds, $1)= + _LT_TAGVAR(compiler_needs_object, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(hardcode_automatic, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= + _LT_TAGVAR(hardcode_libdir_separator, $1)= + _LT_TAGVAR(hardcode_minus_L, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(inherit_rpath, $1)=no + _LT_TAGVAR(link_all_deplibs, $1)=unknown + _LT_TAGVAR(module_cmds, $1)= + _LT_TAGVAR(module_expsym_cmds, $1)= + _LT_TAGVAR(old_archive_from_new_cmds, $1)= + _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_TAGVAR(thread_safe_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. 
Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + _LT_TAGVAR(ld_shlibs, $1)=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; + *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. 
+ if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[[3-9]]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. 
+ +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. 
+ # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' 
-i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + _LT_TAGVAR(whole_archive_flag_spec, $1)= + tmp_sharedflag='--shared' ;; + xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' + _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. 
In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. 
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + bsdi[[45]]*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. + _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + esac + ;; + + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + freebsd1*) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + m4_if($1, [], [ + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + _LT_LINKER_OPTION([if $CC understands -b], + _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], + [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + 
# but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], + [lt_cv_irix_exported_symbol], + [save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + AC_LINK_IFELSE( + [AC_LANG_SOURCE( + [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], + [C++], [[int foo (void) { return 0; }]], + [Fortran 77], [[ + subroutine foo + end]], + [Fortran], [[ + subroutine foo + end]])])], + [lt_cv_irix_exported_symbol=yes], + [lt_cv_irix_exported_symbol=no]) + LDFLAGS="$save_LDFLAGS"]) + if test "$lt_cv_irix_exported_symbol" = yes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry 
${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC 
-shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + os2*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC 
-shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + solaris*) + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + 
_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + fi + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs 
$deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + 
_LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' + ;; + esac + fi + fi +]) +AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld + +_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +_LT_DECL([], [extract_expsyms_cmds], [2], + [The commands to extract the exported symbol list from a shared archive]) + +# +# Do we need to explicitly link libc? +# +case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_CACHE_CHECK([whether -lc should be explicitly linked in], + [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), + [$RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. 
+ libname=conftest + lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) + _LT_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) + then + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no + else + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + ]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) + ;; + esac + fi + ;; +esac + +_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], + [Whether or not to add -lc for building shared libraries]) +_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], + [enable_shared_with_static_runtimes], [0], + [Whether or not to disallow shared libs when runtime libs are static]) +_LT_TAGDECL([], [export_dynamic_flag_spec], [1], + [Compiler flag to allow reflexive dlopens]) +_LT_TAGDECL([], [whole_archive_flag_spec], [1], + [Compiler flag to generate shared objects directly from archives]) +_LT_TAGDECL([], [compiler_needs_object], [1], + [Whether the compiler copes with passing no objects directly]) +_LT_TAGDECL([], [old_archive_from_new_cmds], [2], + [Create an old-style archive from a shared archive]) +_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], + [Create a temporary old-style archive to link instead of a shared archive]) +_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +_LT_TAGDECL([], [archive_expsym_cmds], [2]) +_LT_TAGDECL([], [module_cmds], [2], + [Commands used to build a loadable module if different from building + a shared archive.]) +_LT_TAGDECL([], [module_expsym_cmds], [2]) +_LT_TAGDECL([], [with_gnu_ld], [1], + [Whether we are building with GNU ld or not]) +_LT_TAGDECL([], [allow_undefined_flag], [1], + [Flag that allows shared libraries with undefined symbols to be built]) +_LT_TAGDECL([], [no_undefined_flag], [1], + 
[Flag that enforces no undefined symbols]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], + [Flag to hardcode $libdir into a binary during linking. + This must work even if $libdir does not exist]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1], + [[If ld is used when linking, flag to hardcode $libdir into a binary + during linking. This must work even if $libdir does not exist]]) +_LT_TAGDECL([], [hardcode_libdir_separator], [1], + [Whether we need a single "-rpath" flag with a separated argument]) +_LT_TAGDECL([], [hardcode_direct], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary]) +_LT_TAGDECL([], [hardcode_direct_absolute], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary and the resulting library dependency is + "absolute", i.e impossible to change by setting ${shlibpath_var} if the + library is relocated]) +_LT_TAGDECL([], [hardcode_minus_L], [0], + [Set to "yes" if using the -LDIR flag during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_shlibpath_var], [0], + [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_automatic], [0], + [Set to "yes" if building a shared library automatically hardcodes DIR + into the library and all subsequent libraries and executables linked + against it]) +_LT_TAGDECL([], [inherit_rpath], [0], + [Set to yes if linker adds runtime paths of dependent libraries + to runtime path list]) +_LT_TAGDECL([], [link_all_deplibs], [0], + [Whether libtool must link a program against all its dependency libraries]) +_LT_TAGDECL([], [always_export_symbols], [0], + [Set to "yes" if exported symbols are required]) +_LT_TAGDECL([], [export_symbols_cmds], [2], + [The commands to list exported symbols]) +_LT_TAGDECL([], [exclude_expsyms], [1], + [Symbols that should not be listed in the preloaded symbols]) 
+_LT_TAGDECL([], [include_expsyms], [1], + [Symbols that must always be exported]) +_LT_TAGDECL([], [prelink_cmds], [2], + [Commands necessary for linking programs (against libraries) with templates]) +_LT_TAGDECL([], [postlink_cmds], [2], + [Commands necessary for finishing linking programs]) +_LT_TAGDECL([], [file_list_spec], [1], + [Specify filename containing input files]) +dnl FIXME: Not yet implemented +dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], +dnl [Compiler flag to generate thread safe objects]) +])# _LT_LINKER_SHLIBS + + +# _LT_LANG_C_CONFIG([TAG]) +# ------------------------ +# Ensure that the configuration variables for a C compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_C_CONFIG], +[m4_require([_LT_DECL_EGREP])dnl +lt_save_CC="$CC" +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + +_LT_TAG_COMPILER +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + LT_SYS_DLOPEN_SELF + _LT_CMD_STRIPLIB + + # Report which library types will actually be built + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_CONFIG($1) +fi +AC_LANG_POP +CC="$lt_save_CC" +])# _LT_LANG_C_CONFIG + + +# _LT_LANG_CXX_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a C++ compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_CXX_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +else + _lt_caught_CXX_error=yes +fi + +AC_LANG_PUSH(C++) +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(compiler_needs_object, $1)=no +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. 
+if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + CFLAGS=$CXXFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + else + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + fi + + if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + LT_PATH_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. 
+ if test "$with_gnu_ld" = yes; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+
+ # If archive_cmds runs LD, not CC, wlarc should be empty
+ # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+ # investigate it a little bit more. (MM)
+ wlarc='${wl}'
+
+ # ancient GNU ld didn't support --whole-archive et al.
+ if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+ $GREP 'no-whole-archive' > /dev/null; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=
+ fi
+ else
+ with_gnu_ld=no
+ wlarc=
+
+ # A generic and very simple default shared library creation
+ # command for GNU C++ for the case where it uses the native
+ # linker, instead of GNU ld. If possible, this setting should
+ # be overridden to take advantage of the native linker features on
+ # the platform it is being used on.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+ fi
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + _LT_TAGVAR(ld_shlibs, $1)=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. 
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared + # libraries. 
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + freebsd[[12]]*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + freebsd-elf*) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + gnu*) + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, 
$1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + hpux9*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. 
+ ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. 
+ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) + _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects 
$compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; 
func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + case $host in + osf3*) + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + ;; + *) + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. 
This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, 
which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ + '"$_LT_TAGVAR(old_archive_cmds, $1)" + _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ + '"$_LT_TAGVAR(reload_cmds, $1)" + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) + test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + + _LT_TAGVAR(GCC, $1)="$GXX" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## 
There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +AC_LANG_POP +])# _LT_LANG_CXX_CONFIG + + +# _LT_FUNC_STRIPNAME_CNF +# ---------------------- +# func_stripname_cnf prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# +# This function is identical to the (non-XSI) version of func_stripname, +# except this one can be used by m4 code that may be executed by configure, +# rather than the libtool script. +m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl +AC_REQUIRE([_LT_DECL_SED]) +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) +func_stripname_cnf () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname_cnf +])# _LT_FUNC_STRIPNAME_CNF + +# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) +# --------------------------------- +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. 
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl +# Dependencies to place before and after the object being linked: +_LT_TAGVAR(predep_objects, $1)= +_LT_TAGVAR(postdep_objects, $1)= +_LT_TAGVAR(predeps, $1)= +_LT_TAGVAR(postdeps, $1)= +_LT_TAGVAR(compiler_lib_search_path, $1)= + +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... +m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF +int a; +void foo (void) { a = 0; } +_LT_EOF +], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF +], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer*4 a + a=0 + return + end +_LT_EOF +], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer a + a=0 + return + end +_LT_EOF +], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF +public class foo { + private int a; + public void bar (void) { + a = 0; + } +}; +_LT_EOF +]) + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +esac + +dnl Parse the compiler output and extract the necessary +dnl objects, libraries and library flags. +if AC_TRY_EVAL(ac_compile); then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case ${prev}${p} in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. 
+ if test $p = "-L" || + test $p = "-R"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. + if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test "$pre_test_object_deps_done" = no; then + case ${prev} in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then + _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" + else + _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$_LT_TAGVAR(postdeps, $1)"; then + _LT_TAGVAR(postdeps, $1)="${prev}${p}" + else + _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. 
+ if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$_LT_TAGVAR(predep_objects, $1)"; then + _LT_TAGVAR(predep_objects, $1)="$p" + else + _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" + fi + else + if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then + _LT_TAGVAR(postdep_objects, $1)="$p" + else + _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling $1 test program" +fi + +$RM -f conftest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +m4_if([$1], [CXX], +[case $host_os in +interix[[3-9]]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + _LT_TAGVAR(predep_objects,$1)= + _LT_TAGVAR(postdep_objects,$1)= + _LT_TAGVAR(postdeps,$1)= + ;; + +linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; + +solaris*) + case $cc_basename in + CC* | sunCC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. 
+ case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac +]) + +case " $_LT_TAGVAR(postdeps, $1) " in +*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; +esac + _LT_TAGVAR(compiler_lib_search_dirs, $1)= +if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then + _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi +_LT_TAGDECL([], [compiler_lib_search_dirs], [1], + [The directories searched by this compiler when creating a shared library]) +_LT_TAGDECL([], [predep_objects], [1], + [Dependencies to place before and after the objects being linked to + create a shared library]) +_LT_TAGDECL([], [postdep_objects], [1]) +_LT_TAGDECL([], [predeps], [1]) +_LT_TAGDECL([], [postdeps], [1]) +_LT_TAGDECL([], [compiler_lib_search_path], [1], + [The library search path used internally by the compiler when linking + a shared library]) +])# _LT_SYS_HIDDEN_LIBDEPS + + +# _LT_LANG_F77_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a Fortran 77 compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_F77_CONFIG], +[AC_LANG_PUSH(Fortran 77) +if test -z "$F77" || test "X$F77" = "Xno"; then + _lt_disable_F77=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the F77 compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_F77" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. 
+ lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${F77-"f77"} + CFLAGS=$FFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + GCC=$G77 + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$G77" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" + CFLAGS="$lt_save_CFLAGS" +fi # test "$_lt_disable_F77" != yes + +AC_LANG_POP +])# _LT_LANG_F77_CONFIG + + +# _LT_LANG_FC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for a Fortran compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_FC_CONFIG], +[AC_LANG_PUSH(Fortran) + +if test -z "$FC" || test "X$FC" = "Xno"; then + _lt_disable_FC=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for fc test sources. +ac_ext=${ac_fc_srcext-f} + +# Object file extension for compiled fc test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the FC compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_FC" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. 
+ lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${FC-"f95"} + CFLAGS=$FCFLAGS + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS +fi # test "$_lt_disable_FC" != yes + +AC_LANG_POP +])# _LT_LANG_FC_CONFIG + + +# _LT_LANG_GCJ_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Java Compiler compiler +# are suitably defined. 
These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GCJ_CONFIG], +[AC_REQUIRE([LT_PROG_GCJ])dnl +AC_LANG_SAVE + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GCJ-"gcj"} +CFLAGS=$GCJFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GCJ_CONFIG + + +# _LT_LANG_RC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for the Windows resource compiler +# are suitably defined. 
These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_RC_CONFIG], +[AC_REQUIRE([LT_PROG_RC])dnl +AC_LANG_SAVE + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC= +CC=${RC-"windres"} +CFLAGS= +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) +_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + +if test -n "$compiler"; then + : + _LT_CONFIG($1) +fi + +GCC=$lt_save_GCC +AC_LANG_RESTORE +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_RC_CONFIG + + +# LT_PROG_GCJ +# ----------- +AC_DEFUN([LT_PROG_GCJ], +[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], + [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], + [AC_CHECK_TOOL(GCJ, gcj,) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS)])])[]dnl +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_GCJ], []) + + +# LT_PROG_RC +# ---------- +AC_DEFUN([LT_PROG_RC], +[AC_CHECK_TOOL(RC, windres,) +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_RC], []) + + +# _LT_DECL_EGREP +# -------------- +# If we don't have a new enough Autoconf to choose the best grep +# available, choose the one first in the user's PATH. 
+m4_defun([_LT_DECL_EGREP], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_REQUIRE([AC_PROG_FGREP])dnl +test -z "$GREP" && GREP=grep +_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +_LT_DECL([], [EGREP], [1], [An ERE matcher]) +_LT_DECL([], [FGREP], [1], [A literal string matcher]) +dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +AC_SUBST([GREP]) +]) + + +# _LT_DECL_OBJDUMP +# -------------- +# If we don't have a new enough Autoconf to choose the best objdump +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_OBJDUMP], +[AC_CHECK_TOOL(OBJDUMP, objdump, false) +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +AC_SUBST([OBJDUMP]) +]) + +# _LT_DECL_DLLTOOL +# ---------------- +# Ensure DLLTOOL variable is set. +m4_defun([_LT_DECL_DLLTOOL], +[AC_CHECK_TOOL(DLLTOOL, dlltool, false) +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) +AC_SUBST([DLLTOOL]) +]) + +# _LT_DECL_SED +# ------------ +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +m4_defun([_LT_DECL_SED], +[AC_PROG_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" +_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], + [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +])# _LT_DECL_SED + +m4_ifndef([AC_PROG_SED], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. 
# +############################################################ + +m4_defun([AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +])#AC_PROG_SED +])#m4_ifndef + +# Old name: +AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_SED], []) + + +# _LT_CHECK_SHELL_FEATURES +# ------------------------ +# Find out whether the shell is Bourne or XSI compatible, +# or has some other useful features. +m4_defun([_LT_CHECK_SHELL_FEATURES], +[AC_MSG_CHECKING([whether the shell understands some XSI constructs]) +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +AC_MSG_RESULT([$xsi_shell]) +_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) + +AC_MSG_CHECKING([whether the shell understands "+="]) +lt_shell_append=no +( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +AC_MSG_RESULT([$lt_shell_append]) +_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi +_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based 
system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac +_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +])# _LT_CHECK_SHELL_FEATURES + + +# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) +# ------------------------------------------------------ +# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and +# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. +m4_defun([_LT_PROG_FUNCTION_REPLACE], +[dnl { +sed -e '/^$1 ()$/,/^} # $1 /c\ +$1 ()\ +{\ +m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) +} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: +]) + + +# _LT_PROG_REPLACE_SHELLFNS +# ------------------------- +# Replace existing portable implementations of several shell functions with +# equivalent extended shell implementations where those features are available.. +m4_defun([_LT_PROG_REPLACE_SHELLFNS], +[if test x"$xsi_shell" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. 
+ func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl + func_split_long_opt_name=${1%%=*} + func_split_long_opt_arg=${1#*=}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) + + _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) + + _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) + + _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) +fi + +if test x"$lt_shell_append" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) + + _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl + func_quote_for_eval "${2}" +dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ + eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? 
|| _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) +fi +]) + +# _LT_PATH_CONVERSION_FUNCTIONS +# ----------------------------- +# Determine which file name conversion functions should be used by +# func_to_host_file (and, implicitly, by func_to_host_path). These are needed +# for certain cross-compile configurations and native mingw. +m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_MSG_CHECKING([how to convert $build file names to $host format]) +AC_CACHE_VAL(lt_cv_to_host_file_cmd, +[case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac +]) +to_host_file_cmd=$lt_cv_to_host_file_cmd +AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) +_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], + [0], [convert $build file names to $host format])dnl + +AC_MSG_CHECKING([how to convert $build file names to toolchain format]) +AC_CACHE_VAL(lt_cv_to_tool_file_cmd, +[#assume ordinary cross tools, or native build. 
+lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac +]) +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) +_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], + [0], [convert $build files to toolchain format])dnl +])# _LT_PATH_CONVERSION_FUNCTIONS diff --git a/m4/ltoptions.m4 b/m4/ltoptions.m4 new file mode 100644 index 0000000..17cfd51 --- /dev/null +++ b/m4/ltoptions.m4 @@ -0,0 +1,369 @@ +# Helper functions for option handling. -*- Autoconf -*- +# +# Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, +# Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 7 ltoptions.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) + + +# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +# ------------------------------------------ +m4_define([_LT_MANGLE_OPTION], +[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) + + +# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +# --------------------------------------- +# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +# matching handler defined, dispatch to it. Other OPTION-NAMEs are +# saved as a flag. +m4_define([_LT_SET_OPTION], +[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), + _LT_MANGLE_DEFUN([$1], [$2]), + [m4_warning([Unknown $1 option `$2'])])[]dnl +]) + + +# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +# ------------------------------------------------------------ +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. 
+m4_define([_LT_IF_OPTION], +[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) + + +# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +# ------------------------------------------------------- +# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +# are set. +m4_define([_LT_UNLESS_OPTIONS], +[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), + [m4_define([$0_found])])])[]dnl +m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +])[]dnl +]) + + +# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +# ---------------------------------------- +# OPTION-LIST is a space-separated list of Libtool options associated +# with MACRO-NAME. If any OPTION has a matching handler declared with +# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +# the unknown option and exit. +m4_defun([_LT_SET_OPTIONS], +[# Set options +m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [_LT_SET_OPTION([$1], _LT_Option)]) + +m4_if([$1],[LT_INIT],[ + dnl + dnl Simply set some default values (i.e off) if boolean options were not + dnl specified: + _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no + ]) + _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no + ]) + dnl + dnl If no reference was made to various pairs of opposing options, then + dnl we run the default mode handler for the pair. For example, if neither + dnl `shared' nor `disable-shared' was passed, we enable building of shared + dnl archives by default: + _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) + _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], + [_LT_ENABLE_FAST_INSTALL]) + ]) +])# _LT_SET_OPTIONS + + +## --------------------------------- ## +## Macros to handle LT_INIT options. 
## +## --------------------------------- ## + +# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +# ----------------------------------------- +m4_define([_LT_MANGLE_DEFUN], +[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) + + +# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +# ----------------------------------------------- +m4_define([LT_OPTION_DEFINE], +[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +])# LT_OPTION_DEFINE + + +# dlopen +# ------ +LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +]) + +AU_DEFUN([AC_LIBTOOL_DLOPEN], +[_LT_SET_OPTION([LT_INIT], [dlopen]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `dlopen' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) + + +# win32-dll +# --------- +# Declare package support for building win32 dll's. +LT_OPTION_DEFINE([LT_INIT], [win32-dll], +[enable_win32_dll=yes + +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; +esac + +test -z "$AS" && AS=as +_LT_DECL([], [AS], [1], [Assembler program])dnl + +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl + +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl +])# win32-dll + +AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +_LT_SET_OPTION([LT_INIT], [win32-dll]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `win32-dll' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) + + +# _LT_ENABLE_SHARED([DEFAULT]) +# ---------------------------- +# implement the --enable-shared flag, and supports the `shared' and +# `disable-shared' LT_INIT 
options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_SHARED], +[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) + + _LT_DECL([build_libtool_libs], [enable_shared], [0], + [Whether or not to build shared libraries]) +])# _LT_ENABLE_SHARED + +LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +]) + +AC_DEFUN([AC_DISABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], [disable-shared]) +]) + +AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_SHARED], []) +dnl AC_DEFUN([AM_DISABLE_SHARED], []) + + + +# _LT_ENABLE_STATIC([DEFAULT]) +# ---------------------------- +# implement the --enable-static flag, and support the `static' and +# `disable-static' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
+m4_define([_LT_ENABLE_STATIC], +[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]_LT_ENABLE_STATIC_DEFAULT) + + _LT_DECL([build_old_libs], [enable_static], [0], + [Whether or not to build static libraries]) +])# _LT_ENABLE_STATIC + +LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +]) + +AC_DEFUN([AC_DISABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], [disable-static]) +]) + +AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_STATIC], []) +dnl AC_DEFUN([AM_DISABLE_STATIC], []) + + + +# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +# ---------------------------------- +# implement the --enable-fast-install flag, and support the `fast-install' +# and `disable-fast-install' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
+m4_define([_LT_ENABLE_FAST_INSTALL], +[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([fast-install], + [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) + +_LT_DECL([fast_install], [enable_fast_install], [0], + [Whether or not to optimize for fast installation])dnl +])# _LT_ENABLE_FAST_INSTALL + +LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) + +# Old names: +AU_DEFUN([AC_ENABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `fast-install' option into LT_INIT's first parameter.]) +]) + +AU_DEFUN([AC_DISABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `disable-fast-install' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) + + +# _LT_WITH_PIC([MODE]) +# -------------------- +# implement the --with-pic flag, and support the `pic-only' and `no-pic' +# LT_INIT options. +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. 
+m4_define([_LT_WITH_PIC], +[AC_ARG_WITH([pic], + [AS_HELP_STRING([--with-pic], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [pic_mode="$withval"], + [pic_mode=default]) + +test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) + +_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +])# _LT_WITH_PIC + +LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) + +# Old name: +AU_DEFUN([AC_LIBTOOL_PICMODE], +[_LT_SET_OPTION([LT_INIT], [pic-only]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `pic-only' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) + +## ----------------- ## +## LTDL_INIT Options ## +## ----------------- ## + +m4_define([_LTDL_MODE], []) +LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], + [m4_define([_LTDL_MODE], [nonrecursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [recursive], + [m4_define([_LTDL_MODE], [recursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [subproject], + [m4_define([_LTDL_MODE], [subproject])]) + +m4_define([_LTDL_TYPE], []) +LT_OPTION_DEFINE([LTDL_INIT], [installable], + [m4_define([_LTDL_TYPE], [installable])]) +LT_OPTION_DEFINE([LTDL_INIT], [convenience], + [m4_define([_LTDL_TYPE], [convenience])]) diff --git a/m4/ltsugar.m4 b/m4/ltsugar.m4 new file mode 100644 index 0000000..9000a05 --- /dev/null +++ b/m4/ltsugar.m4 @@ -0,0 +1,123 @@ +# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 6 ltsugar.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. 
+AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) + + +# lt_join(SEP, ARG1, [ARG2...]) +# ----------------------------- +# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +# associated separator. +# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +# versions in m4sugar had bugs. +m4_define([lt_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +m4_define([_lt_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) + + +# lt_car(LIST) +# lt_cdr(LIST) +# ------------ +# Manipulate m4 lists. +# These macros are necessary as long as will still need to support +# Autoconf-2.59 which quotes differently. +m4_define([lt_car], [[$1]]) +m4_define([lt_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) +m4_define([lt_unquote], $1) + + +# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. +# Note that neither SEPARATOR nor STRING are expanded; they are appended +# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +# No SEPARATOR is output if MACRO-NAME was previously undefined (different +# than defined and empty). +# +# This macro is needed until we can rely on Autoconf 2.62, since earlier +# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. +m4_define([lt_append], +[m4_define([$1], + m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) + + + +# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +# ---------------------------------------------------------- +# Produce a SEP delimited list of all paired combinations of elements of +# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +# has the form PREFIXmINFIXSUFFIXn. +# Needed until we can rely on m4_combine added in Autoconf 2.62. 
+m4_define([lt_combine], +[m4_if(m4_eval([$# > 3]), [1], + [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +[[m4_foreach([_Lt_prefix], [$2], + [m4_foreach([_Lt_suffix], + ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, + [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) + + +# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +# ----------------------------------------------------------------------- +# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. +m4_define([lt_if_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], + [lt_append([$1], [$2], [$3])$4], + [$5])], + [lt_append([$1], [$2], [$3])$4])]) + + +# lt_dict_add(DICT, KEY, VALUE) +# ----------------------------- +m4_define([lt_dict_add], +[m4_define([$1($2)], [$3])]) + + +# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +# -------------------------------------------- +m4_define([lt_dict_add_subkey], +[m4_define([$1($2:$3)], [$4])]) + + +# lt_dict_fetch(DICT, KEY, [SUBKEY]) +# ---------------------------------- +m4_define([lt_dict_fetch], +[m4_ifval([$3], + m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), + m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) + + +# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------------------- +m4_define([lt_if_dict_fetch], +[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], + [$5], + [$6])]) + + +# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +# -------------------------------------------------------------- +m4_define([lt_dict_filter], +[m4_if([$5], [], [], + [lt_join(m4_quote(m4_default([$4], [[, ]])), + lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), + [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl +]) diff --git a/m4/ltversion.m4 b/m4/ltversion.m4 new 
file mode 100644 index 0000000..9c7b5d4 --- /dev/null +++ b/m4/ltversion.m4 @@ -0,0 +1,23 @@ +# ltversion.m4 -- version numbers -*- Autoconf -*- +# +# Copyright (C) 2004 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# @configure_input@ + +# serial 3293 ltversion.m4 +# This file is part of GNU Libtool + +m4_define([LT_PACKAGE_VERSION], [2.4]) +m4_define([LT_PACKAGE_REVISION], [1.3293]) + +AC_DEFUN([LTVERSION_VERSION], +[macro_version='2.4' +macro_revision='1.3293' +_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +_LT_DECL(, macro_revision, 0) +]) diff --git a/m4/lt~obsolete.m4 b/m4/lt~obsolete.m4 new file mode 100644 index 0000000..c573da9 --- /dev/null +++ b/m4/lt~obsolete.m4 @@ -0,0 +1,98 @@ +# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004. +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 5 lt~obsolete.m4 + +# These exist entirely to fool aclocal when bootstrapping libtool. +# +# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) +# which have later been changed to m4_define as they aren't part of the +# exported API, or moved to Autoconf or Automake where they belong. +# +# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +# using a macro with the same name in our local m4/libtool.m4 it'll +# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +# and doesn't know about Autoconf macros at all.) 
+# +# So we provide this file, which has a silly filename so it's always +# included after everything else. This provides aclocal with the +# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +# because those macros already exist, or will be overwritten later. +# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +# +# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. +# Yes, that means every name once taken will need to remain here until +# we give up compatibility with versions before 1.7, at which point +# we need to keep only those names which we still refer to. + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) + +m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], 
[AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], 
[AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) +m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) +m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) +m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) +m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) +m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) +m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) diff --git a/missing b/missing new file mode 100755 index 0000000..86a8fc3 --- /dev/null +++ b/missing @@ -0,0 +1,331 @@ +#! /bin/sh +# Common stub for a few missing GNU programs while installing. + +scriptversion=2012-01-06.13; # UTC + +# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006, +# 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. +# Originally by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +if test $# -eq 0; then + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 +fi + +run=: +sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p' +sed_minuso='s/.* -o \([^ ]*\).*/\1/p' + +# In the cases where this matters, `missing' is being run in the +# srcdir already. +if test -f configure.ac; then + configure_ac=configure.ac +else + configure_ac=configure.in +fi + +msg="missing on your system" + +case $1 in +--run) + # Try to run requested program, and just exit if it succeeds. + run= + shift + "$@" && exit 0 + # Exit code 63 means version mismatch. This often happens + # when the user try to use an ancient version of a tool on + # a file that requires a minimum version. In this case we + # we should proceed has if the program had been absent, or + # if --run hadn't been passed. + if test $? = 63; then + run=: + msg="probably too old" + fi + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an +error status if there is no known handling for PROGRAM. 
+ +Options: + -h, --help display this help and exit + -v, --version output version information and exit + --run try to run the given command, and emulate it if it fails + +Supported PROGRAM values: + aclocal touch file \`aclocal.m4' + autoconf touch file \`configure' + autoheader touch file \`config.h.in' + autom4te touch the output file, or create a stub one + automake touch all \`Makefile.in' files + bison create \`y.tab.[ch]', if possible, from existing .[ch] + flex create \`lex.yy.c', if possible, from existing .c + help2man touch the output file + lex create \`lex.yy.c', if possible, from existing .c + makeinfo touch the output file + yacc create \`y.tab.[ch]', if possible, from existing .[ch] + +Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and +\`g' are ignored when checking the name. + +Send bug reports to ." + exit $? + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: Unknown \`$1' option" + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 + ;; + +esac + +# normalize program name to check for. +program=`echo "$1" | sed ' + s/^gnu-//; t + s/^gnu//; t + s/^g//; t'` + +# Now exit if we have it, but it failed. Also exit now if we +# don't have it and --version was passed (most likely to detect +# the program). This is about non-GNU programs, so use $1 not +# $program. +case $1 in + lex*|yacc*) + # Not GNU programs, they don't have --version. + ;; + + *) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + elif test "x$2" = "x--version" || test "x$2" = "x--help"; then + # Could not run --version or --help. This is probably someone + # running `$TOOL --version' or `$TOOL --help' to check whether + # $TOOL exists and not knowing $TOOL uses missing. + exit 1 + fi + ;; +esac + +# If it does not exist, or fails to run (possibly an outdated version), +# try to emulate it. 
+case $program in + aclocal*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acinclude.m4' or \`${configure_ac}'. You might want + to install the \`Automake' and \`Perl' packages. Grab them from + any GNU archive site." + touch aclocal.m4 + ;; + + autoconf*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`${configure_ac}'. You might want to install the + \`Autoconf' and \`GNU m4' packages. Grab them from any GNU + archive site." + touch configure + ;; + + autoheader*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acconfig.h' or \`${configure_ac}'. You might want + to install the \`Autoconf' and \`GNU m4' packages. Grab them + from any GNU archive site." + files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` + test -z "$files" && files="config.h" + touch_files= + for f in $files; do + case $f in + *:*) touch_files="$touch_files "`echo "$f" | + sed -e 's/^[^:]*://' -e 's/:.*//'`;; + *) touch_files="$touch_files $f.in";; + esac + done + touch $touch_files + ;; + + automake*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. + You might want to install the \`Automake' and \`Perl' packages. + Grab them from any GNU archive site." + find . -type f -name Makefile.am -print | + sed 's/\.am$/.in/' | + while read f; do touch "$f"; done + ;; + + autom4te*) + echo 1>&2 "\ +WARNING: \`$1' is needed, but is $msg. + You might have modified some files without having the + proper tools for further handling them. + You can get \`$1' as part of \`Autoconf' from any GNU + archive site." + + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo "#! 
/bin/sh" + echo "# Created by GNU Automake missing as a replacement of" + echo "# $ $@" + echo "exit 0" + chmod +x $file + exit 1 + fi + ;; + + bison*|yacc*) + echo 1>&2 "\ +WARNING: \`$1' $msg. You should only need it if + you modified a \`.y' file. You may need the \`Bison' package + in order for those modifications to take effect. You can get + \`Bison' from any GNU archive site." + rm -f y.tab.c y.tab.h + if test $# -ne 1; then + eval LASTARG=\${$#} + case $LASTARG in + *.y) + SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" y.tab.c + fi + SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" y.tab.h + fi + ;; + esac + fi + if test ! -f y.tab.h; then + echo >y.tab.h + fi + if test ! -f y.tab.c; then + echo 'main() { return 0; }' >y.tab.c + fi + ;; + + lex*|flex*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.l' file. You may need the \`Flex' package + in order for those modifications to take effect. You can get + \`Flex' from any GNU archive site." + rm -f lex.yy.c + if test $# -ne 1; then + eval LASTARG=\${$#} + case $LASTARG in + *.l) + SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" lex.yy.c + fi + ;; + esac + fi + if test ! -f lex.yy.c; then + echo 'main() { return 0; }' >lex.yy.c + fi + ;; + + help2man*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a dependency of a manual page. You may need the + \`Help2man' package in order for those modifications to take + effect. You can get \`Help2man' from any GNU archive site." + + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo ".ab help2man is required to generate this page" + exit $? + fi + ;; + + makeinfo*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. 
You should only need it if + you modified a \`.texi' or \`.texinfo' file, or any other file + indirectly affecting the aspect of the manual. The spurious + call might also be the consequence of using a buggy \`make' (AIX, + DU, IRIX). You might want to install the \`Texinfo' package or + the \`GNU make' package. Grab either from any GNU archive site." + # The file to touch is that specified with -o ... + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -z "$file"; then + # ... or it is the one specified with @setfilename ... + infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` + file=`sed -n ' + /^@setfilename/{ + s/.* \([^ ]*\) *$/\1/ + p + q + }' $infile` + # ... or it is derived from the source name (dir/f.texi becomes f.info) + test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info + fi + # If the file does not exist, the user really needs makeinfo; + # let's fail without touching anything. + test -f $file || exit 1 + touch $file + ;; + + *) + echo 1>&2 "\ +WARNING: \`$1' is needed, and is $msg. + You might have modified some files without having the + proper tools for further handling them. Check the \`README' file, + it often tells you about the needed prerequisites for installing + this package. You may also peek at any GNU archive site, in case + some other package would contain this missing \`$1' program." 
+ exit 1 + ;; +esac + +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/packaging/libsoup2.4.changes b/packaging/libsoup2.4.changes new file mode 100644 index 0000000..66025e0 --- /dev/null +++ b/packaging/libsoup2.4.changes @@ -0,0 +1,2 @@ +* Wed Jul 05 2012 - Rusty Lynch - 2.37.92 + - Packaging version 2.37.92 diff --git a/packaging/libsoup2.4.spec b/packaging/libsoup2.4.spec index f7c274a..562da62 100644 --- a/packaging/libsoup2.4.spec +++ b/packaging/libsoup2.4.spec @@ -2,14 +2,12 @@ Name: libsoup2.4 Summary: HTTP client/server library for GNOME -Version: 2.35.90 +Version: 2.37.92 Release: 7 -Group: TO_BE/FILLED_IN +Group: system/libraries License: LGPLv2 URL: http://live.gnome.org/LibSoup Source0: libsoup2.4-%{version}.tar.gz -Source1001: packaging/libsoup2.4.manifest -Patch0: libsoup-disable-gtkdoc.patch BuildRequires: pkgconfig(glib-2.0) BuildRequires: pkgconfig(gnutls) @@ -39,14 +37,12 @@ an HTTP library implementation in C (development files). %prep %setup -q -n libsoup2.4-%{version} -%patch0 -p1 %build -cp %{SOURCE1001} . touch gtk-doc.make USE_GNOME2_MACROS=1 . 
gnome-autogen.sh \ - --prefix=/usr --without-gnome --enable-sqllite=yes --disable-tls-check --disable-static + --prefix=/usr --without-gnome --enable-sqllite=yes --disable-tls-check --disable-static make %{?jobs:-j%jobs} @@ -66,11 +62,9 @@ rm -rf %{buildroot} %files -%manifest libsoup2.4.manifest /usr/lib/*.so.* %files devel -%manifest libsoup2.4.manifest /usr/include/libsoup-2.4/* /usr/lib/*.so /usr/lib/pkgconfig/libsoup-2.4.pc diff --git a/tests/Makefile.am b/tests/Makefile.am index 06f6c58..81aaa86 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,6 +1,6 @@ INCLUDES = \ -I$(top_srcdir) \ - -DSRCDIR=\""$(srcdir)"\" \ + -DSRCDIR=\""$(abs_srcdir)"\" \ -DLIBSOUP_DISABLE_DEPRECATED \ $(SOUP_MAINTAINER_FLAGS) \ $(XML_CFLAGS) \ @@ -14,6 +14,7 @@ LIBS = \ noinst_PROGRAMS = \ chunk-test \ coding-test \ + connection-test \ context-test \ continue-test \ cookies-test \ @@ -30,6 +31,7 @@ noinst_PROGRAMS = \ simple-httpd \ simple-proxy \ sniffing-test \ + ssl-test \ streaming-test \ timeout-test \ uri-parsing \ @@ -42,6 +44,7 @@ TEST_SRCS = test-utils.c test-utils.h auth_test_SOURCES = auth-test.c $(TEST_SRCS) chunk_test_SOURCES = chunk-test.c $(TEST_SRCS) coding_test_SOURCES = coding-test.c $(TEST_SRCS) +connection_test_SOURCES = connection-test.c $(TEST_SRCS) context_test_SOURCES = context-test.c $(TEST_SRCS) continue_test_SOURCES = continue-test.c $(TEST_SRCS) cookies_test_SOURCES = cookies-test.c $(TEST_SRCS) @@ -65,6 +68,7 @@ server_auth_test_SOURCES = server-auth-test.c $(TEST_SRCS) simple_httpd_SOURCES = simple-httpd.c simple_proxy_SOURCES = simple-proxy.c sniffing_test_SOURCES = sniffing-test.c $(TEST_SRCS) +ssl_test_SOURCES = ssl-test.c $(TEST_SRCS) streaming_test_SOURCES = streaming-test.c $(TEST_SRCS) timeout_test_SOURCES = timeout-test.c $(TEST_SRCS) uri_parsing_SOURCES = uri-parsing.c $(TEST_SRCS) @@ -84,6 +88,7 @@ endif TESTS = \ chunk-test \ coding-test \ + connection-test \ context-test \ continue-test \ cookies-test \ @@ -94,6 +99,7 @@ TESTS = \ 
redirect-test \ requester-test \ sniffing-test \ + ssl-test \ streaming-test \ timeout-test \ uri-parsing \ @@ -101,12 +107,14 @@ TESTS = \ $(CURL_TESTS) \ $(XMLRPC_TESTS) -SNIFFING_FILES = \ +RESOURCES = \ resources/atom.xml \ resources/home.gif \ resources/html_binary.html \ resources/mbox \ resources/mbox.gz \ + resources/mbox.raw \ + resources/mbox.zlib \ resources/ps_binary.ps \ resources/rss20.xml \ resources/test.html \ @@ -121,7 +129,7 @@ EXTRA_DIST = \ test-cert.pem \ test-key.pem \ xmlrpc-server.php \ - $(SNIFFING_FILES) + $(RESOURCES) if MISSING_REGRESSION_TEST_PACKAGES check-local: check-TESTS diff --git a/tests/Makefile.in b/tests/Makefile.in new file mode 100644 index 0000000..a9e82a7 --- /dev/null +++ b/tests/Makefile.in @@ -0,0 +1,979 @@ +# Makefile.in generated by automake 1.11.3 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +noinst_PROGRAMS = chunk-test$(EXEEXT) coding-test$(EXEEXT) \ + connection-test$(EXEEXT) context-test$(EXEEXT) \ + continue-test$(EXEEXT) cookies-test$(EXEEXT) date$(EXEEXT) \ + dns$(EXEEXT) forms-test$(EXEEXT) get$(EXEEXT) getbug$(EXEEXT) \ + header-parsing$(EXEEXT) misc-test$(EXEEXT) ntlm-test$(EXEEXT) \ + redirect-test$(EXEEXT) requester-test$(EXEEXT) \ + simple-httpd$(EXEEXT) simple-proxy$(EXEEXT) \ + sniffing-test$(EXEEXT) ssl-test$(EXEEXT) \ + streaming-test$(EXEEXT) timeout-test$(EXEEXT) \ + uri-parsing$(EXEEXT) $(am__EXEEXT_1) $(am__EXEEXT_2) \ + $(am__EXEEXT_3) +TESTS = chunk-test$(EXEEXT) coding-test$(EXEEXT) \ + connection-test$(EXEEXT) context-test$(EXEEXT) \ + continue-test$(EXEEXT) cookies-test$(EXEEXT) date$(EXEEXT) \ + header-parsing$(EXEEXT) misc-test$(EXEEXT) ntlm-test$(EXEEXT) \ + redirect-test$(EXEEXT) requester-test$(EXEEXT) \ + sniffing-test$(EXEEXT) ssl-test$(EXEEXT) \ + streaming-test$(EXEEXT) timeout-test$(EXEEXT) \ + uri-parsing$(EXEEXT) $(am__EXEEXT_2) $(am__EXEEXT_1) \ + $(am__EXEEXT_3) +subdir = tests +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \ + $(srcdir)/httpd.conf.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/gtk-doc.m4 \ + $(top_srcdir)/m4/introspection.m4 $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 
$(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = httpd.conf +CONFIG_CLEAN_VPATH_FILES = +@HAVE_CURL_TRUE@am__EXEEXT_1 = forms-test$(EXEEXT) \ +@HAVE_CURL_TRUE@ server-auth-test$(EXEEXT) +@HAVE_APACHE_TRUE@am__EXEEXT_2 = auth-test$(EXEEXT) \ +@HAVE_APACHE_TRUE@ proxy-test$(EXEEXT) pull-api$(EXEEXT) \ +@HAVE_APACHE_TRUE@ range-test$(EXEEXT) +@HAVE_XMLRPC_EPI_PHP_TRUE@am__EXEEXT_3 = xmlrpc-test$(EXEEXT) \ +@HAVE_XMLRPC_EPI_PHP_TRUE@ xmlrpc-server-test$(EXEEXT) +PROGRAMS = $(noinst_PROGRAMS) +am__objects_1 = test-utils.$(OBJEXT) +am_auth_test_OBJECTS = auth-test.$(OBJEXT) $(am__objects_1) +auth_test_OBJECTS = $(am_auth_test_OBJECTS) +auth_test_LDADD = $(LDADD) +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am_chunk_test_OBJECTS = chunk-test.$(OBJEXT) $(am__objects_1) +chunk_test_OBJECTS = $(am_chunk_test_OBJECTS) +chunk_test_LDADD = $(LDADD) +am_coding_test_OBJECTS = coding-test.$(OBJEXT) $(am__objects_1) +coding_test_OBJECTS = $(am_coding_test_OBJECTS) +coding_test_LDADD = $(LDADD) +am_connection_test_OBJECTS = connection-test.$(OBJEXT) \ + $(am__objects_1) +connection_test_OBJECTS = $(am_connection_test_OBJECTS) +connection_test_LDADD = $(LDADD) +am_context_test_OBJECTS = context-test.$(OBJEXT) $(am__objects_1) +context_test_OBJECTS = $(am_context_test_OBJECTS) +context_test_LDADD = $(LDADD) +am_continue_test_OBJECTS = continue-test.$(OBJEXT) $(am__objects_1) +continue_test_OBJECTS = $(am_continue_test_OBJECTS) +continue_test_LDADD = $(LDADD) +am_cookies_test_OBJECTS = cookies-test.$(OBJEXT) $(am__objects_1) +cookies_test_OBJECTS = $(am_cookies_test_OBJECTS) +cookies_test_LDADD = $(LDADD) +am_date_OBJECTS = date.$(OBJEXT) $(am__objects_1) +date_OBJECTS = $(am_date_OBJECTS) +date_LDADD = $(LDADD) +am_dns_OBJECTS = 
dns.$(OBJEXT) +dns_OBJECTS = $(am_dns_OBJECTS) +dns_LDADD = $(LDADD) +am_forms_test_OBJECTS = forms-test.$(OBJEXT) $(am__objects_1) +forms_test_OBJECTS = $(am_forms_test_OBJECTS) +forms_test_LDADD = $(LDADD) +am_get_OBJECTS = get.$(OBJEXT) +get_OBJECTS = $(am_get_OBJECTS) +@BUILD_LIBSOUP_GNOME_TRUE@get_DEPENDENCIES = $(top_builddir)/libsoup/libsoup-gnome-2.4.la +am_getbug_OBJECTS = getbug.$(OBJEXT) +getbug_OBJECTS = $(am_getbug_OBJECTS) +getbug_LDADD = $(LDADD) +am_header_parsing_OBJECTS = header-parsing.$(OBJEXT) $(am__objects_1) +header_parsing_OBJECTS = $(am_header_parsing_OBJECTS) +header_parsing_LDADD = $(LDADD) +am_misc_test_OBJECTS = misc-test.$(OBJEXT) $(am__objects_1) +misc_test_OBJECTS = $(am_misc_test_OBJECTS) +misc_test_LDADD = $(LDADD) +am_ntlm_test_OBJECTS = ntlm-test.$(OBJEXT) $(am__objects_1) +ntlm_test_OBJECTS = $(am_ntlm_test_OBJECTS) +ntlm_test_LDADD = $(LDADD) +am_proxy_test_OBJECTS = proxy-test.$(OBJEXT) $(am__objects_1) +proxy_test_OBJECTS = $(am_proxy_test_OBJECTS) +proxy_test_LDADD = $(LDADD) +am_pull_api_OBJECTS = pull-api.$(OBJEXT) $(am__objects_1) +pull_api_OBJECTS = $(am_pull_api_OBJECTS) +pull_api_LDADD = $(LDADD) +am_range_test_OBJECTS = range-test.$(OBJEXT) $(am__objects_1) +range_test_OBJECTS = $(am_range_test_OBJECTS) +range_test_LDADD = $(LDADD) +am_redirect_test_OBJECTS = redirect-test.$(OBJEXT) $(am__objects_1) +redirect_test_OBJECTS = $(am_redirect_test_OBJECTS) +redirect_test_LDADD = $(LDADD) +am_requester_test_OBJECTS = requester-test.$(OBJEXT) $(am__objects_1) +requester_test_OBJECTS = $(am_requester_test_OBJECTS) +requester_test_LDADD = $(LDADD) +am_server_auth_test_OBJECTS = server-auth-test.$(OBJEXT) \ + $(am__objects_1) +server_auth_test_OBJECTS = $(am_server_auth_test_OBJECTS) +server_auth_test_LDADD = $(LDADD) +am_simple_httpd_OBJECTS = simple-httpd.$(OBJEXT) +simple_httpd_OBJECTS = $(am_simple_httpd_OBJECTS) +simple_httpd_LDADD = $(LDADD) +am_simple_proxy_OBJECTS = simple-proxy.$(OBJEXT) +simple_proxy_OBJECTS = 
$(am_simple_proxy_OBJECTS) +simple_proxy_LDADD = $(LDADD) +am_sniffing_test_OBJECTS = sniffing-test.$(OBJEXT) $(am__objects_1) +sniffing_test_OBJECTS = $(am_sniffing_test_OBJECTS) +sniffing_test_LDADD = $(LDADD) +am_ssl_test_OBJECTS = ssl-test.$(OBJEXT) $(am__objects_1) +ssl_test_OBJECTS = $(am_ssl_test_OBJECTS) +ssl_test_LDADD = $(LDADD) +am_streaming_test_OBJECTS = streaming-test.$(OBJEXT) $(am__objects_1) +streaming_test_OBJECTS = $(am_streaming_test_OBJECTS) +streaming_test_LDADD = $(LDADD) +am_timeout_test_OBJECTS = timeout-test.$(OBJEXT) $(am__objects_1) +timeout_test_OBJECTS = $(am_timeout_test_OBJECTS) +timeout_test_LDADD = $(LDADD) +am_uri_parsing_OBJECTS = uri-parsing.$(OBJEXT) $(am__objects_1) +uri_parsing_OBJECTS = $(am_uri_parsing_OBJECTS) +uri_parsing_LDADD = $(LDADD) +am_xmlrpc_server_test_OBJECTS = xmlrpc-server-test.$(OBJEXT) \ + $(am__objects_1) +xmlrpc_server_test_OBJECTS = $(am_xmlrpc_server_test_OBJECTS) +xmlrpc_server_test_LDADD = $(LDADD) +am_xmlrpc_test_OBJECTS = xmlrpc-test.$(OBJEXT) $(am__objects_1) +xmlrpc_test_OBJECTS = $(am_xmlrpc_test_OBJECTS) +xmlrpc_test_LDADD = $(LDADD) +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) +depcomp = $(SHELL) $(top_srcdir)/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CFLAGS) $(CFLAGS) +AM_V_CC = $(am__v_CC_@AM_V@) +am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) +am__v_CC_0 = @echo " CC " $@; +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +CCLD = $(CC) +LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CCLD = $(am__v_CCLD_@AM_V@) +am__v_CCLD_ = 
$(am__v_CCLD_@AM_DEFAULT_V@) +am__v_CCLD_0 = @echo " CCLD " $@; +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +SOURCES = $(auth_test_SOURCES) $(chunk_test_SOURCES) \ + $(coding_test_SOURCES) $(connection_test_SOURCES) \ + $(context_test_SOURCES) $(continue_test_SOURCES) \ + $(cookies_test_SOURCES) $(date_SOURCES) $(dns_SOURCES) \ + $(forms_test_SOURCES) $(get_SOURCES) $(getbug_SOURCES) \ + $(header_parsing_SOURCES) $(misc_test_SOURCES) \ + $(ntlm_test_SOURCES) $(proxy_test_SOURCES) $(pull_api_SOURCES) \ + $(range_test_SOURCES) $(redirect_test_SOURCES) \ + $(requester_test_SOURCES) $(server_auth_test_SOURCES) \ + $(simple_httpd_SOURCES) $(simple_proxy_SOURCES) \ + $(sniffing_test_SOURCES) $(ssl_test_SOURCES) \ + $(streaming_test_SOURCES) $(timeout_test_SOURCES) \ + $(uri_parsing_SOURCES) $(xmlrpc_server_test_SOURCES) \ + $(xmlrpc_test_SOURCES) +DIST_SOURCES = $(auth_test_SOURCES) $(chunk_test_SOURCES) \ + $(coding_test_SOURCES) $(connection_test_SOURCES) \ + $(context_test_SOURCES) $(continue_test_SOURCES) \ + $(cookies_test_SOURCES) $(date_SOURCES) $(dns_SOURCES) \ + $(forms_test_SOURCES) $(get_SOURCES) $(getbug_SOURCES) \ + $(header_parsing_SOURCES) $(misc_test_SOURCES) \ + $(ntlm_test_SOURCES) $(proxy_test_SOURCES) $(pull_api_SOURCES) \ + $(range_test_SOURCES) $(redirect_test_SOURCES) \ + $(requester_test_SOURCES) $(server_auth_test_SOURCES) \ + $(simple_httpd_SOURCES) $(simple_proxy_SOURCES) \ + $(sniffing_test_SOURCES) $(ssl_test_SOURCES) \ + $(streaming_test_SOURCES) $(timeout_test_SOURCES) \ + $(uri_parsing_SOURCES) $(xmlrpc_server_test_SOURCES) \ + $(xmlrpc_test_SOURCES) +ETAGS = etags +CTAGS = ctags +am__tty_colors = \ +red=; grn=; lgn=; blu=; std= +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +APACHE_HTTPD = @APACHE_HTTPD@ +APACHE_MODULE_DIR = @APACHE_MODULE_DIR@ +APACHE_PHP_MODULE = 
@APACHE_PHP_MODULE@ +APACHE_PHP_MODULE_DIR = @APACHE_PHP_MODULE_DIR@ +APACHE_SSL_MODULE_DIR = @APACHE_SSL_MODULE_DIR@ +AR = @AR@ +AS = @AS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL = @CURL@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GLIB_CFLAGS = @GLIB_CFLAGS@ +GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@ +GLIB_GENMARSHAL = @GLIB_GENMARSHAL@ +GLIB_LIBS = @GLIB_LIBS@ +GLIB_MAKEFILE = @GLIB_MAKEFILE@ +GLIB_MKENUMS = @GLIB_MKENUMS@ +GNOME_KEYRING_CFLAGS = @GNOME_KEYRING_CFLAGS@ +GNOME_KEYRING_LIBS = @GNOME_KEYRING_LIBS@ +GOBJECT_QUERY = @GOBJECT_QUERY@ +GREP = @GREP@ +GTKDOC_CHECK = @GTKDOC_CHECK@ +GTKDOC_DEPS_CFLAGS = @GTKDOC_DEPS_CFLAGS@ +GTKDOC_DEPS_LIBS = @GTKDOC_DEPS_LIBS@ +GTKDOC_MKPDF = @GTKDOC_MKPDF@ +GTKDOC_REBASE = @GTKDOC_REBASE@ +HAVE_GNOME = @HAVE_GNOME@ +HTML_DIR = @HTML_DIR@ +IF_HAVE_PHP = @IF_HAVE_PHP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +INTROSPECTION_CFLAGS = @INTROSPECTION_CFLAGS@ +INTROSPECTION_COMPILER = @INTROSPECTION_COMPILER@ +INTROSPECTION_GENERATE = @INTROSPECTION_GENERATE@ +INTROSPECTION_GIRDIR = @INTROSPECTION_GIRDIR@ +INTROSPECTION_LIBS = @INTROSPECTION_LIBS@ +INTROSPECTION_MAKEFILE = @INTROSPECTION_MAKEFILE@ +INTROSPECTION_SCANNER = @INTROSPECTION_SCANNER@ +INTROSPECTION_TYPELIBDIR = @INTROSPECTION_TYPELIBDIR@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = \ + $(top_builddir)/libsoup/libsoup-2.4.la \ + $(LIBGNUTLS_LIBS) \ + $(GLIB_LIBS) + +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = 
@MANIFEST_TOOL@ +MISSING_REGRESSION_TEST_PACKAGES = @MISSING_REGRESSION_TEST_PACKAGES@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PHP = @PHP@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SOUP_AGE = @SOUP_AGE@ +SOUP_API_VERSION = @SOUP_API_VERSION@ +SOUP_CURRENT = @SOUP_CURRENT@ +SOUP_DEBUG_FLAGS = @SOUP_DEBUG_FLAGS@ +SOUP_MAINTAINER_FLAGS = @SOUP_MAINTAINER_FLAGS@ +SOUP_REVISION = @SOUP_REVISION@ +SQLITE_CFLAGS = @SQLITE_CFLAGS@ +SQLITE_LIBS = @SQLITE_LIBS@ +STRIP = @STRIP@ +VERSION = @VERSION@ +XML_CFLAGS = @XML_CFLAGS@ +XML_LIBS = @XML_LIBS@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = 
@mandir@ +mkdir_p = @mkdir_p@ +ntlm_auth = @ntlm_auth@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +INCLUDES = \ + -I$(top_srcdir) \ + -DSRCDIR=\""$(abs_srcdir)"\" \ + -DLIBSOUP_DISABLE_DEPRECATED \ + $(SOUP_MAINTAINER_FLAGS) \ + $(XML_CFLAGS) \ + $(GLIB_CFLAGS) + +TEST_SRCS = test-utils.c test-utils.h +auth_test_SOURCES = auth-test.c $(TEST_SRCS) +chunk_test_SOURCES = chunk-test.c $(TEST_SRCS) +coding_test_SOURCES = coding-test.c $(TEST_SRCS) +connection_test_SOURCES = connection-test.c $(TEST_SRCS) +context_test_SOURCES = context-test.c $(TEST_SRCS) +continue_test_SOURCES = continue-test.c $(TEST_SRCS) +cookies_test_SOURCES = cookies-test.c $(TEST_SRCS) +date_SOURCES = date.c $(TEST_SRCS) +dns_SOURCES = dns.c +forms_test_SOURCES = forms-test.c $(TEST_SRCS) +get_SOURCES = get.c +@BUILD_LIBSOUP_GNOME_TRUE@get_LDADD = $(top_builddir)/libsoup/libsoup-gnome-2.4.la +getbug_SOURCES = getbug.c +header_parsing_SOURCES = header-parsing.c $(TEST_SRCS) +misc_test_SOURCES = misc-test.c $(TEST_SRCS) +ntlm_test_SOURCES = ntlm-test.c $(TEST_SRCS) +proxy_test_SOURCES = proxy-test.c $(TEST_SRCS) +pull_api_SOURCES = pull-api.c $(TEST_SRCS) +range_test_SOURCES = range-test.c $(TEST_SRCS) +redirect_test_SOURCES = redirect-test.c $(TEST_SRCS) +requester_test_SOURCES = requester-test.c $(TEST_SRCS) +server_auth_test_SOURCES = server-auth-test.c $(TEST_SRCS) +simple_httpd_SOURCES = simple-httpd.c +simple_proxy_SOURCES = simple-proxy.c +sniffing_test_SOURCES = sniffing-test.c $(TEST_SRCS) +ssl_test_SOURCES = ssl-test.c $(TEST_SRCS) +streaming_test_SOURCES = streaming-test.c $(TEST_SRCS) +timeout_test_SOURCES = timeout-test.c $(TEST_SRCS) +uri_parsing_SOURCES = 
uri-parsing.c $(TEST_SRCS) +xmlrpc_test_SOURCES = xmlrpc-test.c $(TEST_SRCS) +xmlrpc_server_test_SOURCES = xmlrpc-server-test.c $(TEST_SRCS) +@HAVE_APACHE_TRUE@APACHE_TESTS = auth-test proxy-test pull-api range-test +@HAVE_CURL_TRUE@CURL_TESTS = forms-test server-auth-test +@HAVE_XMLRPC_EPI_PHP_TRUE@XMLRPC_TESTS = xmlrpc-test xmlrpc-server-test +RESOURCES = \ + resources/atom.xml \ + resources/home.gif \ + resources/html_binary.html \ + resources/mbox \ + resources/mbox.gz \ + resources/mbox.raw \ + resources/mbox.zlib \ + resources/ps_binary.ps \ + resources/rss20.xml \ + resources/test.html \ + resources/text_binary.txt + +EXTRA_DIST = \ + htdigest \ + htpasswd \ + httpd.conf.in \ + index.txt \ + libsoup.supp \ + test-cert.pem \ + test-key.pem \ + xmlrpc-server.php \ + $(RESOURCES) + +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tests/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign tests/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +httpd.conf: $(top_builddir)/config.status $(srcdir)/httpd.conf.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +clean-noinstPROGRAMS: + @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list +auth-test$(EXEEXT): $(auth_test_OBJECTS) $(auth_test_DEPENDENCIES) $(EXTRA_auth_test_DEPENDENCIES) + @rm -f auth-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(auth_test_OBJECTS) $(auth_test_LDADD) $(LIBS) +chunk-test$(EXEEXT): $(chunk_test_OBJECTS) $(chunk_test_DEPENDENCIES) $(EXTRA_chunk_test_DEPENDENCIES) + @rm -f chunk-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(chunk_test_OBJECTS) $(chunk_test_LDADD) $(LIBS) +coding-test$(EXEEXT): $(coding_test_OBJECTS) $(coding_test_DEPENDENCIES) $(EXTRA_coding_test_DEPENDENCIES) + @rm -f coding-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(coding_test_OBJECTS) $(coding_test_LDADD) $(LIBS) +connection-test$(EXEEXT): $(connection_test_OBJECTS) $(connection_test_DEPENDENCIES) $(EXTRA_connection_test_DEPENDENCIES) + @rm -f connection-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(connection_test_OBJECTS) $(connection_test_LDADD) $(LIBS) +context-test$(EXEEXT): $(context_test_OBJECTS) 
$(context_test_DEPENDENCIES) $(EXTRA_context_test_DEPENDENCIES) + @rm -f context-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(context_test_OBJECTS) $(context_test_LDADD) $(LIBS) +continue-test$(EXEEXT): $(continue_test_OBJECTS) $(continue_test_DEPENDENCIES) $(EXTRA_continue_test_DEPENDENCIES) + @rm -f continue-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(continue_test_OBJECTS) $(continue_test_LDADD) $(LIBS) +cookies-test$(EXEEXT): $(cookies_test_OBJECTS) $(cookies_test_DEPENDENCIES) $(EXTRA_cookies_test_DEPENDENCIES) + @rm -f cookies-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(cookies_test_OBJECTS) $(cookies_test_LDADD) $(LIBS) +date$(EXEEXT): $(date_OBJECTS) $(date_DEPENDENCIES) $(EXTRA_date_DEPENDENCIES) + @rm -f date$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(date_OBJECTS) $(date_LDADD) $(LIBS) +dns$(EXEEXT): $(dns_OBJECTS) $(dns_DEPENDENCIES) $(EXTRA_dns_DEPENDENCIES) + @rm -f dns$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(dns_OBJECTS) $(dns_LDADD) $(LIBS) +forms-test$(EXEEXT): $(forms_test_OBJECTS) $(forms_test_DEPENDENCIES) $(EXTRA_forms_test_DEPENDENCIES) + @rm -f forms-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(forms_test_OBJECTS) $(forms_test_LDADD) $(LIBS) +get$(EXEEXT): $(get_OBJECTS) $(get_DEPENDENCIES) $(EXTRA_get_DEPENDENCIES) + @rm -f get$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(get_OBJECTS) $(get_LDADD) $(LIBS) +getbug$(EXEEXT): $(getbug_OBJECTS) $(getbug_DEPENDENCIES) $(EXTRA_getbug_DEPENDENCIES) + @rm -f getbug$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(getbug_OBJECTS) $(getbug_LDADD) $(LIBS) +header-parsing$(EXEEXT): $(header_parsing_OBJECTS) $(header_parsing_DEPENDENCIES) $(EXTRA_header_parsing_DEPENDENCIES) + @rm -f header-parsing$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(header_parsing_OBJECTS) $(header_parsing_LDADD) $(LIBS) +misc-test$(EXEEXT): $(misc_test_OBJECTS) $(misc_test_DEPENDENCIES) $(EXTRA_misc_test_DEPENDENCIES) + @rm -f misc-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(misc_test_OBJECTS) $(misc_test_LDADD) $(LIBS) +ntlm-test$(EXEEXT): $(ntlm_test_OBJECTS) $(ntlm_test_DEPENDENCIES) 
$(EXTRA_ntlm_test_DEPENDENCIES) + @rm -f ntlm-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(ntlm_test_OBJECTS) $(ntlm_test_LDADD) $(LIBS) +proxy-test$(EXEEXT): $(proxy_test_OBJECTS) $(proxy_test_DEPENDENCIES) $(EXTRA_proxy_test_DEPENDENCIES) + @rm -f proxy-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(proxy_test_OBJECTS) $(proxy_test_LDADD) $(LIBS) +pull-api$(EXEEXT): $(pull_api_OBJECTS) $(pull_api_DEPENDENCIES) $(EXTRA_pull_api_DEPENDENCIES) + @rm -f pull-api$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(pull_api_OBJECTS) $(pull_api_LDADD) $(LIBS) +range-test$(EXEEXT): $(range_test_OBJECTS) $(range_test_DEPENDENCIES) $(EXTRA_range_test_DEPENDENCIES) + @rm -f range-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(range_test_OBJECTS) $(range_test_LDADD) $(LIBS) +redirect-test$(EXEEXT): $(redirect_test_OBJECTS) $(redirect_test_DEPENDENCIES) $(EXTRA_redirect_test_DEPENDENCIES) + @rm -f redirect-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(redirect_test_OBJECTS) $(redirect_test_LDADD) $(LIBS) +requester-test$(EXEEXT): $(requester_test_OBJECTS) $(requester_test_DEPENDENCIES) $(EXTRA_requester_test_DEPENDENCIES) + @rm -f requester-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(requester_test_OBJECTS) $(requester_test_LDADD) $(LIBS) +server-auth-test$(EXEEXT): $(server_auth_test_OBJECTS) $(server_auth_test_DEPENDENCIES) $(EXTRA_server_auth_test_DEPENDENCIES) + @rm -f server-auth-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(server_auth_test_OBJECTS) $(server_auth_test_LDADD) $(LIBS) +simple-httpd$(EXEEXT): $(simple_httpd_OBJECTS) $(simple_httpd_DEPENDENCIES) $(EXTRA_simple_httpd_DEPENDENCIES) + @rm -f simple-httpd$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(simple_httpd_OBJECTS) $(simple_httpd_LDADD) $(LIBS) +simple-proxy$(EXEEXT): $(simple_proxy_OBJECTS) $(simple_proxy_DEPENDENCIES) $(EXTRA_simple_proxy_DEPENDENCIES) + @rm -f simple-proxy$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(simple_proxy_OBJECTS) $(simple_proxy_LDADD) $(LIBS) +sniffing-test$(EXEEXT): $(sniffing_test_OBJECTS) $(sniffing_test_DEPENDENCIES) $(EXTRA_sniffing_test_DEPENDENCIES) + @rm -f 
sniffing-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(sniffing_test_OBJECTS) $(sniffing_test_LDADD) $(LIBS) +ssl-test$(EXEEXT): $(ssl_test_OBJECTS) $(ssl_test_DEPENDENCIES) $(EXTRA_ssl_test_DEPENDENCIES) + @rm -f ssl-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(ssl_test_OBJECTS) $(ssl_test_LDADD) $(LIBS) +streaming-test$(EXEEXT): $(streaming_test_OBJECTS) $(streaming_test_DEPENDENCIES) $(EXTRA_streaming_test_DEPENDENCIES) + @rm -f streaming-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(streaming_test_OBJECTS) $(streaming_test_LDADD) $(LIBS) +timeout-test$(EXEEXT): $(timeout_test_OBJECTS) $(timeout_test_DEPENDENCIES) $(EXTRA_timeout_test_DEPENDENCIES) + @rm -f timeout-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(timeout_test_OBJECTS) $(timeout_test_LDADD) $(LIBS) +uri-parsing$(EXEEXT): $(uri_parsing_OBJECTS) $(uri_parsing_DEPENDENCIES) $(EXTRA_uri_parsing_DEPENDENCIES) + @rm -f uri-parsing$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(uri_parsing_OBJECTS) $(uri_parsing_LDADD) $(LIBS) +xmlrpc-server-test$(EXEEXT): $(xmlrpc_server_test_OBJECTS) $(xmlrpc_server_test_DEPENDENCIES) $(EXTRA_xmlrpc_server_test_DEPENDENCIES) + @rm -f xmlrpc-server-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(xmlrpc_server_test_OBJECTS) $(xmlrpc_server_test_LDADD) $(LIBS) +xmlrpc-test$(EXEEXT): $(xmlrpc_test_OBJECTS) $(xmlrpc_test_DEPENDENCIES) $(EXTRA_xmlrpc_test_DEPENDENCIES) + @rm -f xmlrpc-test$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(xmlrpc_test_OBJECTS) $(xmlrpc_test_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/auth-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/chunk-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/coding-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/connection-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/context-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/continue-test.Po@am__quote@ 
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cookies-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/date.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dns.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/forms-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/get.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getbug.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/header-parsing.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/misc-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ntlm-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proxy-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pull-api.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/range-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/redirect-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/requester-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/server-auth-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple-httpd.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simple-proxy.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sniffing-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ssl-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/streaming-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test-utils.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/timeout-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/uri-parsing.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xmlrpc-server-test.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xmlrpc-test.Po@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< 
+@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { 
for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +check-TESTS: $(TESTS) + @failed=0; all=0; xfail=0; xpass=0; skip=0; \ + srcdir=$(srcdir); export srcdir; \ + list=' $(TESTS) '; \ + $(am__tty_colors); \ + if test -n "$$list"; then \ + for tst in $$list; do \ + if test -f ./$$tst; then dir=./; \ + elif test -f $$tst; then dir=; \ + else dir="$(srcdir)/"; fi; \ + if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xpass=`expr $$xpass + 1`; \ + failed=`expr $$failed + 1`; \ + col=$$red; res=XPASS; \ + ;; \ + *) \ + col=$$grn; res=PASS; \ + ;; \ + esac; \ + elif test $$? 
-ne 77; then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xfail=`expr $$xfail + 1`; \ + col=$$lgn; res=XFAIL; \ + ;; \ + *) \ + failed=`expr $$failed + 1`; \ + col=$$red; res=FAIL; \ + ;; \ + esac; \ + else \ + skip=`expr $$skip + 1`; \ + col=$$blu; res=SKIP; \ + fi; \ + echo "$${col}$$res$${std}: $$tst"; \ + done; \ + if test "$$all" -eq 1; then \ + tests="test"; \ + All=""; \ + else \ + tests="tests"; \ + All="All "; \ + fi; \ + if test "$$failed" -eq 0; then \ + if test "$$xfail" -eq 0; then \ + banner="$$All$$all $$tests passed"; \ + else \ + if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ + banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ + fi; \ + else \ + if test "$$xpass" -eq 0; then \ + banner="$$failed of $$all $$tests failed"; \ + else \ + if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ + banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ + fi; \ + fi; \ + dashes="$$banner"; \ + skipped=""; \ + if test "$$skip" -ne 0; then \ + if test "$$skip" -eq 1; then \ + skipped="($$skip test was not run)"; \ + else \ + skipped="($$skip tests were not run)"; \ + fi; \ + test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$skipped"; \ + fi; \ + report=""; \ + if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ + report="Please report to $(PACKAGE_BUGREPORT)"; \ + test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$report"; \ + fi; \ + dashes=`echo "$$dashes" | sed s/./=/g`; \ + if test "$$failed" -eq 0; then \ + col="$$grn"; \ + else \ + col="$$red"; \ + fi; \ + echo "$${col}$$dashes$${std}"; \ + echo "$${col}$$banner$${std}"; \ + test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ + test -z "$$report" || echo "$${col}$$report$${std}"; \ + echo "$${col}$$dashes$${std}"; \ + test "$$failed" -eq 0; \ + else :; fi + +distdir: 
$(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +@MISSING_REGRESSION_TEST_PACKAGES_FALSE@check-local: +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-TESTS check-local +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + 
+clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: check-am install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-TESTS check-am check-local \ + clean clean-generic clean-libtool clean-noinstPROGRAMS ctags \ + distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + 
maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am + + +@MISSING_REGRESSION_TEST_PACKAGES_TRUE@check-local: check-TESTS +@MISSING_REGRESSION_TEST_PACKAGES_TRUE@ @echo "" +@MISSING_REGRESSION_TEST_PACKAGES_TRUE@ @echo "NOTE: some tests were not run due to missing packages:" $(MISSING_REGRESSION_TEST_PACKAGES) +@MISSING_REGRESSION_TEST_PACKAGES_TRUE@ @echo "" + +kill-httpd: + $(APACHE_HTTPD) -d `pwd` -f httpd.conf -k stop + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/tests/auth-test.c b/tests/auth-test.c index 5b45a73..3bbee87 100644 --- a/tests/auth-test.c +++ b/tests/auth-test.c @@ -304,6 +304,111 @@ bug271540_finished (SoupSession *session, SoupMessage *msg, gpointer data) } static void +do_pipelined_auth_test (const char *base_uri) +{ + SoupSession *session; + SoupMessage *msg; + gboolean authenticated; + char *uri; + int i; + + debug_printf (1, "Testing pipelined auth (bug 271540):\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + + authenticated = FALSE; + g_signal_connect (session, "authenticate", + G_CALLBACK (bug271540_authenticate), &authenticated); + + uri = g_strconcat (base_uri, "Basic/realm1/", NULL); + for (i = 0; i < 10; i++) { + msg = soup_message_new (SOUP_METHOD_GET, uri); + g_object_set_data (G_OBJECT (msg), "#", GINT_TO_POINTER (i + 1)); + g_signal_connect (msg, "wrote_headers", + G_CALLBACK (bug271540_sent), &authenticated); + + soup_session_queue_message (session, msg, + bug271540_finished, &i); + } + g_free (uri); + + loop = g_main_loop_new (NULL, TRUE); + g_main_loop_run (loop); + g_main_loop_unref (loop); + + soup_test_session_abort_unref (session); +} + +/* We test two different things here: + * + * 1. 
If we get a 401 response with "WWW-Authenticate: Digest + * stale=true...", we should retry and succeed *without* the + * session asking for a password again. + * + * 2. If we get a successful response with "Authentication-Info: + * nextnonce=...", we should update the nonce automatically so as + * to avoid getting a stale nonce error on the next request. + * + * In our Apache config, /Digest/realm1 and /Digest/realm1/expire are + * set up to use the same auth info, but only the latter has an + * AuthDigestNonceLifetime (of 2 seconds). The way nonces work in + * Apache, a nonce received from /Digest/realm1 will still expire in + * /Digest/realm1/expire, but it won't issue a nextnonce for a request + * in /Digest/realm1. This lets us test both behaviors. + * + * The expected conversation is: + * + * First message + * GET /Digest/realm1 + * + * 401 Unauthorized + * WWW-Authenticate: Digest nonce=A + * + * [emit 'authenticate'] + * + * GET /Digest/realm1 + * Authorization: Digest nonce=A + * + * 200 OK + * [No Authentication-Info] + * + * [sleep 2 seconds: nonce A is no longer valid, but we have no + * way of knowing that] + * + * Second message + * GET /Digest/realm1/expire/ + * Authorization: Digest nonce=A + * + * 401 Unauthorized + * WWW-Authenticate: Digest stale=true nonce=B + * + * GET /Digest/realm1/expire/ + * Authorization: Digest nonce=B + * + * 200 OK + * Authentication-Info: nextnonce=C + * + * [sleep 1 second] + * + * Third message + * GET /Digest/realm1/expire/ + * Authorization: Digest nonce=C + * [nonce=B would work here too] + * + * 200 OK + * Authentication-Info: nextnonce=D + * + * [sleep 1 second; nonces B and C are no longer valid, but D is] + * + * Fourth message + * GET /Digest/realm1/expire/ + * Authorization: Digest nonce=D + * + * 200 OK + * Authentication-Info: nextnonce=D + * + */ + +static void digest_nonce_authenticate (SoupSession *session, SoupMessage *msg, SoupAuth *auth, gboolean retrying, gpointer data) { @@ -359,6 +464,31 @@ 
do_digest_nonce_test (SoupSession *session, g_object_unref (msg); } +static void +do_digest_expiration_test (const char *base_uri) +{ + SoupSession *session; + char *uri; + + debug_printf (1, "\nTesting digest nonce expiration:\n"); + + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + + uri = g_strconcat (base_uri, "Digest/realm1/", NULL); + do_digest_nonce_test (session, "First", uri, TRUE, TRUE); + g_free (uri); + sleep (2); + uri = g_strconcat (base_uri, "Digest/realm1/expire/", NULL); + do_digest_nonce_test (session, "Second", uri, TRUE, FALSE); + sleep (1); + do_digest_nonce_test (session, "Third", uri, FALSE, FALSE); + sleep (1); + do_digest_nonce_test (session, "Fourth", uri, FALSE, FALSE); + g_free (uri); + + soup_test_session_abort_unref (session); +} + /* Async auth test. We queue three requests to /Basic/realm1, ensuring * that they are sent in order. The first and third ones will be * paused from the authentication callback. The second will be allowed @@ -450,6 +580,7 @@ do_async_auth_test (const char *base_uri) debug_printf (1, "\nTesting async auth:\n"); + loop = g_main_loop_new (NULL, TRUE); session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); remaining = 0; @@ -615,6 +746,7 @@ do_async_auth_test (const char *base_uri) g_object_unref (msg1); g_free (uri); + g_main_loop_unref (loop); } typedef struct { @@ -853,6 +985,102 @@ do_select_auth_test (void) soup_test_server_quit_unref (server); } +static void +sneakily_close_connection (SoupMessage *msg, gpointer user_data) +{ + /* Sneakily close the connection after the response, by + * tricking soup-message-io into thinking that had been + * the plan all along. 
+ */ + soup_message_headers_append (msg->response_headers, + "Connection", "close"); +} + +static void +auth_close_request_started (SoupServer *server, SoupMessage *msg, + SoupClientContext *client, gpointer user_data) +{ + g_signal_connect (msg, "wrote-headers", + G_CALLBACK (sneakily_close_connection), NULL); +} + +typedef struct { + SoupSession *session; + SoupMessage *msg; + SoupAuth *auth; +} AuthCloseData; + +static gboolean +auth_close_idle_authenticate (gpointer user_data) +{ + AuthCloseData *acd = user_data; + + soup_auth_authenticate (acd->auth, "user", "good-basic"); + soup_session_unpause_message (acd->session, acd->msg); + + g_object_unref (acd->auth); + return FALSE; +} + +static void +auth_close_authenticate (SoupSession *session, SoupMessage *msg, + SoupAuth *auth, gboolean retrying, gpointer data) +{ + AuthCloseData *acd = data; + + soup_session_pause_message (session, msg); + acd->auth = g_object_ref (auth); + g_idle_add (auth_close_idle_authenticate, acd); +} + +static void +do_auth_close_test (void) +{ + SoupServer *server; + SoupAuthDomain *basic_auth_domain; + SoupURI *uri; + AuthCloseData acd; + + debug_printf (1, "\nTesting auth when server times out connection:\n"); + + server = soup_test_server_new (FALSE); + soup_server_add_handler (server, NULL, + server_callback, NULL, NULL); + + uri = soup_uri_new ("http://127.0.0.1/close"); + soup_uri_set_port (uri, soup_server_get_port (server)); + + basic_auth_domain = soup_auth_domain_basic_new ( + SOUP_AUTH_DOMAIN_REALM, "auth-test", + SOUP_AUTH_DOMAIN_ADD_PATH, "/", + SOUP_AUTH_DOMAIN_BASIC_AUTH_CALLBACK, server_basic_auth_callback, + NULL); + soup_server_add_auth_domain (server, basic_auth_domain); + g_object_unref (basic_auth_domain); + + g_signal_connect (server, "request-started", + G_CALLBACK (auth_close_request_started), NULL); + + acd.session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + g_signal_connect (acd.session, "authenticate", + G_CALLBACK (auth_close_authenticate), 
&acd); + + acd.msg = soup_message_new_from_uri ("GET", uri); + soup_uri_free (uri); + soup_session_send_message (acd.session, acd.msg); + + if (acd.msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Final status wrong: expected %u, got %u %s\n", + SOUP_STATUS_OK, acd.msg->status_code, + acd.msg->reason_phrase); + errors++; + } + + g_object_unref (acd.msg); + soup_test_session_abort_unref (acd.session); + soup_test_server_quit_unref (server); +} + static SoupAuthTest relogin_tests[] = { { "Auth provided via URL, should succeed", "Basic/realm12/", "1", TRUE, "01", SOUP_STATUS_OK }, @@ -954,12 +1182,8 @@ do_batch_tests (const gchar *base_uri_str, gint ntests) int main (int argc, char **argv) { - SoupSession *session; - SoupMessage *msg; const char *base_uri; - char *uri; - gboolean authenticated; - int i, ntests; + int ntests; test_init (argc, argv, NULL); apache_init (); @@ -976,130 +1200,12 @@ main (int argc, char **argv) ntests = G_N_ELEMENTS (relogin_tests); do_batch_tests (base_uri, ntests); - /* And now for some regression tests */ - loop = g_main_loop_new (NULL, TRUE); - - debug_printf (1, "Testing pipelined auth (bug 271540):\n"); - session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); - - authenticated = FALSE; - g_signal_connect (session, "authenticate", - G_CALLBACK (bug271540_authenticate), &authenticated); - - uri = g_strconcat (base_uri, "Basic/realm1/", NULL); - for (i = 0; i < 10; i++) { - msg = soup_message_new (SOUP_METHOD_GET, uri); - g_object_set_data (G_OBJECT (msg), "#", GINT_TO_POINTER (i + 1)); - g_signal_connect (msg, "wrote_headers", - G_CALLBACK (bug271540_sent), &authenticated); - - soup_session_queue_message (session, msg, - bug271540_finished, &i); - } - g_free (uri); - - g_main_loop_run (loop); - soup_test_session_abort_unref (session); - - debug_printf (1, "\nTesting digest nonce expiration:\n"); - - /* We test two different things here: - * - * 1. 
If we get a 401 response with - * "WWW-Authenticate: Digest stale=true...", we should - * retry and succeed *without* the session asking for a - * password again. - * - * 2. If we get a successful response with - * "Authentication-Info: nextnonce=...", we should update - * the nonce automatically so as to avoid getting a - * stale nonce error on the next request. - * - * In our Apache config, /Digest/realm1 and - * /Digest/realm1/expire are set up to use the same auth info, - * but only the latter has an AuthDigestNonceLifetime (of 2 - * seconds). The way nonces work in Apache, a nonce received - * from /Digest/realm1 will still expire in - * /Digest/realm1/expire, but it won't issue a nextnonce for a - * request in /Digest/realm1. This lets us test both - * behaviors. - * - * The expected conversation is: - * - * First message - * GET /Digest/realm1 - * - * 401 Unauthorized - * WWW-Authenticate: Digest nonce=A - * - * [emit 'authenticate'] - * - * GET /Digest/realm1 - * Authorization: Digest nonce=A - * - * 200 OK - * [No Authentication-Info] - * - * [sleep 2 seconds: nonce A is no longer valid, but we have no - * way of knowing that] - * - * Second message - * GET /Digest/realm1/expire/ - * Authorization: Digest nonce=A - * - * 401 Unauthorized - * WWW-Authenticate: Digest stale=true nonce=B - * - * GET /Digest/realm1/expire/ - * Authorization: Digest nonce=B - * - * 200 OK - * Authentication-Info: nextnonce=C - * - * [sleep 1 second] - * - * Third message - * GET /Digest/realm1/expire/ - * Authorization: Digest nonce=C - * [nonce=B would work here too] - * - * 200 OK - * Authentication-Info: nextnonce=D - * - * [sleep 1 second; nonces B and C are no longer valid, but D is] - * - * Fourth message - * GET /Digest/realm1/expire/ - * Authorization: Digest nonce=D - * - * 200 OK - * Authentication-Info: nextnonce=D - * - */ - - session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); - - uri = g_strconcat (base_uri, "Digest/realm1/", NULL); - 
do_digest_nonce_test (session, "First", uri, TRUE, TRUE); - g_free (uri); - sleep (2); - uri = g_strconcat (base_uri, "Digest/realm1/expire/", NULL); - do_digest_nonce_test (session, "Second", uri, TRUE, FALSE); - sleep (1); - do_digest_nonce_test (session, "Third", uri, FALSE, FALSE); - sleep (1); - do_digest_nonce_test (session, "Fourth", uri, FALSE, FALSE); - g_free (uri); - - soup_test_session_abort_unref (session); - - /* Async auth */ + /* Other regression tests */ + do_pipelined_auth_test (base_uri); + do_digest_expiration_test (base_uri); do_async_auth_test (base_uri); - - /* Selecting correct auth when multiple auth types are available */ do_select_auth_test (); - - g_main_loop_unref (loop); + do_auth_close_test (); test_cleanup (); return errors != 0; diff --git a/tests/chunk-test.c b/tests/chunk-test.c index 435f7fa..3805fb7 100644 --- a/tests/chunk-test.c +++ b/tests/chunk-test.c @@ -403,6 +403,60 @@ do_temporary_test (SoupSession *session, SoupURI *base_uri) g_object_unref (msg); } +#define LARGE_CHUNK_SIZE 1000000 + +typedef struct { + SoupBuffer *buf; + gsize offset; +} LargeChunkData; + +static void +large_wrote_body_data (SoupMessage *msg, SoupBuffer *chunk, gpointer user_data) +{ + LargeChunkData *lcd = user_data; + + if (memcmp (chunk->data, lcd->buf->data + lcd->offset, chunk->length) != 0) { + debug_printf (1, " chunk data mismatch at %ld\n", (long)lcd->offset); + errors++; + } else + debug_printf (2, " chunk data match at %ld\n", (long)lcd->offset); + lcd->offset += chunk->length; +} + +static void +do_large_chunk_test (SoupSession *session, SoupURI *base_uri) +{ + SoupMessage *msg; + char *buf_data; + int i; + LargeChunkData lcd; + + debug_printf (1, "PUT w/ large chunk\n"); + + msg = soup_message_new_from_uri ("PUT", base_uri); + + buf_data = g_malloc0 (LARGE_CHUNK_SIZE); + for (i = 0; i < LARGE_CHUNK_SIZE; i++) + buf_data[i] = i & 0xFF; + lcd.buf = soup_buffer_new (SOUP_MEMORY_TAKE, buf_data, LARGE_CHUNK_SIZE); + lcd.offset = 0; + 
soup_message_body_append_buffer (msg->request_body, lcd.buf); + soup_message_body_set_accumulate (msg->request_body, FALSE); + + g_signal_connect (msg, "wrote_body_data", + G_CALLBACK (large_wrote_body_data), &lcd); + soup_session_send_message (session, msg); + + if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { + debug_printf (1, " message failed: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + + soup_buffer_free (lcd.buf); + g_object_unref (msg); +} + static void do_chunk_tests (SoupURI *base_uri) { @@ -422,6 +476,8 @@ do_chunk_tests (SoupURI *base_uri) do_response_test (session, base_uri); debug_printf (2, "\n\n"); do_temporary_test (session, base_uri); + debug_printf (2, "\n\n"); + do_large_chunk_test (session, base_uri); soup_test_session_abort_unref (session); } @@ -434,9 +490,7 @@ server_callback (SoupServer *server, SoupMessage *msg, char *md5; if (g_str_has_prefix (path, "/redirect")) { - soup_message_set_status (msg, SOUP_STATUS_FOUND); - soup_message_headers_replace (msg->response_headers, - "Location", "/"); + soup_message_set_redirect (msg, SOUP_STATUS_FOUND, "/"); return; } diff --git a/tests/coding-test.c b/tests/coding-test.c index 14b046b..2ac0373 100644 --- a/tests/coding-test.c +++ b/tests/coding-test.c @@ -1,6 +1,7 @@ /* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ /* * Copyright (C) 2007 Red Hat, Inc. + * Copyright (C) 2011 Igalia, S.L. 
*/ #include @@ -43,17 +44,40 @@ server_callback (SoupServer *server, SoupMessage *msg, else codings = NULL; - if (codings && g_slist_find_custom (codings, "gzip", (GCompareFunc)g_ascii_strcasecmp)) { - file = g_strdup_printf (SRCDIR "/resources%s.gz", path); - if (g_file_test (file, G_FILE_TEST_EXISTS)) { - soup_message_headers_append (msg->response_headers, - "Content-Encoding", - "gzip"); - } else { - g_free (file); - file = NULL; + if (codings) { + gboolean claim_deflate, claim_gzip; + const char *file_path = NULL, *encoding = NULL; + + claim_deflate = g_slist_find_custom (codings, "deflate", (GCompareFunc)g_ascii_strcasecmp) != NULL; + claim_gzip = g_slist_find_custom (codings, "gzip", (GCompareFunc)g_ascii_strcasecmp) != NULL; + + if (claim_gzip && (!claim_deflate || + (!soup_header_contains (options, "prefer-deflate-zlib") && + !soup_header_contains (options, "prefer-deflate-raw")))) { + file_path = SRCDIR "/resources%s.gz"; + encoding = "gzip"; + } else if (claim_deflate) { + if (soup_header_contains (options, "prefer-deflate-raw")) { + file_path = SRCDIR "/resources%s.raw"; + encoding = "deflate"; + } else { + file_path = SRCDIR "/resources%s.zlib"; + encoding = "deflate"; + } + } + if (file_path && encoding) { + file = g_strdup_printf (file_path, path); + if (g_file_test (file, G_FILE_TEST_EXISTS)) { + soup_message_headers_append (msg->response_headers, + "Content-Encoding", + encoding); + } else { + g_free (file); + file = NULL; + } } } + soup_header_free_list (codings); if (!file) @@ -70,9 +94,15 @@ server_callback (SoupServer *server, SoupMessage *msg, g_free (file); if (soup_header_contains (options, "force-encode")) { + const gchar *encoding = "gzip"; + + if (soup_header_contains (options, "prefer-deflate-zlib") || + soup_header_contains (options, "prefer-deflate-raw")) + encoding = "deflate"; + soup_message_headers_replace (msg->response_headers, "Content-Encoding", - "gzip"); + encoding); } /* Content-Type matches the "real" format, not the sent 
format */ @@ -96,20 +126,21 @@ server_callback (SoupServer *server, SoupMessage *msg, } } +typedef enum { + NO_CHECK, + EXPECT_DECODED, + EXPECT_NOT_DECODED +} MessageContentStatus; + static void -do_coding_test (void) +do_single_coding_test (SoupSession *session, + SoupMessage *msg, + const char *expected_encoding, + const char *expected_content_type, + MessageContentStatus status) { - SoupSession *session; - SoupMessage *msg, *msgz, *msgj, *msge; - SoupURI *uri; const char *coding, *type; - session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); - uri = soup_uri_new_with_base (base_uri, "/mbox"); - - - debug_printf (1, "GET /mbox, plain\n"); - msg = soup_message_new_from_uri ("GET", uri); soup_session_send_message (session, msg); if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { debug_printf (1, " Unexpected status %d %s\n", @@ -117,226 +148,156 @@ do_coding_test (void) errors++; } coding = soup_message_headers_get_one (msg->response_headers, "Content-Encoding"); - if (coding) { - debug_printf (1, " Unexpected Content-Encoding: %s\n", - coding); - errors++; + if (expected_encoding) { + if (!coding || g_ascii_strcasecmp (coding, expected_encoding) != 0) { + debug_printf (1, " Unexpected Content-Encoding: %s\n", + coding ? 
coding : "(none)"); + errors++; + } + } else { + if (coding) { + debug_printf (1, " Unexpected Content-Encoding: %s\n", + coding); + errors++; + } } - if (soup_message_get_flags (msg) & SOUP_MESSAGE_CONTENT_DECODED) { - debug_printf (1, " SOUP_MESSAGE_CONTENT_DECODED set!\n"); - errors++; + if (status != NO_CHECK) { + if (status == EXPECT_DECODED) { + if (!(soup_message_get_flags (msg) & SOUP_MESSAGE_CONTENT_DECODED)) { + debug_printf (1, " SOUP_MESSAGE_CONTENT_DECODED not set!\n"); + errors++; + } + } else { + if (soup_message_get_flags (msg) & SOUP_MESSAGE_CONTENT_DECODED) { + debug_printf (1, " SOUP_MESSAGE_CONTENT_DECODED set!\n"); + errors++; + } + } } type = soup_message_headers_get_one (msg->response_headers, "Content-Type"); - if (!type || g_ascii_strcasecmp (type, "text/plain") != 0) { + if (!type || g_ascii_strcasecmp (type, expected_content_type) != 0) { debug_printf (1, " Unexpected Content-Type: %s\n", type ? type : "(none)"); errors++; } +} - debug_printf (1, "GET /mbox, Accept-Encoding: gzip\n"); - soup_session_add_feature_by_type (session, SOUP_TYPE_CONTENT_DECODER); - msgz = soup_message_new_from_uri ("GET", uri); - soup_session_send_message (session, msgz); - if (!SOUP_STATUS_IS_SUCCESSFUL (msgz->status_code)) { - debug_printf (1, " Unexpected status %d %s\n", - msgz->status_code, msgz->reason_phrase); - errors++; - } - coding = soup_message_headers_get_one (msgz->response_headers, "Content-Encoding"); - if (!coding || g_ascii_strcasecmp (coding, "gzip") != 0) { - debug_printf (1, " Unexpected Content-Encoding: %s\n", - coding ? 
coding : "(none)"); - errors++; - } - if (!(soup_message_get_flags (msgz) & SOUP_MESSAGE_CONTENT_DECODED)) { - debug_printf (1, " SOUP_MESSAGE_CONTENT_DECODED not set!\n"); +static void +check_msg_bodies (SoupMessage *msg1, + SoupMessage *msg2, + const char *msg1_type, + const char *msg2_type) +{ + if (msg1->response_body->length != msg2->response_body->length) { + debug_printf (1, " Message length mismatch: %lu (%s) vs %lu (%s)\n", + (gulong)msg1->response_body->length, + msg1_type, + (gulong)msg2->response_body->length, + msg2_type); errors++; - } - type = soup_message_headers_get_one (msgz->response_headers, "Content-Type"); - if (!type || g_ascii_strcasecmp (type, "text/plain") != 0) { - debug_printf (1, " Unexpected Content-Type: %s\n", - type ? type : "(none)"); + } else if (memcmp (msg1->response_body->data, + msg2->response_body->data, + msg1->response_body->length) != 0) { + debug_printf (1, " Message data mismatch (%s/%s)\n", + msg1_type, msg2_type); errors++; } +} - if (msg->response_body->length != msgz->response_body->length) { - debug_printf (1, " Message length mismatch: %lu (plain) vs %lu (compressed)\n", - (gulong)msg->response_body->length, - (gulong)msgz->response_body->length); - errors++; - } else if (memcmp (msg->response_body->data, - msgz->response_body->data, - msg->response_body->length) != 0) { - debug_printf (1, " Message data mismatch (plain/compressed)\n"); - errors++; - } +static void +do_coding_test (void) +{ + SoupSession *session; + SoupMessage *msg, *msgz, *msgj, *msge, *msgzl, *msgzlj, *msgzle, *msgzlr, *msgzlre; + SoupURI *uri; + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + uri = soup_uri_new_with_base (base_uri, "/mbox"); + /* Plain text data, no claim */ + debug_printf (1, "GET /mbox, plain\n"); + msg = soup_message_new_from_uri ("GET", uri); + do_single_coding_test (session, msg, NULL, "text/plain", EXPECT_NOT_DECODED); + + /* Plain text data, claim gzip */ + debug_printf (1, "GET /mbox, Accept-Encoding: 
gzip\n"); + soup_session_add_feature_by_type (session, SOUP_TYPE_CONTENT_DECODER); + msgz = soup_message_new_from_uri ("GET", uri); + do_single_coding_test (session, msgz, "gzip", "text/plain", EXPECT_DECODED); + check_msg_bodies (msg, msgz, "plain", "compressed"); + + /* Plain text data, claim gzip w/ junk */ debug_printf (1, "GET /mbox, Accept-Encoding: gzip, plus trailing junk\n"); msgj = soup_message_new_from_uri ("GET", uri); soup_message_headers_append (msgj->request_headers, "X-Test-Options", "trailing-junk"); - soup_session_send_message (session, msgj); - if (!SOUP_STATUS_IS_SUCCESSFUL (msgj->status_code)) { - debug_printf (1, " Unexpected status %d %s\n", - msgj->status_code, msgj->reason_phrase); - errors++; - } - coding = soup_message_headers_get_one (msgj->response_headers, "Content-Encoding"); - if (!coding || g_ascii_strcasecmp (coding, "gzip") != 0) { - debug_printf (1, " Unexpected Content-Encoding: %s\n", - coding ? coding : "(none)"); - errors++; - } - if (!(soup_message_get_flags (msgj) & SOUP_MESSAGE_CONTENT_DECODED)) { - debug_printf (1, " SOUP_MESSAGE_CONTENT_DECODED not set!\n"); - errors++; - } - type = soup_message_headers_get_one (msgj->response_headers, "Content-Type"); - if (!type || g_ascii_strcasecmp (type, "text/plain") != 0) { - debug_printf (1, " Unexpected Content-Type: %s\n", - type ? 
type : "(none)"); - errors++; - } - - if (msg->response_body->length != msgj->response_body->length) { - debug_printf (1, " Message length mismatch: %lu (plain) vs %lu (compressed w/ junk)\n", - (gulong)msg->response_body->length, - (gulong)msgj->response_body->length); - errors++; - } else if (memcmp (msg->response_body->data, - msgj->response_body->data, - msg->response_body->length) != 0) { - debug_printf (1, " Message data mismatch (plain/compressed w/ junk)\n"); - errors++; - } - + do_single_coding_test (session, msgj, "gzip", "text/plain", EXPECT_DECODED); + check_msg_bodies (msg, msgj, "plain", "compressed w/ junk"); + /* Plain text data, claim gzip with server error */ debug_printf (1, "GET /mbox, Accept-Encoding: gzip, with server error\n"); msge = soup_message_new_from_uri ("GET", uri); soup_message_headers_append (msge->request_headers, "X-Test-Options", "force-encode"); - soup_session_send_message (session, msge); - if (!SOUP_STATUS_IS_SUCCESSFUL (msge->status_code)) { - debug_printf (1, " Unexpected status %d %s\n", - msge->status_code, msge->reason_phrase); - errors++; - } - coding = soup_message_headers_get_one (msge->response_headers, "Content-Encoding"); - if (!coding || g_ascii_strcasecmp (coding, "gzip") != 0) { - debug_printf (1, " Unexpected Content-Encoding: %s\n", - coding ? coding : "(none)"); - errors++; - } - /* Since the content wasn't actually gzip-encoded, decoding it - * should have failed and so the flag won't be set. - */ - if (soup_message_get_flags (msge) & SOUP_MESSAGE_CONTENT_DECODED) { - debug_printf (1, " SOUP_MESSAGE_CONTENT_DECODED set!\n"); - errors++; - } - type = soup_message_headers_get_one (msge->response_headers, "Content-Type"); - if (!type || g_ascii_strcasecmp (type, "text/plain") != 0) { - debug_printf (1, " Unexpected Content-Type: %s\n", - type ? 
type : "(none)"); - errors++; - } + do_single_coding_test (session, msge, "gzip", "text/plain", EXPECT_NOT_DECODED); /* Failed content-decoding should have left the body untouched * from what the server sent... which happens to be the * uncompressed data. */ - if (msg->response_body->length != msge->response_body->length) { - debug_printf (1, " Message length mismatch: %lu (plain) vs %lu (mis-encoded)\n", - (gulong)msg->response_body->length, - (gulong)msge->response_body->length); - errors++; - } else if (memcmp (msg->response_body->data, - msge->response_body->data, - msg->response_body->length) != 0) { - debug_printf (1, " Message data mismatch (plain/misencoded)\n"); - errors++; - } - + check_msg_bodies (msg, msge, "plain", "mis-encoded"); + + /* Plain text data, claim deflate */ + debug_printf (1, "GET /mbox, Accept-Encoding: deflate\n"); + msgzl = soup_message_new_from_uri ("GET", uri); + soup_message_headers_append (msgzl->request_headers, + "X-Test-Options", "prefer-deflate-zlib"); + do_single_coding_test (session, msgzl, "deflate", "text/plain", EXPECT_DECODED); + check_msg_bodies (msg, msgzl, "plain", "compressed"); + + /* Plain text data, claim deflate w/ junk */ + debug_printf (1, "GET /mbox, Accept-Encoding: deflate, plus trailing junk\n"); + msgzlj = soup_message_new_from_uri ("GET", uri); + soup_message_headers_append (msgzlj->request_headers, + "X-Test-Options", "prefer-deflate-zlib, trailing-junk"); + do_single_coding_test (session, msgzlj, "deflate", "text/plain", EXPECT_DECODED); + check_msg_bodies (msg, msgzlj, "plain", "compressed w/ junk"); + + /* Plain text data, claim deflate with server error */ + debug_printf (1, "GET /mbox, Accept-Encoding: deflate, with server error\n"); + msgzle = soup_message_new_from_uri ("GET", uri); + soup_message_headers_append (msgzle->request_headers, + "X-Test-Options", "force-encode, prefer-deflate-zlib"); + do_single_coding_test (session, msgzle, "deflate", "text/plain", EXPECT_NOT_DECODED); + check_msg_bodies 
(msg, msgzle, "plain", "mis-encoded"); + + /* Plain text data, claim deflate (no zlib headers)*/ + debug_printf (1, "GET /mbox, Accept-Encoding: deflate (raw data)\n"); + msgzlr = soup_message_new_from_uri ("GET", uri); + soup_message_headers_append (msgzlr->request_headers, + "X-Test-Options", "prefer-deflate-raw"); + do_single_coding_test (session, msgzlr, "deflate", "text/plain", EXPECT_DECODED); + check_msg_bodies (msg, msgzlr, "plain", "compressed"); + + /* Plain text data, claim deflate with server error */ + debug_printf (1, "GET /mbox, Accept-Encoding: deflate (raw data), with server error\n"); + msgzlre = soup_message_new_from_uri ("GET", uri); + soup_message_headers_append (msgzlre->request_headers, + "X-Test-Options", "force-encode, prefer-deflate-raw"); + do_single_coding_test (session, msgzlre, "deflate", "text/plain", EXPECT_NOT_DECODED); + check_msg_bodies (msg, msgzlre, "plain", "mis-encoded"); g_object_unref (msg); + g_object_unref (msgzlre); + g_object_unref (msgzlr); + g_object_unref (msgzlj); + g_object_unref (msgzle); + g_object_unref (msgzl); g_object_unref (msgz); g_object_unref (msgj); g_object_unref (msge); soup_uri_free (uri); - - uri = soup_uri_new_with_base (base_uri, "/mbox.gz"); - - debug_printf (1, "GET /mbox.gz, Accept-Encoding: gzip\n"); - soup_session_add_feature_by_type (session, SOUP_TYPE_CONTENT_DECODER); - msgz = soup_message_new_from_uri ("GET", uri); - soup_session_send_message (session, msgz); - if (!SOUP_STATUS_IS_SUCCESSFUL (msgz->status_code)) { - debug_printf (1, " Unexpected status %d %s\n", - msgz->status_code, msgz->reason_phrase); - errors++; - } - coding = soup_message_headers_get_one (msgz->response_headers, "Content-Encoding"); - if (coding) { - debug_printf (1, " Unexpected Content-Encoding: %s\n", coding); - errors++; - } - type = soup_message_headers_get_one (msgz->response_headers, "Content-Type"); - if (!type || g_ascii_strcasecmp (type, "application/gzip") != 0) { - debug_printf (1, " Unexpected 
Content-Type: %s\n", - type ? type : "(none)"); - errors++; - } - - - debug_printf (1, "GET /mbox.gz, Accept-Encoding: gzip, with server error\n"); - msge = soup_message_new_from_uri ("GET", uri); - soup_message_headers_append (msge->request_headers, - "X-Test-Options", "force-encode"); - soup_session_send_message (session, msge); - if (!SOUP_STATUS_IS_SUCCESSFUL (msge->status_code)) { - debug_printf (1, " Unexpected status %d %s\n", - msge->status_code, msge->reason_phrase); - errors++; - } - coding = soup_message_headers_get_one (msge->response_headers, "Content-Encoding"); - if (!coding || g_ascii_strcasecmp (coding, "gzip") != 0) { - debug_printf (1, " Unexpected Content-Encoding: %s\n", - coding ? coding : "(none)"); - errors++; - } - /* SoupContentDecoder should have recognized the bug and thus - * not decoded it - */ - if (soup_message_get_flags (msge) & SOUP_MESSAGE_CONTENT_DECODED) { - debug_printf (1, " SOUP_MESSAGE_CONTENT_DECODED set!\n"); - errors++; - } - type = soup_message_headers_get_one (msge->response_headers, "Content-Type"); - if (!type || g_ascii_strcasecmp (type, "application/gzip") != 0) { - debug_printf (1, " Unexpected Content-Type: %s\n", - type ? 
type : "(none)"); - errors++; - } - - if (msgz->response_body->length != msge->response_body->length) { - debug_printf (1, " Message length mismatch: %lu (.gz) vs %lu (mis-encoded)\n", - (gulong)msgz->response_body->length, - (gulong)msge->response_body->length); - errors++; - } else if (memcmp (msgz->response_body->data, - msge->response_body->data, - msgz->response_body->length) != 0) { - debug_printf (1, " Message data mismatch (gz/misencoded)\n"); - errors++; - } - - - g_object_unref (msgz); - g_object_unref (msge); - soup_uri_free (uri); - soup_test_session_abort_unref (session); } diff --git a/tests/connection-test.c b/tests/connection-test.c new file mode 100644 index 0000000..545bf10 --- /dev/null +++ b/tests/connection-test.c @@ -0,0 +1,592 @@ +/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ +/* + * Copyright 2007-2012 Red Hat, Inc. + */ + +#include + +#include + +#include "test-utils.h" + +SoupServer *server; +SoupURI *base_uri; +GMutex server_mutex; + +static void +forget_close (SoupMessage *msg, gpointer user_data) +{ + soup_message_headers_remove (msg->response_headers, "Connection"); +} + +static void +close_socket (SoupMessage *msg, gpointer user_data) +{ + SoupSocket *sock = user_data; + + soup_socket_disconnect (sock); + + /* But also add the missing data to the message now, so + * SoupServer can clean up after itself properly. 
+ */ + soup_message_body_append (msg->response_body, SOUP_MEMORY_STATIC, + "foo", 3); +} + +static void +timeout_socket (SoupSocket *sock, gpointer user_data) +{ + soup_socket_disconnect (sock); +} + +static void +timeout_request_started (SoupServer *server, SoupMessage *msg, + SoupClientContext *client, gpointer user_data) +{ + SoupSocket *sock; + GMainContext *context = soup_server_get_async_context (server); + guint readable; + + sock = soup_client_context_get_socket (client); + readable = g_signal_connect (sock, "readable", + G_CALLBACK (timeout_socket), NULL); + while (soup_socket_is_connected (sock)) + g_main_context_iteration (context, TRUE); + g_signal_handler_disconnect (sock, readable); + g_signal_handlers_disconnect_by_func (server, timeout_request_started, NULL); +} + +static void +setup_timeout_persistent (SoupServer *server, SoupSocket *sock) +{ + char buf[1]; + gsize nread; + + /* In order for the test to work correctly, we have to + * close the connection *after* the client side writes + * the request. To ensure that this happens reliably, + * regardless of thread scheduling, we: + * + * 1. Try to read off the socket now, knowing it will + * fail (since the client is waiting for us to + * return a response). This will cause it to + * emit "readable" later. + * 2. Connect to the server's request-started signal. + * 3. Run an inner main loop from that signal handler + * until the socket emits "readable". (If we don't + * do this then it's possible the client's next + * request would be ready before we returned to + * the main loop, and so the signal would never be + * emitted.) + * 4. Close the socket. 
+ */ + + soup_socket_read (sock, buf, 1, &nread, NULL, NULL); + g_signal_connect (server, "request-started", + G_CALLBACK (timeout_request_started), NULL); +} + +static void +server_callback (SoupServer *server, SoupMessage *msg, + const char *path, GHashTable *query, + SoupClientContext *context, gpointer data) +{ + /* The way this gets used in the tests, we don't actually + * need to hold it through the whole function, so it's simpler + * to just release it right away. + */ + g_mutex_lock (&server_mutex); + g_mutex_unlock (&server_mutex); + + if (msg->method != SOUP_METHOD_GET && msg->method != SOUP_METHOD_POST) { + soup_message_set_status (msg, SOUP_STATUS_NOT_IMPLEMENTED); + return; + } + + if (g_str_has_prefix (path, "/content-length/")) { + gboolean too_long = strcmp (path, "/content-length/long") == 0; + gboolean no_close = strcmp (path, "/content-length/noclose") == 0; + + soup_message_set_status (msg, SOUP_STATUS_OK); + soup_message_set_response (msg, "text/plain", + SOUP_MEMORY_STATIC, "foobar", 6); + if (too_long) + soup_message_headers_set_content_length (msg->response_headers, 9); + soup_message_headers_append (msg->response_headers, + "Connection", "close"); + + if (too_long) { + SoupSocket *sock; + + /* soup-message-io will wait for us to add + * another chunk after the first, to fill out + * the declared Content-Length. Instead, we + * forcibly close the socket at that point. + */ + sock = soup_client_context_get_socket (context); + g_signal_connect (msg, "wrote-chunk", + G_CALLBACK (close_socket), sock); + } else if (no_close) { + /* Remove the 'Connection: close' after writing + * the headers, so that when we check it after + * writing the body, we'll think we aren't + * supposed to close it. 
+ */ + g_signal_connect (msg, "wrote-headers", + G_CALLBACK (forget_close), NULL); + } + return; + } + + if (!strcmp (path, "/timeout-persistent")) { + SoupSocket *sock; + + sock = soup_client_context_get_socket (context); + setup_timeout_persistent (server, sock); + } + + soup_message_set_status (msg, SOUP_STATUS_OK); + soup_message_set_response (msg, "text/plain", + SOUP_MEMORY_STATIC, "index", 5); + return; +} + +static void +do_content_length_framing_test (void) +{ + SoupSession *session; + SoupMessage *msg; + SoupURI *request_uri; + goffset declared_length; + + debug_printf (1, "\nInvalid Content-Length framing tests\n"); + + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + + debug_printf (1, " Content-Length larger than message body length\n"); + request_uri = soup_uri_new_with_base (base_uri, "/content-length/long"); + msg = soup_message_new_from_uri ("GET", request_uri); + soup_session_send_message (session, msg); + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Unexpected response: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } else { + declared_length = soup_message_headers_get_content_length (msg->response_headers); + debug_printf (2, " Content-Length: %lu, body: %s\n", + (gulong)declared_length, msg->response_body->data); + if (msg->response_body->length >= declared_length) { + debug_printf (1, " Body length %lu >= declared length %lu\n", + (gulong)msg->response_body->length, + (gulong)declared_length); + errors++; + } + } + soup_uri_free (request_uri); + g_object_unref (msg); + + debug_printf (1, " Server claims 'Connection: close' but doesn't\n"); + request_uri = soup_uri_new_with_base (base_uri, "/content-length/noclose"); + msg = soup_message_new_from_uri ("GET", request_uri); + soup_session_send_message (session, msg); + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Unexpected response: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } else { + declared_length = 
soup_message_headers_get_content_length (msg->response_headers); + debug_printf (2, " Content-Length: %lu, body: %s\n", + (gulong)declared_length, msg->response_body->data); + if (msg->response_body->length != declared_length) { + debug_printf (1, " Body length %lu != declared length %lu\n", + (gulong)msg->response_body->length, + (gulong)declared_length); + errors++; + } + } + soup_uri_free (request_uri); + g_object_unref (msg); + + soup_test_session_abort_unref (session); +} + +static void +request_started_socket_collector (SoupSession *session, SoupMessage *msg, + SoupSocket *socket, gpointer user_data) +{ + SoupSocket **sockets = user_data; + int i; + + debug_printf (2, " msg %p => socket %p\n", msg, socket); + for (i = 0; i < 4; i++) { + if (!sockets[i]) { + /* We ref the socket to make sure that even if + * it gets disconnected, it doesn't get freed, + * since our checks would get messed up if the + * slice allocator reused the same address for + * two consecutive sockets. + */ + sockets[i] = g_object_ref (socket); + return; + } + } + + debug_printf (1, " socket queue overflowed!\n"); + errors++; + soup_session_cancel_message (session, msg, SOUP_STATUS_CANCELLED); +} + +static void +do_timeout_test_for_session (SoupSession *session) +{ + SoupMessage *msg; + SoupSocket *sockets[4] = { NULL, NULL, NULL, NULL }; + SoupURI *timeout_uri; + int i; + + g_signal_connect (session, "request-started", + G_CALLBACK (request_started_socket_collector), + &sockets); + + debug_printf (1, " First message\n"); + timeout_uri = soup_uri_new_with_base (base_uri, "/timeout-persistent"); + msg = soup_message_new_from_uri ("GET", timeout_uri); + soup_uri_free (timeout_uri); + soup_session_send_message (session, msg); + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Unexpected response: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + if (sockets[1]) { + debug_printf (1, " Message was retried??\n"); + errors++; + sockets[1] = sockets[2] = 
sockets[3] = NULL; + } + g_object_unref (msg); + + debug_printf (1, " Second message\n"); + msg = soup_message_new_from_uri ("GET", base_uri); + soup_session_send_message (session, msg); + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Unexpected response: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + if (sockets[1] != sockets[0]) { + debug_printf (1, " Message was not retried on existing connection\n"); + errors++; + } else if (!sockets[2]) { + debug_printf (1, " Message was not retried after disconnect\n"); + errors++; + } else if (sockets[2] == sockets[1]) { + debug_printf (1, " Message was retried on closed connection??\n"); + errors++; + } else if (sockets[3]) { + debug_printf (1, " Message was retried again??\n"); + errors++; + } + g_object_unref (msg); + + for (i = 0; sockets[i]; i++) + g_object_unref (sockets[i]); +} + +static void +do_persistent_connection_timeout_test (void) +{ + SoupSession *session; + + debug_printf (1, "\nUnexpected timing out of persistent connections\n"); + + debug_printf (1, " Async session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + do_timeout_test_for_session (session); + soup_test_session_abort_unref (session); + + debug_printf (1, " Sync session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); + do_timeout_test_for_session (session); + soup_test_session_abort_unref (session); +} + +static GMainLoop *max_conns_loop; +static int msgs_done; +static guint quit_loop_timeout; +#define MAX_CONNS 2 +#define TEST_CONNS (MAX_CONNS * 2) + +static gboolean +idle_start_server (gpointer data) +{ + g_mutex_unlock (&server_mutex); + return FALSE; +} + +static gboolean +quit_loop (gpointer data) +{ + quit_loop_timeout = 0; + g_main_loop_quit (max_conns_loop); + return FALSE; +} + +static void +max_conns_request_started (SoupSession *session, SoupMessage *msg, + SoupSocket *socket, gpointer user_data) +{ + if (++msgs_done == MAX_CONNS) { + if 
(quit_loop_timeout) + g_source_remove (quit_loop_timeout); + quit_loop_timeout = g_timeout_add (100, quit_loop, NULL); + } +} + +static void +max_conns_message_complete (SoupSession *session, SoupMessage *msg, gpointer user_data) +{ + if (++msgs_done == TEST_CONNS) + g_main_loop_quit (max_conns_loop); +} + +static void +do_max_conns_test_for_session (SoupSession *session) +{ + SoupMessage *msgs[TEST_CONNS]; + int i; + + max_conns_loop = g_main_loop_new (NULL, TRUE); + + g_mutex_lock (&server_mutex); + + g_signal_connect (session, "request-started", + G_CALLBACK (max_conns_request_started), NULL); + msgs_done = 0; + for (i = 0; i < TEST_CONNS; i++) { + msgs[i] = soup_message_new_from_uri ("GET", base_uri); + g_object_ref (msgs[i]); + soup_session_queue_message (session, msgs[i], + max_conns_message_complete, NULL); + } + + g_main_loop_run (max_conns_loop); + if (msgs_done != MAX_CONNS) { + debug_printf (1, " Queued %d connections out of max %d?", + msgs_done, MAX_CONNS); + errors++; + } + g_signal_handlers_disconnect_by_func (session, max_conns_request_started, NULL); + + msgs_done = 0; + g_idle_add (idle_start_server, NULL); + quit_loop_timeout = g_timeout_add (1000, quit_loop, NULL); + g_main_loop_run (max_conns_loop); + + for (i = 0; i < TEST_CONNS; i++) { + if (!SOUP_STATUS_IS_SUCCESSFUL (msgs[i]->status_code)) { + debug_printf (1, " Message %d failed? %d %s\n", + i, msgs[i]->status_code, + msgs[i]->reason_phrase ? msgs[i]->reason_phrase : "-"); + errors++; + } + } + + if (msgs_done != TEST_CONNS) { + /* Clean up so we don't get a spurious "Leaked + * session" error. 
+ */ + for (i = 0; i < TEST_CONNS; i++) + soup_session_cancel_message (session, msgs[i], SOUP_STATUS_CANCELLED); + g_main_loop_run (max_conns_loop); + } + + g_main_loop_unref (max_conns_loop); + if (quit_loop_timeout) + g_source_remove (quit_loop_timeout); + + for (i = 0; i < TEST_CONNS; i++) + g_object_unref (msgs[i]); +} + +static void +do_max_conns_test (void) +{ + SoupSession *session; + + debug_printf (1, "\nExceeding max-conns\n"); + + debug_printf (1, " Async session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_MAX_CONNS, MAX_CONNS, + NULL); + do_max_conns_test_for_session (session); + soup_test_session_abort_unref (session); + + debug_printf (1, " Sync session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, + SOUP_SESSION_MAX_CONNS, MAX_CONNS, + NULL); + do_max_conns_test_for_session (session); + soup_test_session_abort_unref (session); +} + +GMainLoop *loop; + +static void +np_request_started (SoupSession *session, SoupMessage *msg, + SoupSocket *socket, gpointer user_data) +{ + SoupSocket **save_socket = user_data; + + *save_socket = g_object_ref (socket); +} + +static void +np_request_unqueued (SoupSession *session, SoupMessage *msg, + gpointer user_data) +{ + SoupSocket *socket = *(SoupSocket **)user_data; + + if (soup_socket_is_connected (socket)) { + debug_printf (1, " socket is still connected\n"); + errors++; + } + + g_main_loop_quit (loop); +} + +static void +do_non_persistent_test_for_session (SoupSession *session) +{ + SoupMessage *msg; + SoupSocket *socket = NULL; + + loop = g_main_loop_new (NULL, FALSE); + + g_signal_connect (session, "request-started", + G_CALLBACK (np_request_started), + &socket); + g_signal_connect (session, "request-unqueued", + G_CALLBACK (np_request_unqueued), + &socket); + + msg = soup_message_new_from_uri ("GET", base_uri); + soup_message_headers_append (msg->request_headers, "Connection", "close"); + g_object_ref (msg); + soup_session_queue_message (session, msg, NULL, 
NULL); + g_main_loop_run (loop); + + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Unexpected response: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + g_object_unref (msg); +} + +static void +do_non_persistent_connection_test (void) +{ + SoupSession *session; + + debug_printf (1, "\nNon-persistent connections are closed immediately\n"); + + debug_printf (1, " Async session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + do_non_persistent_test_for_session (session); + soup_test_session_abort_unref (session); + + debug_printf (1, " Sync session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); + do_non_persistent_test_for_session (session); + soup_test_session_abort_unref (session); +} + +static void +do_non_idempotent_test_for_session (SoupSession *session) +{ + SoupMessage *msg; + SoupSocket *sockets[4] = { NULL, NULL, NULL, NULL }; + int i; + + g_signal_connect (session, "request-started", + G_CALLBACK (request_started_socket_collector), + &sockets); + + debug_printf (2, " GET\n"); + msg = soup_message_new_from_uri ("GET", base_uri); + soup_session_send_message (session, msg); + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Unexpected response: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + if (sockets[1]) { + debug_printf (1, " Message was retried??\n"); + errors++; + sockets[1] = sockets[2] = sockets[3] = NULL; + } + g_object_unref (msg); + + debug_printf (2, " POST\n"); + msg = soup_message_new_from_uri ("POST", base_uri); + soup_session_send_message (session, msg); + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " Unexpected response: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + if (sockets[1] == sockets[0]) { + debug_printf (1, " Message was sent on existing connection!\n"); + errors++; + } + if (sockets[2]) { + debug_printf (1, " Too many connections used...\n"); + errors++; + } + g_object_unref 
(msg); + + for (i = 0; sockets[i]; i++) + g_object_unref (sockets[i]); +} + +static void +do_non_idempotent_connection_test (void) +{ + SoupSession *session; + + debug_printf (1, "\nNon-idempotent methods are always sent on new connections\n"); + + debug_printf (1, " Async session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + do_non_idempotent_test_for_session (session); + soup_test_session_abort_unref (session); + + debug_printf (1, " Sync session\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); + do_non_idempotent_test_for_session (session); + soup_test_session_abort_unref (session); +} + +int +main (int argc, char **argv) +{ + test_init (argc, argv, NULL); + + server = soup_test_server_new (TRUE); + soup_server_add_handler (server, NULL, server_callback, "http", NULL); + base_uri = soup_uri_new ("http://127.0.0.1/"); + soup_uri_set_port (base_uri, soup_server_get_port (server)); + + do_content_length_framing_test (); + do_persistent_connection_timeout_test (); + do_max_conns_test (); + do_non_persistent_connection_test (); + do_non_idempotent_connection_test (); + + soup_uri_free (base_uri); + soup_test_server_quit_unref (server); + + test_cleanup (); + return errors != 0; +} diff --git a/tests/context-test.c b/tests/context-test.c index fe9af31..673ae21 100644 --- a/tests/context-test.c +++ b/tests/context-test.c @@ -97,48 +97,45 @@ server_callback (SoupServer *server, SoupMessage *msg, static gboolean idle_start_test1_thread (gpointer loop); static gpointer test1_thread (gpointer user_data); -static GCond *test1_cond; -static GMutex *test1_mutex; +static GCond test1_cond; +static GMutex test1_mutex; +static GMainLoop *test1_loop; static void -do_test1 (void) +do_test1 (int n, gboolean use_thread_context) { - GMainLoop *loop; - - debug_printf (1, "Test 1: blocking the main thread does not block other thread\n"); - - test1_cond = g_cond_new (); - test1_mutex = g_mutex_new (); - - loop = g_main_loop_new (NULL, 
FALSE); - g_idle_add (idle_start_test1_thread, loop); - g_main_loop_run (loop); - g_main_loop_unref (loop); - - g_mutex_free (test1_mutex); - g_cond_free (test1_cond); + debug_printf (1, "\nTest %d: blocking the main thread does not block other thread\n", n); + if (use_thread_context) + debug_printf (1, "(Using g_main_context_push_thread_default())\n"); + else + debug_printf (1, "(Using SOUP_SESSION_ASYNC_CONTEXT)\n"); + + test1_loop = g_main_loop_new (NULL, FALSE); + g_idle_add (idle_start_test1_thread, GINT_TO_POINTER (use_thread_context)); + g_main_loop_run (test1_loop); + g_main_loop_unref (test1_loop); } static gboolean -idle_start_test1_thread (gpointer loop) +idle_start_test1_thread (gpointer use_thread_context) { - GTimeVal time; + guint64 time; GThread *thread; - g_mutex_lock (test1_mutex); - thread = g_thread_create (test1_thread, base_uri, TRUE, NULL); + g_mutex_lock (&test1_mutex); + thread = g_thread_new ("test1_thread", test1_thread, use_thread_context); - g_get_current_time (&time); - time.tv_sec += 5; - if (g_cond_timed_wait (test1_cond, test1_mutex, &time)) + time = g_get_monotonic_time () + 5000000; + if (g_cond_wait_until (&test1_cond, &test1_mutex, time)) g_thread_join (thread); else { debug_printf (1, " timeout!\n"); + g_thread_unref (thread); errors++; } - g_mutex_unlock (test1_mutex); - g_main_loop_quit (loop); + g_mutex_unlock (&test1_mutex); + g_main_loop_quit (test1_loop); return FALSE; } @@ -149,7 +146,7 @@ test1_finished (SoupSession *session, SoupMessage *msg, gpointer loop) } static gpointer -test1_thread (gpointer user_data) +test1_thread (gpointer use_thread_context) { SoupSession *session; GMainContext *async_context; @@ -158,14 +155,20 @@ test1_thread (gpointer user_data) GMainLoop *loop; /* Wait for main thread to be waiting on test1_cond */ - g_mutex_lock (test1_mutex); - g_mutex_unlock (test1_mutex); + g_mutex_lock (&test1_mutex); + g_mutex_unlock (&test1_mutex); async_context = g_main_context_new (); - session = 
soup_test_session_new ( - SOUP_TYPE_SESSION_ASYNC, - SOUP_SESSION_ASYNC_CONTEXT, async_context, - NULL); + if (use_thread_context) { + g_main_context_push_thread_default (async_context); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_USE_THREAD_CONTEXT, TRUE, + NULL); + } else { + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_ASYNC_CONTEXT, async_context, + NULL); + } g_main_context_unref (async_context); uri = g_build_filename (base_uri, "slow", NULL); @@ -197,7 +200,10 @@ test1_thread (gpointer user_data) soup_test_session_abort_unref (session); g_free (uri); - g_cond_signal (test1_cond); + g_cond_signal (&test1_cond); + + if (use_thread_context) + g_main_context_pop_thread_default (async_context); return NULL; } @@ -208,7 +214,7 @@ test1_thread (gpointer user_data) static gboolean idle_test2_fail (gpointer user_data); static void -do_test2 (void) +do_test2 (int n, gboolean use_thread_context) { guint idle; GMainContext *async_context; @@ -216,15 +222,25 @@ do_test2 (void) char *uri; SoupMessage *msg; - debug_printf (1, "Test 2: a session with its own context is independent of the main loop.\n"); + debug_printf (1, "\nTest %d: a session with its own context is independent of the main loop.\n", n); + if (use_thread_context) + debug_printf (1, "(Using g_main_context_push_thread_default())\n"); + else + debug_printf (1, "(Using SOUP_SESSION_ASYNC_CONTEXT)\n"); idle = g_idle_add_full (G_PRIORITY_HIGH, idle_test2_fail, NULL, NULL); async_context = g_main_context_new (); - session = soup_test_session_new ( - SOUP_TYPE_SESSION_ASYNC, - SOUP_SESSION_ASYNC_CONTEXT, async_context, - NULL); + if (use_thread_context) { + g_main_context_push_thread_default (async_context); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_USE_THREAD_CONTEXT, TRUE, + NULL); + } else { + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_ASYNC_CONTEXT, async_context, + NULL); + } 
g_main_context_unref (async_context); uri = g_build_filename (base_uri, "slow", NULL); @@ -243,6 +259,9 @@ do_test2 (void) g_free (uri); g_source_remove (idle); + + if (use_thread_context) + g_main_context_pop_thread_default (async_context); } static gboolean @@ -253,6 +272,111 @@ idle_test2_fail (gpointer user_data) return FALSE; } +static void +multi_request_started (SoupSession *session, SoupMessage *msg, + SoupSocket *socket, gpointer user_data) +{ + g_object_set_data (G_OBJECT (msg), "started", GUINT_TO_POINTER (TRUE)); +} + +static void +msg1_got_headers (SoupMessage *msg, gpointer user_data) +{ + GMainLoop *loop = user_data; + + g_main_loop_quit (loop); +} + +static void +multi_msg_finished (SoupSession *session, SoupMessage *msg, gpointer user_data) +{ + GMainLoop *loop = user_data; + + g_object_set_data (G_OBJECT (msg), "finished", GUINT_TO_POINTER (TRUE)); + g_main_loop_quit (loop); +} + +static void +do_multicontext_test (int n) +{ + SoupSession *session; + SoupMessage *msg1, *msg2; + GMainContext *context1, *context2; + GMainLoop *loop1, *loop2; + + debug_printf (1, "\nTest %d: Using multiple async contexts\n", n); + + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_USE_THREAD_CONTEXT, TRUE, + NULL); + g_signal_connect (session, "request-started", + G_CALLBACK (multi_request_started), NULL); + + context1 = g_main_context_new (); + loop1 = g_main_loop_new (context1, FALSE); + context2 = g_main_context_new (); + loop2 = g_main_loop_new (context2, FALSE); + + g_main_context_push_thread_default (context1); + msg1 = soup_message_new ("GET", base_uri); + g_object_ref (msg1); + soup_session_queue_message (session, msg1, multi_msg_finished, loop1); + g_signal_connect (msg1, "got-headers", + G_CALLBACK (msg1_got_headers), loop1); + g_object_set_data (G_OBJECT (msg1), "session", session); + g_main_context_pop_thread_default (context1); + + g_main_context_push_thread_default (context2); + msg2 = soup_message_new ("GET", base_uri); + 
g_object_ref (msg2); + soup_session_queue_message (session, msg2, multi_msg_finished, loop2); + g_main_context_pop_thread_default (context2); + + g_main_context_push_thread_default (context1); + g_main_loop_run (loop1); + g_main_context_pop_thread_default (context1); + + if (!g_object_get_data (G_OBJECT (msg1), "started")) { + debug_printf (1, " msg1 not started??\n"); + errors++; + } + if (g_object_get_data (G_OBJECT (msg2), "started")) { + debug_printf (1, " msg2 started while loop1 was running!\n"); + errors++; + } + + g_main_context_push_thread_default (context2); + g_main_loop_run (loop2); + g_main_context_pop_thread_default (context2); + + if (g_object_get_data (G_OBJECT (msg1), "finished")) { + debug_printf (1, " msg1 finished while loop2 was running!\n"); + errors++; + } + if (!g_object_get_data (G_OBJECT (msg2), "finished")) { + debug_printf (1, " msg2 not finished??\n"); + errors++; + } + + g_main_context_push_thread_default (context1); + g_main_loop_run (loop1); + g_main_context_pop_thread_default (context1); + + if (!g_object_get_data (G_OBJECT (msg1), "finished")) { + debug_printf (1, " msg1 not finished??\n"); + errors++; + } + + g_object_unref (msg1); + g_object_unref (msg2); + + soup_test_session_abort_unref (session); + + g_main_loop_unref (loop1); + g_main_loop_unref (loop2); + g_main_context_unref (context1); + g_main_context_unref (context2); +} int main (int argc, char **argv) @@ -266,8 +390,11 @@ main (int argc, char **argv) base_uri = g_strdup_printf ("http://127.0.0.1:%u/", soup_server_get_port (server)); - do_test1 (); - do_test2 (); + do_test1 (1, FALSE); + do_test1 (2, TRUE); + do_test2 (3, FALSE); + do_test2 (4, TRUE); + do_multicontext_test (5); g_free (base_uri); soup_test_server_quit_unref (server); diff --git a/tests/dns.c b/tests/dns.c index 6c44800..1751cfb 100644 --- a/tests/dns.c +++ b/tests/dns.c @@ -43,7 +43,6 @@ main (int argc, char **argv) if (argc < 2) usage (); - g_thread_init (NULL); g_type_init (); for (i = 1; i < argc; 
i++) { diff --git a/tests/forms-test.c b/tests/forms-test.c index 4c2846e..fbbd97c 100644 --- a/tests/forms-test.c +++ b/tests/forms-test.c @@ -395,9 +395,7 @@ md5_post_callback (SoupServer *server, SoupMessage *msg, NULL); redirect_uri = soup_uri_to_string (uri, FALSE); - soup_message_set_status (msg, SOUP_STATUS_SEE_OTHER); - soup_message_headers_replace (msg->response_headers, "Location", - redirect_uri); + soup_message_set_redirect (msg, SOUP_STATUS_SEE_OTHER, redirect_uri); g_free (redirect_uri); soup_uri_free (uri); diff --git a/tests/get.c b/tests/get.c index 79d6e80..55dd735 100644 --- a/tests/get.c +++ b/tests/get.c @@ -61,6 +61,13 @@ get_url (const char *url) while (soup_message_headers_iter_next (&iter, &hname, &value)) printf ("%s: %s\r\n", hname, value); printf ("\n"); + } else if (msg->status_code == SOUP_STATUS_SSL_FAILED) { + GTlsCertificateFlags flags; + + if (soup_message_get_https_status (msg, NULL, &flags)) + printf ("%s: %d %s (0x%x)\n", name, msg->status_code, msg->reason_phrase, flags); + else + printf ("%s: %d %s (no handshake status)\n", name, msg->status_code, msg->reason_phrase); } else if (!quiet || SOUP_STATUS_IS_TRANSPORT_ERROR (msg->status_code)) printf ("%s: %d %s\n", name, msg->status_code, msg->reason_phrase); @@ -98,15 +105,14 @@ main (int argc, char **argv) { const char *cafile = NULL, *url; SoupURI *proxy = NULL, *parsed; - gboolean synchronous = FALSE; + gboolean synchronous = FALSE, ntlm = FALSE; int opt; - g_thread_init (NULL); g_type_init (); method = SOUP_METHOD_GET; - while ((opt = getopt (argc, argv, "c:dhp:qs")) != -1) { + while ((opt = getopt (argc, argv, "c:dhnp:qs")) != -1) { switch (opt) { case 'c': cafile = optarg; @@ -121,6 +127,10 @@ main (int argc, char **argv) debug = TRUE; break; + case 'n': + ntlm = TRUE; + break; + case 'p': proxy = soup_uri_new (optarg); if (!proxy) { @@ -166,6 +176,7 @@ main (int argc, char **argv) SOUP_SESSION_ADD_FEATURE_BY_TYPE, SOUP_TYPE_COOKIE_JAR, SOUP_SESSION_USER_AGENT, "get ", 
SOUP_SESSION_ACCEPT_LANGUAGE_AUTO, TRUE, + SOUP_SESSION_USE_NTLM, ntlm, NULL); } else { session = soup_session_async_new_with_options ( @@ -177,6 +188,7 @@ main (int argc, char **argv) SOUP_SESSION_ADD_FEATURE_BY_TYPE, SOUP_TYPE_COOKIE_JAR, SOUP_SESSION_USER_AGENT, "get ", SOUP_SESSION_ACCEPT_LANGUAGE_AUTO, TRUE, + SOUP_SESSION_USE_NTLM, ntlm, NULL); } diff --git a/tests/getbug.c b/tests/getbug.c index 86ae304..1af3f53 100644 --- a/tests/getbug.c +++ b/tests/getbug.c @@ -19,6 +19,9 @@ static GMainLoop *loop; static void print_value (GValue *value) { +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif if (G_VALUE_HOLDS_STRING (value)) printf ("%s", g_value_get_string (value)); else if (G_VALUE_HOLDS_INT (value)) @@ -37,6 +40,9 @@ print_value (GValue *value) printf (" ]"); } else printf ("(%s)", g_type_name (G_VALUE_TYPE (value))); +#ifdef G_GNUC_END_IGNORE_DEPRECATIONS +G_GNUC_END_IGNORE_DEPRECATIONS +#endif } static void @@ -95,7 +101,6 @@ main (int argc, char **argv) const char *uri = "http://bugzilla.redhat.com/bugzilla/xmlrpc.cgi"; int opt, bug; - g_thread_init (NULL); g_type_init (); while ((opt = getopt (argc, argv, "p:")) != -1) { diff --git a/tests/header-parsing.c b/tests/header-parsing.c index 63a29bb..fcdd550 100644 --- a/tests/header-parsing.c +++ b/tests/header-parsing.c @@ -19,7 +19,7 @@ static struct RequestTest { guint status; const char *method, *path; SoupHTTPVersion version; - Header headers[4]; + Header headers[10]; } reqtests[] = { /**********************/ /*** VALID REQUESTS ***/ @@ -177,6 +177,30 @@ static struct RequestTest { } }, + { "GET with full URI", + "GET http://example.com HTTP/1.1\r\n", -1, + SOUP_STATUS_OK, + "GET", "http://example.com", SOUP_HTTP_1_1, + { { NULL } } + }, + + { "GET with full URI in upper-case", + "GET HTTP://example.com HTTP/1.1\r\n", -1, + SOUP_STATUS_OK, + "GET", "HTTP://example.com", SOUP_HTTP_1_1, + { { NULL } } + }, + + /* It's better for this to be passed through: this means a 
SoupServer + * could implement ftp-over-http proxying, for instance + */ + { "GET with full URI of unrecognised scheme", + "GET AbOuT: HTTP/1.1\r\n", -1, + SOUP_STATUS_OK, + "GET", "AbOuT:", SOUP_HTTP_1_1, + { { NULL } } + }, + /****************************/ /*** RECOVERABLE REQUESTS ***/ /****************************/ @@ -206,7 +230,7 @@ static struct RequestTest { /* RFC 2616 section 19.3 says we SHOULD accept these */ { "LF instead of CRLF after header", - "GET / HTTP/1.1\nHost: example.com\nConnection: close\n", -1, + "GET / HTTP/1.1\r\nHost: example.com\nConnection: close\n", -1, SOUP_STATUS_OK, "GET", "/", SOUP_HTTP_1_1, { { "Host", "example.com" }, @@ -224,6 +248,18 @@ static struct RequestTest { } }, + { "Mixed CRLF/LF", + "GET / HTTP/1.1\r\na: b\r\nc: d\ne: f\r\ng: h\n", -1, + SOUP_STATUS_OK, + "GET", "/", SOUP_HTTP_1_1, + { { "a", "b" }, + { "c", "d" }, + { "e", "f" }, + { "g", "h" }, + { NULL } + } + }, + { "Req w/ incorrect whitespace in Request-Line", "GET /\tHTTP/1.1\r\nHost: example.com\r\n", -1, SOUP_STATUS_OK, @@ -242,7 +278,11 @@ static struct RequestTest { } }, - /* qv bug 579318, do_bad_header_tests() below */ + /* If the request/status line is parseable, then we + * just ignore any invalid-looking headers after that. + * (qv bug 579318). 
+ */ + { "Req w/ mangled header", "GET / HTTP/1.1\r\nHost: example.com\r\nFoo one\r\nBar: two\r\n", -1, SOUP_STATUS_OK, @@ -253,6 +293,77 @@ static struct RequestTest { } }, + { "First header line is continuation", + "GET / HTTP/1.1\r\n b\r\nHost: example.com\r\nc: d\r\n", -1, + SOUP_STATUS_OK, + "GET", "/", SOUP_HTTP_1_1, + { { "Host", "example.com" }, + { "c", "d" }, + { NULL } + } + }, + + { "Zero-length header name", + "GET / HTTP/1.1\r\na: b\r\n: example.com\r\nc: d\r\n", -1, + SOUP_STATUS_OK, + "GET", "/", SOUP_HTTP_1_1, + { { "a", "b" }, + { "c", "d" }, + { NULL } + } + }, + + { "CR in header name", + "GET / HTTP/1.1\r\na: b\r\na\rb: cd\r\nx\r: y\r\n\rz: w\r\nc: d\r\n", -1, + SOUP_STATUS_OK, + "GET", "/", SOUP_HTTP_1_1, + { { "a", "b" }, + { "c", "d" }, + { NULL } + } + }, + + { "CR in header value", + "GET / HTTP/1.1\r\na: b\r\nHost: example\rcom\r\np: \rq\r\ns: t\r\r\nc: d\r\n", -1, + SOUP_STATUS_OK, + "GET", "/", SOUP_HTTP_1_1, + { { "a", "b" }, + { "Host", "example com" }, /* CR in the middle turns to space */ + { "p", "q" }, /* CR at beginning is ignored */ + { "s", "t" }, /* CR at end is ignored */ + { "c", "d" }, + { NULL } + } + }, + + { "Tab in header name", + "GET / HTTP/1.1\r\na: b\r\na\tb: cd\r\nx\t: y\r\np: q\r\n\tz: w\r\nc: d\r\n", -1, + SOUP_STATUS_OK, + "GET", "/", SOUP_HTTP_1_1, + { { "a", "b" }, + /* Tab anywhere in the header name causes it to be + * ignored... 
except at beginning of line where it's a + * continuation line + */ + { "p", "q z: w" }, + { "c", "d" }, + { NULL } + } + }, + + { "Tab in header value", + "GET / HTTP/1.1\r\na: b\r\nab: c\td\r\nx: \ty\r\nz: w\t\r\nc: d\r\n", -1, + SOUP_STATUS_OK, + "GET", "/", SOUP_HTTP_1_1, + { { "a", "b" }, + { "ab", "c\td" }, /* internal tab preserved */ + { "x", "y" }, /* leading tab ignored */ + { "z", "w" }, /* trailing tab ignored */ + { "c", "d" }, + { NULL } + } + }, + /************************/ /*** INVALID REQUESTS ***/ /************************/ @@ -299,6 +410,13 @@ static struct RequestTest { { { NULL } } }, + { "NUL at beginning of Method", + "\x00 / HTTP/1.1\r\nHost: example.com\r\n", 35, + SOUP_STATUS_BAD_REQUEST, + NULL, NULL, -1, + { { NULL } } + }, + { "NUL in Path", "GET /\x00 HTTP/1.1\r\nHost: example.com\r\n", 38, SOUP_STATUS_BAD_REQUEST, @@ -306,7 +424,14 @@ static struct RequestTest { { { NULL } } }, - { "NUL in Header", + { "NUL in header name", + "GET / HTTP/1.1\r\n\x00: silly\r\n", 37, + SOUP_STATUS_BAD_REQUEST, + NULL, NULL, -1, + { { NULL } } + }, + + { "NUL in header value", "GET / HTTP/1.1\r\nHost: example\x00com\r\n", 37, SOUP_STATUS_BAD_REQUEST, NULL, NULL, -1, @@ -535,13 +660,25 @@ static struct ResponseTest { { { NULL } } }, + { "NUL at start", + "\x00HTTP/1.1 200 OK\r\nFoo: bar\r\n", 28, + -1, 0, NULL, + { { NULL } } + }, + { "NUL in Reason Phrase", "HTTP/1.1 200 O\x00K\r\nFoo: bar\r\n", 28, -1, 0, NULL, { { NULL } } }, - { "NUL in Header", + { "NUL in header name", + "HTTP/1.1 200 OK\r\nF\x00oo: bar\r\n", 28, + -1, 0, NULL, + { { NULL } } + }, + + { "NUL in header value", "HTTP/1.1 200 OK\r\nFoo: b\x00ar\r\n", 28, -1, 0, NULL, { { NULL } } diff --git a/tests/libsoup.supp b/tests/libsoup.supp index 69a72ac..24c6516 100644 --- a/tests/libsoup.supp +++ b/tests/libsoup.supp @@ -41,6 +41,13 @@ fun:g_thread_init_glib } { + glib/g_thread_self + Memcheck:Leak + ... 
+ fun:g_slice_alloc0 + fun:g_thread_self +} +{ glib/g_type_register_static Memcheck:Leak ... @@ -107,6 +114,12 @@ fun:get_dispatch } { + glib/g_signal_connect + Memcheck:Leak + ... + fun:handler_list_ensure +} +{ glib/g_signal_handlers_destroy Memcheck:Leak ... @@ -167,16 +180,35 @@ fun:_g_io_modules_ensure_loaded } { - glib/tlsinit + glib/giomodules2 + Memcheck:Leak + ... + fun:_g_io_module_get_default +} +{ + glib/proxydefault + Memcheck:Leak + ... + fun:get_default_proxy_resolver +} +{ + glib/proxysettings + Memcheck:Leak + ... + fun:update_settings + fun:g_proxy_resolver_gnome_lookup +} +{ + glib/tlsdefault Memcheck:Leak ... fun:get_default_tls_backend } { - glib/tlscrypto + glib/tlsdb Memcheck:Leak ... - fun:gcry_pthread_mutex_init + fun:g_tls_backend_get_default_database } { glib/tlscache @@ -197,7 +229,133 @@ fun:ensure_attribute_hash fun:lookup_attribute } +{ + glib/unixsignalthread + Memcheck:Leak + ... + fun:ensure_unix_signal_handler_installed_unlocked +} +{ + glib/gioscheduler + Memcheck:Leak + ... + fun:init_scheduler +} +{ + glib/gtestinit + Memcheck:Leak + ... + fun:g_test_init +} +{ + glib/gtestroot + Memcheck:Leak + ... + fun:g_test_get_root +} +{ + glib/gtestseed + Memcheck:Leak + ... + fun:test_run_seed +} +{ + glib/gtestcase + Memcheck:Leak + ... + fun:g_test_create_case +} +{ + glib/gtestsuite + Memcheck:Leak + ... + fun:g_test_create_suite +} +{ + glib/gtestsuiteadd + Memcheck:Leak + ... + fun:g_test_suite_add +} +{ + glib/gtestsuiteaddsuite + Memcheck:Leak + ... + fun:g_test_suite_add_suite +} +{ + glib/gtestsuiterun + Memcheck:Leak + ... + fun:g_slist_copy + fun:g_test_run_suite_internal +} +{ + glib/gthreadpool + Memcheck:Leak + ... + fun:g_thread_pool_start_thread +} +{ + glib/gthreadpool1 + Memcheck:Leak + ... + fun:g_thread_pool_wait_for_new_pool +} +{ + glib/gthreadpool2 + Memcheck:Leak + ... + fun:g_thread_pool_wait_for_new_task +} +{ + glib/unused_thread_queue + Memcheck:Leak + ... 
+ fun:g_async_queue_new + fun:g_thread_pool_new +} +{ + glib/gthreadtls + Memcheck:Leak + ... + fun:_dl_allocate_tls + ... + fun:g_thread_create_posix_impl +} +{ + glib/filenamecharsets + Memcheck:Leak + ... + fun:g_get_filename_charsets +} +{ + glib/charset + Memcheck:Leak + ... + fun:g_get_charset +} +{ + glib/gtlssessioncache + Memcheck:Leak + ... + fun:g_tls_backend_gnutls_store_session +} +{ + glib/cached_poll_array + Memcheck:Leak + ... + fun:g_malloc_n + fun:g_main_context_iterate +} +# probably inlines the aggressive memcpy/memcmp +{ + gnutls/der + Memcheck:Addr4 + ... + fun:asn1_der_coding +} # probably using uninitialized memory as padding or something { gnutls/handshake @@ -205,6 +363,19 @@ ... fun:gnutls_handshake } +{ + gnutls/init + Memcheck:Leak + ... + fun:gnutls_global_init +} +# known leak in gnutls 2.12, fixed in 3.0 +{ + gnutls/server_key_leak + Memcheck:Leak + ... + fun:call_get_cert_callback +} { libxml2/xmlInitParser @@ -213,6 +384,12 @@ fun:xmlInitParser } { + libxml2/xmlInitParserCtxt + Memcheck:Leak + ... + fun:xmlInitParserCtxt +} +{ libxml2/xmlInitializeDict Memcheck:Leak ... @@ -244,3 +421,27 @@ ... fun:intern_header_name } + + +# fixme? +{ + glib/gmuteximpl + Memcheck:Leak + ... + fun:g_mutex_impl_new + fun:g_mutex_get_impl +} +{ + glib/gcondimpl + Memcheck:Leak + ... + fun:g_cond_impl_new + fun:g_cond_get_impl +} +{ + glib/gprivateimpl + Memcheck:Leak + ... + fun:g_private_impl_new + fun:g_private_get_impl +} diff --git a/tests/misc-test.c b/tests/misc-test.c index 411cb11..4b2663f 100644 --- a/tests/misc-test.c +++ b/tests/misc-test.c @@ -1,26 +1,16 @@ /* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */ /* - * Copyright (C) 2007 Red Hat, Inc. + * Copyright 2007-2012 Red Hat, Inc. 
*/ -#include -#include -#include -#include -#include -#include #include -#include -#include -#include #include #include "test-utils.h" -SoupServer *server; -SoupURI *base_uri; -GMutex *server_mutex; +SoupServer *server, *ssl_server; +SoupURI *base_uri, *ssl_base_uri; static gboolean auth_callback (SoupAuthDomain *auth_domain, SoupMessage *msg, @@ -29,73 +19,6 @@ auth_callback (SoupAuthDomain *auth_domain, SoupMessage *msg, return !strcmp (username, "user") && !strcmp (password, "password"); } -static void -forget_close (SoupMessage *msg, gpointer user_data) -{ - soup_message_headers_remove (msg->response_headers, "Connection"); -} - -static void -close_socket (SoupMessage *msg, gpointer user_data) -{ - SoupSocket *sock = user_data; - - soup_socket_disconnect (sock); -} - -static void -timeout_socket (SoupSocket *sock, gpointer user_data) -{ - soup_socket_disconnect (sock); -} - -static void -timeout_request_started (SoupServer *server, SoupMessage *msg, - SoupClientContext *client, gpointer user_data) -{ - SoupSocket *sock; - GMainContext *context = soup_server_get_async_context (server); - guint readable; - - sock = soup_client_context_get_socket (client); - readable = g_signal_connect (sock, "readable", - G_CALLBACK (timeout_socket), NULL); - while (soup_socket_is_connected (sock)) - g_main_context_iteration (context, TRUE); - g_signal_handler_disconnect (sock, readable); - g_signal_handlers_disconnect_by_func (server, timeout_request_started, NULL); -} - -static void -setup_timeout_persistent (SoupServer *server, SoupSocket *sock) -{ - char buf[1]; - gsize nread; - - /* In order for the test to work correctly, we have to - * close the connection *after* the client side writes - * the request. To ensure that this happens reliably, - * regardless of thread scheduling, we: - * - * 1. Try to read off the socket now, knowing it will - * fail (since the client is waiting for us to - * return a response). This will cause it to - * emit "readable" later. - * 2. 
Connect to the server's request-started signal. - * 3. Run an inner main loop from that signal handler - * until the socket emits "readable". (If we don't - * do this then it's possible the client's next - * request would be ready before we returned to - * the main loop, and so the signal would never be - * emitted.) - * 4. Close the socket. - */ - - soup_socket_read (sock, buf, 1, &nread, NULL, NULL); - g_signal_connect (server, "request-started", - G_CALLBACK (timeout_request_started), NULL); -} - static gboolean timeout_finish_message (gpointer msg) { @@ -111,13 +34,7 @@ server_callback (SoupServer *server, SoupMessage *msg, SoupClientContext *context, gpointer data) { SoupURI *uri = soup_message_get_uri (msg); - - /* The way this gets used in the tests, we don't actually - * need to hold it through the whole function, so it's simpler - * to just release it right away. - */ - g_mutex_lock (server_mutex); - g_mutex_unlock (server_mutex); + const char *server_protocol = data; soup_message_headers_append (msg->response_headers, "X-Handled-By", "server_callback"); @@ -129,64 +46,44 @@ server_callback (SoupServer *server, SoupMessage *msg, return; } - if (msg->method != SOUP_METHOD_GET) { + if (msg->method != SOUP_METHOD_GET && msg->method != SOUP_METHOD_POST) { soup_message_set_status (msg, SOUP_STATUS_NOT_IMPLEMENTED); return; } if (!strcmp (path, "/redirect")) { - soup_message_set_status (msg, SOUP_STATUS_FOUND); - soup_message_headers_append (msg->response_headers, - /* Kids: don't try this at home! - * RFC2616 says to use an - * absolute URI! 
- */ - "Location", "/"); + soup_message_set_redirect (msg, SOUP_STATUS_FOUND, "/"); return; } - if (g_str_has_prefix (path, "/content-length/")) { - gboolean too_long = strcmp (path, "/content-length/long") == 0; - gboolean no_close = strcmp (path, "/content-length/noclose") == 0; + if (!strcmp (path, "/alias-redirect")) { + SoupURI *redirect_uri; + char *redirect_string; + const char *redirect_protocol; + + redirect_protocol = soup_message_headers_get_one (msg->request_headers, "X-Redirect-Protocol"); + redirect_uri = soup_uri_copy (uri); + soup_uri_set_scheme (redirect_uri, "foo"); + if (!g_strcmp0 (redirect_protocol, "https")) + soup_uri_set_port (redirect_uri, ssl_base_uri->port); + else + soup_uri_set_port (redirect_uri, base_uri->port); + soup_uri_set_path (redirect_uri, "/alias-redirected"); + redirect_string = soup_uri_to_string (redirect_uri, FALSE); + + soup_message_set_redirect (msg, SOUP_STATUS_FOUND, redirect_string); + g_free (redirect_string); + soup_uri_free (redirect_uri); + return; + } else if (!strcmp (path, "/alias-redirected")) { soup_message_set_status (msg, SOUP_STATUS_OK); - soup_message_set_response (msg, "text/plain", - SOUP_MEMORY_STATIC, "foobar", 6); - if (too_long) - soup_message_headers_set_content_length (msg->response_headers, 9); soup_message_headers_append (msg->response_headers, - "Connection", "close"); - - if (too_long) { - SoupSocket *sock; - - /* soup-message-io will wait for us to add - * another chunk after the first, to fill out - * the declared Content-Length. Instead, we - * forcibly close the socket at that point. - */ - sock = soup_client_context_get_socket (context); - g_signal_connect (msg, "wrote-chunk", - G_CALLBACK (close_socket), sock); - } else if (no_close) { - /* Remove the 'Connection: close' after writing - * the headers, so that when we check it after - * writing the body, we'll think we aren't - * supposed to close it. 
- */ - g_signal_connect (msg, "wrote-headers", - G_CALLBACK (forget_close), NULL); - } + "X-Redirected-Protocol", + server_protocol); return; } - if (!strcmp (path, "/timeout-persistent")) { - SoupSocket *sock; - - sock = soup_client_context_get_socket (context); - setup_timeout_persistent (server, sock); - } - if (!strcmp (path, "/slow")) { soup_server_pause_message (server, msg); g_object_set_data (G_OBJECT (msg), "server", server); @@ -562,6 +459,12 @@ ea_connection_created (SoupSession *session, GObject *conn, gpointer user_data) } static void +ea_request_started (SoupSession *session, SoupMessage *msg, SoupSocket *socket, gpointer user_data) +{ + soup_session_cancel_message (session, msg, SOUP_STATUS_CANCELLED); +} + +static void do_early_abort_test (void) { SoupSession *session; @@ -604,64 +507,25 @@ do_early_abort_test (void) g_main_context_iteration (context, FALSE); soup_test_session_abort_unref (session); -} - -static void -do_content_length_framing_test (void) -{ - SoupSession *session; - SoupMessage *msg; - SoupURI *request_uri; - goffset declared_length; - - debug_printf (1, "\nInvalid Content-Length framing tests\n"); session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + msg = soup_message_new_from_uri ("GET", base_uri); - debug_printf (1, " Content-Length larger than message body length\n"); - request_uri = soup_uri_new_with_base (base_uri, "/content-length/long"); - msg = soup_message_new_from_uri ("GET", request_uri); + g_signal_connect (session, "request-started", + G_CALLBACK (ea_request_started), NULL); soup_session_send_message (session, msg); - if (msg->status_code != SOUP_STATUS_OK) { - debug_printf (1, " Unexpected response: %d %s\n", - msg->status_code, msg->reason_phrase); - errors++; - } else { - declared_length = soup_message_headers_get_content_length (msg->response_headers); - debug_printf (2, " Content-Length: %lu, body: %s\n", - (gulong)declared_length, msg->response_body->data); - if (msg->response_body->length >= 
declared_length) { - debug_printf (1, " Body length %lu >= declared length %lu\n", - (gulong)msg->response_body->length, - (gulong)declared_length); - errors++; - } - } - soup_uri_free (request_uri); - g_object_unref (msg); + debug_printf (2, " Message 3 completed\n"); - debug_printf (1, " Server claims 'Connection: close' but doesn't\n"); - request_uri = soup_uri_new_with_base (base_uri, "/content-length/noclose"); - msg = soup_message_new_from_uri ("GET", request_uri); - soup_session_send_message (session, msg); - if (msg->status_code != SOUP_STATUS_OK) { + if (msg->status_code != SOUP_STATUS_CANCELLED) { debug_printf (1, " Unexpected response: %d %s\n", msg->status_code, msg->reason_phrase); errors++; - } else { - declared_length = soup_message_headers_get_content_length (msg->response_headers); - debug_printf (2, " Content-Length: %lu, body: %s\n", - (gulong)declared_length, msg->response_body->data); - if (msg->response_body->length != declared_length) { - debug_printf (1, " Body length %lu != declared length %lu\n", - (gulong)msg->response_body->length, - (gulong)declared_length); - errors++; - } } - soup_uri_free (request_uri); g_object_unref (msg); + while (g_main_context_pending (context)) + g_main_context_iteration (context, FALSE); + soup_test_session_abort_unref (session); } @@ -718,292 +582,249 @@ do_accept_language_test (void) g_unsetenv ("LANGUAGE"); } -static void -timeout_test_request_started (SoupSession *session, SoupMessage *msg, - SoupSocket *socket, gpointer user_data) +static gboolean +cancel_message_timeout (gpointer msg) { - SoupSocket **sockets = user_data; - int i; + SoupSession *session = g_object_get_data (G_OBJECT (msg), "session"); - debug_printf (2, " msg %p => socket %p\n", msg, socket); - for (i = 0; i < 4; i++) { - if (!sockets[i]) { - /* We ref the socket to make sure that even if - * it gets disconnected, it doesn't get freed, - * since our checks would get messed up if the - * slice allocator reused the same address for - * two 
consecutive sockets. - */ - sockets[i] = g_object_ref (socket); - return; - } - } + soup_session_cancel_message (session, msg, SOUP_STATUS_CANCELLED); + g_object_unref (msg); + g_object_unref (session); + return FALSE; +} + +static gpointer +cancel_message_thread (gpointer msg) +{ + SoupSession *session = g_object_get_data (G_OBJECT (msg), "session"); - debug_printf (1, " socket queue overflowed!\n"); - errors++; + g_usleep (100000); /* .1s */ soup_session_cancel_message (session, msg, SOUP_STATUS_CANCELLED); + g_object_unref (msg); + g_object_unref (session); + return NULL; } static void -do_timeout_test_for_session (SoupSession *session) +do_cancel_while_reading_test_for_session (SoupSession *session) { SoupMessage *msg; - SoupSocket *sockets[4] = { NULL, NULL, NULL, NULL }; - SoupURI *timeout_uri; - int i; + GThread *thread = NULL; + SoupURI *uri; - g_signal_connect (session, "request-started", - G_CALLBACK (timeout_test_request_started), - &sockets); + uri = soup_uri_new_with_base (base_uri, "/slow"); + msg = soup_message_new_from_uri ("GET", uri); + soup_uri_free (uri); - debug_printf (1, " First message\n"); - timeout_uri = soup_uri_new_with_base (base_uri, "/timeout-persistent"); - msg = soup_message_new_from_uri ("GET", timeout_uri); - soup_uri_free (timeout_uri); - soup_session_send_message (session, msg); - if (msg->status_code != SOUP_STATUS_OK) { - debug_printf (1, " Unexpected response: %d %s\n", - msg->status_code, msg->reason_phrase); - errors++; - } - if (sockets[1]) { - debug_printf (1, " Message was retried??\n"); - errors++; - sockets[1] = sockets[2] = sockets[3] = NULL; - } - g_object_unref (msg); + g_object_set_data (G_OBJECT (msg), "session", session); + g_object_ref (msg); + g_object_ref (session); + if (SOUP_IS_SESSION_ASYNC (session)) + g_timeout_add (100, cancel_message_timeout, msg); + else + thread = g_thread_new ("cancel_message_thread", cancel_message_thread, msg); - debug_printf (1, " Second message\n"); - msg = 
soup_message_new_from_uri ("GET", base_uri); soup_session_send_message (session, msg); - if (msg->status_code != SOUP_STATUS_OK) { - debug_printf (1, " Unexpected response: %d %s\n", + + if (msg->status_code != SOUP_STATUS_CANCELLED) { + debug_printf (1, " FAILED: %d %s (expected Cancelled)\n", msg->status_code, msg->reason_phrase); errors++; } - if (sockets[1] != sockets[0]) { - debug_printf (1, " Message was not retried on existing connection\n"); - errors++; - } else if (!sockets[2]) { - debug_printf (1, " Message was not retried after disconnect\n"); - errors++; - } else if (sockets[2] == sockets[1]) { - debug_printf (1, " Message was retried on closed connection??\n"); - errors++; - } else if (sockets[3]) { - debug_printf (1, " Message was retried again??\n"); - errors++; - } g_object_unref (msg); - for (i = 0; sockets[i]; i++) - g_object_unref (sockets[i]); + if (thread) + g_thread_join (thread); } static void -do_persistent_connection_timeout_test (void) +do_cancel_while_reading_test (void) { SoupSession *session; - debug_printf (1, "\nUnexpected timing out of persistent connections\n"); + debug_printf (1, "\nCancelling message while reading response\n"); debug_printf (1, " Async session\n"); session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); - do_timeout_test_for_session (session); + do_cancel_while_reading_test_for_session (session); soup_test_session_abort_unref (session); debug_printf (1, " Sync session\n"); session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); - do_timeout_test_for_session (session); + do_cancel_while_reading_test_for_session (session); soup_test_session_abort_unref (session); } -static GMainLoop *max_conns_loop; -static int msgs_done; -#define MAX_CONNS 2 -#define TEST_CONNS (MAX_CONNS * 2) - -static gboolean -idle_start_server (gpointer data) -{ - g_mutex_unlock (server_mutex); - return FALSE; -} - -static gboolean -quit_loop (gpointer data) -{ - g_main_loop_quit (max_conns_loop); - return FALSE; -} - -static 
void -max_conns_request_started (SoupSession *session, SoupMessage *msg, - SoupSocket *socket, gpointer user_data) -{ - if (++msgs_done == MAX_CONNS) - g_timeout_add (100, quit_loop, NULL); -} - -static void -max_conns_message_complete (SoupSession *session, SoupMessage *msg, gpointer user_data) -{ - if (++msgs_done == TEST_CONNS) - g_main_loop_quit (max_conns_loop); -} - static void -do_max_conns_test_for_session (SoupSession *session) +do_aliases_test_for_session (SoupSession *session, + const char *redirect_protocol) { - SoupMessage *msgs[TEST_CONNS]; - int i; - guint timeout_id; - - max_conns_loop = g_main_loop_new (NULL, TRUE); + SoupMessage *msg; + SoupURI *uri; + const char *redirected_protocol; - g_mutex_lock (server_mutex); + uri = soup_uri_new_with_base (base_uri, "/alias-redirect"); + msg = soup_message_new_from_uri ("GET", uri); + if (redirect_protocol) + soup_message_headers_append (msg->request_headers, "X-Redirect-Protocol", redirect_protocol); + soup_uri_free (uri); + soup_session_send_message (session, msg); - g_signal_connect (session, "request-started", - G_CALLBACK (max_conns_request_started), NULL); - msgs_done = 0; - for (i = 0; i < TEST_CONNS; i++) { - msgs[i] = soup_message_new_from_uri ("GET", base_uri); - g_object_ref (msgs[i]); - soup_session_queue_message (session, msgs[i], - max_conns_message_complete, NULL); - } + redirected_protocol = soup_message_headers_get_one (msg->response_headers, "X-Redirected-Protocol"); - g_main_loop_run (max_conns_loop); - if (msgs_done != MAX_CONNS) { - debug_printf (1, " Queued %d connections out of max %d?", - msgs_done, MAX_CONNS); + if (g_strcmp0 (redirect_protocol, redirected_protocol)) { + debug_printf (1, " redirect went to %s, should have gone to %s!\n", + redirected_protocol ? redirected_protocol : "(none)", + redirect_protocol ? redirect_protocol : "(none)"); + errors++; + } else if (redirect_protocol && !SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { + debug_printf (1, " msg failed? 
(%d %s)\n", + msg->status_code, msg->reason_phrase); + errors++; + } else if (!redirect_protocol && SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { + debug_printf (1, " msg succeeded? (%d %s)\n", + msg->status_code, msg->reason_phrase); errors++; } - g_signal_handlers_disconnect_by_func (session, max_conns_request_started, NULL); - - msgs_done = 0; - g_idle_add (idle_start_server, NULL); - timeout_id = g_timeout_add (1000, quit_loop, NULL); - g_main_loop_run (max_conns_loop); - - for (i = 0; i < TEST_CONNS; i++) { - if (!SOUP_STATUS_IS_SUCCESSFUL (msgs[i]->status_code)) { - debug_printf (1, " Message %d failed? %d %s\n", - i, msgs[i]->status_code, - msgs[i]->reason_phrase ? msgs[i]->reason_phrase : "-"); - errors++; - } - } - - if (msgs_done != TEST_CONNS) { - /* Clean up so we don't get a spurious "Leaked - * session" error. - */ - for (i = 0; i < TEST_CONNS; i++) - soup_session_cancel_message (session, msgs[i], SOUP_STATUS_CANCELLED); - g_main_loop_run (max_conns_loop); - g_source_remove (timeout_id); - } - - g_main_loop_unref (max_conns_loop); - for (i = 0; i < TEST_CONNS; i++) - g_object_unref (msgs[i]); + g_object_unref (msg); } static void -do_max_conns_test (void) +do_aliases_test (void) { SoupSession *session; + char *aliases[] = { "foo", NULL }; - debug_printf (1, "\nExceeding max-conns\n"); + debug_printf (1, "\nhttp-aliases / https-aliases\n"); - debug_printf (1, " Async session\n"); + debug_printf (1, " Default behavior\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + do_aliases_test_for_session (session, "http"); + soup_test_session_abort_unref (session); + + debug_printf (1, " foo-means-https\n"); session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, - SOUP_SESSION_MAX_CONNS, MAX_CONNS, + SOUP_SESSION_HTTPS_ALIASES, aliases, NULL); - do_max_conns_test_for_session (session); + do_aliases_test_for_session (session, "https"); soup_test_session_abort_unref (session); - debug_printf (1, " Sync session\n"); - session = 
soup_test_session_new (SOUP_TYPE_SESSION_SYNC, - SOUP_SESSION_MAX_CONNS, MAX_CONNS, + debug_printf (1, " foo-means-nothing\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_HTTP_ALIASES, NULL, NULL); - do_max_conns_test_for_session (session); + do_aliases_test_for_session (session, NULL); soup_test_session_abort_unref (session); } -static gboolean -cancel_message_timeout (gpointer msg) +static void +do_dot_dot_test (void) { - SoupSession *session = g_object_get_data (G_OBJECT (msg), "session"); + SoupSession *session; + SoupMessage *msg; + SoupURI *uri; - soup_session_cancel_message (session, msg, SOUP_STATUS_CANCELLED); - g_object_unref (msg); - g_object_unref (session); - return FALSE; -} + debug_printf (1, "\n'..' smuggling test\n"); -static gpointer -cancel_message_thread (gpointer msg) -{ - SoupSession *session = g_object_get_data (G_OBJECT (msg), "session"); + session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); - g_usleep (100000); /* .1s */ - soup_session_cancel_message (session, msg, SOUP_STATUS_CANCELLED); + uri = soup_uri_new_with_base (base_uri, "/..%2ftest"); + msg = soup_message_new_from_uri ("GET", uri); + soup_uri_free (uri); + + soup_session_send_message (session, msg); + + if (msg->status_code != SOUP_STATUS_BAD_REQUEST) { + debug_printf (1, " FAILED: %d %s (expected Bad Request)\n", + msg->status_code, msg->reason_phrase); + errors++; + } g_object_unref (msg); - g_object_unref (session); - return NULL; + + soup_test_session_abort_unref (session); } static void -do_cancel_while_reading_test_for_session (SoupSession *session) +do_ipv6_test (void) { + SoupServer *ipv6_server; + SoupURI *ipv6_uri; + SoupAddress *ipv6_addr; + SoupSession *session; SoupMessage *msg; - GThread *thread = NULL; - SoupURI *uri; - uri = soup_uri_new_with_base (base_uri, "/slow"); - msg = soup_message_new_from_uri ("GET", uri); - soup_uri_free (uri); + debug_printf (1, "\nIPv6 server test\n"); - g_object_set_data (G_OBJECT (msg), 
"session", session); - g_object_ref (msg); - g_object_ref (session); - if (SOUP_IS_SESSION_ASYNC (session)) - g_timeout_add (100, cancel_message_timeout, msg); - else - thread = g_thread_create (cancel_message_thread, msg, TRUE, NULL); + ipv6_addr = soup_address_new ("::1", SOUP_ADDRESS_ANY_PORT); + soup_address_resolve_sync (ipv6_addr, NULL); + ipv6_server = soup_server_new (SOUP_SERVER_INTERFACE, ipv6_addr, + NULL); + g_object_unref (ipv6_addr); + soup_server_add_handler (ipv6_server, NULL, server_callback, NULL, NULL); + soup_server_run_async (ipv6_server); + + ipv6_uri = soup_uri_new ("http://[::1]/"); + soup_uri_set_port (ipv6_uri, soup_server_get_port (ipv6_server)); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + + debug_printf (1, " HTTP/1.1\n"); + msg = soup_message_new_from_uri ("GET", ipv6_uri); soup_session_send_message (session, msg); + if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { + debug_printf (1, " request failed: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + g_object_unref (msg); - if (msg->status_code != SOUP_STATUS_CANCELLED) { - debug_printf (1, " FAILED: %d %s (expected Cancelled)\n", + debug_printf (1, " HTTP/1.0\n"); + msg = soup_message_new_from_uri ("GET", ipv6_uri); + soup_message_set_http_version (msg, SOUP_HTTP_1_0); + soup_session_send_message (session, msg); + if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { + debug_printf (1, " request failed: %d %s\n", msg->status_code, msg->reason_phrase); errors++; } g_object_unref (msg); - if (thread) - g_thread_join (thread); + soup_uri_free (ipv6_uri); + soup_test_session_abort_unref (session); + soup_test_server_quit_unref (ipv6_server); } static void -do_cancel_while_reading_test (void) +do_idle_on_dispose_test (void) { SoupSession *session; + SoupMessage *msg; + GMainContext *async_context; - debug_printf (1, "\nCancelling message while reading response\n"); + debug_printf (1, "\nTesting SoupSessionAsync dispose behavior\n"); - 
debug_printf (1, " Async session\n"); - session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); - do_cancel_while_reading_test_for_session (session); - soup_test_session_abort_unref (session); + async_context = g_main_context_new (); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, + SOUP_SESSION_ASYNC_CONTEXT, async_context, + NULL); - debug_printf (1, " Sync session\n"); - session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); - do_cancel_while_reading_test_for_session (session); - soup_test_session_abort_unref (session); + msg = soup_message_new_from_uri ("GET", base_uri); + soup_session_send_message (session, msg); + g_object_unref (msg); + + while (g_main_context_iteration (async_context, FALSE)) + ; + + g_object_run_dispose (G_OBJECT (session)); + + if (g_main_context_iteration (async_context, FALSE)) { + debug_printf (1, " idle was queued!\n"); + errors++; + } + + g_object_unref (session); + g_main_context_unref (async_context); } int @@ -1013,10 +834,8 @@ main (int argc, char **argv) test_init (argc, argv, NULL); - server_mutex = g_mutex_new (); - server = soup_test_server_new (TRUE); - soup_server_add_handler (server, NULL, server_callback, NULL, NULL); + soup_server_add_handler (server, NULL, server_callback, "http", NULL); base_uri = soup_uri_new ("http://127.0.0.1/"); soup_uri_set_port (base_uri, soup_server_get_port (server)); @@ -1028,19 +847,27 @@ main (int argc, char **argv) soup_server_add_auth_domain (server, auth_domain); g_object_unref (auth_domain); + ssl_server = soup_test_server_new_ssl (TRUE); + soup_server_add_handler (ssl_server, NULL, server_callback, "https", NULL); + ssl_base_uri = soup_uri_new ("https://127.0.0.1/"); + soup_uri_set_port (ssl_base_uri, soup_server_get_port (ssl_server)); + do_host_test (); do_callback_unref_test (); do_msg_reuse_test (); do_star_test (); do_early_abort_test (); - do_content_length_framing_test (); do_accept_language_test (); - do_persistent_connection_timeout_test (); - 
do_max_conns_test (); do_cancel_while_reading_test (); + do_aliases_test (); + do_dot_dot_test (); + do_ipv6_test (); + do_idle_on_dispose_test (); soup_uri_free (base_uri); + soup_uri_free (ssl_base_uri); soup_test_server_quit_unref (server); + soup_test_server_quit_unref (ssl_server); test_cleanup (); return errors != 0; diff --git a/tests/ntlm-test.c b/tests/ntlm-test.c index cd74b15..f5462c6 100644 --- a/tests/ntlm-test.c +++ b/tests/ntlm-test.c @@ -41,7 +41,7 @@ typedef enum { #define NTLM_CHALLENGE "TlRMTVNTUAACAAAADAAMADAAAAABAoEAASNFZ4mrze8AAAAAAAAAAGIAYgA8AAAARABPAE0AQQBJAE4AAgAMAEQATwBNAEEASQBOAAEADABTAEUAUgBWAEUAUgAEABQAZABvAG0AYQBpAG4ALgBjAG8AbQADACIAcwBlAHIAdgBlAHIALgBkAG8AbQBhAGkAbgAuAGMAbwBtAAAAAAA=" -#define NTLM_RESPONSE_USER(response) ((response)[87] == 'h' ? NTLM_AUTHENTICATED_ALICE : NTLM_AUTHENTICATED_BOB) +#define NTLM_RESPONSE_USER(response) ((response)[102] == 'E' ? NTLM_AUTHENTICATED_ALICE : NTLM_AUTHENTICATED_BOB) static void clear_state (gpointer connections, GObject *ex_connection) @@ -58,29 +58,25 @@ server_callback (SoupServer *server, SoupMessage *msg, SoupSocket *socket; const char *auth; NTLMServerState state, required_user = 0; - gboolean auth_required = FALSE, not_found = FALSE; - gboolean basic_allowed = FALSE, ntlm_allowed = FALSE; + gboolean auth_required, not_found = FALSE; + gboolean basic_allowed = TRUE, ntlm_allowed = TRUE; if (msg->method != SOUP_METHOD_GET) { soup_message_set_status (msg, SOUP_STATUS_NOT_IMPLEMENTED); return; } - if (!strncmp (path, "/alice", 6)) { - auth_required = TRUE; - ntlm_allowed = TRUE; + if (!strncmp (path, "/alice", 6)) required_user = NTLM_AUTHENTICATED_ALICE; - } else if (!strncmp (path, "/bob", 4)) { - auth_required = TRUE; - ntlm_allowed = TRUE; + else if (!strncmp (path, "/bob", 4)) required_user = NTLM_AUTHENTICATED_BOB; - } else if (!strncmp (path, "/either", 7)) { - auth_required = TRUE; - ntlm_allowed = basic_allowed = TRUE; - } else if (!strncmp (path, "/basic", 6)) { - auth_required = 
TRUE; - basic_allowed = TRUE; - } + else if (!strncmp (path, "/either", 7)) + ; + else if (!strncmp (path, "/basic", 6)) + ntlm_allowed = FALSE; + else if (!strncmp (path, "/noauth", 7)) + basic_allowed = ntlm_allowed = FALSE; + auth_required = ntlm_allowed || basic_allowed; if (strstr (path, "/404")) not_found = TRUE; @@ -95,7 +91,9 @@ server_callback (SoupServer *server, SoupMessage *msg, if (!strncmp (auth + 5, NTLM_REQUEST_START, strlen (NTLM_REQUEST_START))) { state = NTLM_RECEIVED_REQUEST; - /* If they start, they must finish */ + /* If they start, they must finish, even if + * it was unnecessary. + */ auth_required = ntlm_allowed = TRUE; basic_allowed = FALSE; } else if (state == NTLM_SENT_CHALLENGE && @@ -104,12 +102,15 @@ server_callback (SoupServer *server, SoupMessage *msg, state = NTLM_RESPONSE_USER (auth + 5); } else state = NTLM_UNAUTHENTICATED; - } else if (!strncmp (auth, "Basic ", 6) && basic_allowed) { + } else if (basic_allowed && !strncmp (auth, "Basic ", 6)) { gsize len; char *decoded = (char *)g_base64_decode (auth + 6, &len); - if (!strncmp (decoded, "alice:password", len) || - !strncmp (decoded, "bob:password", len)) + if (!strncmp (decoded, "alice:password", len) && + required_user != NTLM_AUTHENTICATED_BOB) + auth_required = FALSE; + else if (!strncmp (decoded, "bob:password", len) && + required_user != NTLM_AUTHENTICATED_ALICE) auth_required = FALSE; g_free (decoded); } @@ -122,13 +123,13 @@ server_callback (SoupServer *server, SoupMessage *msg, if (auth_required) { soup_message_set_status (msg, SOUP_STATUS_UNAUTHORIZED); - if (basic_allowed) { + if (basic_allowed && state != NTLM_RECEIVED_REQUEST) { soup_message_headers_append (msg->response_headers, "WWW-Authenticate", "Basic realm=\"ntlm-test\""); } - if (state == NTLM_RECEIVED_REQUEST) { + if (ntlm_allowed && state == NTLM_RECEIVED_REQUEST) { soup_message_headers_append (msg->response_headers, "WWW-Authenticate", "NTLM " NTLM_CHALLENGE); @@ -158,7 +159,8 @@ static void authenticate 
(SoupSession *session, SoupMessage *msg, SoupAuth *auth, gboolean retrying, gpointer user) { - soup_auth_authenticate (auth, user, "password"); + if (!retrying) + soup_auth_authenticate (auth, user, "password"); } typedef struct { @@ -180,11 +182,9 @@ prompt_check (SoupMessage *msg, gpointer user_data) "WWW-Authenticate"); if (header && strstr (header, "Basic ")) state->got_basic_prompt = TRUE; - if (!state->sent_ntlm_request) { - if (header && strstr (header, "NTLM") && - !strstr (header, NTLM_CHALLENGE)) - state->got_ntlm_prompt = TRUE; - } + if (header && strstr (header, "NTLM") && + !strstr (header, NTLM_CHALLENGE)) + state->got_ntlm_prompt = TRUE; } static void @@ -333,10 +333,11 @@ static void do_ntlm_round (SoupURI *base_uri, gboolean use_ntlm, const char *user) { SoupSession *session; - gboolean alice = use_ntlm && !strcmp (user, "alice"); - gboolean bob = use_ntlm && !strcmp (user, "bob"); - - g_return_if_fail (use_ntlm || !alice); + gboolean alice = !g_strcmp0 (user, "alice"); + gboolean bob = !g_strcmp0 (user, "bob"); + gboolean alice_via_ntlm = use_ntlm && alice; + gboolean bob_via_ntlm = use_ntlm && bob; + gboolean alice_via_basic = !use_ntlm && alice; session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); if (use_ntlm) @@ -347,40 +348,87 @@ do_ntlm_round (SoupURI *base_uri, gboolean use_ntlm, const char *user) G_CALLBACK (authenticate), (char *)user); } + /* 1. Server doesn't request auth, so both get_ntlm_prompt and + * get_basic_prompt are both FALSE, and likewise do_basic. But + * if we're using NTLM we'll try that even without the server + * asking. + */ do_message (session, base_uri, "/noauth", FALSE, use_ntlm, FALSE, FALSE, SOUP_STATUS_OK); + + /* 2. Server requires auth as Alice, so it will request that + * if we didn't already authenticate the connection to her in + * the previous step. If we authenticated as Bob in the + * previous step, then we'll just immediately get a 401 here. 
+ * So in no case will we see the client try to do_ntlm. + */ do_message (session, base_uri, "/alice", - !use_ntlm || bob, FALSE, - FALSE, FALSE, + !alice_via_ntlm, FALSE, + !alice_via_ntlm, alice_via_basic, alice ? SOUP_STATUS_OK : SOUP_STATUS_UNAUTHORIZED); + + /* 3. Server still requires auth as Alice, but this URI + * doesn't exist, so Alice should get a 404, but others still + * get 401. Alice-via-NTLM is still authenticated, and so + * won't get prompts, and Alice-via-Basic knows at this point + * to send auth without it being requested, so also won't get + * prompts. But Bob/nobody will. + */ do_message (session, base_uri, "/alice/404", - !use_ntlm, bob, - FALSE, FALSE, + !alice, bob_via_ntlm, + !alice, alice_via_basic, alice ? SOUP_STATUS_NOT_FOUND : SOUP_STATUS_UNAUTHORIZED); + + /* 4. Should be exactly the same as #3, except the status code */ do_message (session, base_uri, "/alice", - !use_ntlm, bob, - FALSE, FALSE, + !alice, bob_via_ntlm, + !alice, alice_via_basic, alice ? SOUP_STATUS_OK : SOUP_STATUS_UNAUTHORIZED); + + /* 5. This path requires auth as Bob; Alice-via-NTLM will get + * an immediate 401 and not try to reauthenticate. + * Alice-via-Basic will get a 401 and then try to do Basic + * (and fail). Bob-via-NTLM will try to do NTLM right away and + * succeed. + */ do_message (session, base_uri, "/bob", - !use_ntlm || alice, bob, - FALSE, FALSE, + !bob_via_ntlm, bob_via_ntlm, + !bob_via_ntlm, alice_via_basic, bob ? SOUP_STATUS_OK : SOUP_STATUS_UNAUTHORIZED); + + /* 6. Back to /alice. Somewhat the inverse of #5; Bob-via-NTLM + * will get an immediate 401 and not try again, Alice-via-NTLM + * will try to do NTLM right away and succeed. Alice-via-Basic + * still knows about this path, so will try Basic right away + * and succeed. + */ do_message (session, base_uri, "/alice", - !use_ntlm || bob, alice, - FALSE, FALSE, + !alice_via_ntlm, alice_via_ntlm, + !alice_via_ntlm, alice_via_basic, alice ? SOUP_STATUS_OK : SOUP_STATUS_UNAUTHORIZED); + + /* 7. 
Server accepts Basic auth from either user, but not NTLM. + * Since Bob-via-NTLM is unauthenticated at this point, he'll try + * NTLM before realizing that the server doesn't support it. + */ do_message (session, base_uri, "/basic", - FALSE, bob, + FALSE, bob_via_ntlm, TRUE, user != NULL, user != NULL ? SOUP_STATUS_OK : SOUP_STATUS_UNAUTHORIZED); + + /* 8. Server accepts Basic or NTLM from either user. + * Alice-via-NTLM is still authenticated at this point from #6, + * and Bob-via-NTLM is authenticated from #7, so neither + * of them will do anything. + */ do_message (session, base_uri, "/either", !use_ntlm, FALSE, !use_ntlm, !use_ntlm && user != NULL, diff --git a/tests/redirect-test.c b/tests/redirect-test.c index f99d9c7..aa1fb54 100644 --- a/tests/redirect-test.c +++ b/tests/redirect-test.c @@ -226,12 +226,125 @@ do_redirect_tests (SoupURI *base_uri) soup_test_session_abort_unref (session); session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); - debug_printf (1, "Sync session\n"); + debug_printf (1, "\nSync session\n"); for (n = 0; n < n_tests; n++) do_test (session, base_uri, n); soup_test_session_abort_unref (session); } +typedef struct { + SoupSession *session; + SoupMessage *msg1, *msg2; + SoupURI *uri1, *uri2; + SoupSocket *sock1, *sock2; +} ConnectionTestData; + +static void +msg2_finished (SoupSession *session, SoupMessage *msg2, gpointer user_data) +{ + if (!SOUP_STATUS_IS_SUCCESSFUL (msg2->status_code)) { + debug_printf (1, " msg2 failed: %d %s\n", + msg2->status_code, msg2->reason_phrase); + errors++; + } +} + +static void +unpause_msg1 (SoupMessage *msg2, gpointer user_data) +{ + ConnectionTestData *data = user_data; + + if (!data->sock1) { + debug_printf (1, " msg1 has no connection?\n"); + errors++; + } else if (!data->sock2) { + debug_printf (1, " msg2 has no connection?\n"); + errors++; + } else if (data->sock1 == data->sock2) { + debug_printf (1, " Both messages sharing the same connection\n"); + errors++; + } + + 
soup_session_unpause_message (data->session, data->msg1); +} + +static gboolean +msg1_just_restarted (gpointer user_data) +{ + ConnectionTestData *data = user_data; + + soup_session_pause_message (data->session, data->msg1); + + data->msg2 = soup_message_new_from_uri ("GET", data->uri2); + + g_signal_connect (data->msg2, "got_body", + G_CALLBACK (unpause_msg1), data); + + soup_session_queue_message (data->session, data->msg2, msg2_finished, data); + return FALSE; +} + +static void +msg1_about_to_restart (SoupMessage *msg1, gpointer user_data) +{ + ConnectionTestData *data = user_data; + + /* Do nothing when loading the redirected-to resource */ + if (!SOUP_STATUS_IS_REDIRECTION (data->msg1->status_code)) + return; + + /* We have to pause msg1 after the I/O finishes, but before + * the queue runs again. + */ + g_idle_add_full (G_PRIORITY_HIGH, msg1_just_restarted, data, NULL); +} + +static void +request_started (SoupSession *session, SoupMessage *msg, + SoupSocket *socket, gpointer user_data) +{ + ConnectionTestData *data = user_data; + + if (msg == data->msg1) + data->sock1 = socket; + else if (msg == data->msg2) + data->sock2 = socket; + else + g_warn_if_reached (); +} + +static void +do_connection_test (SoupURI *base_uri) +{ + ConnectionTestData data; + + debug_printf (1, "\nConnection reuse\n"); + memset (&data, 0, sizeof (data)); + + data.session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + g_signal_connect (data.session, "request-started", + G_CALLBACK (request_started), &data); + + data.uri1 = soup_uri_new_with_base (base_uri, "/301"); + data.uri2 = soup_uri_new_with_base (base_uri, "/"); + data.msg1 = soup_message_new_from_uri ("GET", data.uri1); + + g_signal_connect (data.msg1, "got-body", + G_CALLBACK (msg1_about_to_restart), &data); + soup_session_send_message (data.session, data.msg1); + + if (!SOUP_STATUS_IS_SUCCESSFUL (data.msg1->status_code)) { + debug_printf (1, " msg1 failed: %d %s\n", + data.msg1->status_code, 
data.msg1->reason_phrase); + errors++; + } + g_object_unref (data.msg1); + soup_uri_free (data.uri1); + soup_uri_free (data.uri2); + + soup_test_session_abort_unref (data.session); +} + static void server_callback (SoupServer *server, SoupMessage *msg, const char *path, GHashTable *query, @@ -240,6 +353,14 @@ server_callback (SoupServer *server, SoupMessage *msg, char *remainder; guint status_code; + /* Make sure that a HTTP/1.0 redirect doesn't cause an + * HTTP/1.0 re-request. (#521848) + */ + if (soup_message_get_http_version (msg) == SOUP_HTTP_1_0) { + soup_message_set_status (msg, SOUP_STATUS_BAD_REQUEST); + return; + } + if (g_str_has_prefix (path, "/bad")) { if (!strcmp (path, "/bad")) { soup_message_set_status (msg, SOUP_STATUS_FOUND); @@ -280,14 +401,6 @@ server_callback (SoupServer *server, SoupMessage *msg, return; } - /* Make sure that a HTTP/1.0 redirect doesn't cause an - * HTTP/1.0 re-request. (#521848) - */ - if (soup_message_get_http_version (msg) == SOUP_HTTP_1_0) { - soup_message_set_status (msg, SOUP_STATUS_BAD_REQUEST); - return; - } - soup_message_set_status (msg, SOUP_STATUS_OK); /* FIXME: this is wrong, though it doesn't matter for @@ -310,17 +423,15 @@ server_callback (SoupServer *server, SoupMessage *msg, return; } - /* See above comment re bug 521848. */ - soup_message_set_http_version (msg, SOUP_HTTP_1_0); + /* See above comment re bug 521848. We only test this on the + * double-redirects so that we get connection-reuse testing + * the rest of the time. + */ + if (*remainder == '/') + soup_message_set_http_version (msg, SOUP_HTTP_1_0); - soup_message_set_status (msg, status_code); - if (*remainder) { - soup_message_headers_replace (msg->response_headers, - "Location", remainder); - } else { - soup_message_headers_replace (msg->response_headers, - "Location", "/"); - } + soup_message_set_redirect (msg, status_code, + *remainder ? 
remainder : "/"); } static void @@ -367,6 +478,7 @@ main (int argc, char **argv) base_uri = soup_uri_new ("http://127.0.0.1"); soup_uri_set_port (base_uri, port); do_redirect_tests (base_uri); + do_connection_test (base_uri); soup_uri_free (base_uri); } else { printf ("Listening on port %d\n", port); diff --git a/tests/requester-test.c b/tests/requester-test.c index d303865..0c71d0b 100644 --- a/tests/requester-test.c +++ b/tests/requester-test.c @@ -18,7 +18,10 @@ SoupServer *server; GMainLoop *loop; char buf[1024]; -SoupBuffer *response; +SoupBuffer *response, *auth_response; + +#define REDIRECT_HTML_BODY "Try again\r\n" +#define AUTH_HTML_BODY "Unauthorized\r\n" static void get_index (void) @@ -34,6 +37,10 @@ get_index (void) } response = soup_buffer_new (SOUP_MEMORY_TAKE, contents, length); + + auth_response = soup_buffer_new (SOUP_MEMORY_STATIC, + AUTH_HTML_BODY, + strlen (AUTH_HTML_BODY)); } static void @@ -41,10 +48,68 @@ server_callback (SoupServer *server, SoupMessage *msg, const char *path, GHashTable *query, SoupClientContext *context, gpointer data) { + gboolean chunked = FALSE; + int i; + + if (strcmp (path, "/auth") == 0) { + soup_message_set_status (msg, SOUP_STATUS_UNAUTHORIZED); + soup_message_set_response (msg, "text/html", + SOUP_MEMORY_STATIC, + AUTH_HTML_BODY, + strlen (AUTH_HTML_BODY)); + soup_message_headers_append (msg->response_headers, + "WWW-Authenticate", + "Basic: realm=\"requester-test\""); + return; + } else if (strcmp (path, "/foo") == 0) { + soup_message_set_redirect (msg, SOUP_STATUS_FOUND, "/"); + /* Make the response HTML so if we sniff that instead of the + * real body, we'll notice. 
+ */ + soup_message_set_response (msg, "text/html", + SOUP_MEMORY_STATIC, + REDIRECT_HTML_BODY, + strlen (REDIRECT_HTML_BODY)); + return; + } else if (strcmp (path, "/chunked") == 0) { + chunked = TRUE; + } else if (strcmp (path, "/non-persistent") == 0) { + soup_message_headers_append (msg->response_headers, + "Connection", "close"); + } + soup_message_set_status (msg, SOUP_STATUS_OK); - soup_message_set_response (msg, "text/plain", - SOUP_MEMORY_STATIC, NULL, 0); - soup_message_body_append_buffer (msg->response_body, response); + + if (chunked) { + soup_message_headers_set_encoding (msg->response_headers, + SOUP_ENCODING_CHUNKED); + + for (i = 0; i < response->length; i += 8192) { + SoupBuffer *tmp; + + tmp = soup_buffer_new_subbuffer (response, i, + MIN (8192, response->length - i)); + soup_message_body_append_buffer (msg->response_body, tmp); + soup_buffer_free (tmp); + } + soup_message_body_complete (msg->response_body); + } else + soup_message_body_append_buffer (msg->response_body, response); +} + +static void +stream_closed (GObject *source, GAsyncResult *res, gpointer user_data) +{ + GInputStream *stream = G_INPUT_STREAM (source); + GError *error = NULL; + + if (!g_input_stream_close_finish (stream, res, &error)) { + debug_printf (1, " close failed: %s", error->message); + g_error_free (error); + errors++; + } + g_main_loop_quit (loop); + g_object_unref (stream); } static void @@ -57,14 +122,16 @@ test_read_ready (GObject *source, GAsyncResult *res, gpointer user_data) nread = g_input_stream_read_finish (stream, res, &error); if (nread == -1) { - debug_printf (1, " read_async failed: %s", error->message); + debug_printf (1, " read_async failed: %s", error->message); + g_error_free (error); errors++; g_object_unref (stream); g_main_loop_quit (loop); return; } else if (nread == 0) { - g_object_unref (stream); - g_main_loop_quit (loop); + g_input_stream_close_async (stream, + G_PRIORITY_DEFAULT, NULL, + stream_closed, NULL); return; } @@ -75,16 +142,56 @@ 
test_read_ready (GObject *source, GAsyncResult *res, gpointer user_data) } static void +auth_test_sent (GObject *source, GAsyncResult *res, gpointer user_data) +{ + GString *body = user_data; + GInputStream *stream; + GError *error = NULL; + SoupMessage *msg; + const char *content_type; + + stream = soup_request_send_finish (SOUP_REQUEST (source), res, &error); + if (!stream) { + debug_printf (1, " send_async failed: %s\n", error->message); + errors++; + g_main_loop_quit (loop); + return; + } + + msg = soup_request_http_get_message (SOUP_REQUEST_HTTP (source)); + if (msg->status_code != SOUP_STATUS_UNAUTHORIZED) { + debug_printf (1, " GET failed: %d %s\n", msg->status_code, + msg->reason_phrase); + errors++; + g_main_loop_quit (loop); + return; + } + g_object_unref (msg); + + content_type = soup_request_get_content_type (SOUP_REQUEST (source)); + if (g_strcmp0 (content_type, "text/html") != 0) { + debug_printf (1, " failed to sniff Content-Type: got %s\n", + content_type ? content_type : "(NULL)"); + errors++; + } + + g_input_stream_read_async (stream, buf, sizeof (buf), + G_PRIORITY_DEFAULT, NULL, + test_read_ready, body); +} + +static void test_sent (GObject *source, GAsyncResult *res, gpointer user_data) { GString *body = user_data; GInputStream *stream; GError *error = NULL; SoupMessage *msg; + const char *content_type; stream = soup_request_send_finish (SOUP_REQUEST (source), res, &error); if (!stream) { - debug_printf (1, " send_async failed: %s", error->message); + debug_printf (1, " send_async failed: %s\n", error->message); errors++; g_main_loop_quit (loop); return; @@ -92,12 +199,20 @@ test_sent (GObject *source, GAsyncResult *res, gpointer user_data) msg = soup_request_http_get_message (SOUP_REQUEST_HTTP (source)); if (!SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { - debug_printf (1, " GET failed: %d %s", msg->status_code, + debug_printf (1, " GET failed: %d %s\n", msg->status_code, msg->reason_phrase); errors++; g_main_loop_quit (loop); return; } + 
g_object_unref (msg); + + content_type = soup_request_get_content_type (SOUP_REQUEST (source)); + if (g_strcmp0 (content_type, "text/plain") != 0) { + debug_printf (1, " failed to sniff Content-Type: got %s\n", + content_type ? content_type : "(NULL)"); + errors++; + } g_input_stream_read_async (stream, buf, sizeof (buf), G_PRIORITY_DEFAULT, NULL, @@ -105,39 +220,105 @@ test_sent (GObject *source, GAsyncResult *res, gpointer user_data) } static void -do_test_for_thread_and_context (SoupSession *session, const char *uri) +request_started (SoupSession *session, SoupMessage *msg, + SoupSocket *socket, gpointer user_data) +{ + SoupSocket **save_socket = user_data; + + *save_socket = g_object_ref (socket); +} + +static void +do_one_test (SoupSession *session, SoupURI *uri, + GAsyncReadyCallback callback, SoupBuffer *expected_response, + gboolean persistent) { SoupRequester *requester; SoupRequest *request; GString *body; + guint started_id; + SoupSocket *socket = NULL; - requester = soup_requester_new (); - soup_session_add_feature (session, SOUP_SESSION_FEATURE (requester)); - g_object_unref (requester); + requester = SOUP_REQUESTER (soup_session_get_feature (session, SOUP_TYPE_REQUESTER)); body = g_string_new (NULL); + request = soup_requester_request_uri (requester, uri, NULL); + + started_id = g_signal_connect (session, "request-started", + G_CALLBACK (request_started), + &socket); - request = soup_requester_request (requester, uri, NULL); - soup_request_send_async (request, NULL, test_sent, body); + soup_request_send_async (request, NULL, callback, body); g_object_unref (request); loop = g_main_loop_new (soup_session_get_async_context (session), TRUE); g_main_loop_run (loop); g_main_loop_unref (loop); - if (body->len != response->length) { - debug_printf (1, " body length mismatch: expected %d, got %d\n", - (int)response->length, (int)body->len); + g_signal_handler_disconnect (session, started_id); + + if (body->len != expected_response->length) { + debug_printf 
(1, " body length mismatch: expected %d, got %d\n", + (int)expected_response->length, (int)body->len); errors++; - } else if (memcmp (body->str, response->data, response->length) != 0) { - debug_printf (1, " body data mismatch\n"); + } else if (memcmp (body->str, expected_response->data, + expected_response->length) != 0) { + debug_printf (1, " body data mismatch\n"); errors++; } + if (persistent) { + if (!soup_socket_is_connected (socket)) { + debug_printf (1, " socket not still connected!\n"); + errors++; + } + } else { + if (soup_socket_is_connected (socket)) { + debug_printf (1, " socket still connected!\n"); + errors++; + } + } + g_object_unref (socket); + g_string_free (body, TRUE); } static void +do_test_for_thread_and_context (SoupSession *session, const char *base_uri) +{ + SoupRequester *requester; + SoupURI *uri; + + requester = soup_requester_new (); + soup_session_add_feature (session, SOUP_SESSION_FEATURE (requester)); + g_object_unref (requester); + soup_session_add_feature_by_type (session, SOUP_TYPE_CONTENT_SNIFFER); + + debug_printf (1, " basic test\n"); + uri = soup_uri_new (base_uri); + do_one_test (session, uri, test_sent, response, TRUE); + soup_uri_free (uri); + + debug_printf (1, " chunked test\n"); + uri = soup_uri_new (base_uri); + soup_uri_set_path (uri, "/chunked"); + do_one_test (session, uri, test_sent, response, TRUE); + soup_uri_free (uri); + + debug_printf (1, " auth test\n"); + uri = soup_uri_new (base_uri); + soup_uri_set_path (uri, "/auth"); + do_one_test (session, uri, auth_test_sent, auth_response, TRUE); + soup_uri_free (uri); + + debug_printf (1, " non-persistent test\n"); + uri = soup_uri_new (base_uri); + soup_uri_set_path (uri, "/non-persistent"); + do_one_test (session, uri, test_sent, response, FALSE); + soup_uri_free (uri); +} + +static void do_simple_test (const char *uri) { SoupSession *session; @@ -161,11 +342,12 @@ do_test_with_context (const char *uri) session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, 
SOUP_SESSION_ASYNC_CONTEXT, async_context, NULL); - g_main_context_unref (async_context); do_test_for_thread_and_context (session, uri); soup_test_session_abort_unref (session); + g_main_context_pop_thread_default (async_context); + g_main_context_unref (async_context); return NULL; } @@ -183,8 +365,9 @@ do_thread_test (const char *uri) debug_printf (1, "Streaming in another thread\n"); - thread = g_thread_create ((GThreadFunc)do_test_with_context, - (gpointer)uri, TRUE, NULL); + thread = g_thread_new ("do_test_with_context", + (GThreadFunc)do_test_with_context, + (gpointer)uri); g_thread_join (thread); } @@ -198,7 +381,8 @@ main (int argc, char **argv) server = soup_test_server_new (TRUE); soup_server_add_handler (server, NULL, server_callback, NULL, NULL); - uri = g_strdup_printf ("http://127.0.0.1:%u/", soup_server_get_port (server)); + + uri = g_strdup_printf ("http://127.0.0.1:%u/foo", soup_server_get_port (server)); do_simple_test (uri); do_thread_test (uri); diff --git a/tests/resources/mbox.raw b/tests/resources/mbox.raw new file mode 100644 index 0000000000000000000000000000000000000000..bd4e2621c54eaa70575167d5f302c5a2d71e580d GIT binary patch literal 287 zcmV+)0pR}akwH(wFc^l<^jEw`Oe`rA1u7w!01+b`6yv$uK3vJR;?^Pjdpis=yy(?r zNiR+NJ$>H3)668GMCo`GQAT*B5D!I)b{Ae-dO~)3@Pz2IC$dGB@{O{Q#NaeBWRrAD zvDpx}MpjU4OtIclw&$U;BunyTi7n|R@x5VuBFP;5-G-cq*bp|T`P@@0uF=_T>R=i*8T_dr1Iekif}c7nf0$!dkhZBu+` z8zDAresponse_headers, - "Location", redir_uri); - soup_message_set_status (msg, SOUP_STATUS_MOVED_PERMANENTLY); + redir_uri = g_strdup_printf ("%s/", soup_message_get_uri (msg)->path); + soup_message_set_redirect (msg, SOUP_STATUS_MOVED_PERMANENTLY, + redir_uri); g_free (redir_uri); - g_free (uri); return; } @@ -130,6 +127,7 @@ do_get (SoupServer *server, SoupMessage *msg, const char *path) g_free (index_path); return; } + g_free (index_path); listing = get_directory_listing (path); soup_message_set_response (msg, "text/html", @@ -264,7 +262,6 @@ main (int argc, char **argv) int 
ssl_port = SOUP_ADDRESS_ANY_PORT; const char *ssl_cert_file = NULL, *ssl_key_file = NULL; - g_thread_init (NULL); g_type_init (); signal (SIGINT, quit); diff --git a/tests/simple-proxy.c b/tests/simple-proxy.c index 0101b63..52c2412 100644 --- a/tests/simple-proxy.c +++ b/tests/simple-proxy.c @@ -139,7 +139,6 @@ main (int argc, char **argv) int port = SOUP_ADDRESS_ANY_PORT; SoupAuthDomain *auth_domain = NULL; - g_thread_init (NULL); g_type_init (); signal (SIGINT, quit); diff --git a/tests/sniffing-test.c b/tests/sniffing-test.c index 828f1d5..2812334 100644 --- a/tests/sniffing-test.c +++ b/tests/sniffing-test.c @@ -23,7 +23,7 @@ server_callback (SoupServer *server, SoupMessage *msg, GError *error = NULL; char *query_key; char *contents; - gsize length, offset; + gsize length = 0, offset; gboolean empty_response = FALSE; if (msg->method != SOUP_METHOD_GET) { diff --git a/tests/ssl-test.c b/tests/ssl-test.c new file mode 100644 index 0000000..0308254 --- /dev/null +++ b/tests/ssl-test.c @@ -0,0 +1,187 @@ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "libsoup/soup.h" + +#include "test-utils.h" + +static void +do_properties_test_for_session (SoupSession *session, char *uri) +{ + SoupMessage *msg; + GTlsCertificate *cert; + GTlsCertificateFlags flags; + + msg = soup_message_new ("GET", uri); + soup_session_send_message (session, msg); + if (msg->status_code != SOUP_STATUS_OK) { + debug_printf (1, " FAILED: %d %s\n", + msg->status_code, msg->reason_phrase); + errors++; + } + + if (soup_message_get_https_status (msg, &cert, &flags)) { + if (!G_IS_TLS_CERTIFICATE (cert)) { + debug_printf (1, " No certificate?\n"); + errors++; + } + if (flags != G_TLS_CERTIFICATE_UNKNOWN_CA) { + debug_printf (1, " Wrong cert flags (got %x, wanted %x)\n", + flags, G_TLS_CERTIFICATE_UNKNOWN_CA); + errors++; + } + } else { + debug_printf (1, " Response not https\n"); + errors++; + } + if (soup_message_get_flags (msg) & SOUP_MESSAGE_CERTIFICATE_TRUSTED) { + debug_printf (1, " 
CERTIFICATE_TRUSTED set?\n"); + errors++; + } + + g_object_unref (msg); +} + +static void +do_properties_tests (char *uri) +{ + SoupSession *session; + + debug_printf (1, "\nSoupMessage properties\n"); + + debug_printf (1, " async\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + g_object_set (G_OBJECT (session), + SOUP_SESSION_SSL_CA_FILE, "/dev/null", + SOUP_SESSION_SSL_STRICT, FALSE, + NULL); + do_properties_test_for_session (session, uri); + soup_test_session_abort_unref (session); + + debug_printf (1, " sync\n"); + session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); + g_object_set (G_OBJECT (session), + SOUP_SESSION_SSL_CA_FILE, "/dev/null", + SOUP_SESSION_SSL_STRICT, FALSE, + NULL); + do_properties_test_for_session (session, uri); + soup_test_session_abort_unref (session); +} + +static void +do_one_strict_test (SoupSession *session, char *uri, + gboolean strict, gboolean with_ca_list, + guint expected_status) +{ + SoupMessage *msg; + + /* Note that soup_test_session_new() sets + * SOUP_SESSION_SSL_CA_FILE by default, and turns off + * SOUP_SESSION_SSL_STRICT. + */ + + g_object_set (G_OBJECT (session), + SOUP_SESSION_SSL_STRICT, strict, + SOUP_SESSION_SSL_CA_FILE, with_ca_list ? 
SRCDIR "/test-cert.pem" : "/dev/null", + NULL); + /* Close existing connections with old params */ + soup_session_abort (session); + + msg = soup_message_new ("GET", uri); + soup_session_send_message (session, msg); + if (msg->status_code != expected_status) { + debug_printf (1, " FAILED: %d %s (expected %d %s)\n", + msg->status_code, msg->reason_phrase, + expected_status, + soup_status_get_phrase (expected_status)); + if (msg->status_code == SOUP_STATUS_SSL_FAILED) { + GTlsCertificateFlags flags = 0; + + soup_message_get_https_status (msg, NULL, &flags); + debug_printf (1, " tls error flags: 0x%x\n", flags); + } + errors++; + } else if (with_ca_list && SOUP_STATUS_IS_SUCCESSFUL (msg->status_code)) { + if (!(soup_message_get_flags (msg) & SOUP_MESSAGE_CERTIFICATE_TRUSTED)) { + debug_printf (1, " CERTIFICATE_TRUSTED not set?\n"); + errors++; + } + } else { + if (with_ca_list && soup_message_get_flags (msg) & SOUP_MESSAGE_CERTIFICATE_TRUSTED) { + debug_printf (1, " CERTIFICATE_TRUSTED set?\n"); + errors++; + } + } + + g_object_unref (msg); +} + +static void +do_strict_tests (char *uri) +{ + SoupSession *session; + + debug_printf (1, "strict/nonstrict\n"); + + session = soup_test_session_new (SOUP_TYPE_SESSION_ASYNC, NULL); + debug_printf (1, " async with CA list\n"); + do_one_strict_test (session, uri, TRUE, TRUE, SOUP_STATUS_OK); + debug_printf (1, " async without CA list\n"); + do_one_strict_test (session, uri, TRUE, FALSE, SOUP_STATUS_SSL_FAILED); + debug_printf (1, " async non-strict with CA list\n"); + do_one_strict_test (session, uri, FALSE, TRUE, SOUP_STATUS_OK); + debug_printf (1, " async non-strict without CA list\n"); + do_one_strict_test (session, uri, FALSE, FALSE, SOUP_STATUS_OK); + soup_test_session_abort_unref (session); + + session = soup_test_session_new (SOUP_TYPE_SESSION_SYNC, NULL); + debug_printf (1, " sync with CA list\n"); + do_one_strict_test (session, uri, TRUE, TRUE, SOUP_STATUS_OK); + debug_printf (1, " sync without CA list\n"); + 
do_one_strict_test (session, uri, TRUE, FALSE, SOUP_STATUS_SSL_FAILED); + debug_printf (1, " sync non-strict with CA list\n"); + do_one_strict_test (session, uri, FALSE, TRUE, SOUP_STATUS_OK); + debug_printf (1, " sync non-strict without CA list\n"); + do_one_strict_test (session, uri, FALSE, FALSE, SOUP_STATUS_OK); + soup_test_session_abort_unref (session); +} + +static void +server_handler (SoupServer *server, + SoupMessage *msg, + const char *path, + GHashTable *query, + SoupClientContext *client, + gpointer user_data) +{ + soup_message_set_status (msg, SOUP_STATUS_OK); + soup_message_set_response (msg, "text/plain", + SOUP_MEMORY_STATIC, + "ok\r\n", 4); +} + +int +main (int argc, char **argv) +{ + SoupServer *server; + char *uri; + + test_init (argc, argv, NULL); + + if (tls_available) { + server = soup_test_server_new_ssl (TRUE); + soup_server_add_handler (server, NULL, server_handler, NULL, NULL); + uri = g_strdup_printf ("https://127.0.0.1:%u/", + soup_server_get_port (server)); + + do_strict_tests (uri); + do_properties_tests (uri); + + g_free (uri); + soup_test_server_quit_unref (server); + } + + test_cleanup (); + return errors != 0; +} diff --git a/tests/test-cert.pem b/tests/test-cert.pem index a6b6608..7f20626 100644 --- a/tests/test-cert.pem +++ b/tests/test-cert.pem @@ -1,22 +1,17 @@ -----BEGIN CERTIFICATE----- -MIIDjzCCAvigAwIBAgIBADANBgkqhkiG9w0BAQQFADCBkjELMAkGA1UEBhMCVVMx -FjAUBgNVBAgTDU1hc3NhY2h1c2V0dHMxDzANBgNVBAcTBkJvc3RvbjEPMA0GA1UE -ChMGWGltaWFuMRUwEwYDVQQLEwxTb3VwIEtpdGNoZW4xEjAQBgNVBAMTCWxvY2Fs -aG9zdDEeMBwGCSqGSIb3DQEJARYPc291cEB4aW1pYW4uY29tMB4XDTAzMDkyMzE4 -Mzc0MVoXDTEzMDkyMzE4Mzc0MVowgZIxCzAJBgNVBAYTAlVTMRYwFAYDVQQIEw1N -YXNzYWNodXNldHRzMQ8wDQYDVQQHEwZCb3N0b24xDzANBgNVBAoTBlhpbWlhbjEV -MBMGA1UECxMMU291cCBLaXRjaGVuMRIwEAYDVQQDEwlsb2NhbGhvc3QxHjAcBgkq -hkiG9w0BCQEWD3NvdXBAeGltaWFuLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAw -gYkCgYEAwzT/WxfdXqb2hbyjQav3FtN7tLxj3UbZKCKDYlizBsNLxb9exfebhV4h 
-CoAcaSNvLUnk3tAXnk+BDsIC1V4SbwqHYR17PnO3YZ8fkNwh5RGZwNx+zafdfFyu -+3Sh+mE03bljpDlTsgPL8CiFCd68MPRnuHoKt5iTpSyLC6Df0qcCAwEAAaOB8jCB -7zAdBgNVHQ4EFgQU9A9omrgBK5Kkl6FRxrgJU2voj4Uwgb8GA1UdIwSBtzCBtIAU -9A9omrgBK5Kkl6FRxrgJU2voj4WhgZikgZUwgZIxCzAJBgNVBAYTAlVTMRYwFAYD -VQQIEw1NYXNzYWNodXNldHRzMQ8wDQYDVQQHEwZCb3N0b24xDzANBgNVBAoTBlhp -bWlhbjEVMBMGA1UECxMMU291cCBLaXRjaGVuMRIwEAYDVQQDEwlsb2NhbGhvc3Qx -HjAcBgkqhkiG9w0BCQEWD3NvdXBAeGltaWFuLmNvbYIBADAMBgNVHRMEBTADAQH/ -MA0GCSqGSIb3DQEBBAUAA4GBAGCV56N7bEDNdE76T8i68gS00NIVVosVQjS39Ojd -ED+rvq0YYvuc2UXlzAonuCJfwFc73g4wSIjS0xijF5rnugZ+aay0LNv2y+Rf34CQ -RNswrwurFjlxgTOO+Wx2IM64mAnBfj43M8uKEZFqAiGKrZZ0xIqyUMlku0FgXDH2 -Jvpg +MIICpDCCAYwCCQC8Suc8hjfgujANBgkqhkiG9w0BAQUFADAUMRIwEAYDVQQDDAkx +MjcuMC4wLjEwHhcNMTEwOTE5MTkyMjA1WhcNMjEwOTE2MTkyMjA1WjAUMRIwEAYD +VQQDDAkxMjcuMC4wLjEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCr +OH7kblu+5zkYTk/ZG21OgbIyltxhLDHPmUpl4yDUFqX5BEtoVfg0Ms4ZuaoeDi4t +b2LV6Em3UDQwmwPMm2SakfJvRd3nfL6G3UkkBsVqT3V04M9u8fk6YgHPT8PN1Lj7 +5bv9AMRyQRV1QIPondMhbt8JhlmCR6ALbxYtsXkbQF7qzbj7Y2cjvoHzPQSk0QpB +rEUpj6Schm1NkPen48Z1X1faGL0F3roFHEsf6U1AjP5A4A/UGQsRtq35VzVnKgxW +N7jumUevEMIvyqLjmvK864AHMIRVCOls9GcIta80bViuVqgtuGgVGM/7SoZfIvPF +A10jIe7KQoXWAwRi4WclAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAJfihY634dRr +DeEA4SQ1e0/kB6EF8oeaC+5EuGOJxtoX+yXJfWJsEtmjRwobyOBVV997hdOtdZjo +mdJOCKerOFKccO9PLNJZ+/l4+NHv9OwOcu4UqvrSsps/pmr/22SIyQswbLLJfPAT +KjGTDLlj//zrLxzUGsu7lgGsY4s4fVbftFZv7P5AyErpwiFk8qM1BP0NMkn4XWSA +uSyTeB6O+tWYdh3bA1BeKC2P85sl6xFJI2gxvNTxtdcg9beDqNuEheJ+mEtD3P4w +HDG1vFaAX0MH1RJSDO/dIoJerN6LTjiTYYYg8yV0lmBxijv25Z/3Gi33OuG9jkdR +vXDwJpC+/ko= -----END CERTIFICATE----- diff --git a/tests/test-key.pem b/tests/test-key.pem index 9bea9bf..36bbcc2 100644 --- a/tests/test-key.pem +++ b/tests/test-key.pem @@ -1,15 +1,27 @@ -----BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQDDNP9bF91epvaFvKNBq/cW03u0vGPdRtkoIoNiWLMGw0vFv17F -95uFXiEKgBxpI28tSeTe0BeeT4EOwgLVXhJvCodhHXs+c7dhnx+Q3CHlEZnA3H7N 
-p918XK77dKH6YTTduWOkOVOyA8vwKIUJ3rww9Ge4egq3mJOlLIsLoN/SpwIDAQAB -AoGAOGAi6zzuKrrPcXo0L/ApEQeMr3rE4I/ogUXOaeWx9l8KkBafmU7UNGUl57Fu -AxM/tXWkypCQcaEGZau0Q8jCS5wKgynNi72F4OzBqgjgW4vvtrjfC1LagnCd2ZMX -V5XVECjO/sEDg0hJeOsXlKbECAgvHMU3dSCGO7DmuG9tIxkCQQDsth1VvVjOdfp6 -klOfYzbAM1p9HIcNPJMeuBFqq//UHX4aPqh/6G6W06TOTN+bjZBmitG9yjV958t2 -rPxl64f7AkEA0x0WOLm5S0LNsv7zwjXuTcj+NCHL36b3dK90oxX8Gq69PANL/EJY -ItpHNLgzzo4DRmQy8q0WZlC9HYk1YljERQJAEN7+AkFnlfeErb3GJgMNQO+oEGi7 -G29o0PSvkRnHNxgPB9HVcqBfWXKmOWnzOgQB+b0FK/DAlUOzFbdImf8KhwJAFLty -hzeV/tIcqUtoXNY3BOSMMkpvXxNikc75QVrTWzt10gLw32EUjreo7oB4dfx0TeFh -L3vYC0w6hkAHQhU9kQJAPSEQ+Bqzlk6BrQNrNFEVzi1Rwpz7LOzhOjuYW6bsiAdX -axA4r6Xh25x08ZU7cqX7gwVLHL6pgrEKuUs0Nc5Klg== +MIIEowIBAAKCAQEAqzh+5G5bvuc5GE5P2RttToGyMpbcYSwxz5lKZeMg1Bal+QRL +aFX4NDLOGbmqHg4uLW9i1ehJt1A0MJsDzJtkmpHyb0Xd53y+ht1JJAbFak91dODP +bvH5OmIBz0/DzdS4++W7/QDEckEVdUCD6J3TIW7fCYZZgkegC28WLbF5G0Be6s24 ++2NnI76B8z0EpNEKQaxFKY+knIZtTZD3p+PGdV9X2hi9Bd66BRxLH+lNQIz+QOAP +1BkLEbat+Vc1ZyoMVje47plHrxDCL8qi45ryvOuABzCEVQjpbPRnCLWvNG1Yrlao +LbhoFRjP+0qGXyLzxQNdIyHuykKF1gMEYuFnJQIDAQABAoIBABh+MXC99LPfYcR/ +V17IVJ+ZYANqn0XrS4jV9dWTYxvTzZRMr/jR63qUFfWKILLB9osbVvkgjIMDnyOg +2S9Iv2B5JkQSq4a0ypCCUTctHMpzaWr5ydKmHK/kWzvrvifQmVG3cGfl1zQ86TPn +sbbx9MTglllHdcB0PInGL1cD/z4NgEbRr1B6aBcq0AHqJJIIHvQNPmH6HGASg48j +hVAZ2sYjp9DSK97HKSABpBCsRN8XrMgYOAsu5a1rtXhjtMUZo6LpEwEwH94rDOgv +ZvJLGrpSvKKWPGcyANyK1a5nzO8INcXY+X54a8VB1YAjymzDy/WM1OX/jBfZem53 +HwC7m5UCgYEA3crcBI9f3aDgCYEUDWZ8hTQUVhvN3pi8QWr7QubMJmIyxBj394wm +hTo8woYxNqy9VkO0X+jxHA0rzN1KvS9j2CAzJ+tYNhEsRc6onjEXKGLhO/1gHbkT +rRx2J1uA2HQTbZNGhws9Qdl3A7syw6fFw9T/5rooWbiv5SDqSCRFzlcCgYEAxaDj +JYyLLl+Jbsltkv71th5TvLi14q6KlZJGEUeSsymUx9evBS7s/h62Boe9/7Y63Zkl +dR3IvxX7HNqW/fGjuQr9jQvMMduil8L218lChuR/4HEsXrSvyw8MIMlDtjTWajB6 +c8eiU+z/5zrlhzooKk2LaQHTUrrbxk9rN0raEOMCgYEA2Klz1wyMVL/0O7SZdyG3 +4JPojdmpeZrwxGMSwt8dbR2ehAv0KCID+z/R3SEj8Eo8x4lqKgsvhfyj3gQLH9as +jZOfBY8U4/RQsHzaIXbJLY2yg1zYSRDkVMap8Xak3k4+MFufmQp0s+ARMFbtl05M 
+lip8NdOC2WregVFvLDwq6Q8CgYBy3gyoqoPLNGRhLFqv8dlHPWFWc9XkJ6cNQLPR +H1S5JhYAAfEMhjXhjmAmc4ePtY+JdZY7+E/SISiPoM3aVDThPO4aqRzKbeqXYw6u +ZaBxXyakgaNUeJkk4V4fQFxG73cgyYSi/wnu1fX3pFf8vWTTEbdSFWmK0GklXsvm +m28cGQKBgGvG//W8NGfXWL/komyKXw8GJ41Ip0sa20KNkNZwAaA1BVTaHIYT+rxo +SgVQPHmzP8J9p4U0d9lQ5BW0LWERVkFHOg8k0evDsSm3FVbeRCBjlKqVUOLoo4Hk +A+fSYWWWl1j9E9urpiT/d4AQY1bFUxcUebDSK9XT6ZPOusyX0fSe -----END RSA PRIVATE KEY----- diff --git a/tests/test-utils.c b/tests/test-utils.c index 15495a0..47f1db0 100644 --- a/tests/test-utils.c +++ b/tests/test-utils.c @@ -81,7 +81,6 @@ test_init (int argc, char **argv, GOptionEntry *entries) GError *error = NULL; GTlsBackend *tls_backend; - g_thread_init (NULL); g_type_init (); name = strrchr (argv[0], '/'); @@ -230,7 +229,6 @@ soup_test_session_new (GType type, ...) g_object_set (G_OBJECT (session), SOUP_SESSION_SSL_CA_FILE, SRCDIR "/test-cert.pem", - SOUP_SESSION_SSL_STRICT, FALSE, NULL); if (http_debug_level && !logger) { @@ -298,8 +296,7 @@ test_server_new (gboolean in_own_thread, gboolean ssl) if (in_own_thread) { GThread *thread; - thread = g_thread_create (run_server_thread, server, - TRUE, NULL); + thread = g_thread_new ("server_thread", run_server_thread, server); g_object_set_data (G_OBJECT (server), "thread", thread); } else soup_server_run_async (server); diff --git a/tests/timeout-test.c b/tests/timeout-test.c index d3b6279..4cde526 100644 --- a/tests/timeout-test.c +++ b/tests/timeout-test.c @@ -135,6 +135,7 @@ do_timeout_tests (char *fast_uri, char *slow_uri) NULL); do_tests_for_session (timeout_session, NULL, plain_session, fast_uri, slow_uri); soup_test_session_abort_unref (timeout_session); + soup_test_session_abort_unref (plain_session); } static gboolean diff --git a/tests/uri-parsing.c b/tests/uri-parsing.c index e8568a9..fb04144 100644 --- a/tests/uri-parsing.c +++ b/tests/uri-parsing.c @@ -12,71 +12,122 @@ static struct { const char *uri_string, *result; + const SoupURI bits; } abs_tests[] = { - { "foo:", "foo:" }, - { 
"file:/dev/null", "file:/dev/null" }, - { "file:///dev/null", "file:///dev/null" }, - { "ftp://user@host/path", "ftp://user@host/path" }, - { "ftp://user@host:9999/path", "ftp://user@host:9999/path" }, - { "ftp://user:password@host/path", "ftp://user@host/path" }, - { "ftp://user:password@host:9999/path", "ftp://user@host:9999/path" }, - { "ftp://user:password@host", "ftp://user@host" }, - { "http://us%65r@host", "http://user@host/" }, - { "http://us%40r@host", "http://us%40r@host/" }, - { "http://us%3ar@host", "http://us%3Ar@host/" }, - { "http://us%2fr@host", "http://us%2Fr@host/" }, - { "http://us%3fr@host", "http://us%3Fr@host/" }, - { "http://host?query", "http://host/?query" }, + { "foo:", "foo:", + { "foo", NULL, NULL, NULL, 0, "", NULL, NULL } }, + { "file:/dev/null", "file:/dev/null", + { "file", NULL, NULL, NULL, 0, "/dev/null", NULL, NULL } }, + { "file:///dev/null", "file:///dev/null", + { "file", NULL, NULL, "", 0, "/dev/null", NULL, NULL } }, + { "ftp://user@host/path", "ftp://user@host/path", + { "ftp", "user", NULL, "host", 21, "/path", NULL, NULL } }, + { "ftp://user@host:9999/path", "ftp://user@host:9999/path", + { "ftp", "user", NULL, "host", 9999, "/path", NULL, NULL } }, + { "ftp://user:password@host/path", "ftp://user@host/path", + { "ftp", "user", "password", "host", 21, "/path", NULL, NULL } }, + { "ftp://user:password@host:9999/path", "ftp://user@host:9999/path", + { "ftp", "user", "password", "host", 9999, "/path", NULL, NULL } }, + { "ftp://user:password@host", "ftp://user@host", + { "ftp", "user", "password", "host", 21, "", NULL, NULL } }, + { "http://us%65r@host", "http://user@host/", + { "http", "user", NULL, "host", 80, "/", NULL, NULL } }, + { "http://us%40r@host", "http://us%40r@host/", + { "http", "us\x40r", NULL, "host", 80, "/", NULL, NULL } }, + { "http://us%3ar@host", "http://us%3Ar@host/", + { "http", "us\x3ar", NULL, "host", 80, "/", NULL, NULL } }, + { "http://us%2fr@host", "http://us%2Fr@host/", + { "http", "us\x2fr", 
NULL, "host", 80, "/", NULL, NULL } }, + { "http://us%3fr@host", "http://us%3Fr@host/", + { "http", "us\x3fr", NULL, "host", 80, "/", NULL, NULL } }, + { "http://host?query", "http://host/?query", + { "http", NULL, NULL, "host", 80, "/", "query", NULL } }, { "http://host/path?query=http%3A%2F%2Fhost%2Fpath%3Fchildparam%3Dchildvalue¶m=value", - "http://host/path?query=http%3A%2F%2Fhost%2Fpath%3Fchildparam%3Dchildvalue¶m=value" }, + "http://host/path?query=http%3A%2F%2Fhost%2Fpath%3Fchildparam%3Dchildvalue¶m=value", + { "http", NULL, NULL, "host", 80, "/path", "query=http%3A%2F%2Fhost%2Fpath%3Fchildparam%3Dchildvalue¶m=value", NULL } }, { "http://control-chars/%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%7F", - "http://control-chars/%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%7F"}, + "http://control-chars/%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%7F", + { "http", NULL, NULL, "control-chars", 80, "/%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%7F", NULL, NULL } }, { "http://space/%20", - "http://space/%20" }, + "http://space/%20", + { "http", NULL, NULL, "space", 80, "/%20", NULL, NULL } }, { "http://delims/%3C%3E%23%25%22", - "http://delims/%3C%3E%23%25%22" }, + "http://delims/%3C%3E%23%25%22", + { "http", NULL, NULL, "delims", 80, "/%3C%3E%23%25%22", NULL, NULL } }, { "http://unwise-chars/%7B%7D%7C%5C%5E%5B%5D%60", - "http://unwise-chars/%7B%7D%7C%5C%5E%5B%5D%60" }, + "http://unwise-chars/%7B%7D%7C%5C%5E%5B%5D%60", + { "http", NULL, NULL, "unwise-chars", 80, "/%7B%7D%7C%5C%5E%5B%5D%60", NULL, NULL } }, /* From RFC 2732 */ { "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/index.html", - "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]/index.html" }, + "http://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]/index.html", + { "http", NULL, NULL, 
"FEDC:BA98:7654:3210:FEDC:BA98:7654:3210", 80, "/index.html", NULL, NULL } }, { "http://[1080:0:0:0:8:800:200C:417A]/index.html", - "http://[1080:0:0:0:8:800:200C:417A]/index.html" }, + "http://[1080:0:0:0:8:800:200C:417A]/index.html", + { "http", NULL, NULL, "1080:0:0:0:8:800:200C:417A", 80, "/index.html", NULL, NULL } }, { "http://[3ffe:2a00:100:7031::1]", - "http://[3ffe:2a00:100:7031::1]/" }, + "http://[3ffe:2a00:100:7031::1]/", + { "http", NULL, NULL, "3ffe:2a00:100:7031::1", 80, "/", NULL, NULL } }, { "http://[1080::8:800:200C:417A]/foo", - "http://[1080::8:800:200C:417A]/foo" }, + "http://[1080::8:800:200C:417A]/foo", + { "http", NULL, NULL, "1080::8:800:200C:417A", 80, "/foo", NULL, NULL } }, { "http://[::192.9.5.5]/ipng", - "http://[::192.9.5.5]/ipng" }, + "http://[::192.9.5.5]/ipng", + { "http", NULL, NULL, "::192.9.5.5", 80, "/ipng", NULL, NULL } }, { "http://[::FFFF:129.144.52.38]:80/index.html", - "http://[::FFFF:129.144.52.38]/index.html" }, + "http://[::FFFF:129.144.52.38]/index.html", + { "http", NULL, NULL, "::FFFF:129.144.52.38", 80, "/index.html", NULL, NULL } }, { "http://[2010:836B:4179::836B:4179]", - "http://[2010:836B:4179::836B:4179]/" }, + "http://[2010:836B:4179::836B:4179]/", + { "http", NULL, NULL, "2010:836B:4179::836B:4179", 80, "/", NULL, NULL } }, /* Try to recover certain kinds of invalid URIs */ { "http://host/path with spaces", - "http://host/path%20with%20spaces" }, - { " http://host/path", "http://host/path" }, - { "http://host/path ", "http://host/path" }, - { "http://host ", "http://host/" }, - { "http://host:999 ", "http://host:999/" }, - { "http://host/pa\nth", "http://host/path" }, - { "http:\r\n//host/path", "http://host/path" }, - { "http://\thost/path", "http://host/path" }, + "http://host/path%20with%20spaces", + { "http", NULL, NULL, "host", 80, "/path%20with%20spaces", NULL, NULL } }, + { " http://host/path", "http://host/path", + { "http", NULL, NULL, "host", 80, "/path", NULL, NULL } }, + { "http://host/path ", 
"http://host/path", + { "http", NULL, NULL, "host", 80, "/path", NULL, NULL } }, + { "http://host ", "http://host/", + { "http", NULL, NULL, "host", 80, "/", NULL, NULL } }, + { "http://host:999 ", "http://host:999/", + { "http", NULL, NULL, "host", 999, "/", NULL, NULL } }, + { "http://host/pa\nth", "http://host/path", + { "http", NULL, NULL, "host", 80, "/path", NULL, NULL } }, + { "http:\r\n//host/path", "http://host/path", + { "http", NULL, NULL, "host", 80, "/path", NULL, NULL } }, + { "http://\thost/path", "http://host/path", + { "http", NULL, NULL, "host", 80, "/path", NULL, NULL } }, /* Bug 594405; 0-length is different from not-present */ - { "http://host/path?", "http://host/path?" }, - { "http://host/path#", "http://host/path#" }, + { "http://host/path?", "http://host/path?", + { "http", NULL, NULL, "host", 80, "/path", "", NULL } }, + { "http://host/path#", "http://host/path#", + { "http", NULL, NULL, "host", 80, "/path", NULL, "" } }, /* Bug 590524; ignore badly-%-encoding */ - { "http://host/path%", "http://host/path%" }, - { "http://h%ost/path", "http://h%25ost/path" }, - { "http://host/path%%", "http://host/path%%" }, - { "http://host/path%%%", "http://host/path%%%" }, - { "http://host/path%/x/", "http://host/path%/x/" }, - { "http://host/path%0x/", "http://host/path%0x/" }, - { "http://host/path%ax", "http://host/path%ax" } + { "http://host/path%", "http://host/path%", + { "http", NULL, NULL, "host", 80, "/path%", NULL, NULL } }, + { "http://h%ost/path", "http://h%25ost/path", + { "http", NULL, NULL, "h%ost", 80, "/path", NULL, NULL } }, + { "http://host/path%%", "http://host/path%%", + { "http", NULL, NULL, "host", 80, "/path%%", NULL, NULL } }, + { "http://host/path%%%", "http://host/path%%%", + { "http", NULL, NULL, "host", 80, "/path%%%", NULL, NULL } }, + { "http://host/path%/x/", "http://host/path%/x/", + { "http", NULL, NULL, "host", 80, "/path%/x/", NULL, NULL } }, + { "http://host/path%0x/", "http://host/path%0x/", + { "http", NULL, NULL, 
"host", 80, "/path%0x/", NULL, NULL } }, + { "http://host/path%ax", "http://host/path%ax", + { "http", NULL, NULL, "host", 80, "/path%ax", NULL, NULL } }, + + /* Bug 662806; %-encode non-ASCII characters */ + { "http://host/p\xc3\xa4th/", "http://host/p%C3%A4th/", + { "http", NULL, NULL, "host", 80, "/p%C3%A4th/", NULL, NULL } }, + + { "HTTP:////////////////", "http:////////////////", + { "http", NULL, NULL, "", 80, "//////////////", NULL, NULL } }, }; static int num_abs_tests = G_N_ELEMENTS(abs_tests); @@ -84,48 +135,90 @@ static int num_abs_tests = G_N_ELEMENTS(abs_tests); static const char *base = "http://a/b/c/d;p?q"; static struct { const char *uri_string, *result; + const SoupURI bits; } rel_tests[] = { - { "g:h", "g:h" }, - { "g", "http://a/b/c/g" }, - { "./g", "http://a/b/c/g" }, - { "g/", "http://a/b/c/g/" }, - { "/g", "http://a/g" }, - { "//g", "http://g/" }, - { "?y", "http://a/b/c/d;p?y" }, - { "g?y", "http://a/b/c/g?y" }, - { "#s", "http://a/b/c/d;p?q#s" }, - { "g#s", "http://a/b/c/g#s" }, - { "g?y#s", "http://a/b/c/g?y#s" }, - { ";x", "http://a/b/c/;x" }, - { "g;x", "http://a/b/c/g;x" }, - { "g;x?y#s", "http://a/b/c/g;x?y#s" }, - { ".", "http://a/b/c/" }, - { "./", "http://a/b/c/" }, - { "..", "http://a/b/" }, - { "../", "http://a/b/" }, - { "../g", "http://a/b/g" }, - { "../..", "http://a/" }, - { "../../", "http://a/" }, - { "../../g", "http://a/g" }, - { "", "http://a/b/c/d;p?q" }, - { "../../../g", "http://a/g" }, - { "../../../../g", "http://a/g" }, - { "/./g", "http://a/g" }, - { "/../g", "http://a/g" }, - { "g.", "http://a/b/c/g." }, - { ".g", "http://a/b/c/.g" }, - { "g..", "http://a/b/c/g.." 
}, - { "..g", "http://a/b/c/..g" }, - { "./../g", "http://a/b/g" }, - { "./g/.", "http://a/b/c/g/" }, - { "g/./h", "http://a/b/c/g/h" }, - { "g/../h", "http://a/b/c/h" }, - { "g;x=1/./y", "http://a/b/c/g;x=1/y" }, - { "g;x=1/../y", "http://a/b/c/y" }, - { "g?y/./x", "http://a/b/c/g?y/./x" }, - { "g?y/../x", "http://a/b/c/g?y/../x" }, - { "g#s/./x", "http://a/b/c/g#s/./x" }, - { "g#s/../x", "http://a/b/c/g#s/../x" }, + { "g:h", "g:h", + { "g", NULL, NULL, NULL, 0, "h", NULL, NULL } }, + { "g", "http://a/b/c/g", + { "http", NULL, NULL, "a", 80, "/b/c/g", NULL, NULL } }, + { "./g", "http://a/b/c/g", + { "http", NULL, NULL, "a", 80, "/b/c/g", NULL, NULL } }, + { "g/", "http://a/b/c/g/", + { "http", NULL, NULL, "a", 80, "/b/c/g/", NULL, NULL } }, + { "/g", "http://a/g", + { "http", NULL, NULL, "a", 80, "/g", NULL, NULL } }, + { "//g", "http://g/", + { "http", NULL, NULL, "g", 80, "/", NULL, NULL } }, + { "?y", "http://a/b/c/d;p?y", + { "http", NULL, NULL, "a", 80, "/b/c/d;p", "y", NULL } }, + { "g?y", "http://a/b/c/g?y", + { "http", NULL, NULL, "a", 80, "/b/c/g", "y", NULL } }, + { "#s", "http://a/b/c/d;p?q#s", + { "http", NULL, NULL, "a", 80, "/b/c/d;p", "q", "s" } }, + { "g#s", "http://a/b/c/g#s", + { "http", NULL, NULL, "a", 80, "/b/c/g", NULL, "s" } }, + { "g?y#s", "http://a/b/c/g?y#s", + { "http", NULL, NULL, "a", 80, "/b/c/g", "y", "s" } }, + { ";x", "http://a/b/c/;x", + { "http", NULL, NULL, "a", 80, "/b/c/;x", NULL, NULL } }, + { "g;x", "http://a/b/c/g;x", + { "http", NULL, NULL, "a", 80, "/b/c/g;x", NULL, NULL } }, + { "g;x?y#s", "http://a/b/c/g;x?y#s", + { "http", NULL, NULL, "a", 80, "/b/c/g;x", "y", "s" } }, + { ".", "http://a/b/c/", + { "http", NULL, NULL, "a", 80, "/b/c/", NULL, NULL } }, + { "./", "http://a/b/c/", + { "http", NULL, NULL, "a", 80, "/b/c/", NULL, NULL } }, + { "..", "http://a/b/", + { "http", NULL, NULL, "a", 80, "/b/", NULL, NULL } }, + { "../", "http://a/b/", + { "http", NULL, NULL, "a", 80, "/b/", NULL, NULL } }, + { "../g", 
"http://a/b/g", + { "http", NULL, NULL, "a", 80, "/b/g", NULL, NULL } }, + { "../..", "http://a/", + { "http", NULL, NULL, "a", 80, "/", NULL, NULL } }, + { "../../", "http://a/", + { "http", NULL, NULL, "a", 80, "/", NULL, NULL } }, + { "../../g", "http://a/g", + { "http", NULL, NULL, "a", 80, "/g", NULL, NULL } }, + { "", "http://a/b/c/d;p?q", + { "http", NULL, NULL, "a", 80, "/b/c/d;p", "q", NULL } }, + { "../../../g", "http://a/g", + { "http", NULL, NULL, "a", 80, "/g", NULL, NULL } }, + { "../../../../g", "http://a/g", + { "http", NULL, NULL, "a", 80, "/g", NULL, NULL } }, + { "/./g", "http://a/g", + { "http", NULL, NULL, "a", 80, "/g", NULL, NULL } }, + { "/../g", "http://a/g", + { "http", NULL, NULL, "a", 80, "/g", NULL, NULL } }, + { "g.", "http://a/b/c/g.", + { "http", NULL, NULL, "a", 80, "/b/c/g.", NULL, NULL } }, + { ".g", "http://a/b/c/.g", + { "http", NULL, NULL, "a", 80, "/b/c/.g", NULL, NULL } }, + { "g..", "http://a/b/c/g..", + { "http", NULL, NULL, "a", 80, "/b/c/g..", NULL, NULL } }, + { "..g", "http://a/b/c/..g", + { "http", NULL, NULL, "a", 80, "/b/c/..g", NULL, NULL } }, + { "./../g", "http://a/b/g", + { "http", NULL, NULL, "a", 80, "/b/g", NULL, NULL } }, + { "./g/.", "http://a/b/c/g/", + { "http", NULL, NULL, "a", 80, "/b/c/g/", NULL, NULL } }, + { "g/./h", "http://a/b/c/g/h", + { "http", NULL, NULL, "a", 80, "/b/c/g/h", NULL, NULL } }, + { "g/../h", "http://a/b/c/h", + { "http", NULL, NULL, "a", 80, "/b/c/h", NULL, NULL } }, + { "g;x=1/./y", "http://a/b/c/g;x=1/y", + { "http", NULL, NULL, "a", 80, "/b/c/g;x=1/y", NULL, NULL } }, + { "g;x=1/../y", "http://a/b/c/y", + { "http", NULL, NULL, "a", 80, "/b/c/y", NULL, NULL } }, + { "g?y/./x", "http://a/b/c/g?y/./x", + { "http", NULL, NULL, "a", 80, "/b/c/g", "y/./x", NULL } }, + { "g?y/../x", "http://a/b/c/g?y/../x", + { "http", NULL, NULL, "a", 80, "/b/c/g", "y/../x", NULL } }, + { "g#s/./x", "http://a/b/c/g#s/./x", + { "http", NULL, NULL, "a", 80, "/b/c/g", NULL, "s/./x" } }, + { "g#s/../x", 
"http://a/b/c/g#s/../x", + { "http", NULL, NULL, "a", 80, "/b/c/g", NULL, "s/../x" } }, /* RFC 3986 notes that some old parsers will parse this as * a relative URL ("http://a/b/c/g"), but it should be @@ -133,7 +226,7 @@ static struct { * correctly as being absolute, but then reject it since it's * an http URL with no host. */ - { "http:g", NULL } + { "http:g", NULL, { NULL } } }; static int num_rel_tests = G_N_ELEMENTS(rel_tests); @@ -149,9 +242,42 @@ static struct { }; static int num_eq_tests = G_N_ELEMENTS(eq_tests); +#define test_cmpstr(a, b) _test_cmpstr (#a, #b, a, b) + +static gboolean +_test_cmpstr (const char *got_desc, + const char *exp_desc, + const char *got, + const char *expected) +{ + if (got == expected) + return TRUE; + + if (got == NULL) { + debug_printf (1, "ERR\n %s = NULL, expected %s = \"%s\"\n", + got_desc, exp_desc, expected); + return FALSE; + } + + if (expected == NULL) { + debug_printf (1, "ERR\n %s = \"%s\", expected %s = NULL\n", + got_desc, got, exp_desc); + return FALSE; + } + + if (strcmp (got, expected) != 0) { + debug_printf (1, "ERR\n %s = \"%s\", expected %s = \"%s\"\n", + got_desc, got, exp_desc, expected); + return FALSE; + } + + return TRUE; +} + static gboolean do_uri (SoupURI *base_uri, const char *base_str, - const char *in_uri, const char *out_uri) + const char *in_uri, const char *out_uri, + const SoupURI *bits) { SoupURI *uri; char *uri_string; @@ -176,6 +302,40 @@ do_uri (SoupURI *base_uri, const char *base_str, } } + if (bits != NULL) { + gboolean failed = FALSE; + + if (!test_cmpstr (uri->scheme, bits->scheme)) + failed = TRUE; + + if (!test_cmpstr (uri->user, bits->user)) + failed = TRUE; + + if (!test_cmpstr (uri->password, bits->password)) + failed = TRUE; + + if (!test_cmpstr (uri->host, bits->host)) + failed = TRUE; + + if (uri->port != bits->port) { + debug_printf (1, "ERR\n port was %u, expected %u\n", + uri->port, bits->port); + failed = TRUE; + } + + if (!test_cmpstr (uri->path, bits->path)) + failed = TRUE; 
+ + if (!test_cmpstr (uri->query, bits->query)) + failed = TRUE; + + if (!test_cmpstr (uri->fragment, bits->fragment)) + failed = TRUE; + + if (failed) + return FALSE; + } + uri_string = soup_uri_to_string (uri, FALSE); soup_uri_free (uri); @@ -195,6 +355,156 @@ do_uri (SoupURI *base_uri, const char *base_str, return TRUE; } +static void +do_soup_uri_null_tests (void) +{ + SoupURI *uri, *uri2; + char *uri_string; + + debug_printf (1, "\nsoup_uri_new (NULL)\n"); + uri = soup_uri_new (NULL); + if (SOUP_URI_IS_VALID (uri) || SOUP_URI_VALID_FOR_HTTP (uri)) { + debug_printf (1, " ERROR: soup_uri_new(NULL) returns valid URI?\n"); + errors++; + } + + /* This implicitly also verifies that none of these methods g_warn */ + if (soup_uri_get_scheme (uri) || + soup_uri_get_user (uri) || + soup_uri_get_password (uri) || + soup_uri_get_host (uri) || + soup_uri_get_port (uri) || + soup_uri_get_path (uri) || + soup_uri_get_query (uri) || + soup_uri_get_fragment (uri)) { + debug_printf (1, " ERROR: soup_uri_new(NULL) returns non-empty URI?\n"); + errors++; + } + + expect_warning = TRUE; + uri2 = soup_uri_new_with_base (uri, "/path"); + if (uri2 || expect_warning) { + debug_printf (1, " ERROR: soup_uri_new_with_base didn't fail on NULL URI?\n"); + errors++; + expect_warning = FALSE; + } + + expect_warning = TRUE; + uri_string = soup_uri_to_string (uri, FALSE); + if (expect_warning) { + debug_printf (1, " ERROR: soup_uri_to_string didn't fail on NULL URI?\n"); + errors++; + expect_warning = FALSE; + } else if (*uri_string) { + debug_printf (1, " ERROR: soup_uri_to_string on NULL URI returned '%s'\n", + uri_string); + errors++; + } + g_free (uri_string); + + soup_uri_set_scheme (uri, SOUP_URI_SCHEME_HTTP); + if (SOUP_URI_IS_VALID (uri) || SOUP_URI_VALID_FOR_HTTP (uri)) { + debug_printf (1, " ERROR: setting scheme on NULL URI makes it valid?\n"); + errors++; + } + + expect_warning = TRUE; + uri_string = soup_uri_to_string (uri, FALSE); + if (expect_warning) { + debug_printf (1, " 
ERROR: soup_uri_to_string didn't fail on scheme-only URI?\n"); + errors++; + expect_warning = FALSE; + } else if (strcmp (uri_string, "http:") != 0) { + debug_printf (1, " ERROR: soup_uri_to_string returned '%s' instead of 'http:'\n", + uri_string); + errors++; + } + g_free (uri_string); + + soup_uri_set_host (uri, "localhost"); + if (SOUP_URI_IS_VALID (uri)) { + debug_printf (1, " ERROR: setting scheme+host on NULL URI makes it valid?\n"); + errors++; + } + if (SOUP_URI_VALID_FOR_HTTP (uri)) { + debug_printf (1, " ERROR: setting scheme+host on NULL URI makes it valid for http?\n"); + errors++; + } + + expect_warning = TRUE; + uri_string = soup_uri_to_string (uri, FALSE); + if (expect_warning) { + debug_printf (1, " ERROR: soup_uri_to_string didn't fail on scheme+host URI?\n"); + errors++; + expect_warning = FALSE; + } else if (strcmp (uri_string, "http://localhost/") != 0) { + debug_printf (1, " ERROR: soup_uri_to_string with NULL path returned '%s' instead of 'http://localhost/'\n", + uri_string); + errors++; + } + g_free (uri_string); + + expect_warning = TRUE; + uri2 = soup_uri_new_with_base (uri, "/path"); + if (expect_warning) { + debug_printf (1, " ERROR: soup_uri_new_with_base didn't warn on NULL+scheme URI?\n"); + errors++; + expect_warning = FALSE; + } else if (!uri2) { + debug_printf (1, " ERROR: soup_uri_new_with_base didn't fix path on NULL+scheme URI\n"); + errors++; + } + + if (uri2) { + uri_string = soup_uri_to_string (uri2, FALSE); + if (!uri_string) { + debug_printf (1, " ERROR: soup_uri_to_string failed on uri2?\n"); + errors++; + } else if (strcmp (uri_string, "http://localhost/path") != 0) { + debug_printf (1, " ERROR: soup_uri_to_string returned '%s' instead of 'http://localhost/path'\n", + uri_string); + errors++; + } + g_free (uri_string); + soup_uri_free (uri2); + } + + expect_warning = TRUE; + soup_uri_set_path (uri, NULL); + if (expect_warning) { + debug_printf (1, " ERROR: setting path to NULL doesn't warn\n"); + errors++; + 
expect_warning = FALSE; + } + if (!uri->path || *uri->path) { + debug_printf (1, " ERROR: setting path to NULL != \"\"\n"); + errors++; + soup_uri_set_path (uri, ""); + } + + uri_string = soup_uri_to_string (uri, FALSE); + if (!uri_string) { + debug_printf (1, " ERROR: soup_uri_to_string failed on complete URI?\n"); + errors++; + } else if (strcmp (uri_string, "http://localhost/") != 0) { + debug_printf (1, " ERROR: soup_uri_to_string with empty path returned '%s' instead of 'http://localhost/'\n", + uri_string); + errors++; + } + g_free (uri_string); + + if (!SOUP_URI_IS_VALID (uri)) { + debug_printf (1, " ERROR: setting scheme+path on NULL URI doesn't make it valid?\n"); + errors++; + } + if (!SOUP_URI_VALID_FOR_HTTP (uri)) { + debug_printf (1, " ERROR: setting scheme+host+path on NULL URI doesn't make it valid for http?\n"); + errors++; + } + + soup_uri_free (uri); +} + int main (int argc, char **argv) { @@ -207,7 +517,7 @@ main (int argc, char **argv) debug_printf (1, "Absolute URI parsing\n"); for (i = 0; i < num_abs_tests; i++) { if (!do_uri (NULL, NULL, abs_tests[i].uri_string, - abs_tests[i].result)) + abs_tests[i].result, &abs_tests[i].bits)) errors++; } @@ -228,7 +538,7 @@ main (int argc, char **argv) for (i = 0; i < num_rel_tests; i++) { if (!do_uri (base_uri, base, rel_tests[i].uri_string, - rel_tests[i].result)) + rel_tests[i].result, &rel_tests[i].bits)) errors++; } soup_uri_free (base_uri); @@ -251,6 +561,8 @@ main (int argc, char **argv) soup_uri_free (uri2); } + do_soup_uri_null_tests (); + test_cleanup (); return errors != 0; } diff --git a/tests/xmlrpc-server-test.c b/tests/xmlrpc-server-test.c index 9eae702..ebb7ca6 100644 --- a/tests/xmlrpc-server-test.c +++ b/tests/xmlrpc-server-test.c @@ -13,6 +13,10 @@ #include "test-utils.h" +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif + GMainLoop *loop; static void diff --git a/tests/xmlrpc-test.c b/tests/xmlrpc-test.c index c7c1774..f442e2c 100644 --- 
a/tests/xmlrpc-test.c +++ b/tests/xmlrpc-test.c @@ -12,6 +12,10 @@ #include "test-utils.h" +#ifdef G_GNUC_BEGIN_IGNORE_DEPRECATIONS +G_GNUC_BEGIN_IGNORE_DEPRECATIONS +#endif + static SoupSession *session; static const char *default_uri = "http://127.0.0.1:47524/xmlrpc-server.php"; static const char *uri = NULL; -- 2.7.4