Diffstat (limited to 'src/extern/url.c')
-rw-r--r-- | src/extern/url.c | 859
1 file changed, 503 insertions, 356 deletions
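Beyond the reindent to a brace-on-the-same-line style, the functional changes in the diff below are: url_fopen() now enables CURLOPT_FOLLOWLOCATION and CURLOPT_FAILONERROR, fill_buffer() records the HTTP response code through a new get_http_response() helper that drains curl_multi_info_read(), and a new http_response_str() maps status codes to reason phrases. As a standalone illustration of that info-read pattern (a sketch, not code from this file: it uses curl_multi_wait() for brevity where url.c keeps its select() loop, and the URL is a placeholder):

#include <stdio.h>
#include <curl/curl.h>

/* Minimal demonstration of the pattern get_http_response() relies on:
 * after curl_multi_perform() has driven a transfer, curl_multi_info_read()
 * yields a CURLMsg per finished easy handle, and CURLINFO_RESPONSE_CODE
 * reports the HTTP status that handle saw. */
int main(void)
{
    curl_global_init(CURL_GLOBAL_DEFAULT);

    CURL *easy = curl_easy_init();
    CURLM *multi = curl_multi_init();
    curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/"); /* placeholder URL */
    curl_multi_add_handle(multi, easy);

    int still_running = 1;
    while (still_running) {
        curl_multi_perform(multi, &still_running);
        curl_multi_wait(multi, NULL, 0, 100, NULL); /* wait up to 100 ms for activity */
    }

    CURLMsg *msg;
    int queued;
    while ((msg = curl_multi_info_read(multi, &queued))) {
        if (msg->msg == CURLMSG_DONE) {
            long status = 0;
            curl_easy_getinfo(msg->easy_handle, CURLINFO_RESPONSE_CODE, &status);
            printf("HTTP %ld\n", status);
        }
    }

    curl_multi_remove_handle(multi, easy);
    curl_easy_cleanup(easy);
    curl_multi_cleanup(multi);
    curl_global_cleanup();
    return 0;
}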
diff --git a/src/extern/url.c b/src/extern/url.c index 9cb9707..fe54db2 100644 --- a/src/extern/url.c +++ b/src/extern/url.c @@ -47,6 +47,7 @@ * </DESC> */ +#include <url.h> #include "url.h" /* we use a global one for convenience */ @@ -56,355 +57,367 @@ static CURLM *multi_handle; static size_t write_callback(char *buffer, size_t size, size_t nitems, - void *userp) -{ - char *newbuff; - size_t rembuff; - - URL_FILE *url = (URL_FILE *)userp; - size *= nitems; - - rembuff = url->buffer_len - url->buffer_pos; /* remaining space in buffer */ - - if(size > rembuff) { - /* not enough space in buffer */ - newbuff = realloc(url->buffer, url->buffer_len + (size - rembuff)); - if(newbuff == NULL) { - fprintf(stderr, "callback buffer grow failed\n"); - size = rembuff; + void *userp) { + char *newbuff; + size_t rembuff; + + URL_FILE *url = (URL_FILE *) userp; + size *= nitems; + + rembuff = url->buffer_len - url->buffer_pos; /* remaining space in buffer */ + + if (size > rembuff) { + /* not enough space in buffer */ + newbuff = realloc(url->buffer, url->buffer_len + (size - rembuff)); + if (newbuff == NULL) { + fprintf(stderr, "callback buffer grow failed\n"); + size = rembuff; + } else { + /* realloc succeeded increase buffer size*/ + url->buffer_len += size - rembuff; + url->buffer = newbuff; + } } - else { - /* realloc succeeded increase buffer size*/ - url->buffer_len += size - rembuff; - url->buffer = newbuff; - } - } - memcpy(&url->buffer[url->buffer_pos], buffer, size); - url->buffer_pos += size; + memcpy(&url->buffer[url->buffer_pos], buffer, size); + url->buffer_pos += size; - return size; + return size; } /* use to attempt to fill the read buffer up to requested number of bytes */ -static int fill_buffer(URL_FILE *file, size_t want) -{ - fd_set fdread; - fd_set fdwrite; - fd_set fdexcep; - struct timeval timeout; - int rc; - CURLMcode mc; /* curl_multi_fdset() return code */ - - /* only attempt to fill buffer if transactions still running and buffer - * doesn't exceed required size already - */ - if((!file->still_running) || (file->buffer_pos > want)) - return 0; - - /* attempt to fill buffer */ - do { - int maxfd = -1; - long curl_timeo = -1; - - FD_ZERO(&fdread); - FD_ZERO(&fdwrite); - FD_ZERO(&fdexcep); - - /* set a suitable timeout to fail on */ - timeout.tv_sec = 60; /* 1 minute */ - timeout.tv_usec = 0; - - curl_multi_timeout(multi_handle, &curl_timeo); - if(curl_timeo >= 0) { - timeout.tv_sec = curl_timeo / 1000; - if(timeout.tv_sec > 1) - timeout.tv_sec = 1; - else - timeout.tv_usec = (curl_timeo % 1000) * 1000; - } - - /* get file descriptors from the transfers */ - mc = curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd); - - if(mc != CURLM_OK) { - fprintf(stderr, "curl_multi_fdset() failed, code %d.\n", mc); - break; - } - - /* On success the value of maxfd is guaranteed to be >= -1. We call - select(maxfd + 1, ...); specially in case of (maxfd == -1) there are - no fds ready yet so we call select(0, ...) --or Sleep() on Windows-- - to sleep 100ms, which is the minimum suggested value in the - curl_multi_fdset() doc. 
*/ - - if(maxfd == -1) { +static int fill_buffer(URL_FILE *file, size_t want) { + fd_set fdread; + fd_set fdwrite; + fd_set fdexcep; + struct timeval timeout; + int rc; + CURLMcode mc; /* curl_multi_fdset() return code */ + + /* only attempt to fill buffer if transactions still running and buffer + * doesn't exceed required size already + */ + if ((!file->still_running) || (file->buffer_pos > want)) + return 0; + + /* attempt to fill buffer */ + do { + int maxfd = -1; + long curl_timeo = -1; + + FD_ZERO(&fdread); + FD_ZERO(&fdwrite); + FD_ZERO(&fdexcep); + + /* set a suitable timeout to fail on */ + timeout.tv_sec = 60; /* 1 minute */ + timeout.tv_usec = 0; + + curl_multi_timeout(multi_handle, &curl_timeo); + if (curl_timeo >= 0) { + timeout.tv_sec = curl_timeo / 1000; + if (timeout.tv_sec > 1) + timeout.tv_sec = 1; + else + timeout.tv_usec = (curl_timeo % 1000) * 1000; + } + + /* get file descriptors from the transfers */ + mc = curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd); + + if (mc != CURLM_OK) { + fprintf(stderr, "curl_multi_fdset() failed, code %d.\n", mc); + break; + } + + /* On success the value of maxfd is guaranteed to be >= -1. We call + select(maxfd + 1, ...); specially in case of (maxfd == -1) there are + no fds ready yet so we call select(0, ...) --or Sleep() on Windows-- + to sleep 100ms, which is the minimum suggested value in the + curl_multi_fdset() doc. */ + + if (maxfd == -1) { #ifdef _WIN32 - Sleep(100); - rc = 0; + Sleep(100); + rc = 0; #else - /* Portable sleep for platforms other than Windows. */ - struct timeval wait = { 0, 100 * 1000 }; /* 100ms */ - rc = select(0, NULL, NULL, NULL, &wait); + /* Portable sleep for platforms other than Windows. */ + struct timeval wait = {0, 100 * 1000}; /* 100ms */ + rc = select(0, NULL, NULL, NULL, &wait); #endif - } - else { - /* Note that on some platforms 'timeout' may be modified by select(). - If you need access to the original value save a copy beforehand. */ - rc = select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout); - } - - switch(rc) { - case -1: - /* select error */ - break; + } else { + /* Note that on some platforms 'timeout' may be modified by select(). + If you need access to the original value save a copy beforehand. 
*/ + rc = select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout); + } + + switch (rc) { + case -1: + /* select error */ + break; + + case 0: + default: + /* timeout or readable/writable sockets */ + curl_multi_perform(multi_handle, &file->still_running); + file->http_status = get_http_response(multi_handle); + break; + } + } while (file->still_running && (file->buffer_pos < want)); + return 1; +} - case 0: - default: - /* timeout or readable/writable sockets */ - curl_multi_perform(multi_handle, &file->still_running); - break; +/* use to remove want bytes from the front of a files buffer */ +static int use_buffer(URL_FILE *file, size_t want) { + /* sort out buffer */ + if (file->buffer_pos <= want) { + /* ditch buffer - write will recreate */ + free(file->buffer); + file->buffer = NULL; + file->buffer_pos = 0; + file->buffer_len = 0; + } else { + /* move rest down make it available for later */ + memmove(file->buffer, + &file->buffer[want], + (file->buffer_pos - want)); + + file->buffer_pos -= want; } - } while(file->still_running && (file->buffer_pos < want)); - return 1; + return 0; } -/* use to remove want bytes from the front of a files buffer */ -static int use_buffer(URL_FILE *file, size_t want) -{ - /* sort out buffer */ - if(file->buffer_pos <= want) { - /* ditch buffer - write will recreate */ - free(file->buffer); - file->buffer = NULL; - file->buffer_pos = 0; - file->buffer_len = 0; - } - else { - /* move rest down make it available for later */ - memmove(file->buffer, - &file->buffer[want], - (file->buffer_pos - want)); - - file->buffer_pos -= want; - } - return 0; +/** + * + * @param handle + * @return + */ +long get_http_response(CURLM *handle) { + long http_status = 0; + CURLMsg *m = NULL; + + do { + int msg_queue = 0; + m = curl_multi_info_read(handle, &msg_queue); + if (m != NULL) { + curl_easy_getinfo(m->easy_handle, CURLINFO_RESPONSE_CODE, &http_status); + } + } while (m); + + return http_status; } -URL_FILE *url_fopen(const char *url, const char *operation) -{ - /* this code could check for URLs or types in the 'url' and - basically use the real fopen() for standard files */ +URL_FILE *url_fopen(const char *url, const char *operation) { + /* this code could check for URLs or types in the 'url' and + basically use the real fopen() for standard files */ - URL_FILE *file; - (void)operation; + URL_FILE *file; + (void) operation; - file = calloc(1, sizeof(URL_FILE)); - if(!file) - return NULL; + file = calloc(1, sizeof(URL_FILE)); + if (!file) + return NULL; - file->handle.file = fopen(url, operation); - if(file->handle.file) - file->type = CFTYPE_FILE; /* marked as URL */ + file->http_status = 0; + file->handle.file = fopen(url, operation); + if (file->handle.file) + file->type = CFTYPE_FILE; /* marked as URL */ - else { - file->type = CFTYPE_CURL; /* marked as URL */ - file->handle.curl = curl_easy_init(); + else { + file->type = CFTYPE_CURL; /* marked as URL */ + file->handle.curl = curl_easy_init(); - curl_easy_setopt(file->handle.curl, CURLOPT_URL, url); - curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file); - curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L); - curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback); + curl_easy_setopt(file->handle.curl, CURLOPT_URL, url); + curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file); + curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback); + curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L); + curl_easy_setopt(file->handle.curl, CURLOPT_FOLLOWLOCATION, 
1L); + curl_easy_setopt(file->handle.curl, CURLOPT_FAILONERROR, 1L); - if(!multi_handle) - multi_handle = curl_multi_init(); + if (!multi_handle) + multi_handle = curl_multi_init(); - curl_multi_add_handle(multi_handle, file->handle.curl); + curl_multi_add_handle(multi_handle, file->handle.curl); - /* lets start the fetch */ - curl_multi_perform(multi_handle, &file->still_running); + /* lets start the fetch */ + curl_multi_perform(multi_handle, &file->still_running); - if((file->buffer_pos == 0) && (!file->still_running)) { - /* if still_running is 0 now, we should return NULL */ + if ((file->buffer_pos == 0) && (!file->still_running)) { + /* if still_running is 0 now, we should return NULL */ - /* make sure the easy handle is not in the multi handle anymore */ - curl_multi_remove_handle(multi_handle, file->handle.curl); + /* make sure the easy handle is not in the multi handle anymore */ + curl_multi_remove_handle(multi_handle, file->handle.curl); - /* cleanup */ - curl_easy_cleanup(file->handle.curl); + /* cleanup */ + curl_easy_cleanup(file->handle.curl); - free(file); + free(file); - file = NULL; + file = NULL; + } } - } - return file; + return file; } -int url_fclose(URL_FILE *file) -{ - int ret = 0;/* default is good return */ +int url_fclose(URL_FILE *file) { + int ret = 0;/* default is good return */ - switch(file->type) { - case CFTYPE_FILE: - ret = fclose(file->handle.file); /* passthrough */ - break; + switch (file->type) { + case CFTYPE_FILE: + ret = fclose(file->handle.file); /* passthrough */ + break; - case CFTYPE_CURL: - /* make sure the easy handle is not in the multi handle anymore */ - curl_multi_remove_handle(multi_handle, file->handle.curl); + case CFTYPE_CURL: + /* make sure the easy handle is not in the multi handle anymore */ + curl_multi_remove_handle(multi_handle, file->handle.curl); - /* cleanup */ - curl_easy_cleanup(file->handle.curl); - break; + /* cleanup */ + curl_easy_cleanup(file->handle.curl); + break; - default: /* unknown or supported type - oh dear */ - ret = EOF; - errno = EBADF; - break; - } + default: /* unknown or supported type - oh dear */ + ret = EOF; + errno = EBADF; + break; + } - free(file->buffer);/* free any allocated buffer space */ - free(file); + free(file->buffer);/* free any allocated buffer space */ + free(file); - return ret; + return ret; } -int url_feof(URL_FILE *file) -{ - int ret = 0; - - switch(file->type) { - case CFTYPE_FILE: - ret = feof(file->handle.file); - break; - - case CFTYPE_CURL: - if((file->buffer_pos == 0) && (!file->still_running)) - ret = 1; - break; - - default: /* unknown or supported type - oh dear */ - ret = -1; - errno = EBADF; - break; - } - return ret; +int url_feof(URL_FILE *file) { + int ret = 0; + + switch (file->type) { + case CFTYPE_FILE: + ret = feof(file->handle.file); + break; + + case CFTYPE_CURL: + if ((file->buffer_pos == 0) && (!file->still_running)) + ret = 1; + break; + + default: /* unknown or supported type - oh dear */ + ret = -1; + errno = EBADF; + break; + } + return ret; } -size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file) -{ - size_t want; +size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file) { + size_t want; - switch(file->type) { - case CFTYPE_FILE: - want = fread(ptr, size, nmemb, file->handle.file); - break; + switch (file->type) { + case CFTYPE_FILE: + want = fread(ptr, size, nmemb, file->handle.file); + break; - case CFTYPE_CURL: - want = nmemb * size; + case CFTYPE_CURL: + want = nmemb * size; - fill_buffer(file, want); + fill_buffer(file, 
want); - /* check if there's data in the buffer - if not fill_buffer() - * either errored or EOF */ - if(!file->buffer_pos) - return 0; + /* check if there's data in the buffer - if not fill_buffer() + * either errored or EOF */ + if (!file->buffer_pos) + return 0; - /* ensure only available data is considered */ - if(file->buffer_pos < want) - want = file->buffer_pos; + /* ensure only available data is considered */ + if (file->buffer_pos < want) + want = file->buffer_pos; - /* xfer data to caller */ - memcpy(ptr, file->buffer, want); + /* xfer data to caller */ + memcpy(ptr, file->buffer, want); - use_buffer(file, want); + use_buffer(file, want); - want = want / size; /* number of items */ - break; + want = want / size; /* number of items */ + break; - default: /* unknown or supported type - oh dear */ - want = 0; - errno = EBADF; - break; + default: /* unknown or supported type - oh dear */ + want = 0; + errno = EBADF; + break; - } - return want; + } + return want; } -char *url_fgets(char *ptr, size_t size, URL_FILE *file) -{ - size_t want = size - 1;/* always need to leave room for zero termination */ - size_t loop; - - switch(file->type) { - case CFTYPE_FILE: - ptr = fgets(ptr, (int)size, file->handle.file); - break; - - case CFTYPE_CURL: - fill_buffer(file, want); - - /* check if there's data in the buffer - if not fill either errored or - * EOF */ - if(!file->buffer_pos) - return NULL; - - /* ensure only available data is considered */ - if(file->buffer_pos < want) - want = file->buffer_pos; - - /*buffer contains data */ - /* look for newline or eof */ - for(loop = 0; loop < want; loop++) { - if(file->buffer[loop] == '\n') { - want = loop + 1;/* include newline */ - break; - } - } +char *url_fgets(char *ptr, size_t size, URL_FILE *file) { + size_t want = size - 1;/* always need to leave room for zero termination */ + size_t loop; + + switch (file->type) { + case CFTYPE_FILE: + ptr = fgets(ptr, (int) size, file->handle.file); + break; + + case CFTYPE_CURL: + fill_buffer(file, want); - /* xfer data to caller */ - memcpy(ptr, file->buffer, want); - ptr[want] = 0;/* always null terminate */ + /* check if there's data in the buffer - if not fill either errored or + * EOF */ + if (!file->buffer_pos) + return NULL; - use_buffer(file, want); + /* ensure only available data is considered */ + if (file->buffer_pos < want) + want = file->buffer_pos; - break; + /*buffer contains data */ + /* look for newline or eof */ + for (loop = 0; loop < want; loop++) { + if (file->buffer[loop] == '\n') { + want = loop + 1;/* include newline */ + break; + } + } - default: /* unknown or supported type - oh dear */ - ptr = NULL; - errno = EBADF; - break; - } + /* xfer data to caller */ + memcpy(ptr, file->buffer, want); + ptr[want] = 0;/* always null terminate */ - return ptr;/*success */ + use_buffer(file, want); + + break; + + default: /* unknown or supported type - oh dear */ + ptr = NULL; + errno = EBADF; + break; + } + + return ptr;/*success */ } -void url_rewind(URL_FILE *file) -{ - switch(file->type) { - case CFTYPE_FILE: - rewind(file->handle.file); /* passthrough */ - break; +void url_rewind(URL_FILE *file) { + switch (file->type) { + case CFTYPE_FILE: + rewind(file->handle.file); /* passthrough */ + break; - case CFTYPE_CURL: - /* halt transaction */ - curl_multi_remove_handle(multi_handle, file->handle.curl); + case CFTYPE_CURL: + /* halt transaction */ + curl_multi_remove_handle(multi_handle, file->handle.curl); - /* restart */ - curl_multi_add_handle(multi_handle, file->handle.curl); + /* restart 
*/ + curl_multi_add_handle(multi_handle, file->handle.curl); - /* ditch buffer - write will recreate - resets stream pos*/ - free(file->buffer); - file->buffer = NULL; - file->buffer_pos = 0; - file->buffer_len = 0; + /* ditch buffer - write will recreate - resets stream pos*/ + free(file->buffer); + file->buffer = NULL; + file->buffer_pos = 0; + file->buffer_len = 0; - break; + break; - default: /* unknown or supported type - oh dear */ - break; - } + default: /* unknown or supported type - oh dear */ + break; + } } #define FGETSFILE "fgets.test" @@ -414,95 +427,229 @@ void url_rewind(URL_FILE *file) /* Small main program to retrieve from a url using fgets and fread saving the * output to two test files (note the fgets method will corrupt binary files if * they contain 0 chars */ -int _test_url_fopen(int argc, char *argv[]) -{ - URL_FILE *handle; - FILE *outf; - - size_t nread; - char buffer[256]; - const char *url; - - if(argc < 2) - url = "http://192.168.7.3/testfile";/* default to testurl */ - else - url = argv[1];/* use passed url */ - - /* copy from url line by line with fgets */ - outf = fopen(FGETSFILE, "wb+"); - if(!outf) { - perror("couldn't open fgets output file\n"); - return 1; - } +int _test_url_fopen(int argc, char *argv[]) { + URL_FILE *handle; + FILE *outf; + + size_t nread; + char buffer[256]; + const char *url; + + if (argc < 2) + url = "http://192.168.7.3/testfile";/* default to testurl */ + else + url = argv[1];/* use passed url */ + + /* copy from url line by line with fgets */ + outf = fopen(FGETSFILE, "wb+"); + if (!outf) { + perror("couldn't open fgets output file\n"); + return 1; + } - handle = url_fopen(url, "r"); - if(!handle) { - printf("couldn't url_fopen() %s\n", url); - fclose(outf); - return 2; - } + handle = url_fopen(url, "r"); + if (!handle) { + printf("couldn't url_fopen() %s\n", url); + fclose(outf); + return 2; + } - while(!url_feof(handle)) { - url_fgets(buffer, sizeof(buffer), handle); - fwrite(buffer, 1, strlen(buffer), outf); - } + while (!url_feof(handle)) { + url_fgets(buffer, sizeof(buffer), handle); + fwrite(buffer, 1, strlen(buffer), outf); + } - url_fclose(handle); + url_fclose(handle); - fclose(outf); + fclose(outf); - /* Copy from url with fread */ - outf = fopen(FREADFILE, "wb+"); - if(!outf) { - perror("couldn't open fread output file\n"); - return 1; - } + /* Copy from url with fread */ + outf = fopen(FREADFILE, "wb+"); + if (!outf) { + perror("couldn't open fread output file\n"); + return 1; + } - handle = url_fopen("testfile", "r"); - if(!handle) { - printf("couldn't url_fopen() testfile\n"); - fclose(outf); - return 2; - } + handle = url_fopen("testfile", "r"); + if (!handle) { + printf("couldn't url_fopen() testfile\n"); + fclose(outf); + return 2; + } - do { - nread = url_fread(buffer, 1, sizeof(buffer), handle); - fwrite(buffer, 1, nread, outf); - } while(nread); + do { + nread = url_fread(buffer, 1, sizeof(buffer), handle); + fwrite(buffer, 1, nread, outf); + } while (nread); - url_fclose(handle); + url_fclose(handle); - fclose(outf); + fclose(outf); - /* Test rewind */ - outf = fopen(REWINDFILE, "wb+"); - if(!outf) { - perror("couldn't open fread output file\n"); - return 1; - } + /* Test rewind */ + outf = fopen(REWINDFILE, "wb+"); + if (!outf) { + perror("couldn't open fread output file\n"); + return 1; + } - handle = url_fopen("testfile", "r"); - if(!handle) { - printf("couldn't url_fopen() testfile\n"); - fclose(outf); - return 2; - } + handle = url_fopen("testfile", "r"); + if (!handle) { + printf("couldn't url_fopen() 
testfile\n"); + fclose(outf); + return 2; + } - nread = url_fread(buffer, 1, sizeof(buffer), handle); - fwrite(buffer, 1, nread, outf); - url_rewind(handle); + nread = url_fread(buffer, 1, sizeof(buffer), handle); + fwrite(buffer, 1, nread, outf); + url_rewind(handle); - buffer[0]='\n'; - fwrite(buffer, 1, 1, outf); + buffer[0] = '\n'; + fwrite(buffer, 1, 1, outf); - nread = url_fread(buffer, 1, sizeof(buffer), handle); - fwrite(buffer, 1, nread, outf); + nread = url_fread(buffer, 1, sizeof(buffer), handle); + fwrite(buffer, 1, nread, outf); - url_fclose(handle); + url_fclose(handle); - fclose(outf); + fclose(outf); - return 0;/* all done */ + return 0;/* all done */ +} + +const char *http_response_str(long code) { + switch (code) { + case 100: + return "Continue"; + case 101: + return "Switching Protocol"; + case 102: + return "Processing (WebDAV)"; + case 103: + return "Early Hints"; + case 200: + return "OK"; + case 201: + return "Created"; + case 202: + return "Accepted"; + case 203: + return "Non-Authoritative Information"; + case 204: + return "No Content"; + case 205: + return "Reset Content"; + case 206: + return "Partial Content"; + case 207: + return "Multi-Status (WebDAV)"; + case 208: + return "Already Reported"; + case 226: + return "IM Used"; + case 300: + return "Multiple Choices"; + case 301: + return "Moved Permanently"; + case 302: + return "Found"; + case 303: + return "See Other"; + case 304: + return "Not Modified"; + case 305: + return "Use Proxy (DEPRECATED)"; + case 306: + return "Unused"; + case 307: + return "Temporary Redirect"; + case 308: + return "Permanent Redirect"; + case 400: + return "Bad Request"; + case 401: + return "Unauthorized"; + case 402: + return "Payment Required"; + case 403: + return "Forbidden"; + case 404: + return "Not Found"; + case 405: + return "Method Not Allowed"; + case 406: + return "Not Acceptable"; + case 407: + return "Proxy Authentication Required"; + case 408: + return "Request Timeout"; + case 409: + return "Conflict"; + case 410: + return "Gone"; + case 411: + return "Length Required"; + case 412: + return "Precondition Failed"; + case 413: + return "Payload Too Large"; + case 414: + return "URI Too Long"; + case 415: + return "Unsupported Media Type"; + case 416: + return "Range Not Satisfiable"; + case 417: + return "Exception Failed"; + case 418: + return "I'm a teapot"; + case 419: + return "Expectation Failed"; + case 421: + return "Misdirected Request"; + case 422: + return "Unprocessable Entity (WebDAV)"; + case 423: + return "Locked (WebDAV)"; + case 424: + return "Failed Dependency (WebDAV)"; + case 425: + return "Too Early"; + case 426: + return "Upgrade Required"; + case 428: + return "Precondition Required"; + case 429: + return "Too Many Requests"; + case 431: + return "Request Header Fields Too Large"; + case 451: + return "Unavailable For Legal Reasons"; + case 500: + return "Internal Server Error"; + case 501: + return "Not Implemented"; + case 502: + return "Bad Gateway"; + case 503: + return "Service Unavailable"; + case 504: + return "Gateway Timeout"; + case 505: + return "HTTP Version Not Supported"; + case 506: + return "Variant Also Negotiates"; + case 507: + return "Insufficient Storage (WebDAV)"; + case 508: + return "Loop Detected (WebDAV)"; + case 510: + return "Not Extended"; + case 511: + return "Network Authentication Required"; + default: + return "Unknown"; + } } |
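For orientation, a minimal caller sketch of the fopen-style API this file provides, using the HTTP status plumbing added in this change. This is not part of the diff: the helper name fetch_to_stdout() is hypothetical, and it assumes url.h declares URL_FILE (including the http_status field added here), url_fopen(), url_fread(), url_fclose() and http_response_str().

#include <stdio.h>
#include "url.h"

/* Hypothetical caller: stream a URL to stdout with the fopen-style wrappers
 * and report the HTTP status that fill_buffer() records via
 * get_http_response(). */
int fetch_to_stdout(const char *url)
{
    char buffer[256];
    size_t nread;

    URL_FILE *handle = url_fopen(url, "r");
    if (!handle) {
        fprintf(stderr, "couldn't url_fopen() %s\n", url);
        return 1;
    }

    do {
        nread = url_fread(buffer, 1, sizeof(buffer), handle);
        fwrite(buffer, 1, nread, stdout);
    } while (nread);

    /* http_status stays 0 for plain local files and for transfers that never
     * produced a response code; otherwise it holds the last code seen. */
    if (handle->http_status != 0)
        fprintf(stderr, "HTTP %ld %s\n", handle->http_status,
                http_response_str(handle->http_status));

    return url_fclose(handle);
}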