author     Joseph Hunkeler <jhunkeler@gmail.com>    2020-01-21 23:49:46 -0500
committer  Joseph Hunkeler <jhunkeler@gmail.com>    2020-01-21 23:49:46 -0500
commit     1a427e0e5e16bc75d05a961a4001923b26b7b2f8 (patch)
tree       375c1e80c8982836a58b03839dd75921c1c6233b /src
parent     77a35c4a098a11bcb8d93a8748f21a930b88b0d5 (diff)
download   spmc-1a427e0e5e16bc75d05a961a4001923b26b7b2f8.tar.gz
groundhog day
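This commit wires the package manifest to a remote repository: init_config_global() now derives SPM_GLOBAL.repo_target from uname(2) as "sysname/machine" (overridable through a "repo_target" config key, which is run through normpath()), and manifest_read() uses that target to compose the download URL when no local manifest exists. A minimal sketch of that composition, using plain snprintf() in place of the project's join()/DIRSEPS helpers (stand-ins for illustration only):

```c
/* Hedged sketch: how the new repo_target and remote manifest URL fit together.
 * The real code uses the project's join()/normpath() helpers and DIRSEPS;
 * the base URL and the "manifest.dat" filename are taken from this commit. */
#include <stdio.h>
#include <sys/utsname.h>

int main(void) {
    struct utsname si;
    char repo_target[256];
    char remote_manifest[512];

    if (uname(&si) != 0)
        return 1;

    /* Default target: "<sysname>/<machine>", e.g. "Linux/x86_64";
     * a "repo_target" key in the config file overrides it. */
    snprintf(repo_target, sizeof(repo_target), "%s/%s", si.sysname, si.machine);

    /* manifest_read(NULL) falls back to fetching this URL when no local
     * manifest exists yet. */
    snprintf(remote_manifest, sizeof(remote_manifest),
             "http://astroconda.org/spm/%s/manifest.dat", repo_target);

    printf("%s\n", remote_manifest);
    return 0;
}
```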
Diffstat (limited to 'src')
-rw-r--r--  src/config_global.c    49
-rw-r--r--  src/deps.c              2
-rw-r--r--  src/extern/url.c      859
-rw-r--r--  src/manifest.c        192
-rw-r--r--  src/spm.c               5

5 files changed, 723 insertions(+), 384 deletions(-)
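The manifest.c hunks below also revise the on-disk manifest format: manifest_write() now emits a header line (SPM_MANIFEST_HEADER) followed by records of eight '|'-separated fields, appends a sha256 checksum of each package archive, and manifest_validate() rejects files whose separator count does not match SPM_MANIFEST_SEPARATOR_MAX. A hedged sketch of the record layout, parsed here with sscanf() rather than the project's split() helper; the macro values are not shown in this diff and the field widths are illustrative assumptions:

```c
/* Record layout written by manifest_write() after this commit.
 * Empty fields carry the SPM_MANIFEST_NODATA marker (the old code used the
 * literal "*"; the new macro's value is an assumption here). */
#include <stdio.h>

struct record {
    char archive[255];
    size_t size;
    char name[255];
    char version[255];
    char revision[255];
    size_t requirements_records;
    char requirements[1024];   /* comma-separated list, or the no-data marker */
    char checksum_sha256[65];  /* hex digest, or the no-data marker */
};

int parse_record(const char *line, struct record *r) {
    /* archive|size|name|version|revision|requirements_records|requirements|sha256 */
    int n = sscanf(line,
                   "%254[^|]|%zu|%254[^|]|%254[^|]|%254[^|]|%zu|%1023[^|]|%64s",
                   r->archive, &r->size, r->name, r->version, r->revision,
                   &r->requirements_records, r->requirements, r->checksum_sha256);
    return n == 8 ? 0 : -1;
}
```

Fields 7 and 8 hold the no-data marker when a package has no requirements or checksum, matching the `reqs ? reqs : SPM_MANIFEST_NODATA` logic in manifest_write().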
diff --git a/src/config_global.c b/src/config_global.c index 4305942..14f3a26 100644 --- a/src/config_global.c +++ b/src/config_global.c @@ -98,18 +98,33 @@ char *get_package_manifest(void) { char *ucd = get_user_conf_dir(); sprintf(template, "%s%c%s", ucd, DIRSEP, "manifest.dat"); + return strdup(template); + + /* + Manifest *manifest; + manifest = manifest_read(NULL); + if (manifest != NULL) { + manifest_free(manifest); + } if (access(template, F_OK) != 0) { fprintf(stderr, "Package manifest not found: %s\n", template); - Manifest *manifest = manifest_from(PKG_DIR); + manifest = manifest_from(PKG_DIR); + if (manifest == NULL) { + perror("manifest generator"); + fprintf(SYSERROR); + return NULL; + } manifest_write(manifest); manifest_free(manifest); } free(ucd); return strdup(template); + */ } + /** * Check whether SPM has access to external programs it needs */ @@ -148,12 +163,32 @@ void init_config_global(void) { SPM_GLOBAL.package_manifest = NULL; SPM_GLOBAL.config = NULL; SPM_GLOBAL.verbose = 0; + SPM_GLOBAL.repo_target = NULL; if (uname(&SPM_GLOBAL.sysinfo) != 0) { fprintf(SYSERROR); - exit(1); + exit(errno); } + // Initialize filesystem paths structure + SPM_GLOBAL.fs.binpath = calloc(strlen(SPM_PROGRAM_BIN) + 1, sizeof(char)); + SPM_GLOBAL.fs.includepath = calloc(strlen(SPM_PROGRAM_INCLUDE) + 1, sizeof(char)); + SPM_GLOBAL.fs.libpath = calloc(strlen(SPM_PROGRAM_LIB) + 1, sizeof(char)); + SPM_GLOBAL.fs.datapath = calloc(strlen(SPM_PROGRAM_DATA) + 1, sizeof(char)); + + if (!SPM_GLOBAL.fs.binpath || !SPM_GLOBAL.fs.includepath + || !SPM_GLOBAL.fs.libpath) { + perror("Unable to allocate memory for global filesystem paths"); + fprintf(SYSERROR); + exit(errno); + } + + strcpy(SPM_GLOBAL.fs.binpath, SPM_PROGRAM_BIN); + strcpy(SPM_GLOBAL.fs.includepath, SPM_PROGRAM_INCLUDE); + strcpy(SPM_GLOBAL.fs.libpath, SPM_PROGRAM_LIB); + strcpy(SPM_GLOBAL.fs.datapath, SPM_PROGRAM_DATA); + SPM_GLOBAL.fs.manpath = join((char *[]) {SPM_PROGRAM_DATA, "man", NULL}, DIRSEPS); + SPM_GLOBAL.user_config_basedir = get_user_conf_dir(); SPM_GLOBAL.user_config_file = get_user_config_file(); if (SPM_GLOBAL.user_config_file) { @@ -162,6 +197,14 @@ void init_config_global(void) { ConfigItem *item = NULL; + // Initialize repository target (i.e. 
repository path suffix) + SPM_GLOBAL.repo_target = join((char *[]) {SPM_GLOBAL.sysinfo.sysname, SPM_GLOBAL.sysinfo.machine, NULL}, DIRSEPS); + item = config_get(SPM_GLOBAL.config, "repo_target"); + if (item) { + free(SPM_GLOBAL.repo_target); + SPM_GLOBAL.repo_target = normpath(item->value); + } + // Initialize temp directory item = config_get(SPM_GLOBAL.config, "tmp_dir"); if (item) { @@ -198,12 +241,14 @@ void init_config_global(void) { item = config_get(SPM_GLOBAL.config, "package_manifest"); if (item) { SPM_GLOBAL.package_manifest = item->value; + /* if (access(SPM_GLOBAL.package_manifest, F_OK) != 0) { fprintf(stderr, "Package manifest not found: %s\n", SPM_GLOBAL.package_manifest); Manifest *manifest = manifest_from(PKG_DIR); manifest_write(manifest); manifest_free(manifest); } + */ } else { SPM_GLOBAL.package_manifest = get_package_manifest(); @@ -210,7 +210,7 @@ int dep_all(Dependencies **deps, const char *_package) { // Remove temporary data unlink(depfile); - unlink(tmpdir); + rmdir(tmpdir); free(suffix); return 0; } diff --git a/src/extern/url.c b/src/extern/url.c index 9cb9707..fe54db2 100644 --- a/src/extern/url.c +++ b/src/extern/url.c @@ -47,6 +47,7 @@ * </DESC> */ +#include <url.h> #include "url.h" /* we use a global one for convenience */ @@ -56,355 +57,367 @@ static CURLM *multi_handle; static size_t write_callback(char *buffer, size_t size, size_t nitems, - void *userp) -{ - char *newbuff; - size_t rembuff; - - URL_FILE *url = (URL_FILE *)userp; - size *= nitems; - - rembuff = url->buffer_len - url->buffer_pos; /* remaining space in buffer */ - - if(size > rembuff) { - /* not enough space in buffer */ - newbuff = realloc(url->buffer, url->buffer_len + (size - rembuff)); - if(newbuff == NULL) { - fprintf(stderr, "callback buffer grow failed\n"); - size = rembuff; + void *userp) { + char *newbuff; + size_t rembuff; + + URL_FILE *url = (URL_FILE *) userp; + size *= nitems; + + rembuff = url->buffer_len - url->buffer_pos; /* remaining space in buffer */ + + if (size > rembuff) { + /* not enough space in buffer */ + newbuff = realloc(url->buffer, url->buffer_len + (size - rembuff)); + if (newbuff == NULL) { + fprintf(stderr, "callback buffer grow failed\n"); + size = rembuff; + } else { + /* realloc succeeded increase buffer size*/ + url->buffer_len += size - rembuff; + url->buffer = newbuff; + } } - else { - /* realloc succeeded increase buffer size*/ - url->buffer_len += size - rembuff; - url->buffer = newbuff; - } - } - memcpy(&url->buffer[url->buffer_pos], buffer, size); - url->buffer_pos += size; + memcpy(&url->buffer[url->buffer_pos], buffer, size); + url->buffer_pos += size; - return size; + return size; } /* use to attempt to fill the read buffer up to requested number of bytes */ -static int fill_buffer(URL_FILE *file, size_t want) -{ - fd_set fdread; - fd_set fdwrite; - fd_set fdexcep; - struct timeval timeout; - int rc; - CURLMcode mc; /* curl_multi_fdset() return code */ - - /* only attempt to fill buffer if transactions still running and buffer - * doesn't exceed required size already - */ - if((!file->still_running) || (file->buffer_pos > want)) - return 0; - - /* attempt to fill buffer */ - do { - int maxfd = -1; - long curl_timeo = -1; - - FD_ZERO(&fdread); - FD_ZERO(&fdwrite); - FD_ZERO(&fdexcep); - - /* set a suitable timeout to fail on */ - timeout.tv_sec = 60; /* 1 minute */ - timeout.tv_usec = 0; - - curl_multi_timeout(multi_handle, &curl_timeo); - if(curl_timeo >= 0) { - timeout.tv_sec = curl_timeo / 1000; - if(timeout.tv_sec > 1) - timeout.tv_sec = 1; - 
else - timeout.tv_usec = (curl_timeo % 1000) * 1000; - } - - /* get file descriptors from the transfers */ - mc = curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd); - - if(mc != CURLM_OK) { - fprintf(stderr, "curl_multi_fdset() failed, code %d.\n", mc); - break; - } - - /* On success the value of maxfd is guaranteed to be >= -1. We call - select(maxfd + 1, ...); specially in case of (maxfd == -1) there are - no fds ready yet so we call select(0, ...) --or Sleep() on Windows-- - to sleep 100ms, which is the minimum suggested value in the - curl_multi_fdset() doc. */ - - if(maxfd == -1) { +static int fill_buffer(URL_FILE *file, size_t want) { + fd_set fdread; + fd_set fdwrite; + fd_set fdexcep; + struct timeval timeout; + int rc; + CURLMcode mc; /* curl_multi_fdset() return code */ + + /* only attempt to fill buffer if transactions still running and buffer + * doesn't exceed required size already + */ + if ((!file->still_running) || (file->buffer_pos > want)) + return 0; + + /* attempt to fill buffer */ + do { + int maxfd = -1; + long curl_timeo = -1; + + FD_ZERO(&fdread); + FD_ZERO(&fdwrite); + FD_ZERO(&fdexcep); + + /* set a suitable timeout to fail on */ + timeout.tv_sec = 60; /* 1 minute */ + timeout.tv_usec = 0; + + curl_multi_timeout(multi_handle, &curl_timeo); + if (curl_timeo >= 0) { + timeout.tv_sec = curl_timeo / 1000; + if (timeout.tv_sec > 1) + timeout.tv_sec = 1; + else + timeout.tv_usec = (curl_timeo % 1000) * 1000; + } + + /* get file descriptors from the transfers */ + mc = curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd); + + if (mc != CURLM_OK) { + fprintf(stderr, "curl_multi_fdset() failed, code %d.\n", mc); + break; + } + + /* On success the value of maxfd is guaranteed to be >= -1. We call + select(maxfd + 1, ...); specially in case of (maxfd == -1) there are + no fds ready yet so we call select(0, ...) --or Sleep() on Windows-- + to sleep 100ms, which is the minimum suggested value in the + curl_multi_fdset() doc. */ + + if (maxfd == -1) { #ifdef _WIN32 - Sleep(100); - rc = 0; + Sleep(100); + rc = 0; #else - /* Portable sleep for platforms other than Windows. */ - struct timeval wait = { 0, 100 * 1000 }; /* 100ms */ - rc = select(0, NULL, NULL, NULL, &wait); + /* Portable sleep for platforms other than Windows. */ + struct timeval wait = {0, 100 * 1000}; /* 100ms */ + rc = select(0, NULL, NULL, NULL, &wait); #endif - } - else { - /* Note that on some platforms 'timeout' may be modified by select(). - If you need access to the original value save a copy beforehand. */ - rc = select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout); - } - - switch(rc) { - case -1: - /* select error */ - break; + } else { + /* Note that on some platforms 'timeout' may be modified by select(). + If you need access to the original value save a copy beforehand. 
*/ + rc = select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout); + } + + switch (rc) { + case -1: + /* select error */ + break; + + case 0: + default: + /* timeout or readable/writable sockets */ + curl_multi_perform(multi_handle, &file->still_running); + file->http_status = get_http_response(multi_handle); + break; + } + } while (file->still_running && (file->buffer_pos < want)); + return 1; +} - case 0: - default: - /* timeout or readable/writable sockets */ - curl_multi_perform(multi_handle, &file->still_running); - break; +/* use to remove want bytes from the front of a files buffer */ +static int use_buffer(URL_FILE *file, size_t want) { + /* sort out buffer */ + if (file->buffer_pos <= want) { + /* ditch buffer - write will recreate */ + free(file->buffer); + file->buffer = NULL; + file->buffer_pos = 0; + file->buffer_len = 0; + } else { + /* move rest down make it available for later */ + memmove(file->buffer, + &file->buffer[want], + (file->buffer_pos - want)); + + file->buffer_pos -= want; } - } while(file->still_running && (file->buffer_pos < want)); - return 1; + return 0; } -/* use to remove want bytes from the front of a files buffer */ -static int use_buffer(URL_FILE *file, size_t want) -{ - /* sort out buffer */ - if(file->buffer_pos <= want) { - /* ditch buffer - write will recreate */ - free(file->buffer); - file->buffer = NULL; - file->buffer_pos = 0; - file->buffer_len = 0; - } - else { - /* move rest down make it available for later */ - memmove(file->buffer, - &file->buffer[want], - (file->buffer_pos - want)); - - file->buffer_pos -= want; - } - return 0; +/** + * + * @param handle + * @return + */ +long get_http_response(CURLM *handle) { + long http_status = 0; + CURLMsg *m = NULL; + + do { + int msg_queue = 0; + m = curl_multi_info_read(handle, &msg_queue); + if (m != NULL) { + curl_easy_getinfo(m->easy_handle, CURLINFO_RESPONSE_CODE, &http_status); + } + } while (m); + + return http_status; } -URL_FILE *url_fopen(const char *url, const char *operation) -{ - /* this code could check for URLs or types in the 'url' and - basically use the real fopen() for standard files */ +URL_FILE *url_fopen(const char *url, const char *operation) { + /* this code could check for URLs or types in the 'url' and + basically use the real fopen() for standard files */ - URL_FILE *file; - (void)operation; + URL_FILE *file; + (void) operation; - file = calloc(1, sizeof(URL_FILE)); - if(!file) - return NULL; + file = calloc(1, sizeof(URL_FILE)); + if (!file) + return NULL; - file->handle.file = fopen(url, operation); - if(file->handle.file) - file->type = CFTYPE_FILE; /* marked as URL */ + file->http_status = 0; + file->handle.file = fopen(url, operation); + if (file->handle.file) + file->type = CFTYPE_FILE; /* marked as URL */ - else { - file->type = CFTYPE_CURL; /* marked as URL */ - file->handle.curl = curl_easy_init(); + else { + file->type = CFTYPE_CURL; /* marked as URL */ + file->handle.curl = curl_easy_init(); - curl_easy_setopt(file->handle.curl, CURLOPT_URL, url); - curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file); - curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L); - curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback); + curl_easy_setopt(file->handle.curl, CURLOPT_URL, url); + curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file); + curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback); + curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L); + curl_easy_setopt(file->handle.curl, CURLOPT_FOLLOWLOCATION, 
1L); + curl_easy_setopt(file->handle.curl, CURLOPT_FAILONERROR, 1L); - if(!multi_handle) - multi_handle = curl_multi_init(); + if (!multi_handle) + multi_handle = curl_multi_init(); - curl_multi_add_handle(multi_handle, file->handle.curl); + curl_multi_add_handle(multi_handle, file->handle.curl); - /* lets start the fetch */ - curl_multi_perform(multi_handle, &file->still_running); + /* lets start the fetch */ + curl_multi_perform(multi_handle, &file->still_running); - if((file->buffer_pos == 0) && (!file->still_running)) { - /* if still_running is 0 now, we should return NULL */ + if ((file->buffer_pos == 0) && (!file->still_running)) { + /* if still_running is 0 now, we should return NULL */ - /* make sure the easy handle is not in the multi handle anymore */ - curl_multi_remove_handle(multi_handle, file->handle.curl); + /* make sure the easy handle is not in the multi handle anymore */ + curl_multi_remove_handle(multi_handle, file->handle.curl); - /* cleanup */ - curl_easy_cleanup(file->handle.curl); + /* cleanup */ + curl_easy_cleanup(file->handle.curl); - free(file); + free(file); - file = NULL; + file = NULL; + } } - } - return file; + return file; } -int url_fclose(URL_FILE *file) -{ - int ret = 0;/* default is good return */ +int url_fclose(URL_FILE *file) { + int ret = 0;/* default is good return */ - switch(file->type) { - case CFTYPE_FILE: - ret = fclose(file->handle.file); /* passthrough */ - break; + switch (file->type) { + case CFTYPE_FILE: + ret = fclose(file->handle.file); /* passthrough */ + break; - case CFTYPE_CURL: - /* make sure the easy handle is not in the multi handle anymore */ - curl_multi_remove_handle(multi_handle, file->handle.curl); + case CFTYPE_CURL: + /* make sure the easy handle is not in the multi handle anymore */ + curl_multi_remove_handle(multi_handle, file->handle.curl); - /* cleanup */ - curl_easy_cleanup(file->handle.curl); - break; + /* cleanup */ + curl_easy_cleanup(file->handle.curl); + break; - default: /* unknown or supported type - oh dear */ - ret = EOF; - errno = EBADF; - break; - } + default: /* unknown or supported type - oh dear */ + ret = EOF; + errno = EBADF; + break; + } - free(file->buffer);/* free any allocated buffer space */ - free(file); + free(file->buffer);/* free any allocated buffer space */ + free(file); - return ret; + return ret; } -int url_feof(URL_FILE *file) -{ - int ret = 0; - - switch(file->type) { - case CFTYPE_FILE: - ret = feof(file->handle.file); - break; - - case CFTYPE_CURL: - if((file->buffer_pos == 0) && (!file->still_running)) - ret = 1; - break; - - default: /* unknown or supported type - oh dear */ - ret = -1; - errno = EBADF; - break; - } - return ret; +int url_feof(URL_FILE *file) { + int ret = 0; + + switch (file->type) { + case CFTYPE_FILE: + ret = feof(file->handle.file); + break; + + case CFTYPE_CURL: + if ((file->buffer_pos == 0) && (!file->still_running)) + ret = 1; + break; + + default: /* unknown or supported type - oh dear */ + ret = -1; + errno = EBADF; + break; + } + return ret; } -size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file) -{ - size_t want; +size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file) { + size_t want; - switch(file->type) { - case CFTYPE_FILE: - want = fread(ptr, size, nmemb, file->handle.file); - break; + switch (file->type) { + case CFTYPE_FILE: + want = fread(ptr, size, nmemb, file->handle.file); + break; - case CFTYPE_CURL: - want = nmemb * size; + case CFTYPE_CURL: + want = nmemb * size; - fill_buffer(file, want); + fill_buffer(file, 
want); - /* check if there's data in the buffer - if not fill_buffer() - * either errored or EOF */ - if(!file->buffer_pos) - return 0; + /* check if there's data in the buffer - if not fill_buffer() + * either errored or EOF */ + if (!file->buffer_pos) + return 0; - /* ensure only available data is considered */ - if(file->buffer_pos < want) - want = file->buffer_pos; + /* ensure only available data is considered */ + if (file->buffer_pos < want) + want = file->buffer_pos; - /* xfer data to caller */ - memcpy(ptr, file->buffer, want); + /* xfer data to caller */ + memcpy(ptr, file->buffer, want); - use_buffer(file, want); + use_buffer(file, want); - want = want / size; /* number of items */ - break; + want = want / size; /* number of items */ + break; - default: /* unknown or supported type - oh dear */ - want = 0; - errno = EBADF; - break; + default: /* unknown or supported type - oh dear */ + want = 0; + errno = EBADF; + break; - } - return want; + } + return want; } -char *url_fgets(char *ptr, size_t size, URL_FILE *file) -{ - size_t want = size - 1;/* always need to leave room for zero termination */ - size_t loop; - - switch(file->type) { - case CFTYPE_FILE: - ptr = fgets(ptr, (int)size, file->handle.file); - break; - - case CFTYPE_CURL: - fill_buffer(file, want); - - /* check if there's data in the buffer - if not fill either errored or - * EOF */ - if(!file->buffer_pos) - return NULL; - - /* ensure only available data is considered */ - if(file->buffer_pos < want) - want = file->buffer_pos; - - /*buffer contains data */ - /* look for newline or eof */ - for(loop = 0; loop < want; loop++) { - if(file->buffer[loop] == '\n') { - want = loop + 1;/* include newline */ - break; - } - } +char *url_fgets(char *ptr, size_t size, URL_FILE *file) { + size_t want = size - 1;/* always need to leave room for zero termination */ + size_t loop; + + switch (file->type) { + case CFTYPE_FILE: + ptr = fgets(ptr, (int) size, file->handle.file); + break; + + case CFTYPE_CURL: + fill_buffer(file, want); - /* xfer data to caller */ - memcpy(ptr, file->buffer, want); - ptr[want] = 0;/* always null terminate */ + /* check if there's data in the buffer - if not fill either errored or + * EOF */ + if (!file->buffer_pos) + return NULL; - use_buffer(file, want); + /* ensure only available data is considered */ + if (file->buffer_pos < want) + want = file->buffer_pos; - break; + /*buffer contains data */ + /* look for newline or eof */ + for (loop = 0; loop < want; loop++) { + if (file->buffer[loop] == '\n') { + want = loop + 1;/* include newline */ + break; + } + } - default: /* unknown or supported type - oh dear */ - ptr = NULL; - errno = EBADF; - break; - } + /* xfer data to caller */ + memcpy(ptr, file->buffer, want); + ptr[want] = 0;/* always null terminate */ - return ptr;/*success */ + use_buffer(file, want); + + break; + + default: /* unknown or supported type - oh dear */ + ptr = NULL; + errno = EBADF; + break; + } + + return ptr;/*success */ } -void url_rewind(URL_FILE *file) -{ - switch(file->type) { - case CFTYPE_FILE: - rewind(file->handle.file); /* passthrough */ - break; +void url_rewind(URL_FILE *file) { + switch (file->type) { + case CFTYPE_FILE: + rewind(file->handle.file); /* passthrough */ + break; - case CFTYPE_CURL: - /* halt transaction */ - curl_multi_remove_handle(multi_handle, file->handle.curl); + case CFTYPE_CURL: + /* halt transaction */ + curl_multi_remove_handle(multi_handle, file->handle.curl); - /* restart */ - curl_multi_add_handle(multi_handle, file->handle.curl); + /* restart 
*/ + curl_multi_add_handle(multi_handle, file->handle.curl); - /* ditch buffer - write will recreate - resets stream pos*/ - free(file->buffer); - file->buffer = NULL; - file->buffer_pos = 0; - file->buffer_len = 0; + /* ditch buffer - write will recreate - resets stream pos*/ + free(file->buffer); + file->buffer = NULL; + file->buffer_pos = 0; + file->buffer_len = 0; - break; + break; - default: /* unknown or supported type - oh dear */ - break; - } + default: /* unknown or supported type - oh dear */ + break; + } } #define FGETSFILE "fgets.test" @@ -414,95 +427,229 @@ void url_rewind(URL_FILE *file) /* Small main program to retrieve from a url using fgets and fread saving the * output to two test files (note the fgets method will corrupt binary files if * they contain 0 chars */ -int _test_url_fopen(int argc, char *argv[]) -{ - URL_FILE *handle; - FILE *outf; - - size_t nread; - char buffer[256]; - const char *url; - - if(argc < 2) - url = "http://192.168.7.3/testfile";/* default to testurl */ - else - url = argv[1];/* use passed url */ - - /* copy from url line by line with fgets */ - outf = fopen(FGETSFILE, "wb+"); - if(!outf) { - perror("couldn't open fgets output file\n"); - return 1; - } +int _test_url_fopen(int argc, char *argv[]) { + URL_FILE *handle; + FILE *outf; + + size_t nread; + char buffer[256]; + const char *url; + + if (argc < 2) + url = "http://192.168.7.3/testfile";/* default to testurl */ + else + url = argv[1];/* use passed url */ + + /* copy from url line by line with fgets */ + outf = fopen(FGETSFILE, "wb+"); + if (!outf) { + perror("couldn't open fgets output file\n"); + return 1; + } - handle = url_fopen(url, "r"); - if(!handle) { - printf("couldn't url_fopen() %s\n", url); - fclose(outf); - return 2; - } + handle = url_fopen(url, "r"); + if (!handle) { + printf("couldn't url_fopen() %s\n", url); + fclose(outf); + return 2; + } - while(!url_feof(handle)) { - url_fgets(buffer, sizeof(buffer), handle); - fwrite(buffer, 1, strlen(buffer), outf); - } + while (!url_feof(handle)) { + url_fgets(buffer, sizeof(buffer), handle); + fwrite(buffer, 1, strlen(buffer), outf); + } - url_fclose(handle); + url_fclose(handle); - fclose(outf); + fclose(outf); - /* Copy from url with fread */ - outf = fopen(FREADFILE, "wb+"); - if(!outf) { - perror("couldn't open fread output file\n"); - return 1; - } + /* Copy from url with fread */ + outf = fopen(FREADFILE, "wb+"); + if (!outf) { + perror("couldn't open fread output file\n"); + return 1; + } - handle = url_fopen("testfile", "r"); - if(!handle) { - printf("couldn't url_fopen() testfile\n"); - fclose(outf); - return 2; - } + handle = url_fopen("testfile", "r"); + if (!handle) { + printf("couldn't url_fopen() testfile\n"); + fclose(outf); + return 2; + } - do { - nread = url_fread(buffer, 1, sizeof(buffer), handle); - fwrite(buffer, 1, nread, outf); - } while(nread); + do { + nread = url_fread(buffer, 1, sizeof(buffer), handle); + fwrite(buffer, 1, nread, outf); + } while (nread); - url_fclose(handle); + url_fclose(handle); - fclose(outf); + fclose(outf); - /* Test rewind */ - outf = fopen(REWINDFILE, "wb+"); - if(!outf) { - perror("couldn't open fread output file\n"); - return 1; - } + /* Test rewind */ + outf = fopen(REWINDFILE, "wb+"); + if (!outf) { + perror("couldn't open fread output file\n"); + return 1; + } - handle = url_fopen("testfile", "r"); - if(!handle) { - printf("couldn't url_fopen() testfile\n"); - fclose(outf); - return 2; - } + handle = url_fopen("testfile", "r"); + if (!handle) { + printf("couldn't url_fopen() 
testfile\n"); + fclose(outf); + return 2; + } - nread = url_fread(buffer, 1, sizeof(buffer), handle); - fwrite(buffer, 1, nread, outf); - url_rewind(handle); + nread = url_fread(buffer, 1, sizeof(buffer), handle); + fwrite(buffer, 1, nread, outf); + url_rewind(handle); - buffer[0]='\n'; - fwrite(buffer, 1, 1, outf); + buffer[0] = '\n'; + fwrite(buffer, 1, 1, outf); - nread = url_fread(buffer, 1, sizeof(buffer), handle); - fwrite(buffer, 1, nread, outf); + nread = url_fread(buffer, 1, sizeof(buffer), handle); + fwrite(buffer, 1, nread, outf); - url_fclose(handle); + url_fclose(handle); - fclose(outf); + fclose(outf); - return 0;/* all done */ + return 0;/* all done */ +} + +const char *http_response_str(long code) { + switch (code) { + case 100: + return "Continue"; + case 101: + return "Switching Protocol"; + case 102: + return "Processing (WebDAV)"; + case 103: + return "Early Hints"; + case 200: + return "OK"; + case 201: + return "Created"; + case 202: + return "Accepted"; + case 203: + return "Non-Authoritative Information"; + case 204: + return "No Content"; + case 205: + return "Reset Content"; + case 206: + return "Partial Content"; + case 207: + return "Multi-Status (WebDAV)"; + case 208: + return "Already Reported"; + case 226: + return "IM Used"; + case 300: + return "Multiple Choices"; + case 301: + return "Moved Permanently"; + case 302: + return "Found"; + case 303: + return "See Other"; + case 304: + return "Not Modified"; + case 305: + return "Use Proxy (DEPRECATED)"; + case 306: + return "Unused"; + case 307: + return "Temporary Redirect"; + case 308: + return "Permanent Redirect"; + case 400: + return "Bad Request"; + case 401: + return "Unauthorized"; + case 402: + return "Payment Required"; + case 403: + return "Forbidden"; + case 404: + return "Not Found"; + case 405: + return "Method Not Allowed"; + case 406: + return "Not Acceptable"; + case 407: + return "Proxy Authentication Required"; + case 408: + return "Request Timeout"; + case 409: + return "Conflict"; + case 410: + return "Gone"; + case 411: + return "Length Required"; + case 412: + return "Precondition Failed"; + case 413: + return "Payload Too Large"; + case 414: + return "URI Too Long"; + case 415: + return "Unsupported Media Type"; + case 416: + return "Range Not Satisfiable"; + case 417: + return "Exception Failed"; + case 418: + return "I'm a teapot"; + case 419: + return "Expectation Failed"; + case 421: + return "Misdirected Request"; + case 422: + return "Unprocessable Entity (WebDAV)"; + case 423: + return "Locked (WebDAV)"; + case 424: + return "Failed Dependency (WebDAV)"; + case 425: + return "Too Early"; + case 426: + return "Upgrade Required"; + case 428: + return "Precondition Required"; + case 429: + return "Too Many Requests"; + case 431: + return "Request Header Fields Too Large"; + case 451: + return "Unavailable For Legal Reasons"; + case 500: + return "Internal Server Error"; + case 501: + return "Not Implemented"; + case 502: + return "Bad Gateway"; + case 503: + return "Service Unavailable"; + case 504: + return "Gateway Timeout"; + case 505: + return "HTTP Version Not Supported"; + case 506: + return "Variant Also Negotiates"; + case 507: + return "Insufficient Storage (WebDAV)"; + case 508: + return "Loop Detected (WebDAV)"; + case 510: + return "Not Extended"; + case 511: + return "Network Authentication Required"; + default: + return "Unknown"; + } } diff --git a/src/manifest.c b/src/manifest.c index f889de2..5ed3bab 100644 --- a/src/manifest.c +++ b/src/manifest.c @@ -3,6 +3,7 @@ 
*/ #include "spm.h" #include <fnmatch.h> +#include "url.h" #define PACKAGE_MIN_DELIM 2 /** @@ -60,8 +61,8 @@ Manifest *manifest_from(const char *package_dir) { char **parts = split(fsdata->files[i], "-"); // Replace invalid character with a hyphen - replace_text(parts[0], "*", "-"); - replace_text(fsdata->files[i], "*", "-"); + replace_text(parts[0], SPM_MANIFEST_NODATA, "-"); + replace_text(fsdata->files[i], SPM_MANIFEST_NODATA, "-"); // Populate `ManifestPackage` record info->packages[i]->size = (size_t) get_file_size(fsdata->files[i]); @@ -100,14 +101,13 @@ void manifest_free(Manifest *info) { * @return */ int manifest_write(Manifest *info) { - const char *filename = "manifest.dat"; + char *reqs = NULL; char path[PATH_MAX]; memset(path, '\0', sizeof(path)); - sprintf(path, "%s%c%s", SPM_GLOBAL.user_config_basedir, DIRSEP, filename); - FILE *fp = fopen(path, "w+"); - char *reqs = NULL; + strcpy(path, SPM_GLOBAL.package_manifest); - // A little too much information (debug?) + FILE *fp = fopen(path, "w+"); +#ifdef _DEBUG if (SPM_GLOBAL.verbose) { for (size_t i = 0; i < info->records; i++) { printf("%-20s: %s\n" @@ -129,56 +129,183 @@ int manifest_write(Manifest *info) { printf("\n"); } } +#endif printf("Generating manifest file: %s\n", path); + fprintf(fp, "%s\n", SPM_MANIFEST_HEADER); + char data[BUFSIZ]; for (size_t i = 0; i < info->records; i++) { // write CSV-like manifest - char data[BUFSIZ]; memset(data, '\0', BUFSIZ); char *dptr = data; float percent = (((float)i + 1) / info->records) * 100; - printf("[%3.0f%%] %s\n", percent, info->packages[i]->archive); + if (SPM_GLOBAL.verbose) { + printf("[%3.0f%%] %s\n", percent, info->packages[i]->archive); + } reqs = join(info->packages[i]->requirements, ","); + char *archive = join((char *[]) {SPM_GLOBAL.package_dir, info->packages[i]->archive, NULL}, DIRSEPS); + char *checksum_sha256 = sha256sum(archive); + sprintf(dptr, "%s|" // archive "%zu|" // size "%s|" // name "%s|" // version "%s|" // revision "%zu|" // requirements_records - "%s" // requirements + "%s|" // requirements + "%s" // checksum_md5 , info->packages[i]->archive, info->packages[i]->size, info->packages[i]->name, info->packages[i]->version, info->packages[i]->revision, info->packages[i]->requirements_records, - reqs ? reqs : "*"); - fprintf(fp, "%s\n", dptr); + reqs ? reqs : SPM_MANIFEST_NODATA, + checksum_sha256 ? 
checksum_sha256 : SPM_MANIFEST_NODATA); + fprintf(fp, "%s\n", dptr); free(reqs); + if (checksum_sha256 != NULL) + free(checksum_sha256); } fclose(fp); return 0; } /** + * + * @param url + * @param dest + * @return + */ +int fetch(const char *url, const char *dest) { + URL_FILE *handle = NULL; + FILE *outf = NULL; + size_t chunk_size = 0xffff; + size_t nread = 0; + char *buffer = calloc(chunk_size + 1, sizeof(char)); + if (!buffer) { + perror("fetch buffer too big"); + return -1; + } + + handle = url_fopen(url, "r"); + if(!handle) { + printf("couldn't url_fopen() %s\n", url); + return 2; + } + + outf = fopen(dest, "wb+"); + if(!outf) { + perror("couldn't open fread output file\n"); + return 1; + } + + do { + nread = url_fread(buffer, 1, chunk_size, handle); + if (handle->http_status >= 400) { + free(buffer); + fclose(outf); + if (exists(dest) == 0) { + unlink(dest); + } + + long http_status = handle->http_status; + url_fclose(handle); + return http_status; + } + fwrite(buffer, 1, nread, outf); + } while (nread); + + free(buffer); + fclose(outf); + url_fclose(handle); + return 0; +} + +int manifest_validate(void) { + size_t line_count; + int problems; + char data[BUFSIZ]; + FILE *fp; + + if (exists(SPM_GLOBAL.package_manifest) != 0) { + return -1; + } + + if ((fp = fopen(SPM_GLOBAL.package_manifest, "r")) == NULL) { + perror(SPM_GLOBAL.package_manifest); + return -2; + } + + line_count = 0; + problems = 0; + while (fgets(data, BUFSIZ, fp) != NULL) { + int separators; + if (line_count == 0) { + if (strncmp(data, SPM_MANIFEST_HEADER, strlen(SPM_MANIFEST_HEADER)) != 0) { + fprintf(stderr, "Invalid manifest header: %s (expecting '%s')\n", strip(data), SPM_MANIFEST_HEADER); + problems++; + line_count++; + } + } + else if ((separators = num_chars(data, SPM_MANIFEST_SEPARATOR)) != SPM_MANIFEST_SEPARATOR_MAX) { + fprintf(stderr, "Invalid manifest record on line %zu: %s (expecting %d separators, found %d)\n", line_count, strip(data), SPM_MANIFEST_SEPARATOR_MAX, separators); + problems++; + } + line_count++; + } + return problems; +} +/** * Read the package manifest stored in the configuration directory * @return `Manifest` structure */ -Manifest *manifest_read(void) { - const char *filename = "manifest.dat"; +Manifest *manifest_read(char *file_or_url) { + FILE *fp = NULL; + char *filename = SPM_MANIFEST_FILENAME; char path[PATH_MAX]; - memset(path, '\0', sizeof(path)); - sprintf(path, "%s%c%s", SPM_GLOBAL.user_config_basedir, DIRSEP, filename); - FILE *fp = fopen(path, "r+"); - if (!fp) { - perror(filename); - return NULL; + + // When file_or_url is NULL we want to use the global manifest + if (file_or_url == NULL) { + // TODO: move this out + strcpy(path, SPM_GLOBAL.package_manifest); } + else { + strcpy(path, file_or_url); + } + + // Handle receiving a path without the manifest filename + // by appending the manifest to the path + if (endswith(path, filename) != 0) { + strcat(path, DIRSEPS); + strcat(path, filename); + } + + if (exists(path) != 0) { + // TODO: Move this out + char *remote_manifest = join((char *[]) {"http://astroconda.org/spm", SPM_GLOBAL.repo_target, filename, NULL}, DIRSEPS); + int fetch_status = fetch(remote_manifest, path); + if (fetch_status >= 400) { + fprintf(stderr, "HTTP %d: %s: %s\n", fetch_status, http_response_str(fetch_status), remote_manifest); + free(remote_manifest); + return NULL; + } + free(remote_manifest); + } + + int valid = 0; size_t total_records = 0; char data[BUFSIZ]; char *dptr = data; memset(dptr, '\0', BUFSIZ); + fp = fopen(path, "r+"); + if (!fp) { + 
perror(filename); + fprintf(SYSERROR); + return NULL; + } + while (fgets(dptr, BUFSIZ, fp) != NULL) { total_records++; } @@ -187,14 +314,31 @@ Manifest *manifest_read(void) { Manifest *info = (Manifest *)calloc(1, sizeof(Manifest)); info->packages = (ManifestPackage **)calloc(total_records + 1, sizeof(ManifestPackage *)); + if ((valid = manifest_validate()) != 0) { + return NULL; + } + // Begin parsing the manifest + char separator = SPM_MANIFEST_SEPARATOR; size_t i = 0; + + // Consume header + if (fgets(dptr, BUFSIZ, fp) == NULL) { + // file is probably empty + return NULL; + } + while (fgets(dptr, BUFSIZ, fp) != NULL) { dptr = strip(dptr); char *garbage; - char **parts = split(dptr, "|"); + char **parts = split(dptr, &separator); + char *_origin = dirname(path); info->packages[i] = (ManifestPackage *)calloc(1, sizeof(ManifestPackage)); + + strncpy(info->packages[i]->origin, _origin, strlen(_origin)); + free(_origin); + strncpy(info->packages[i]->archive, parts[0], strlen(parts[0])); info->packages[i]->size = strtoul(parts[1], &garbage, 10); strncpy(info->packages[i]->name, parts[2], strlen(parts[2])); @@ -203,10 +347,14 @@ Manifest *manifest_read(void) { info->packages[i]->requirements_records = (size_t) atoi(parts[5]); info->packages[i]->requirements = NULL; - if (strncmp(parts[6], "*", 2) != 0) { + if (strncmp(parts[6], SPM_MANIFEST_NODATA, strlen(SPM_MANIFEST_NODATA)) != 0) { info->packages[i]->requirements = split(parts[6], ","); } + if (strncmp(parts[7], SPM_MANIFEST_NODATA, strlen(SPM_MANIFEST_NODATA)) != 0) { + strncpy(info->packages[i]->checksum_sha256, parts[7], strlen(parts[7])); + } + split_free(parts); info->records = i; i++; @@ -149,7 +149,6 @@ int main(int argc, char *argv[], char *arge[]) { // Construct installation runtime environment RuntimeEnv *rt = runtime_copy(arge); - SPM_Hierarchy *root_hierarchy = NULL; // TODO: Move environment allocation out of (above) this loop if possible // TODO: replace variables below with SPM_Hierarchy, and write some control functions @@ -180,7 +179,7 @@ int main(int argc, char *argv[], char *arge[]) { dep_init(&deps); printf("Reading package manifest... "); - Manifest *manifest = manifest_read(); + Manifest *manifest = manifest_read(NULL); if (!manifest) { fprintf(stderr, "Package manifest is missing or corrupt\n"); runtime_free(rt); @@ -282,7 +281,7 @@ int main(int argc, char *argv[], char *arge[]) { } if (RUNTIME_SEARCH || RUNTIME_LIST) { - Manifest *info = manifest_read(); + Manifest *info = manifest_read(NULL); char name[255]; char op[25]; char ver[255]; |
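For reference, the reworked src/extern/url.c can now report the HTTP status of a transfer: url_fopen() sets CURLOPT_FOLLOWLOCATION and CURLOPT_FAILONERROR, fill_buffer() records the response code via get_http_response(), and the new fetch() helper in manifest.c aborts and unlinks its destination on any status of 400 or higher. A minimal caller in that style, assuming url.h declares URL_FILE (with http_status taken to be a long, matching get_http_response()), url_fopen(), url_fread(), url_fclose(), and http_response_str():

```c
/* Hedged usage sketch mirroring the fetch() helper added in src/manifest.c.
 * fetch() itself heap-allocates a 0xffff-byte chunk; a BUFSIZ stack buffer
 * keeps this example self-contained. */
#include <stdio.h>
#include "url.h"

int download(const char *url, const char *dest) {
    char buffer[BUFSIZ];
    URL_FILE *in = url_fopen(url, "r");
    if (in == NULL) {
        fprintf(stderr, "couldn't url_fopen() %s\n", url);
        return 2;
    }

    FILE *out = fopen(dest, "wb+");
    if (out == NULL) {
        perror(dest);
        url_fclose(in);
        return 1;
    }

    size_t nread;
    do {
        nread = url_fread(buffer, 1, sizeof(buffer), in);
        /* CURLOPT_FAILONERROR is now set in url_fopen(), so 4xx/5xx
         * responses stop the transfer; http_status carries the code. */
        if (in->http_status >= 400) {
            fprintf(stderr, "HTTP %ld: %s: %s\n", in->http_status,
                    http_response_str(in->http_status), url);
            fclose(out);
            url_fclose(in);
            remove(dest);  /* discard the partial download, as fetch() does */
            return (int) in->http_status;
        }
        fwrite(buffer, 1, nread, out);
    } while (nread);

    fclose(out);
    url_fclose(in);
    return 0;
}
```

Checking http_status inside the read loop follows fetch(): because FAILONERROR stops the transfer on an error status, the partial output file is removed rather than left behind.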