From 7d675a70bf92bd1606f77fae01c6e56afdcee5ef Mon Sep 17 00:00:00 2001 From: Joseph Hunkeler Date: Fri, 21 Jun 2024 12:36:55 -0400 Subject: Rebrand OhMyCal (OMC) as STASIS --- src/CMakeLists.txt | 20 +- src/artifactory.c | 60 +- src/conda.c | 24 +- src/copy.c | 2 +- src/deliverable.c | 2100 -------------------------------------------------- src/delivery.c | 2100 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/docker.c | 22 +- src/download.c | 2 +- src/environment.c | 6 +- src/globals.c | 31 +- src/ini.c | 12 +- src/omc_indexer.c | 753 ------------------ src/omc_main.c | 612 --------------- src/recipe.c | 2 +- src/relocation.c | 4 +- src/stasis_indexer.c | 753 ++++++++++++++++++ src/stasis_main.c | 612 +++++++++++++++ src/str.c | 12 +- src/system.c | 8 +- src/template.c | 2 +- src/utils.c | 52 +- 21 files changed, 3594 insertions(+), 3595 deletions(-) delete mode 100644 src/deliverable.c create mode 100644 src/delivery.c delete mode 100644 src/omc_indexer.c delete mode 100644 src/omc_main.c create mode 100644 src/stasis_indexer.c create mode 100644 src/stasis_main.c (limited to 'src') diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f075102..c42bb0f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -2,7 +2,7 @@ include_directories(${CMAKE_BINARY_DIR}/include) include_directories(${CMAKE_SOURCE_DIR}/include) include_directories(${PROJECT_BINARY_DIR}) -add_library(ohmycal STATIC +add_library(stasis_core STATIC globals.c str.c strlist.c @@ -12,7 +12,7 @@ add_library(ohmycal STATIC utils.c system.c download.c - deliverable.c + delivery.c recipe.c relocation.c wheel.c @@ -24,13 +24,13 @@ add_library(ohmycal STATIC junitxml.c ) -add_executable(omc - omc_main.c +add_executable(stasis + stasis_main.c ) -target_link_libraries(omc PRIVATE ohmycal) -target_link_libraries(omc PUBLIC LibXml2::LibXml2) -add_executable(omc_indexer - omc_indexer.c +target_link_libraries(stasis PRIVATE stasis_core) +target_link_libraries(stasis PUBLIC LibXml2::LibXml2) 
+add_executable(stasis_indexer + stasis_indexer.c ) -target_link_libraries(omc_indexer PRIVATE ohmycal) -install(TARGETS omc omc_indexer RUNTIME) +target_link_libraries(stasis_indexer PRIVATE stasis_core) +install(TARGETS stasis stasis_indexer RUNTIME) diff --git a/src/artifactory.c b/src/artifactory.c index 5678d64..bd9aa99 100644 --- a/src/artifactory.c +++ b/src/artifactory.c @@ -1,6 +1,6 @@ -#include "omc.h" +#include "core.h" -extern struct OMC_GLOBAL globals; +extern struct STASIS_GLOBAL globals; int artifactory_download_cli(char *dest, char *jfrog_artifactory_base_url, @@ -12,8 +12,8 @@ int artifactory_download_cli(char *dest, char *remote_filename) { char url[PATH_MAX] = {0}; char path[PATH_MAX] = {0}; - char os_ident[OMC_NAME_MAX] = {0}; - char arch_ident[OMC_NAME_MAX] = {0}; + char os_ident[STASIS_NAME_MAX] = {0}; + char arch_ident[STASIS_NAME_MAX] = {0}; // convert platform string to lower-case strcpy(os_ident, os); @@ -73,7 +73,7 @@ int artifactory_download_cli(char *dest, } void jfrt_register_opt_str(char *jfrt_val, const char *opt_name, struct StrList **opt_map) { - char data[OMC_BUFSIZ]; + char data[STASIS_BUFSIZ]; memset(data, 0, sizeof(data)); if (jfrt_val == NULL) { @@ -85,7 +85,7 @@ void jfrt_register_opt_str(char *jfrt_val, const char *opt_name, struct StrList } void jfrt_register_opt_bool(bool jfrt_val, const char *opt_name, struct StrList **opt_map) { - char data[OMC_BUFSIZ]; + char data[STASIS_BUFSIZ]; memset(data, 0, sizeof(data)); if (jfrt_val == false) { @@ -97,7 +97,7 @@ void jfrt_register_opt_bool(bool jfrt_val, const char *opt_name, struct StrList } void jfrt_register_opt_int(int jfrt_val, const char *opt_name, struct StrList **opt_map) { - char data[OMC_BUFSIZ]; + char data[STASIS_BUFSIZ]; memset(data, 0, sizeof(data)); if (jfrt_val == 0) { @@ -109,7 +109,7 @@ void jfrt_register_opt_int(int jfrt_val, const char *opt_name, struct StrList ** } void jfrt_register_opt_long(long jfrt_val, const char *opt_name, struct StrList **opt_map) { - 
char data[OMC_BUFSIZ]; + char data[STASIS_BUFSIZ]; memset(data, 0, sizeof(data)); if (jfrt_val == 0) { @@ -141,18 +141,18 @@ static int auth_required(char *cmd) { } int jfrt_auth_init(struct JFRT_Auth *auth_ctx) { - char *url = getenv("OMC_JF_ARTIFACTORY_URL"); - char *user = getenv("OMC_JF_USER"); - char *access_token = getenv("OMC_JF_ACCESS_TOKEN"); - char *password = getenv("OMC_JF_PASSWORD"); - char *ssh_key_path = getenv("OMC_JF_SSH_KEY_PATH"); - char *ssh_passphrase = getenv("OMC_JF_SSH_PASSPHRASE"); - char *client_cert_key_path = getenv("OMC_JF_CLIENT_CERT_KEY_PATH"); - char *client_cert_path = getenv("OMC_JF_CLIENT_CERT_PATH"); + char *url = getenv("STASIS_JF_ARTIFACTORY_URL"); + char *user = getenv("STASIS_JF_USER"); + char *access_token = getenv("STASIS_JF_ACCESS_TOKEN"); + char *password = getenv("STASIS_JF_PASSWORD"); + char *ssh_key_path = getenv("STASIS_JF_SSH_KEY_PATH"); + char *ssh_passphrase = getenv("STASIS_JF_SSH_PASSPHRASE"); + char *client_cert_key_path = getenv("STASIS_JF_CLIENT_CERT_KEY_PATH"); + char *client_cert_path = getenv("STASIS_JF_CLIENT_CERT_PATH"); if (!url) { fprintf(stderr, "Artifactory URL is not configured:\n"); - fprintf(stderr, "please set OMC_JF_ARTIFACTORY_URL\n"); + fprintf(stderr, "please set STASIS_JF_ARTIFACTORY_URL\n"); return -1; } auth_ctx->url = url; @@ -184,10 +184,10 @@ int jfrt_auth_init(struct JFRT_Auth *auth_ctx) { auth_ctx->client_cert_path = client_cert_path; } else { fprintf(stderr, "Artifactory authentication is not configured:\n"); - fprintf(stderr, "set OMC_JF_USER and OMC_JF_PASSWORD\n"); - fprintf(stderr, "or, set OMC_JF_ACCESS_TOKEN\n"); - fprintf(stderr, "or, set OMC_JF_SSH_KEY_PATH and OMC_JF_SSH_KEY_PASSPHRASE\n"); - fprintf(stderr, "or, set OMC_JF_CLIENT_CERT_KEY_PATH and OMC_JF_CLIENT_CERT_PATH\n"); + fprintf(stderr, "set STASIS_JF_USER and STASIS_JF_PASSWORD\n"); + fprintf(stderr, "or, set STASIS_JF_ACCESS_TOKEN\n"); + fprintf(stderr, "or, set STASIS_JF_SSH_KEY_PATH and 
STASIS_JF_SSH_KEY_PASSPHRASE\n"); + fprintf(stderr, "or, set STASIS_JF_CLIENT_CERT_KEY_PATH and STASIS_JF_CLIENT_CERT_PATH\n"); return -1; } return 0; @@ -195,8 +195,8 @@ int jfrt_auth_init(struct JFRT_Auth *auth_ctx) { int jfrog_cli(struct JFRT_Auth *auth, char *args) { struct Process proc; - char cmd[OMC_BUFSIZ]; - char cmd_redacted[OMC_BUFSIZ]; + char cmd[STASIS_BUFSIZ]; + char cmd_redacted[STASIS_BUFSIZ]; int status; memset(&proc, 0, sizeof(proc)); @@ -244,7 +244,7 @@ int jfrog_cli(struct JFRT_Auth *auth, char *args) { // Pings are noisy. Squelch them. if (!strstr(args, "rt ping")) { - msg(OMC_MSG_L2, "Executing: %s\n", cmd_redacted); + msg(STASIS_MSG_L2, "Executing: %s\n", cmd_redacted); } if (!globals.verbose) { @@ -256,28 +256,28 @@ int jfrog_cli(struct JFRT_Auth *auth, char *args) { } int jfrog_cli_rt(struct JFRT_Auth *auth, char *args) { - char cmd[OMC_BUFSIZ]; + char cmd[STASIS_BUFSIZ]; memset(cmd, 0, sizeof(cmd)); snprintf(cmd, sizeof(cmd) - 1, "rt %s", args); return jfrog_cli(auth, args); } int jfrog_cli_rt_build_collect_env(struct JFRT_Auth *auth, char *build_name, char *build_number) { - char cmd[OMC_BUFSIZ]; + char cmd[STASIS_BUFSIZ]; memset(cmd, 0, sizeof(cmd)); snprintf(cmd, sizeof(cmd) - 1, "rt build-collect-env \"%s\" \"%s\"", build_name, build_number); return jfrog_cli(auth, cmd); } int jfrog_cli_rt_build_publish(struct JFRT_Auth *auth, char *build_name, char *build_number) { - char cmd[OMC_BUFSIZ]; + char cmd[STASIS_BUFSIZ]; memset(cmd, 0, sizeof(cmd)); snprintf(cmd, sizeof(cmd) - 1, "rt build-publish \"%s\" \"%s\"", build_name, build_number); return jfrog_cli(auth, cmd); } int jfrog_cli_rt_ping(struct JFRT_Auth *auth) { - char cmd[OMC_BUFSIZ]; + char cmd[STASIS_BUFSIZ]; memset(cmd, 0, sizeof(cmd)); snprintf(cmd, sizeof(cmd) - 1, "rt ping"); @@ -285,7 +285,7 @@ int jfrog_cli_rt_ping(struct JFRT_Auth *auth) { } int jfrog_cli_rt_download(struct JFRT_Auth *auth, struct JFRT_Download *ctx, char *repo_path, char *dest) { - char cmd[OMC_BUFSIZ]; + 
char cmd[STASIS_BUFSIZ]; memset(cmd, 0, sizeof(cmd)); if (isempty(repo_path)) { @@ -352,7 +352,7 @@ int jfrog_cli_rt_download(struct JFRT_Auth *auth, struct JFRT_Download *ctx, cha } int jfrog_cli_rt_upload(struct JFRT_Auth *auth, struct JFRT_Upload *ctx, char *src, char *repo_path) { - char cmd[OMC_BUFSIZ]; + char cmd[STASIS_BUFSIZ]; memset(cmd, 0, sizeof(cmd)); if (isempty(src)) { diff --git a/src/conda.c b/src/conda.c index efb42fa..342b6af 100644 --- a/src/conda.c +++ b/src/conda.c @@ -5,13 +5,11 @@ #include #include "conda.h" -extern struct OMC_GLOBAL globals; - int python_exec(const char *args) { char command[PATH_MAX]; memset(command, 0, sizeof(command)); snprintf(command, sizeof(command) - 1, "python %s", args); - msg(OMC_MSG_L3, "Executing: %s\n", command); + msg(STASIS_MSG_L3, "Executing: %s\n", command); return system(command); } @@ -19,7 +17,7 @@ int pip_exec(const char *args) { char command[PATH_MAX]; memset(command, 0, sizeof(command)); snprintf(command, sizeof(command) - 1, "python -m pip %s", args); - msg(OMC_MSG_L3, "Executing: %s\n", command); + msg(STASIS_MSG_L3, "Executing: %s\n", command); return system(command); } @@ -51,7 +49,7 @@ int conda_exec(const char *args) { } snprintf(command, sizeof(command) - 1, "%s %s", conda_as, args); - msg(OMC_MSG_L3, "Executing: %s\n", command); + msg(STASIS_MSG_L3, "Executing: %s\n", command); return system(command); } @@ -109,7 +107,7 @@ int conda_activate(const char *root, const char *env_name) { // Parse the log file: // 1. Extract the environment keys and values from the sub-shell - // 2. Apply it to OMC's runtime environment + // 2. Apply it to STASIS's runtime environment // 3. 
Now we're ready to execute conda commands anywhere fp = fopen(proc.f_stdout, "r"); if (!fp) { @@ -118,7 +116,7 @@ int conda_activate(const char *root, const char *env_name) { } int i = 0; while (!feof(fp)) { - char buf[OMC_BUFSIZ] = {0}; + char buf[STASIS_BUFSIZ] = {0}; int ch = 0; size_t z = 0; // We are ingesting output from "env -0", can't use fgets() @@ -142,10 +140,10 @@ int conda_activate(const char *root, const char *env_name) { return -1; } if (!part[0]) { - msg(OMC_MSG_WARN | OMC_MSG_L1, "Invalid environment variable key ignored: '%s'\n", buf); + msg(STASIS_MSG_WARN | STASIS_MSG_L1, "Invalid environment variable key ignored: '%s'\n", buf); i++; } else if (!part[1]) { - msg(OMC_MSG_WARN | OMC_MSG_L1, "Invalid environment variable value ignored: '%s'\n", buf); + msg(STASIS_MSG_WARN | STASIS_MSG_L1, "Invalid environment variable value ignored: '%s'\n", buf); i++; } else { setenv(part[0], part[1], 1); @@ -193,7 +191,7 @@ int conda_check_required() { guard_free(cmd_out); guard_strlist_free(&result); } else { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "The base package requirement check could not be performed\n"); + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "The base package requirement check could not be performed\n"); return 2; } return 0; @@ -234,7 +232,7 @@ void conda_setup_headless() { } if (conda_exec(cmd)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Unable to install user-defined base packages (conda)\n"); + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to install user-defined base packages (conda)\n"); exit(1); } } @@ -256,13 +254,13 @@ void conda_setup_headless() { } if (pip_exec(cmd)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Unable to install user-defined base packages (pip)\n"); + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to install user-defined base packages (pip)\n"); exit(1); } } if (conda_check_required()) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Your OMC configuration lacks the bare" + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Your STASIS configuration lacks the bare" " 
minimum software required to build conda packages." " Please fix it.\n"); exit(1); diff --git a/src/copy.c b/src/copy.c index dcfd924..323208b 100644 --- a/src/copy.c +++ b/src/copy.c @@ -3,7 +3,7 @@ int copy2(const char *src, const char *dest, unsigned int op) { size_t bytes_read; size_t bytes_written; - char buf[OMC_BUFSIZ]; + char buf[STASIS_BUFSIZ]; struct stat src_stat, dnamest; FILE *fp1, *fp2; diff --git a/src/deliverable.c b/src/deliverable.c deleted file mode 100644 index f72f535..0000000 --- a/src/deliverable.c +++ /dev/null @@ -1,2100 +0,0 @@ -#define _GNU_SOURCE - -#include -#include "omc.h" - -extern struct OMC_GLOBAL globals; - -static void ini_has_key_required(struct INIFILE *ini, const char *section_name, char *key) { - int status = ini_has_key(ini, section_name, key); - if (!status) { - SYSERROR("%s:%s key is required but not defined", section_name, key); - exit(1); - } -} - -static void ini_getval_required(struct INIFILE *ini, char *section_name, char *key, unsigned type, union INIVal *val) { - int status = ini_getval(ini, section_name, key, type, val); - if (status || isempty(val->as_char_p)) { - SYSERROR("%s:%s value is required but not defined", section_name, key); - exit(1); - } -} - -static void conv_int(int *x, union INIVal val) { - *x = val.as_int; -} - -static void conv_str(char **x, union INIVal val) { - if (*x) { - guard_free(*x); - } - if (val.as_char_p) { - char *tplop = tpl_render(val.as_char_p); - if (tplop) { - *x = tplop; - } else { - *x = NULL; - } - } else { - *x = NULL; - } -} - -static void conv_str_noexpand(char **x, union INIVal val) { - if (*x) { - guard_free(*x); - } - *x = strdup(val.as_char_p); -} - -static void conv_strlist(struct StrList **x, char *tok, union INIVal val) { - if (!(*x)) - (*x) = strlist_init(); - if (val.as_char_p) { - char *tplop = tpl_render(val.as_char_p); - if (tplop) { - strip(tplop); - strlist_append_tokenize((*x), tplop, tok); - guard_free(tplop); - } - } -} - -static void conv_bool(bool *x, union 
INIVal val) { - *x = val.as_bool; -} - -int delivery_init_tmpdir(struct Delivery *ctx) { - char *tmpdir = NULL; - char *x = NULL; - int unusable = 0; - errno = 0; - - x = getenv("TMPDIR"); - if (x) { - guard_free(ctx->storage.tmpdir); - tmpdir = strdup(x); - } else { - tmpdir = ctx->storage.tmpdir; - } - - if (!tmpdir) { - // memory error - return -1; - } - - // If the directory doesn't exist, create it - if (access(tmpdir, F_OK) < 0) { - if (mkdirs(tmpdir, 0755) < 0) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "Unable to create temporary storage directory: %s (%s)\n", tmpdir, strerror(errno)); - goto l_delivery_init_tmpdir_fatal; - } - } - - // If we can't read, write, or execute, then die - if (access(tmpdir, R_OK | W_OK | X_OK) < 0) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "%s requires at least 0755 permissions.\n"); - goto l_delivery_init_tmpdir_fatal; - } - - struct statvfs st; - if (statvfs(tmpdir, &st) < 0) { - goto l_delivery_init_tmpdir_fatal; - } - -#if defined(OMC_OS_LINUX) - // If we can't execute programs, or write data to the file system at all, then die - if ((st.f_flag & ST_NOEXEC) != 0) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "%s is mounted with noexec\n", tmpdir); - goto l_delivery_init_tmpdir_fatal; - } -#endif - if ((st.f_flag & ST_RDONLY) != 0) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "%s is mounted read-only\n", tmpdir); - goto l_delivery_init_tmpdir_fatal; - } - - if (!globals.tmpdir) { - globals.tmpdir = strdup(tmpdir); - } - - if (!ctx->storage.tmpdir) { - ctx->storage.tmpdir = strdup(globals.tmpdir); - } - return unusable; - - l_delivery_init_tmpdir_fatal: - unusable = 1; - return unusable; -} - -void delivery_free(struct Delivery *ctx) { - guard_free(ctx->system.arch); - GENERIC_ARRAY_FREE(ctx->system.platform); - guard_free(ctx->meta.name); - guard_free(ctx->meta.version); - guard_free(ctx->meta.codename); - guard_free(ctx->meta.mission); - guard_free(ctx->meta.python); - guard_free(ctx->meta.mission); - guard_free(ctx->meta.python_compact); - 
guard_free(ctx->meta.based_on); - guard_runtime_free(ctx->runtime.environ); - guard_free(ctx->storage.root); - guard_free(ctx->storage.tmpdir); - guard_free(ctx->storage.delivery_dir); - guard_free(ctx->storage.tools_dir); - guard_free(ctx->storage.package_dir); - guard_free(ctx->storage.results_dir); - guard_free(ctx->storage.output_dir); - guard_free(ctx->storage.conda_install_prefix); - guard_free(ctx->storage.conda_artifact_dir); - guard_free(ctx->storage.conda_staging_dir); - guard_free(ctx->storage.conda_staging_url); - guard_free(ctx->storage.wheel_artifact_dir); - guard_free(ctx->storage.wheel_staging_dir); - guard_free(ctx->storage.wheel_staging_url); - guard_free(ctx->storage.build_dir); - guard_free(ctx->storage.build_recipes_dir); - guard_free(ctx->storage.build_sources_dir); - guard_free(ctx->storage.build_testing_dir); - guard_free(ctx->storage.build_docker_dir); - guard_free(ctx->storage.mission_dir); - guard_free(ctx->storage.docker_artifact_dir); - guard_free(ctx->info.time_str_epoch); - guard_free(ctx->info.build_name); - guard_free(ctx->info.build_number); - guard_free(ctx->info.release_name); - guard_free(ctx->conda.installer_baseurl); - guard_free(ctx->conda.installer_name); - guard_free(ctx->conda.installer_version); - guard_free(ctx->conda.installer_platform); - guard_free(ctx->conda.installer_arch); - guard_free(ctx->conda.installer_path); - guard_free(ctx->conda.tool_version); - guard_free(ctx->conda.tool_build_version); - guard_strlist_free(&ctx->conda.conda_packages); - guard_strlist_free(&ctx->conda.conda_packages_defer); - guard_strlist_free(&ctx->conda.pip_packages); - guard_strlist_free(&ctx->conda.pip_packages_defer); - guard_strlist_free(&ctx->conda.wheels_packages); - - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - guard_free(ctx->tests[i].name); - guard_free(ctx->tests[i].version); - guard_free(ctx->tests[i].repository); - guard_free(ctx->tests[i].repository_info_ref); - 
guard_free(ctx->tests[i].repository_info_tag); - guard_free(ctx->tests[i].script); - guard_free(ctx->tests[i].build_recipe); - // test-specific runtime variables - guard_runtime_free(ctx->tests[i].runtime.environ); - } - - guard_free(ctx->rules.release_fmt); - guard_free(ctx->rules.build_name_fmt); - guard_free(ctx->rules.build_number_fmt); - - guard_free(ctx->deploy.docker.test_script); - guard_free(ctx->deploy.docker.registry); - guard_free(ctx->deploy.docker.image_compression); - guard_strlist_free(&ctx->deploy.docker.tags); - guard_strlist_free(&ctx->deploy.docker.build_args); - - for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(ctx->deploy.jfrog[0]); i++) { - guard_free(ctx->deploy.jfrog[i].repo); - guard_free(ctx->deploy.jfrog[i].dest); - guard_strlist_free(&ctx->deploy.jfrog[i].files); - } - - if (ctx->_omc_ini_fp.delivery) { - ini_free(&ctx->_omc_ini_fp.delivery); - } - guard_free(ctx->_omc_ini_fp.delivery_path); - - if (ctx->_omc_ini_fp.cfg) { - // optional extras - ini_free(&ctx->_omc_ini_fp.cfg); - } - guard_free(ctx->_omc_ini_fp.cfg_path); - - if (ctx->_omc_ini_fp.mission) { - ini_free(&ctx->_omc_ini_fp.mission); - } - guard_free(ctx->_omc_ini_fp.mission_path); -} - -void delivery_init_dirs_stage2(struct Delivery *ctx) { - path_store(&ctx->storage.build_recipes_dir, PATH_MAX, ctx->storage.build_dir, "recipes"); - path_store(&ctx->storage.build_sources_dir, PATH_MAX, ctx->storage.build_dir, "sources"); - path_store(&ctx->storage.build_testing_dir, PATH_MAX, ctx->storage.build_dir, "testing"); - path_store(&ctx->storage.build_docker_dir, PATH_MAX, ctx->storage.build_dir, "docker"); - - path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery"); - path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results"); - path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages"); - path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config"); - 
path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta"); - - path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda"); - path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels"); - path_store(&ctx->storage.docker_artifact_dir, PATH_MAX, ctx->storage.package_dir, "docker"); -} - -void delivery_init_dirs_stage1(struct Delivery *ctx) { - char *rootdir = getenv("OMC_ROOT"); - if (rootdir) { - if (isempty(rootdir)) { - fprintf(stderr, "OMC_ROOT is set, but empty. Please assign a file system path to this environment variable.\n"); - exit(1); - } - path_store(&ctx->storage.root, PATH_MAX, rootdir, ctx->info.build_name); - } else { - // use "omc" in current working directory - path_store(&ctx->storage.root, PATH_MAX, "omc", ctx->info.build_name); - } - path_store(&ctx->storage.tools_dir, PATH_MAX, ctx->storage.root, "tools"); - path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp"); - if (delivery_init_tmpdir(ctx)) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "Set $TMPDIR to a location other than %s\n", globals.tmpdir); - if (globals.tmpdir) - guard_free(globals.tmpdir); - exit(1); - } - - path_store(&ctx->storage.build_dir, PATH_MAX, ctx->storage.root, "build"); - path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output"); - - if (!ctx->storage.mission_dir) { - path_store(&ctx->storage.mission_dir, PATH_MAX, globals.sysconfdir, "mission"); - } - - if (access(ctx->storage.mission_dir, F_OK)) { - msg(OMC_MSG_L1, "%s: %s\n", ctx->storage.mission_dir, strerror(errno)); - exit(1); - } - - // Override installation prefix using global configuration key - if (globals.conda_install_prefix && strlen(globals.conda_install_prefix)) { - // user wants a specific path - globals.conda_fresh_start = false; - /* - if (mkdirs(globals.conda_install_prefix, 0755)) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "Unable to create directory: %s: %s\n", - strerror(errno), 
globals.conda_install_prefix); - exit(1); - } - */ - /* - ctx->storage.conda_install_prefix = realpath(globals.conda_install_prefix, NULL); - if (!ctx->storage.conda_install_prefix) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "realpath(): Conda installation prefix reassignment failed\n"); - exit(1); - } - ctx->storage.conda_install_prefix = strdup(globals.conda_install_prefix); - */ - path_store(&ctx->storage.conda_install_prefix, PATH_MAX, globals.conda_install_prefix, "conda"); - } else { - // install conda under the OMC tree - path_store(&ctx->storage.conda_install_prefix, PATH_MAX, ctx->storage.tools_dir, "conda"); - } -} - -int delivery_init_platform(struct Delivery *ctx) { - msg(OMC_MSG_L2, "Setting architecture\n"); - char archsuffix[20]; - struct utsname uts; - if (uname(&uts)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "uname() failed: %s\n", strerror(errno)); - return -1; - } - - ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX + 1, sizeof(*ctx->system.platform)); - if (!ctx->system.platform) { - SYSERROR("Unable to allocate %d records for platform array\n", DELIVERY_PLATFORM_MAX); - return -1; - } - for (size_t i = 0; i < DELIVERY_PLATFORM_MAX; i++) { - ctx->system.platform[i] = calloc(DELIVERY_PLATFORM_MAXLEN, sizeof(*ctx->system.platform[0])); - } - - ctx->system.arch = strdup(uts.machine); - if (!ctx->system.arch) { - // memory error - return -1; - } - - if (!strcmp(ctx->system.arch, "x86_64")) { - strcpy(archsuffix, "64"); - } else { - strcpy(archsuffix, ctx->system.arch); - } - - msg(OMC_MSG_L2, "Setting platform\n"); - strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname); - if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) { - sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "MacOSX"); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "macos"); - } else if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux")) { - 
sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "linux-%s", archsuffix); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "Linux"); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "linux"); - } else { - // Not explicitly supported systems - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], ctx->system.platform[DELIVERY_PLATFORM]); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], ctx->system.platform[DELIVERY_PLATFORM]); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], ctx->system.platform[DELIVERY_PLATFORM]); - tolower_s(ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); - } - - // Declare some important bits as environment variables - setenv("OMC_ARCH", ctx->system.arch, 1); - setenv("OMC_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM], 1); - setenv("OMC_CONDA_ARCH", ctx->system.arch, 1); - setenv("OMC_CONDA_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], 1); - setenv("OMC_CONDA_PLATFORM_SUBDIR", ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], 1); - - // Register template variables - // These were moved out of main() because we can't take the address of system.platform[x] - // _before_ the array has been initialized. 
- tpl_register("system.arch", &ctx->system.arch); - tpl_register("system.platform", &ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); - - return 0; -} - -static int populate_mission_ini(struct Delivery **ctx) { - union INIVal val; - struct INIFILE *ini; - - if ((*ctx)->_omc_ini_fp.mission) { - return 0; - } - - // Now populate the rules - char missionfile[PATH_MAX] = {0}; - if (getenv("OMC_SYSCONFDIR")) { - sprintf(missionfile, "%s/%s/%s/%s.ini", - getenv("OMC_SYSCONFDIR"), "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); - } else { - sprintf(missionfile, "%s/%s/%s/%s.ini", - globals.sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); - } - - msg(OMC_MSG_L2, "Reading mission configuration: %s\n", missionfile); - (*ctx)->_omc_ini_fp.mission = ini_open(missionfile); - ini = (*ctx)->_omc_ini_fp.mission; - if (!ini) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Failed to read misson configuration: %s, %s\n", missionfile, strerror(errno)); - exit(1); - } - (*ctx)->_omc_ini_fp.mission_path = strdup(missionfile); - - ini_getval_required(ini, "meta", "release_fmt", INIVAL_TYPE_STR, &val); - conv_str(&(*ctx)->rules.release_fmt, val); - - // Used for setting artifactory build info - ini_getval_required(ini, "meta", "build_name_fmt", INIVAL_TYPE_STR, &val); - conv_str(&(*ctx)->rules.build_name_fmt, val); - - // Used for setting artifactory build info - ini_getval_required(ini, "meta", "build_number_fmt", INIVAL_TYPE_STR, &val); - conv_str(&(*ctx)->rules.build_number_fmt, val); - return 0; -} - -void validate_delivery_ini(struct INIFILE *ini) { - if (ini_section_search(&ini, INI_SEARCH_EXACT, "meta")) { - ini_has_key_required(ini, "meta", "name"); - ini_has_key_required(ini, "meta", "version"); - ini_has_key_required(ini, "meta", "rc"); - ini_has_key_required(ini, "meta", "mission"); - ini_has_key_required(ini, "meta", "python"); - } else { - SYSERROR("%s", "[meta] configuration section is required"); - exit(1); - } - - if (ini_section_search(&ini, 
INI_SEARCH_EXACT, "conda")) { - ini_has_key_required(ini, "conda", "installer_name"); - ini_has_key_required(ini, "conda", "installer_version"); - ini_has_key_required(ini, "conda", "installer_platform"); - ini_has_key_required(ini, "conda", "installer_arch"); - } else { - SYSERROR("%s", "[conda] configuration section is required"); - exit(1); - } - - for (size_t i = 0; i < ini->section_count; i++) { - struct INISection *section = ini->section[i]; - if (section && startswith(section->key, "test:")) { - char *name = strstr(section->key, ":"); - if (name && strlen(name) > 1) { - name = &name[1]; - } - ini_has_key_required(ini, section->key, "version"); - ini_has_key_required(ini, section->key, "repository"); - ini_has_key_required(ini, section->key, "script"); - } - } - - if (ini_section_search(&ini, INI_SEARCH_EXACT, "deploy:docker")) { - // yeah? - } - - for (size_t i = 0; i < ini->section_count; i++) { - struct INISection *section = ini->section[i]; - if (section && startswith(section->key, "deploy:artifactory")) { - ini_has_key_required(ini, section->key, "files"); - ini_has_key_required(ini, section->key, "dest"); - } - } -} - -static int populate_delivery_ini(struct Delivery *ctx) { - union INIVal val; - struct INIFILE *ini = ctx->_omc_ini_fp.delivery; - struct INIData *rtdata; - RuntimeEnv *rt; - - validate_delivery_ini(ini); - // Populate runtime variables first they may be interpreted by other - // keys in the configuration - rt = runtime_copy(__environ); - while ((rtdata = ini_getall(ini, "runtime")) != NULL) { - char rec[OMC_BUFSIZ]; - sprintf(rec, "%s=%s", lstrip(strip(rtdata->key)), lstrip(strip(rtdata->value))); - runtime_set(rt, rtdata->key, rtdata->value); - } - runtime_apply(rt); - ctx->runtime.environ = rt; - - ini_getval_required(ini, "meta", "mission", INIVAL_TYPE_STR, &val); - conv_str(&ctx->meta.mission, val); - - if (!strcasecmp(ctx->meta.mission, "hst")) { - ini_getval(ini, "meta", "codename", INIVAL_TYPE_STR, &val); - 
conv_str(&ctx->meta.codename, val); - } else { - ctx->meta.codename = NULL; - } - - /* - if (!strcasecmp(ctx->meta.mission, "jwst")) { - ini_getval(ini, "meta", "version", INIVAL_TYPE_STR, &val); - conv_str(&ctx->meta.version, val); - - } else { - ctx->meta.version = NULL; - } - */ - ini_getval(ini, "meta", "version", INIVAL_TYPE_STR, &val); - conv_str(&ctx->meta.version, val); - - ini_getval_required(ini, "meta", "name", INIVAL_TYPE_STR, &val); - conv_str(&ctx->meta.name, val); - - ini_getval(ini, "meta", "rc", INIVAL_TYPE_INT, &val); - conv_int(&ctx->meta.rc, val); - - ini_getval(ini, "meta", "final", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->meta.final, val); - - ini_getval(ini, "meta", "based_on", INIVAL_TYPE_STR, &val); - conv_str(&ctx->meta.based_on, val); - - if (!ctx->meta.python) { - ini_getval(ini, "meta", "python", INIVAL_TYPE_STR, &val); - conv_str(&ctx->meta.python, val); - guard_free(ctx->meta.python_compact); - ctx->meta.python_compact = to_short_version(ctx->meta.python); - } else { - ini_setval(&ini, INI_SETVAL_REPLACE, "meta", "python", ctx->meta.python); - } - - ini_getval_required(ini, "conda", "installer_name", INIVAL_TYPE_STR, &val); - conv_str(&ctx->conda.installer_name, val); - - ini_getval_required(ini, "conda", "installer_version", INIVAL_TYPE_STR, &val); - conv_str(&ctx->conda.installer_version, val); - - ini_getval_required(ini, "conda", "installer_platform", INIVAL_TYPE_STR, &val); - conv_str(&ctx->conda.installer_platform, val); - - ini_getval_required(ini, "conda", "installer_arch", INIVAL_TYPE_STR, &val); - conv_str(&ctx->conda.installer_arch, val); - - ini_getval_required(ini, "conda", "installer_baseurl", INIVAL_TYPE_STR, &val); - conv_str(&ctx->conda.installer_baseurl, val); - - ini_getval(ini, "conda", "conda_packages", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&ctx->conda.conda_packages, LINE_SEP, val); - - for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) { - char *pkg = 
strlist_item(ctx->conda.conda_packages, i); - if (strpbrk(pkg, ";#")) { - strlist_remove(ctx->conda.conda_packages, i); - } - } - - ini_getval(ini, "conda", "pip_packages", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&ctx->conda.pip_packages, LINE_SEP, val); - - for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) { - char *pkg = strlist_item(ctx->conda.pip_packages, i); - if (strpbrk(pkg, ";#")) { - strlist_remove(ctx->conda.pip_packages, i); - } - } - - // Delivery metadata consumed - populate_mission_ini(&ctx); - - if (ctx->info.release_name) { - guard_free(ctx->info.release_name); - guard_free(ctx->info.build_name); - guard_free(ctx->info.build_number); - } - - if (delivery_format_str(ctx, &ctx->info.release_name, ctx->rules.release_fmt)) { - fprintf(stderr, "Failed to generate release name. Format used: %s\n", ctx->rules.release_fmt); - return -1; - } - - if (!ctx->info.build_name) { - delivery_format_str(ctx, &ctx->info.build_name, ctx->rules.build_name_fmt); - } - if (!ctx->info.build_number) { - delivery_format_str(ctx, &ctx->info.build_number, ctx->rules.build_number_fmt); - } - - // Best I can do to make output directories unique. Annoying. 
- delivery_init_dirs_stage2(ctx); - - if (!ctx->conda.conda_packages_defer) { - ctx->conda.conda_packages_defer = strlist_init(); - } - if (!ctx->conda.pip_packages_defer) { - ctx->conda.pip_packages_defer = strlist_init(); - } - - for (size_t z = 0, i = 0; i < ini->section_count; i++) { - if (startswith(ini->section[i]->key, "test:")) { - val.as_char_p = strchr(ini->section[i]->key, ':') + 1; - if (val.as_char_p && isempty(val.as_char_p)) { - return 1; - } - conv_str(&ctx->tests[z].name, val); - - ini_getval_required(ini, ini->section[i]->key, "version", INIVAL_TYPE_STR, &val); - conv_str(&ctx->tests[z].version, val); - - ini_getval_required(ini, ini->section[i]->key, "repository", INIVAL_TYPE_STR, &val); - conv_str(&ctx->tests[z].repository, val); - - ini_getval_required(ini, ini->section[i]->key, "script", INIVAL_TYPE_STR, &val); - conv_str_noexpand(&ctx->tests[z].script, val); - - ini_getval(ini, ini->section[i]->key, "repository_remove_tags", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&ctx->tests[z].repository_remove_tags, LINE_SEP, val); - - ini_getval(ini, ini->section[i]->key, "build_recipe", INIVAL_TYPE_STR, &val); - conv_str(&ctx->tests[z].build_recipe, val); - - ini_getval(ini, ini->section[i]->key, "runtime", INIVAL_TO_LIST, &val); - conv_strlist(&ctx->tests[z].runtime.environ, LINE_SEP, val); - z++; - } - } - - for (size_t z = 0, i = 0; i < ini->section_count; i++) { - if (startswith(ini->section[i]->key, "deploy:artifactory")) { - // Artifactory base configuration - ini_getval(ini, ini->section[i]->key, "workaround_parent_only", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->deploy.jfrog[z].upload_ctx.workaround_parent_only, val); - - ini_getval(ini, ini->section[i]->key, "exclusions", INIVAL_TYPE_STR, &val); - conv_str(&ctx->deploy.jfrog[z].upload_ctx.exclusions, val); - - ini_getval(ini, ini->section[i]->key, "explode", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->deploy.jfrog[z].upload_ctx.explode, val); - - ini_getval(ini, ini->section[i]->key, 
"recursive", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->deploy.jfrog[z].upload_ctx.recursive, val); - - ini_getval(ini, ini->section[i]->key, "retries", INIVAL_TYPE_INT, &val); - conv_int(&ctx->deploy.jfrog[z].upload_ctx.retries, val); - - ini_getval(ini, ini->section[i]->key, "retry_wait_time", INIVAL_TYPE_INT, &val); - conv_int(&ctx->deploy.jfrog[z].upload_ctx.retry_wait_time, val); - - ini_getval(ini, ini->section[i]->key, "detailed_summary", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->deploy.jfrog[z].upload_ctx.detailed_summary, val); - - ini_getval(ini, ini->section[i]->key, "quiet", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->deploy.jfrog[z].upload_ctx.quiet, val); - - ini_getval(ini, ini->section[i]->key, "regexp", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->deploy.jfrog[z].upload_ctx.regexp, val); - - ini_getval(ini, ini->section[i]->key, "spec", INIVAL_TYPE_STR, &val); - conv_str(&ctx->deploy.jfrog[z].upload_ctx.spec, val); - - ini_getval(ini, ini->section[i]->key, "flat", INIVAL_TYPE_BOOL, &val); - conv_bool(&ctx->deploy.jfrog[z].upload_ctx.flat, val); - - ini_getval(ini, ini->section[i]->key, "repo", INIVAL_TYPE_STR, &val); - conv_str(&ctx->deploy.jfrog[z].repo, val); - - ini_getval(ini, ini->section[i]->key, "dest", INIVAL_TYPE_STR, &val); - conv_str(&ctx->deploy.jfrog[z].dest, val); - - ini_getval(ini, ini->section[i]->key, "files", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&ctx->deploy.jfrog[z].files, LINE_SEP, val); - z++; - } - } - - for (size_t i = 0; i < ini->section_count; i++) { - if (startswith(ini->section[i]->key, "deploy:docker")) { - ini_getval(ini, ini->section[i]->key, "registry", INIVAL_TYPE_STR, &val); - conv_str(&ctx->deploy.docker.registry, val); - - ini_getval(ini, ini->section[i]->key, "image_compression", INIVAL_TYPE_STR, &val); - conv_str(&ctx->deploy.docker.image_compression, val); - - ini_getval(ini, ini->section[i]->key, "test_script", INIVAL_TYPE_STR, &val); - conv_str(&ctx->deploy.docker.test_script, val); - - ini_getval(ini, 
ini->section[i]->key, "build_args", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&ctx->deploy.docker.build_args, LINE_SEP, val); - - ini_getval(ini, ini->section[i]->key, "tags", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&ctx->deploy.docker.tags, LINE_SEP, val); - } - } - return 0; -} - -static int populate_delivery_cfg(struct Delivery *ctx) { - union INIVal val; - struct INIFILE *cfg = ctx->_omc_ini_fp.cfg; - if (!cfg) { - return -1; - } - ini_getval(cfg, "default", "conda_staging_dir", INIVAL_TYPE_STR, &val); - conv_str(&ctx->storage.conda_staging_dir, val); - ini_getval(cfg, "default", "conda_staging_url", INIVAL_TYPE_STR, &val); - conv_str(&ctx->storage.conda_staging_url, val); - ini_getval(cfg, "default", "wheel_staging_dir", INIVAL_TYPE_STR, &val); - conv_str(&ctx->storage.wheel_staging_dir, val); - ini_getval(cfg, "default", "wheel_staging_url", INIVAL_TYPE_STR, &val); - conv_str(&ctx->storage.wheel_staging_url, val); - ini_getval(cfg, "default", "conda_fresh_start", INIVAL_TYPE_BOOL, &val); - conv_bool(&globals.conda_fresh_start, val); - // Below can also be toggled by command-line arguments - if (!globals.continue_on_error) { - ini_getval(cfg, "default", "continue_on_error", INIVAL_TYPE_BOOL, &val); - conv_bool(&globals.continue_on_error, val); - } - // Below can also be toggled by command-line arguments - if (!globals.always_update_base_environment) { - ini_getval(cfg, "default", "always_update_base_environment", INIVAL_TYPE_BOOL, &val); - conv_bool(&globals.always_update_base_environment, val); - } - ini_getval(cfg, "default", "conda_install_prefix", INIVAL_TYPE_STR, &val); - conv_str(&globals.conda_install_prefix, val); - ini_getval(cfg, "default", "conda_packages", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&globals.conda_packages, LINE_SEP, val); - ini_getval(cfg, "default", "pip_packages", INIVAL_TYPE_STR_ARRAY, &val); - conv_strlist(&globals.pip_packages, LINE_SEP, val); - // Configure jfrog cli downloader - ini_getval(cfg, 
"jfrog_cli_download", "url", INIVAL_TYPE_STR, &val); - conv_str(&globals.jfrog.jfrog_artifactory_base_url, val); - ini_getval(cfg, "jfrog_cli_download", "product", INIVAL_TYPE_STR, &val); - conv_str(&globals.jfrog.jfrog_artifactory_product, val); - ini_getval(cfg, "jfrog_cli_download", "version_series", INIVAL_TYPE_STR, &val); - conv_str(&globals.jfrog.cli_major_ver, val); - ini_getval(cfg, "jfrog_cli_download", "version", INIVAL_TYPE_STR, &val); - conv_str(&globals.jfrog.version, val); - ini_getval(cfg, "jfrog_cli_download", "filename", INIVAL_TYPE_STR, &val); - conv_str(&globals.jfrog.remote_filename, val); - ini_getval(cfg, "deploy:artifactory", "url", INIVAL_TYPE_STR, &val); - conv_str(&globals.jfrog.url, val); - ini_getval(cfg, "deploy:artifactory", "repo", INIVAL_TYPE_STR, &val); - conv_str(&globals.jfrog.repo, val); - return 0; -} - -static int populate_info(struct Delivery *ctx) { - if (!ctx->info.time_str_epoch) { - // Record timestamp used for release - time(&ctx->info.time_now); - ctx->info.time_info = localtime(&ctx->info.time_now); - - ctx->info.time_str_epoch = calloc(OMC_TIME_STR_MAX, sizeof(*ctx->info.time_str_epoch)); - if (!ctx->info.time_str_epoch) { - msg(OMC_MSG_ERROR, "Unable to allocate memory for Unix epoch string\n"); - return -1; - } - snprintf(ctx->info.time_str_epoch, OMC_TIME_STR_MAX - 1, "%li", ctx->info.time_now); - } - return 0; -} - -int *bootstrap_build_info(struct Delivery *ctx) { - struct Delivery local; - memset(&local, 0, sizeof(local)); - local._omc_ini_fp.cfg = ini_open(ctx->_omc_ini_fp.cfg_path); - local._omc_ini_fp.delivery = ini_open(ctx->_omc_ini_fp.delivery_path); - delivery_init_platform(&local); - populate_delivery_cfg(&local); - populate_delivery_ini(&local); - populate_info(&local); - ctx->info.build_name = strdup(local.info.build_name); - ctx->info.build_number = strdup(local.info.build_number); - ctx->info.release_name = strdup(local.info.release_name); - memcpy(&ctx->info.time_info, &local.info.time_info, 
sizeof(ctx->info.time_info)); - ctx->info.time_now = local.info.time_now; - ctx->info.time_str_epoch = strdup(local.info.time_str_epoch); - delivery_free(&local); - return 0; -} - -int delivery_init(struct Delivery *ctx) { - populate_info(ctx); - populate_delivery_cfg(ctx); - - // Set artifactory URL via environment variable if possible - char *jfurl = getenv("OMC_JF_ARTIFACTORY_URL"); - if (jfurl) { - if (globals.jfrog.url) { - guard_free(globals.jfrog.url); - } - globals.jfrog.url = strdup(jfurl); - } - - // Set artifactory repository via environment if possible - char *jfrepo = getenv("OMC_JF_REPO"); - if (jfrepo) { - if (globals.jfrog.repo) { - guard_free(globals.jfrog.repo); - } - globals.jfrog.repo = strdup(jfrepo); - } - - // Configure architecture and platform information - delivery_init_platform(ctx); - - // Create OMC directory structure - delivery_init_dirs_stage1(ctx); - - char config_local[PATH_MAX]; - sprintf(config_local, "%s/%s", ctx->storage.tmpdir, "config"); - setenv("XDG_CONFIG_HOME", config_local, 1); - - char cache_local[PATH_MAX]; - sprintf(cache_local, "%s/%s", ctx->storage.tmpdir, "cache"); - setenv("XDG_CACHE_HOME", ctx->storage.tmpdir, 1); - - // add tools to PATH - char pathvar_tmp[OMC_BUFSIZ]; - sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.tools_dir, getenv("PATH")); - setenv("PATH", pathvar_tmp, 1); - - // Prevent git from paginating output - setenv("GIT_PAGER", "", 1); - - populate_delivery_ini(ctx); - - if (ctx->deploy.docker.tags) { - for (size_t i = 0; i < strlist_count(ctx->deploy.docker.tags); i++) { - char *item = strlist_item(ctx->deploy.docker.tags, i); - tolower_s(item); - } - } - - if (ctx->deploy.docker.image_compression) { - if (docker_validate_compression_program(ctx->deploy.docker.image_compression)) { - SYSERROR("[deploy:docker].image_compression - invalid command / program is not installed: %s", ctx->deploy.docker.image_compression); - return -1; - } - } - return 0; -} - -int delivery_format_str(struct Delivery *ctx, 
char **dest, const char *fmt) { - size_t fmt_len = strlen(fmt); - - if (!*dest) { - *dest = calloc(OMC_NAME_MAX, sizeof(**dest)); - if (!*dest) { - return -1; - } - } - - for (size_t i = 0; i < fmt_len; i++) { - if (fmt[i] == '%' && strlen(&fmt[i])) { - i++; - switch (fmt[i]) { - case 'n': // name - strcat(*dest, ctx->meta.name); - break; - case 'c': // codename - strcat(*dest, ctx->meta.codename); - break; - case 'm': // mission - strcat(*dest, ctx->meta.mission); - break; - case 'r': // revision - sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc); - break; - case 'R': // "final"-aware revision - if (ctx->meta.final) - strcat(*dest, "final"); - else - sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc); - break; - case 'v': // version - strcat(*dest, ctx->meta.version); - break; - case 'P': // python version - strcat(*dest, ctx->meta.python); - break; - case 'p': // python version major/minor - strcat(*dest, ctx->meta.python_compact); - break; - case 'a': // system architecture name - strcat(*dest, ctx->system.arch); - break; - case 'o': // system platform (OS) name - strcat(*dest, ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); - break; - case 't': // unix epoch - sprintf(*dest + strlen(*dest), "%ld", ctx->info.time_now); - break; - default: // unknown formatter, write as-is - sprintf(*dest + strlen(*dest), "%c%c", fmt[i - 1], fmt[i]); - break; - } - } else { // write non-format text - sprintf(*dest + strlen(*dest), "%c", fmt[i]); - } - } - return 0; -} - -void delivery_debug_show(struct Delivery *ctx) { - printf("\n====DEBUG====\n"); - printf("%-20s %-10s\n", "System configuration directory:", globals.sysconfdir); - printf("%-20s %-10s\n", "Mission directory:", ctx->storage.mission_dir); - printf("%-20s %-10s\n", "Testing enabled:", globals.enable_testing ? "Yes" : "No"); - printf("%-20s %-10s\n", "Docker image builds enabled:", globals.enable_docker ? "Yes" : "No"); - printf("%-20s %-10s\n", "Artifact uploading enabled:", globals.enable_artifactory ? 
"Yes" : "No"); -} - -void delivery_meta_show(struct Delivery *ctx) { - if (globals.verbose) { - delivery_debug_show(ctx); - } - - printf("\n====DELIVERY====\n"); - printf("%-20s %-10s\n", "Target Python:", ctx->meta.python); - printf("%-20s %-10s\n", "Name:", ctx->meta.name); - printf("%-20s %-10s\n", "Mission:", ctx->meta.mission); - if (ctx->meta.codename) { - printf("%-20s %-10s\n", "Codename:", ctx->meta.codename); - } - if (ctx->meta.version) { - printf("%-20s %-10s\n", "Version", ctx->meta.version); - } - if (!ctx->meta.final) { - printf("%-20s %-10d\n", "RC Level:", ctx->meta.rc); - } - printf("%-20s %-10s\n", "Final Release:", ctx->meta.final ? "Yes" : "No"); - printf("%-20s %-10s\n", "Based On:", ctx->meta.based_on ? ctx->meta.based_on : "New"); -} - -void delivery_conda_show(struct Delivery *ctx) { - printf("\n====CONDA====\n"); - printf("%-20s %-10s\n", "Prefix:", ctx->storage.conda_install_prefix); - - puts("Native Packages:"); - if (strlist_count(ctx->conda.conda_packages)) { - for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) { - char *token = strlist_item(ctx->conda.conda_packages, i); - if (isempty(token) || isblank(*token) || startswith(token, "-")) { - continue; - } - printf("%21s%s\n", "", token); - } - } else { - printf("%21s%s\n", "", "N/A"); - } - - puts("Python Packages:"); - if (strlist_count(ctx->conda.pip_packages)) { - for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) { - char *token = strlist_item(ctx->conda.pip_packages, i); - if (isempty(token) || isblank(*token) || startswith(token, "-")) { - continue; - } - printf("%21s%s\n", "", token); - } - } else { - printf("%21s%s\n", "", "N/A"); - } -} - -void delivery_tests_show(struct Delivery *ctx) { - printf("\n====TESTS====\n"); - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (!ctx->tests[i].name) { - continue; - } - printf("%-20s %-20s %s\n", ctx->tests[i].name, - ctx->tests[i].version, - ctx->tests[i].repository); - } 
-} - -void delivery_runtime_show(struct Delivery *ctx) { - printf("\n====RUNTIME====\n"); - struct StrList *rt = NULL; - rt = strlist_copy(ctx->runtime.environ); - if (!rt) { - // no data - return; - } - strlist_sort(rt, OMC_SORT_ALPHA); - size_t total = strlist_count(rt); - for (size_t i = 0; i < total; i++) { - char *item = strlist_item(rt, i); - if (!item) { - // not supposed to occur - msg(OMC_MSG_WARN | OMC_MSG_L1, "Encountered unexpected NULL at record %zu of %zu of runtime array.\n", i); - return; - } - printf("%s\n", item); - } -} - -int delivery_build_recipes(struct Delivery *ctx) { - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - char *recipe_dir = NULL; - if (ctx->tests[i].build_recipe) { // build a conda recipe - int recipe_type; - int status; - if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests[i].build_recipe, NULL, &recipe_dir)) { - fprintf(stderr, "Encountered an issue while cloning recipe for: %s\n", ctx->tests[i].name); - return -1; - } - recipe_type = recipe_get_type(recipe_dir); - pushd(recipe_dir); - { - if (RECIPE_TYPE_ASTROCONDA == recipe_type) { - pushd(path_basename(ctx->tests[i].repository)); - } else if (RECIPE_TYPE_CONDA_FORGE == recipe_type) { - pushd("recipe"); - } - - char recipe_version[100]; - char recipe_buildno[100]; - char recipe_git_url[PATH_MAX]; - char recipe_git_rev[PATH_MAX]; - - //sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}"); - //sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository); - //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version); - // TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^ - sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? 
ctx->tests[i].repository_info_tag : ctx->tests[i].version); - sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository); - strcpy(recipe_git_rev, ""); - sprintf(recipe_buildno, " number: 0"); - - unsigned flags = REPLACE_TRUNCATE_AFTER_MATCH; - //file_replace_text("meta.yaml", "{% set version = ", recipe_version); - if (ctx->meta.final) { - sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].version); - // TODO: replace sha256 of tagged archive - // TODO: leave the recipe unchanged otherwise. in theory this should produce the same conda package hash as conda forge. - // For now, remove the sha256 requirement - file_replace_text("meta.yaml", "sha256:", "\n", flags); - } else { - file_replace_text("meta.yaml", "{% set version = ", recipe_version, flags); - file_replace_text("meta.yaml", " url:", recipe_git_url, flags); - //file_replace_text("meta.yaml", "sha256:", recipe_git_rev); - file_replace_text("meta.yaml", " sha256:", "\n", flags); - file_replace_text("meta.yaml", " number:", recipe_buildno, flags); - } - - char command[PATH_MAX]; - sprintf(command, "mambabuild --python=%s .", ctx->meta.python); - status = conda_exec(command); - if (status) { - return -1; - } - - if (RECIPE_TYPE_GENERIC != recipe_type) { - popd(); - } - popd(); - } - } - if (recipe_dir) { - guard_free(recipe_dir); - } - } - return 0; -} - -static int filter_repo_tags(char *repo, struct StrList *patterns) { - int result = 0; - - if (!pushd(repo)) { - int list_status = 0; - char *tags_raw = shell_output("git tag -l", &list_status); - struct StrList *tags = strlist_init(); - strlist_append_tokenize(tags, tags_raw, LINE_SEP); - - for (size_t i = 0; tags && i < strlist_count(tags); i++) { - char *tag = strlist_item(tags, i); - for (size_t p = 0; p < strlist_count(patterns); p++) { - char *pattern = strlist_item(patterns, p); - int match = fnmatch(pattern, tag, 0); - if (!match) { - char cmd[PATH_MAX] = {0}; - sprintf(cmd, "git tag -d %s", 
tag); - result += system(cmd); - break; - } - } - } - guard_strlist_free(&tags); - guard_free(tags_raw); - popd(); - } else { - result = -1; - } - return result; -} - -struct StrList *delivery_build_wheels(struct Delivery *ctx) { - struct StrList *result = NULL; - struct Process proc; - memset(&proc, 0, sizeof(proc)); - - result = strlist_init(); - if (!result) { - perror("unable to allocate memory for string list"); - return NULL; - } - - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (!ctx->tests[i].build_recipe && ctx->tests[i].repository) { // build from source - char srcdir[PATH_MAX]; - char wheeldir[PATH_MAX]; - memset(srcdir, 0, sizeof(srcdir)); - memset(wheeldir, 0, sizeof(wheeldir)); - - sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests[i].name); - git_clone(&proc, ctx->tests[i].repository, srcdir, ctx->tests[i].version); - - if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { - filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags); - } - - pushd(srcdir); - { - char dname[NAME_MAX]; - char outdir[PATH_MAX]; - char cmd[PATH_MAX * 2]; - memset(dname, 0, sizeof(dname)); - memset(outdir, 0, sizeof(outdir)); - memset(cmd, 0, sizeof(outdir)); - - strcpy(dname, ctx->tests[i].name); - tolower_s(dname); - sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname); - if (mkdirs(outdir, 0755)) { - fprintf(stderr, "failed to create output directory: %s\n", outdir); - } - - sprintf(cmd, "-m build -w -o %s", outdir); - if (python_exec(cmd)) { - fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name, ctx->tests[i].version); - strlist_free(&result); - return NULL; - } - popd(); - } - } - } - return result; -} - -static const struct Test *requirement_from_test(struct Delivery *ctx, const char *name) { - struct Test *result; - - result = NULL; - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (strstr(name, 
ctx->tests[i].name)) { - result = &ctx->tests[i]; - break; - } - } - return result; -} - -int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, char *env_name, int type, struct StrList **manifest) { - char cmd[PATH_MAX]; - char pkgs[OMC_BUFSIZ]; - char *env_current = getenv("CONDA_DEFAULT_ENV"); - - if (env_current) { - // The requested environment is not the current environment - if (strcmp(env_current, env_name) != 0) { - // Activate the requested environment - printf("Activating: %s\n", env_name); - conda_activate(conda_install_dir, env_name); - runtime_replace(&ctx->runtime.environ, __environ); - } - } - - memset(cmd, 0, sizeof(cmd)); - memset(pkgs, 0, sizeof(pkgs)); - strcat(cmd, "install"); - - typedef int (*Runner)(const char *); - Runner runner = NULL; - if (INSTALL_PKG_CONDA & type) { - runner = conda_exec; - } else if (INSTALL_PKG_PIP & type) { - runner = pip_exec; - } - - if (INSTALL_PKG_CONDA_DEFERRED & type) { - strcat(cmd, " --use-local"); - } else if (INSTALL_PKG_PIP_DEFERRED & type) { - // Don't change the baseline package set unless we're working with a - // new build. Release candidates will need to keep packages as stable - // as possible between releases. 
- if (!ctx->meta.based_on) { - strcat(cmd, " --upgrade"); - } - sprintf(cmd + strlen(cmd), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir); - } - - for (size_t x = 0; manifest[x] != NULL; x++) { - char *name = NULL; - for (size_t p = 0; p < strlist_count(manifest[x]); p++) { - name = strlist_item(manifest[x], p); - strip(name); - if (!strlen(name)) { - continue; - } - if (INSTALL_PKG_PIP_DEFERRED & type) { - const struct Test *info = requirement_from_test(ctx, name); - if (info) { - sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, info->version); - } else { - fprintf(stderr, "Deferred package '%s' is not present in the tested package list!\n", name); - return -1; - } - } else { - if (startswith(name, "--") || startswith(name, "-")) { - sprintf(cmd + strlen(cmd), " %s", name); - } else { - sprintf(cmd + strlen(cmd), " '%s'", name); - } - } - } - int status = runner(cmd); - if (status) { - return status; - } - } - return 0; -} - -void delivery_get_installer_url(struct Delivery *ctx, char *result) { - if (ctx->conda.installer_version) { - // Use version specified by configuration file - sprintf(result, "%s/%s-%s-%s-%s.sh", ctx->conda.installer_baseurl, - ctx->conda.installer_name, - ctx->conda.installer_version, - ctx->conda.installer_platform, - ctx->conda.installer_arch); - } else { - // Use latest installer - sprintf(result, "%s/%s-%s-%s.sh", ctx->conda.installer_baseurl, - ctx->conda.installer_name, - ctx->conda.installer_platform, - ctx->conda.installer_arch); - } - -} - -int delivery_get_installer(struct Delivery *ctx, char *installer_url) { - char script_path[PATH_MAX]; - char *installer = path_basename(installer_url); - - memset(script_path, 0, sizeof(script_path)); - sprintf(script_path, "%s/%s", ctx->storage.tmpdir, installer); - if (access(script_path, F_OK)) { - // Script doesn't exist - int fetch_status = download(installer_url, script_path, NULL); - if (HTTP_ERROR(fetch_status) || fetch_status < 0) { - // download failed - return 
-1; - } - } else { - msg(OMC_MSG_RESTRICT | OMC_MSG_L3, "Skipped, installer already exists\n", script_path); - } - - ctx->conda.installer_path = strdup(script_path); - if (!ctx->conda.installer_path) { - SYSERROR("Unable to duplicate script_path: '%s'", script_path); - return -1; - } - - return 0; -} - -int delivery_copy_conda_artifacts(struct Delivery *ctx) { - char cmd[OMC_BUFSIZ]; - char conda_build_dir[PATH_MAX]; - char subdir[PATH_MAX]; - memset(cmd, 0, sizeof(cmd)); - memset(conda_build_dir, 0, sizeof(conda_build_dir)); - memset(subdir, 0, sizeof(subdir)); - - sprintf(conda_build_dir, "%s/%s", ctx->storage.conda_install_prefix, "conda-bld"); - // One must run conda build at least once to create the "conda-bld" directory. - // When this directory is missing there can be no build artifacts. - if (access(conda_build_dir, F_OK) < 0) { - msg(OMC_MSG_RESTRICT | OMC_MSG_WARN | OMC_MSG_L3, - "Skipped: 'conda build' has never been executed.\n"); - return 0; - } - - snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/%s %s", - conda_build_dir, - ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], - ctx->storage.conda_artifact_dir); - - return system(cmd); -} - -int delivery_copy_wheel_artifacts(struct Delivery *ctx) { - char cmd[PATH_MAX]; - memset(cmd, 0, sizeof(cmd)); - snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/*/dist/*.whl %s", - ctx->storage.build_sources_dir, - ctx->storage.wheel_artifact_dir); - return system(cmd); -} - -int delivery_index_wheel_artifacts(struct Delivery *ctx) { - struct dirent *rec; - DIR *dp; - FILE *top_fp; - - dp = opendir(ctx->storage.wheel_artifact_dir); - if (!dp) { - return -1; - } - - // Generate a "dumb" local pypi index that is compatible with: - // pip install --extra-index-url - char top_index[PATH_MAX]; - memset(top_index, 0, sizeof(top_index)); - sprintf(top_index, "%s/index.html", ctx->storage.wheel_artifact_dir); - top_fp = fopen(top_index, "w+"); - if (!top_fp) { - return -2; - } - - while ((rec = 
readdir(dp)) != NULL) { - // skip directories - if (DT_REG == rec->d_type || !strcmp(rec->d_name, "..") || !strcmp(rec->d_name, ".")) { - continue; - } - - FILE *bottom_fp; - char bottom_index[PATH_MAX * 2]; - memset(bottom_index, 0, sizeof(bottom_index)); - sprintf(bottom_index, "%s/%s/index.html", ctx->storage.wheel_artifact_dir, rec->d_name); - bottom_fp = fopen(bottom_index, "w+"); - if (!bottom_fp) { - return -3; - } - - if (globals.verbose) { - printf("+ %s\n", rec->d_name); - } - // Add record to top level index - fprintf(top_fp, "%s
\n", rec->d_name, rec->d_name); - - char dpath[PATH_MAX * 2]; - memset(dpath, 0, sizeof(dpath)); - sprintf(dpath, "%s/%s", ctx->storage.wheel_artifact_dir, rec->d_name); - struct StrList *packages = listdir(dpath); - if (!packages) { - fclose(top_fp); - fclose(bottom_fp); - return -4; - } - - for (size_t i = 0; i < strlist_count(packages); i++) { - char *package = strlist_item(packages, i); - if (!endswith(package, ".whl")) { - continue; - } - if (globals.verbose) { - printf("`- %s\n", package); - } - // Write record to bottom level index - fprintf(bottom_fp, "%s
\n", package, package); - } - fclose(bottom_fp); - - guard_strlist_free(&packages); - } - closedir(dp); - fclose(top_fp); - return 0; -} - -void delivery_install_conda(char *install_script, char *conda_install_dir) { - struct Process proc; - memset(&proc, 0, sizeof(proc)); - - if (globals.conda_fresh_start) { - if (!access(conda_install_dir, F_OK)) { - // directory exists so remove it - if (rmtree(conda_install_dir)) { - perror("unable to remove previous installation"); - exit(1); - } - - // Proceed with the installation - // -b = batch mode (non-interactive) - char cmd[255] = {0}; - snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s", - find_program("bash"), - install_script, - conda_install_dir); - if (shell_safe(&proc, cmd)) { - fprintf(stderr, "conda installation failed\n"); - exit(1); - } - } - } else { - msg(OMC_MSG_L3, "Conda removal disabled by configuration\n"); - } -} - -void delivery_conda_enable(struct Delivery *ctx, char *conda_install_dir) { - if (conda_activate(conda_install_dir, "base")) { - fprintf(stderr, "conda activation failed\n"); - exit(1); - } - - // Setting the CONDARC environment variable appears to be the only consistent - // way to make sure the file is used. Not setting this variable leads to strange - // behavior, especially if a conda environment is already active when OMC is loaded. 
- char rcpath[PATH_MAX]; - sprintf(rcpath, "%s/%s", conda_install_dir, ".condarc"); - setenv("CONDARC", rcpath, 1); - if (runtime_replace(&ctx->runtime.environ, __environ)) { - perror("unable to replace runtime environment after activating conda"); - exit(1); - } - - conda_setup_headless(); -} - -void delivery_defer_packages(struct Delivery *ctx, int type) { - struct StrList *dataptr = NULL; - struct StrList *deferred = NULL; - char *name = NULL; - char cmd[PATH_MAX]; - - memset(cmd, 0, sizeof(cmd)); - - char mode[10]; - if (DEFER_CONDA == type) { - dataptr = ctx->conda.conda_packages; - deferred = ctx->conda.conda_packages_defer; - strcpy(mode, "conda"); - } else if (DEFER_PIP == type) { - dataptr = ctx->conda.pip_packages; - deferred = ctx->conda.pip_packages_defer; - strcpy(mode, "pip"); - } - msg(OMC_MSG_L2, "Filtering %s packages by test definition...\n", mode); - - struct StrList *filtered = NULL; - filtered = strlist_init(); - for (size_t i = 0, z = 0; i < strlist_count(dataptr); i++) { - int ignore_pkg = 0; - - name = strlist_item(dataptr, i); - if (!strlen(name) || isblank(*name) || isspace(*name)) { - // no data - continue; - } - msg(OMC_MSG_L3, "package '%s': ", name); - - // Compile a list of packages that are *also* to be tested. 
- char *version; - for (size_t x = 0; x < sizeof(ctx->tests) / sizeof(ctx->tests[0]); x++) { - version = NULL; - if (ctx->tests[x].name) { - if (strstr(name, ctx->tests[x].name)) { - version = ctx->tests[x].version; - ignore_pkg = 1; - z++; - break; - } - } - } - - if (ignore_pkg) { - char build_at[PATH_MAX]; - if (DEFER_CONDA == type) { - sprintf(build_at, "%s=%s", name, version); - name = build_at; - } - - printf("BUILD FOR HOST\n"); - strlist_append(&deferred, name); - } else { - printf("USE EXISTING\n"); - strlist_append(&filtered, name); - } - } - - if (!strlist_count(deferred)) { - msg(OMC_MSG_WARN | OMC_MSG_L2, "No %s packages were filtered by test definitions\n", mode); - } else { - if (DEFER_CONDA == type) { - strlist_free(&ctx->conda.conda_packages); - ctx->conda.conda_packages = strlist_copy(filtered); - } else if (DEFER_PIP == type) { - strlist_free(&ctx->conda.pip_packages); - ctx->conda.pip_packages = strlist_copy(filtered); - } - } - if (filtered) { - strlist_free(&filtered); - } -} - -const char *release_header = "# delivery_name: %s\n" - "# delivery_fmt: %s\n" - "# creation_time: %s\n" - "# conda_ident: %s\n" - "# conda_build_ident: %s\n"; - -char *delivery_get_release_header(struct Delivery *ctx) { - char output[OMC_BUFSIZ]; - char stamp[100]; - strftime(stamp, sizeof(stamp) - 1, "%c", ctx->info.time_info); - sprintf(output, release_header, - ctx->info.release_name, - ctx->rules.release_fmt, - stamp, - ctx->conda.tool_version, - ctx->conda.tool_build_version); - return strdup(output); -} - -int delivery_dump_metadata(struct Delivery *ctx) { - FILE *fp; - char filename[PATH_MAX]; - sprintf(filename, "%s/meta-%s.omc", ctx->storage.meta_dir, ctx->info.release_name); - fp = fopen(filename, "w+"); - if (!fp) { - return -1; - } - if (globals.verbose) { - printf("%s\n", filename); - } - fprintf(fp, "name %s\n", ctx->meta.name); - fprintf(fp, "version %s\n", ctx->meta.version); - fprintf(fp, "rc %d\n", ctx->meta.rc); - fprintf(fp, "python %s\n", 
ctx->meta.python); - fprintf(fp, "python_compact %s\n", ctx->meta.python_compact); - fprintf(fp, "mission %s\n", ctx->meta.mission); - fprintf(fp, "codename %s\n", ctx->meta.codename ? ctx->meta.codename : ""); - fprintf(fp, "platform %s %s %s %s\n", - ctx->system.platform[DELIVERY_PLATFORM], - ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], - ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], - ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); - fprintf(fp, "arch %s\n", ctx->system.arch); - fprintf(fp, "time %s\n", ctx->info.time_str_epoch); - fprintf(fp, "release_fmt %s\n", ctx->rules.release_fmt); - fprintf(fp, "release_name %s\n", ctx->info.release_name); - fprintf(fp, "build_name_fmt %s\n", ctx->rules.build_name_fmt); - fprintf(fp, "build_name %s\n", ctx->info.build_name); - fprintf(fp, "build_number_fmt %s\n", ctx->rules.build_number_fmt); - fprintf(fp, "build_number %s\n", ctx->info.build_number); - fprintf(fp, "conda_installer_baseurl %s\n", ctx->conda.installer_baseurl); - fprintf(fp, "conda_installer_name %s\n", ctx->conda.installer_name); - fprintf(fp, "conda_installer_version %s\n", ctx->conda.installer_version); - fprintf(fp, "conda_installer_platform %s\n", ctx->conda.installer_platform); - fprintf(fp, "conda_installer_arch %s\n", ctx->conda.installer_arch); - - fclose(fp); - return 0; -} - -void delivery_rewrite_spec(struct Delivery *ctx, char *filename, unsigned stage) { - char output[PATH_MAX]; - char *header = NULL; - char *tempfile = NULL; - FILE *tp = NULL; - - if (stage == DELIVERY_REWRITE_SPEC_STAGE_1) { - header = delivery_get_release_header(ctx); - if (!header) { - msg(OMC_MSG_ERROR, "failed to generate release header string\n", filename); - exit(1); - } - tempfile = xmkstemp(&tp, "w+"); - if (!tempfile || !tp) { - msg(OMC_MSG_ERROR, "%s: unable to create temporary file\n", strerror(errno)); - exit(1); - } - fprintf(tp, "%s", header); - - // Read the original file - char **contents = file_readlines(filename, 0, 0, NULL); - if 
(!contents) { - msg(OMC_MSG_ERROR, "%s: unable to read %s", filename); - exit(1); - } - - // Write temporary data - for (size_t i = 0; contents[i] != NULL; i++) { - if (startswith(contents[i], "channels:")) { - // Allow for additional conda channel injection - if (ctx->conda.conda_packages_defer && strlist_count(ctx->conda.conda_packages_defer)) { - fprintf(tp, "%s - @CONDA_CHANNEL@\n", contents[i]); - continue; - } - } else if (strstr(contents[i], "- pip:")) { - if (ctx->conda.pip_packages_defer && strlist_count(ctx->conda.pip_packages_defer)) { - // Allow for additional pip argument injection - fprintf(tp, "%s - @PIP_ARGUMENTS@\n", contents[i]); - continue; - } - } else if (startswith(contents[i], "prefix:")) { - // Remove the prefix key - if (strstr(contents[i], "/") || strstr(contents[i], "\\")) { - // path is on the same line as the key - continue; - } else { - // path is on the next line? - if (contents[i + 1] && (strstr(contents[i + 1], "/") || strstr(contents[i + 1], "\\"))) { - i++; - } - continue; - } - } - fprintf(tp, "%s", contents[i]); - } - GENERIC_ARRAY_FREE(contents); - guard_free(header); - fflush(tp); - fclose(tp); - - // Replace the original file with our temporary data - if (copy2(tempfile, filename, CT_PERM) < 0) { - fprintf(stderr, "%s: could not rename '%s' to '%s'\n", strerror(errno), tempfile, filename); - exit(1); - } - remove(tempfile); - guard_free(tempfile); - } else if (stage == DELIVERY_REWRITE_SPEC_STAGE_2) { - // Replace "local" channel with the staging URL - if (ctx->storage.conda_staging_url) { - file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_staging_url, 0); - } else if (globals.jfrog.repo) { - sprintf(output, "%s/%s/%s/%s/packages/conda", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name); - file_replace_text(filename, "@CONDA_CHANNEL@", output, 0); - } else { - msg(OMC_MSG_WARN, "conda_staging_url is not configured\n", filename); - file_replace_text(filename, " - @CONDA_CHANNEL@", 
"", 0); - } - - if (ctx->storage.wheel_staging_url) { - file_replace_text(filename, "@PIP_ARGUMENTS@", ctx->storage.wheel_staging_url, 0); - } else if (globals.jfrog.repo) { - sprintf(output, "--extra-index-url %s/%s/%s/%s/packages/wheels", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name); - file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0); - } - } -} - -int delivery_index_conda_artifacts(struct Delivery *ctx) { - return conda_index(ctx->storage.conda_artifact_dir); -} - -void delivery_tests_run(struct Delivery *ctx) { - struct Process proc; - memset(&proc, 0, sizeof(proc)); - - if (!ctx->tests[0].name) { - msg(OMC_MSG_WARN | OMC_MSG_L2, "no tests are defined!\n"); - } else { - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (!ctx->tests[i].name && !ctx->tests[i].repository && !ctx->tests[i].script) { - // skip unused test records - continue; - } - msg(OMC_MSG_L2, "Executing tests for %s %s\n", ctx->tests[i].name, ctx->tests[i].version); - if (!ctx->tests[i].script || !strlen(ctx->tests[i].script)) { - msg(OMC_MSG_WARN | OMC_MSG_L3, "Nothing to do. 
To fix, declare a 'script' in section: [test:%s]\n", - ctx->tests[i].name); - continue; - } - - char destdir[PATH_MAX]; - sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(ctx->tests[i].repository)); - - if (!access(destdir, F_OK)) { - msg(OMC_MSG_L3, "Purging repository %s\n", destdir); - if (rmtree(destdir)) { - COE_CHECK_ABORT(1, "Unable to remove repository\n"); - } - } - msg(OMC_MSG_L3, "Cloning repository %s\n", ctx->tests[i].repository); - if (!git_clone(&proc, ctx->tests[i].repository, destdir, ctx->tests[i].version)) { - ctx->tests[i].repository_info_tag = strdup(git_describe(destdir)); - ctx->tests[i].repository_info_ref = strdup(git_rev_parse(destdir, "HEAD")); - } else { - COE_CHECK_ABORT(1, "Unable to clone repository\n"); - } - - if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { - filter_repo_tags(destdir, ctx->tests[i].repository_remove_tags); - } - - if (pushd(destdir)) { - COE_CHECK_ABORT(1, "Unable to enter repository directory\n"); - } else { -#if 1 - int status; - char cmd[PATH_MAX]; - - msg(OMC_MSG_L3, "Testing %s\n", ctx->tests[i].name); - memset(&proc, 0, sizeof(proc)); - - // Apply workaround for tox positional arguments - char *toxconf = NULL; - if (!access("tox.ini", F_OK)) { - if (!fix_tox_conf("tox.ini", &toxconf)) { - msg(OMC_MSG_L3, "Fixing tox positional arguments\n"); - if (!globals.workaround.tox_posargs) { - globals.workaround.tox_posargs = calloc(PATH_MAX, sizeof(*globals.workaround.tox_posargs)); - } else { - memset(globals.workaround.tox_posargs, 0, PATH_MAX); - } - snprintf(globals.workaround.tox_posargs, PATH_MAX - 1, "-c %s --root .", toxconf); - } - } - - // enable trace mode before executing each test script - memset(cmd, 0, sizeof(cmd)); - sprintf(cmd, "set -x ; %s", ctx->tests[i].script); - - char *cmd_rendered = tpl_render(cmd); - if (cmd_rendered) { - if (strcmp(cmd_rendered, cmd) != 0) { - strcpy(cmd, cmd_rendered); - } - guard_free(cmd_rendered); - 
} - - status = shell(&proc, cmd); - if (status) { - msg(OMC_MSG_ERROR, "Script failure: %s\n%s\n\nExit code: %d\n", ctx->tests[i].name, ctx->tests[i].script, status); - COE_CHECK_ABORT(1, "Test failure"); - } - - if (toxconf) { - remove(toxconf); - guard_free(toxconf); - } - popd(); -#else - msg(OMC_MSG_WARNING | OMC_MSG_L3, "TESTING DISABLED BY CODE!\n"); -#endif - } - } - } -} - -void delivery_gather_tool_versions(struct Delivery *ctx) { - int status = 0; - - // Extract version from tool output - ctx->conda.tool_version = shell_output("conda --version", &status); - if (ctx->conda.tool_version) - strip(ctx->conda.tool_version); - - ctx->conda.tool_build_version = shell_output("conda build --version", &status); - if (ctx->conda.tool_build_version) - strip(ctx->conda.tool_version); -} - -int delivery_init_artifactory(struct Delivery *ctx) { - int status = 0; - char dest[PATH_MAX] = {0}; - char filepath[PATH_MAX] = {0}; - snprintf(dest, sizeof(dest) - 1, "%s/bin", ctx->storage.tools_dir); - snprintf(filepath, sizeof(dest) - 1, "%s/bin/jf", ctx->storage.tools_dir); - - if (!access(filepath, F_OK)) { - // already have it - msg(OMC_MSG_L3, "Skipped download, %s already exists\n", filepath); - goto delivery_init_artifactory_envsetup; - } - - char *platform = ctx->system.platform[DELIVERY_PLATFORM]; - msg(OMC_MSG_L3, "Downloading %s for %s %s\n", globals.jfrog.remote_filename, platform, ctx->system.arch); - if ((status = artifactory_download_cli(dest, - globals.jfrog.jfrog_artifactory_base_url, - globals.jfrog.jfrog_artifactory_product, - globals.jfrog.cli_major_ver, - globals.jfrog.version, - platform, - ctx->system.arch, - globals.jfrog.remote_filename))) { - remove(filepath); - } - - delivery_init_artifactory_envsetup: - // CI (ridiculously generic, why?) 
disables interactive prompts and progress bar output - setenv("CI", "1", 1); - - // JFROG_CLI_HOME_DIR is where .jfrog is stored - char path[PATH_MAX] = {0}; - snprintf(path, sizeof(path) - 1, "%s/.jfrog", ctx->storage.build_dir); - setenv("JFROG_CLI_HOME_DIR", path, 1); - - // JFROG_CLI_TEMP_DIR is where the obvious is stored - setenv("JFROG_CLI_TEMP_DIR", ctx->storage.tmpdir, 1); - return status; -} - -int delivery_artifact_upload(struct Delivery *ctx) { - int status = 0; - - if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) { - fprintf(stderr, "Failed to initialize Artifactory authentication context\n"); - return -1; - } - - for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(*ctx->deploy.jfrog); i++) { - if (!ctx->deploy.jfrog[i].files || !ctx->deploy.jfrog[i].dest) { - break; - } - jfrt_upload_init(&ctx->deploy.jfrog[i].upload_ctx); - - if (!globals.jfrog.repo) { - msg(OMC_MSG_WARN, "Artifactory repository path is not configured!\n"); - fprintf(stderr, "set OMC_JF_REPO environment variable...\nOr append to configuration file:\n\n"); - fprintf(stderr, "[deploy:artifactory]\nrepo = example/generic/repo/path\n\n"); - status++; - break; - } else if (!ctx->deploy.jfrog[i].repo) { - ctx->deploy.jfrog[i].repo = strdup(globals.jfrog.repo); - } - - if (!ctx->deploy.jfrog[i].repo || isempty(ctx->deploy.jfrog[i].repo) || !strlen(ctx->deploy.jfrog[i].repo)) { - // Unlikely to trigger if the config parser is working correctly - msg(OMC_MSG_ERROR, "Artifactory repository path is empty. 
Cannot continue.\n"); - status++; - break; - } - - ctx->deploy.jfrog[i].upload_ctx.workaround_parent_only = true; - ctx->deploy.jfrog[i].upload_ctx.build_name = ctx->info.build_name; - ctx->deploy.jfrog[i].upload_ctx.build_number = ctx->info.build_number; - - char files[PATH_MAX]; - char dest[PATH_MAX]; // repo + remote dir - - if (jfrog_cli_rt_ping(&ctx->deploy.jfrog_auth)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Unable to contact artifactory server: %s\n", ctx->deploy.jfrog_auth.url); - return -1; - } - - if (strlist_count(ctx->deploy.jfrog[i].files)) { - for (size_t f = 0; f < strlist_count(ctx->deploy.jfrog[i].files); f++) { - memset(dest, 0, sizeof(dest)); - memset(files, 0, sizeof(files)); - snprintf(dest, sizeof(dest) - 1, "%s/%s", ctx->deploy.jfrog[i].repo, ctx->deploy.jfrog[i].dest); - snprintf(files, sizeof(files) - 1, "%s", strlist_item(ctx->deploy.jfrog[i].files, f)); - status += jfrog_cli_rt_upload(&ctx->deploy.jfrog_auth, &ctx->deploy.jfrog[i].upload_ctx, files, dest); - } - } - } - - if (!status && ctx->deploy.jfrog[0].files && ctx->deploy.jfrog[0].dest) { - jfrog_cli_rt_build_collect_env(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, ctx->deploy.jfrog[0].upload_ctx.build_number); - jfrog_cli_rt_build_publish(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, ctx->deploy.jfrog[0].upload_ctx.build_number); - } - - return status; -} - -int delivery_mission_render_files(struct Delivery *ctx) { - if (!ctx->storage.mission_dir) { - fprintf(stderr, "Mission directory is not configured. 
Context not initialized?\n"); - return -1; - } - struct Data { - char *src; - char *dest; - } data; - struct INIFILE *cfg = ctx->_omc_ini_fp.mission; - union INIVal val; - - memset(&data, 0, sizeof(data)); - data.src = calloc(PATH_MAX, sizeof(*data.src)); - if (!data.src) { - perror("data.src"); - return -1; - } - - for (size_t i = 0; i < cfg->section_count; i++) { - char *section_name = cfg->section[i]->key; - if (!startswith(section_name, "template:")) { - continue; - } - val.as_char_p = strchr(section_name, ':') + 1; - if (val.as_char_p && isempty(val.as_char_p)) { - guard_free(data.src); - return 1; - } - sprintf(data.src, "%s/%s/%s", ctx->storage.mission_dir, ctx->meta.mission, val.as_char_p); - msg(OMC_MSG_L2, "%s\n", data.src); - - ini_getval_required(cfg, section_name, "destination", INIVAL_TYPE_STR, &val); - conv_str(&data.dest, val); - - char *contents; - struct stat st; - if (lstat(data.src, &st)) { - perror(data.src); - guard_free(data.dest); - continue; - } - - contents = calloc(st.st_size + 1, sizeof(*contents)); - if (!contents) { - perror("template file contents"); - guard_free(data.dest); - continue; - } - - FILE *fp; - fp = fopen(data.src, "rb"); - if (!fp) { - perror(data.src); - guard_free(contents); - guard_free(data.dest); - continue; - } - - if (fread(contents, st.st_size, sizeof(*contents), fp) < 1) { - perror("while reading template file"); - guard_free(contents); - guard_free(data.dest); - fclose(fp); - continue; - } - fclose(fp); - - msg(OMC_MSG_L3, "Writing %s\n", data.dest); - if (tpl_render_to_file(contents, data.dest)) { - guard_free(contents); - guard_free(data.dest); - continue; - } - guard_free(contents); - guard_free(data.dest); - } - - guard_free(data.src); - return 0; -} - -int delivery_docker(struct Delivery *ctx) { - if (!docker_capable(&ctx->deploy.docker.capabilities)) { - return -1; - } - char tag[OMC_NAME_MAX]; - char args[PATH_MAX]; - int has_registry = ctx->deploy.docker.registry != NULL; - size_t total_tags = 
strlist_count(ctx->deploy.docker.tags); - size_t total_build_args = strlist_count(ctx->deploy.docker.build_args); - - if (!has_registry) { - msg(OMC_MSG_WARN | OMC_MSG_L2, "No docker registry defined. You will need to manually retag the resulting image.\n"); - } - - if (!total_tags) { - char default_tag[PATH_MAX]; - msg(OMC_MSG_WARN | OMC_MSG_L2, "No docker tags defined by configuration. Generating default tag(s).\n"); - // generate local tag - memset(default_tag, 0, sizeof(default_tag)); - sprintf(default_tag, "%s:%s-py%s", ctx->meta.name, ctx->info.build_name, ctx->meta.python_compact); - tolower_s(default_tag); - - // Add tag - ctx->deploy.docker.tags = strlist_init(); - strlist_append(&ctx->deploy.docker.tags, default_tag); - - if (has_registry) { - // generate tag for target registry - memset(default_tag, 0, sizeof(default_tag)); - sprintf(default_tag, "%s/%s:%s-py%s", ctx->deploy.docker.registry, ctx->meta.name, ctx->info.build_number, ctx->meta.python_compact); - tolower_s(default_tag); - - // Add tag - strlist_append(&ctx->deploy.docker.tags, default_tag); - } - // regenerate total tag available - total_tags = strlist_count(ctx->deploy.docker.tags); - } - - memset(args, 0, sizeof(args)); - - // Append image tags to command - for (size_t i = 0; i < total_tags; i++) { - char *tag_orig = strlist_item(ctx->deploy.docker.tags, i); - strcpy(tag, tag_orig); - docker_sanitize_tag(tag); - sprintf(args + strlen(args), " -t \"%s\" ", tag); - } - - // Append build arguments to command (i.e. 
--build-arg "key=value" - for (size_t i = 0; i < total_build_args; i++) { - char *build_arg = strlist_item(ctx->deploy.docker.build_args, i); - if (!build_arg) { - break; - } - sprintf(args + strlen(args), " --build-arg \"%s\" ", build_arg); - } - - // Build the image - char delivery_file[PATH_MAX]; - char dest[PATH_MAX]; - char rsync_cmd[PATH_MAX * 2]; - memset(delivery_file, 0, sizeof(delivery_file)); - memset(dest, 0, sizeof(dest)); - - sprintf(delivery_file, "%s/%s.yml", ctx->storage.delivery_dir, ctx->info.release_name); - if (access(delivery_file, F_OK) < 0) { - fprintf(stderr, "docker build cannot proceed without delivery file: %s\n", delivery_file); - return -1; - } - - sprintf(dest, "%s/%s.yml", ctx->storage.build_docker_dir, ctx->info.release_name); - if (copy2(delivery_file, dest, CT_PERM)) { - fprintf(stderr, "Failed to copy delivery file to %s: %s\n", dest, strerror(errno)); - return -1; - } - - memset(dest, 0, sizeof(dest)); - sprintf(dest, "%s/packages", ctx->storage.build_docker_dir); - - msg(OMC_MSG_L2, "Copying conda packages\n"); - memset(rsync_cmd, 0, sizeof(rsync_cmd)); - sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.conda_artifact_dir, dest); - if (system(rsync_cmd)) { - fprintf(stderr, "Failed to copy conda artifacts to docker build directory\n"); - return -1; - } - - msg(OMC_MSG_L2, "Copying wheel packages\n"); - memset(rsync_cmd, 0, sizeof(rsync_cmd)); - sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.wheel_artifact_dir, dest); - if (system(rsync_cmd)) { - fprintf(stderr, "Failed to copy wheel artifactory to docker build directory\n"); - } - - if (docker_build(ctx->storage.build_docker_dir, args, ctx->deploy.docker.capabilities.build)) { - return -1; - } - - // Test the image - // All tags point back to the same image so test the first one we see - // regardless of how many are defined - strcpy(tag, strlist_item(ctx->deploy.docker.tags, 0)); - docker_sanitize_tag(tag); - - msg(OMC_MSG_L2, "Executing 
image test script for %s\n", tag); - if (ctx->deploy.docker.test_script) { - if (isempty(ctx->deploy.docker.test_script)) { - msg(OMC_MSG_L2 | OMC_MSG_WARN, "Image test script has no content\n"); - } else { - int state; - if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) { - msg(OMC_MSG_L2 | OMC_MSG_ERROR, "Non-zero exit (%d) from test script. %s image archive will not be generated.\n", state >> 8, tag); - // test failed -- don't save the image - return -1; - } - } - } else { - msg(OMC_MSG_L2 | OMC_MSG_WARN, "No image test script defined\n"); - } - - // Test successful, save image - if (docker_save(path_basename(tag), ctx->storage.docker_artifact_dir, ctx->deploy.docker.image_compression)) { - // save failed - return -1; - } - - return 0; -} - -int delivery_fixup_test_results(struct Delivery *ctx) { - struct dirent *rec; - DIR *dp; - - dp = opendir(ctx->storage.results_dir); - if (!dp) { - perror(ctx->storage.results_dir); - return -1; - } - - while ((rec = readdir(dp)) != NULL) { - char path[PATH_MAX]; - memset(path, 0, sizeof(path)); - - if (!strcmp(rec->d_name, ".") || !strcmp(rec->d_name, "..")) { - continue; - } else if (!endswith(rec->d_name, ".xml")) { - continue; - } - - sprintf(path, "%s/%s", ctx->storage.results_dir, rec->d_name); - msg(OMC_MSG_L2, "%s\n", rec->d_name); - if (xml_pretty_print_in_place(path, OMC_XML_PRETTY_PRINT_PROG, OMC_XML_PRETTY_PRINT_ARGS)) { - msg(OMC_MSG_L3 | OMC_MSG_WARN, "Failed to rewrite file '%s'\n", rec->d_name); - } - } - - closedir(dp); - return 0; -} \ No newline at end of file diff --git a/src/delivery.c b/src/delivery.c new file mode 100644 index 0000000..e77d41c --- /dev/null +++ b/src/delivery.c @@ -0,0 +1,2100 @@ +#define _GNU_SOURCE + +#include +#include "core.h" + +extern struct STASIS_GLOBAL globals; + +static void ini_has_key_required(struct INIFILE *ini, const char *section_name, char *key) { + int status = ini_has_key(ini, section_name, key); + if (!status) { + SYSERROR("%s:%s key is required 
but not defined", section_name, key); + exit(1); + } +} + +static void ini_getval_required(struct INIFILE *ini, char *section_name, char *key, unsigned type, union INIVal *val) { + int status = ini_getval(ini, section_name, key, type, val); + if (status || isempty(val->as_char_p)) { + SYSERROR("%s:%s value is required but not defined", section_name, key); + exit(1); + } +} + +static void conv_int(int *x, union INIVal val) { + *x = val.as_int; +} + +static void conv_str(char **x, union INIVal val) { + if (*x) { + guard_free(*x); + } + if (val.as_char_p) { + char *tplop = tpl_render(val.as_char_p); + if (tplop) { + *x = tplop; + } else { + *x = NULL; + } + } else { + *x = NULL; + } +} + +static void conv_str_noexpand(char **x, union INIVal val) { + if (*x) { + guard_free(*x); + } + *x = strdup(val.as_char_p); +} + +static void conv_strlist(struct StrList **x, char *tok, union INIVal val) { + if (!(*x)) + (*x) = strlist_init(); + if (val.as_char_p) { + char *tplop = tpl_render(val.as_char_p); + if (tplop) { + strip(tplop); + strlist_append_tokenize((*x), tplop, tok); + guard_free(tplop); + } + } +} + +static void conv_bool(bool *x, union INIVal val) { + *x = val.as_bool; +} + +int delivery_init_tmpdir(struct Delivery *ctx) { + char *tmpdir = NULL; + char *x = NULL; + int unusable = 0; + errno = 0; + + x = getenv("TMPDIR"); + if (x) { + guard_free(ctx->storage.tmpdir); + tmpdir = strdup(x); + } else { + tmpdir = ctx->storage.tmpdir; + } + + if (!tmpdir) { + // memory error + return -1; + } + + // If the directory doesn't exist, create it + if (access(tmpdir, F_OK) < 0) { + if (mkdirs(tmpdir, 0755) < 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create temporary storage directory: %s (%s)\n", tmpdir, strerror(errno)); + goto l_delivery_init_tmpdir_fatal; + } + } + + // If we can't read, write, or execute, then die + if (access(tmpdir, R_OK | W_OK | X_OK) < 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s requires at least 0755 permissions.\n"); + goto 
l_delivery_init_tmpdir_fatal; + } + + struct statvfs st; + if (statvfs(tmpdir, &st) < 0) { + goto l_delivery_init_tmpdir_fatal; + } + +#if defined(STASIS_OS_LINUX) + // If we can't execute programs, or write data to the file system at all, then die + if ((st.f_flag & ST_NOEXEC) != 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted with noexec\n", tmpdir); + goto l_delivery_init_tmpdir_fatal; + } +#endif + if ((st.f_flag & ST_RDONLY) != 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted read-only\n", tmpdir); + goto l_delivery_init_tmpdir_fatal; + } + + if (!globals.tmpdir) { + globals.tmpdir = strdup(tmpdir); + } + + if (!ctx->storage.tmpdir) { + ctx->storage.tmpdir = strdup(globals.tmpdir); + } + return unusable; + + l_delivery_init_tmpdir_fatal: + unusable = 1; + return unusable; +} + +void delivery_free(struct Delivery *ctx) { + guard_free(ctx->system.arch); + GENERIC_ARRAY_FREE(ctx->system.platform); + guard_free(ctx->meta.name); + guard_free(ctx->meta.version); + guard_free(ctx->meta.codename); + guard_free(ctx->meta.mission); + guard_free(ctx->meta.python); + guard_free(ctx->meta.mission); + guard_free(ctx->meta.python_compact); + guard_free(ctx->meta.based_on); + guard_runtime_free(ctx->runtime.environ); + guard_free(ctx->storage.root); + guard_free(ctx->storage.tmpdir); + guard_free(ctx->storage.delivery_dir); + guard_free(ctx->storage.tools_dir); + guard_free(ctx->storage.package_dir); + guard_free(ctx->storage.results_dir); + guard_free(ctx->storage.output_dir); + guard_free(ctx->storage.conda_install_prefix); + guard_free(ctx->storage.conda_artifact_dir); + guard_free(ctx->storage.conda_staging_dir); + guard_free(ctx->storage.conda_staging_url); + guard_free(ctx->storage.wheel_artifact_dir); + guard_free(ctx->storage.wheel_staging_dir); + guard_free(ctx->storage.wheel_staging_url); + guard_free(ctx->storage.build_dir); + guard_free(ctx->storage.build_recipes_dir); + guard_free(ctx->storage.build_sources_dir); + 
guard_free(ctx->storage.build_testing_dir); + guard_free(ctx->storage.build_docker_dir); + guard_free(ctx->storage.mission_dir); + guard_free(ctx->storage.docker_artifact_dir); + guard_free(ctx->info.time_str_epoch); + guard_free(ctx->info.build_name); + guard_free(ctx->info.build_number); + guard_free(ctx->info.release_name); + guard_free(ctx->conda.installer_baseurl); + guard_free(ctx->conda.installer_name); + guard_free(ctx->conda.installer_version); + guard_free(ctx->conda.installer_platform); + guard_free(ctx->conda.installer_arch); + guard_free(ctx->conda.installer_path); + guard_free(ctx->conda.tool_version); + guard_free(ctx->conda.tool_build_version); + guard_strlist_free(&ctx->conda.conda_packages); + guard_strlist_free(&ctx->conda.conda_packages_defer); + guard_strlist_free(&ctx->conda.pip_packages); + guard_strlist_free(&ctx->conda.pip_packages_defer); + guard_strlist_free(&ctx->conda.wheels_packages); + + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + guard_free(ctx->tests[i].name); + guard_free(ctx->tests[i].version); + guard_free(ctx->tests[i].repository); + guard_free(ctx->tests[i].repository_info_ref); + guard_free(ctx->tests[i].repository_info_tag); + guard_free(ctx->tests[i].script); + guard_free(ctx->tests[i].build_recipe); + // test-specific runtime variables + guard_runtime_free(ctx->tests[i].runtime.environ); + } + + guard_free(ctx->rules.release_fmt); + guard_free(ctx->rules.build_name_fmt); + guard_free(ctx->rules.build_number_fmt); + + guard_free(ctx->deploy.docker.test_script); + guard_free(ctx->deploy.docker.registry); + guard_free(ctx->deploy.docker.image_compression); + guard_strlist_free(&ctx->deploy.docker.tags); + guard_strlist_free(&ctx->deploy.docker.build_args); + + for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(ctx->deploy.jfrog[0]); i++) { + guard_free(ctx->deploy.jfrog[i].repo); + guard_free(ctx->deploy.jfrog[i].dest); + guard_strlist_free(&ctx->deploy.jfrog[i].files); + } + + if 
(ctx->_stasis_ini_fp.delivery) { + ini_free(&ctx->_stasis_ini_fp.delivery); + } + guard_free(ctx->_stasis_ini_fp.delivery_path); + + if (ctx->_stasis_ini_fp.cfg) { + // optional extras + ini_free(&ctx->_stasis_ini_fp.cfg); + } + guard_free(ctx->_stasis_ini_fp.cfg_path); + + if (ctx->_stasis_ini_fp.mission) { + ini_free(&ctx->_stasis_ini_fp.mission); + } + guard_free(ctx->_stasis_ini_fp.mission_path); +} + +void delivery_init_dirs_stage2(struct Delivery *ctx) { + path_store(&ctx->storage.build_recipes_dir, PATH_MAX, ctx->storage.build_dir, "recipes"); + path_store(&ctx->storage.build_sources_dir, PATH_MAX, ctx->storage.build_dir, "sources"); + path_store(&ctx->storage.build_testing_dir, PATH_MAX, ctx->storage.build_dir, "testing"); + path_store(&ctx->storage.build_docker_dir, PATH_MAX, ctx->storage.build_dir, "docker"); + + path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery"); + path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results"); + path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages"); + path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config"); + path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta"); + + path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda"); + path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels"); + path_store(&ctx->storage.docker_artifact_dir, PATH_MAX, ctx->storage.package_dir, "docker"); +} + +void delivery_init_dirs_stage1(struct Delivery *ctx) { + char *rootdir = getenv("STASIS_ROOT"); + if (rootdir) { + if (isempty(rootdir)) { + fprintf(stderr, "STASIS_ROOT is set, but empty. 
Please assign a file system path to this environment variable.\n"); + exit(1); + } + path_store(&ctx->storage.root, PATH_MAX, rootdir, ctx->info.build_name); + } else { + // use "stasis" in current working directory + path_store(&ctx->storage.root, PATH_MAX, "stasis", ctx->info.build_name); + } + path_store(&ctx->storage.tools_dir, PATH_MAX, ctx->storage.root, "tools"); + path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp"); + if (delivery_init_tmpdir(ctx)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Set $TMPDIR to a location other than %s\n", globals.tmpdir); + if (globals.tmpdir) + guard_free(globals.tmpdir); + exit(1); + } + + path_store(&ctx->storage.build_dir, PATH_MAX, ctx->storage.root, "build"); + path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output"); + + if (!ctx->storage.mission_dir) { + path_store(&ctx->storage.mission_dir, PATH_MAX, globals.sysconfdir, "mission"); + } + + if (access(ctx->storage.mission_dir, F_OK)) { + msg(STASIS_MSG_L1, "%s: %s\n", ctx->storage.mission_dir, strerror(errno)); + exit(1); + } + + // Override installation prefix using global configuration key + if (globals.conda_install_prefix && strlen(globals.conda_install_prefix)) { + // user wants a specific path + globals.conda_fresh_start = false; + /* + if (mkdirs(globals.conda_install_prefix, 0755)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create directory: %s: %s\n", + strerror(errno), globals.conda_install_prefix); + exit(1); + } + */ + /* + ctx->storage.conda_install_prefix = realpath(globals.conda_install_prefix, NULL); + if (!ctx->storage.conda_install_prefix) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "realpath(): Conda installation prefix reassignment failed\n"); + exit(1); + } + ctx->storage.conda_install_prefix = strdup(globals.conda_install_prefix); + */ + path_store(&ctx->storage.conda_install_prefix, PATH_MAX, globals.conda_install_prefix, "conda"); + } else { + // install conda under the STASIS tree + 
path_store(&ctx->storage.conda_install_prefix, PATH_MAX, ctx->storage.tools_dir, "conda"); + } +} + +int delivery_init_platform(struct Delivery *ctx) { + msg(STASIS_MSG_L2, "Setting architecture\n"); + char archsuffix[20]; + struct utsname uts; + if (uname(&uts)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "uname() failed: %s\n", strerror(errno)); + return -1; + } + + ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX + 1, sizeof(*ctx->system.platform)); + if (!ctx->system.platform) { + SYSERROR("Unable to allocate %d records for platform array\n", DELIVERY_PLATFORM_MAX); + return -1; + } + for (size_t i = 0; i < DELIVERY_PLATFORM_MAX; i++) { + ctx->system.platform[i] = calloc(DELIVERY_PLATFORM_MAXLEN, sizeof(*ctx->system.platform[0])); + } + + ctx->system.arch = strdup(uts.machine); + if (!ctx->system.arch) { + // memory error + return -1; + } + + if (!strcmp(ctx->system.arch, "x86_64")) { + strcpy(archsuffix, "64"); + } else { + strcpy(archsuffix, ctx->system.arch); + } + + msg(STASIS_MSG_L2, "Setting platform\n"); + strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname); + if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) { + sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "MacOSX"); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "macos"); + } else if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux")) { + sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "linux-%s", archsuffix); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "Linux"); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "linux"); + } else { + // Not explicitly supported systems + strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], ctx->system.platform[DELIVERY_PLATFORM]); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], ctx->system.platform[DELIVERY_PLATFORM]); + 
strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], ctx->system.platform[DELIVERY_PLATFORM]); + tolower_s(ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); + } + + // Declare some important bits as environment variables + setenv("STASIS_ARCH", ctx->system.arch, 1); + setenv("STASIS_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM], 1); + setenv("STASIS_CONDA_ARCH", ctx->system.arch, 1); + setenv("STASIS_CONDA_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], 1); + setenv("STASIS_CONDA_PLATFORM_SUBDIR", ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], 1); + + // Register template variables + // These were moved out of main() because we can't take the address of system.platform[x] + // _before_ the array has been initialized. + tpl_register("system.arch", &ctx->system.arch); + tpl_register("system.platform", &ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); + + return 0; +} + +static int populate_mission_ini(struct Delivery **ctx) { + union INIVal val; + struct INIFILE *ini; + + if ((*ctx)->_stasis_ini_fp.mission) { + return 0; + } + + // Now populate the rules + char missionfile[PATH_MAX] = {0}; + if (getenv("STASIS_SYSCONFDIR")) { + sprintf(missionfile, "%s/%s/%s/%s.ini", + getenv("STASIS_SYSCONFDIR"), "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); + } else { + sprintf(missionfile, "%s/%s/%s/%s.ini", + globals.sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); + } + + msg(STASIS_MSG_L2, "Reading mission configuration: %s\n", missionfile); + (*ctx)->_stasis_ini_fp.mission = ini_open(missionfile); + ini = (*ctx)->_stasis_ini_fp.mission; + if (!ini) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read misson configuration: %s, %s\n", missionfile, strerror(errno)); + exit(1); + } + (*ctx)->_stasis_ini_fp.mission_path = strdup(missionfile); + + ini_getval_required(ini, "meta", "release_fmt", INIVAL_TYPE_STR, &val); + conv_str(&(*ctx)->rules.release_fmt, val); + + // Used for setting artifactory build info + 
ini_getval_required(ini, "meta", "build_name_fmt", INIVAL_TYPE_STR, &val); + conv_str(&(*ctx)->rules.build_name_fmt, val); + + // Used for setting artifactory build info + ini_getval_required(ini, "meta", "build_number_fmt", INIVAL_TYPE_STR, &val); + conv_str(&(*ctx)->rules.build_number_fmt, val); + return 0; +} + +void validate_delivery_ini(struct INIFILE *ini) { + if (ini_section_search(&ini, INI_SEARCH_EXACT, "meta")) { + ini_has_key_required(ini, "meta", "name"); + ini_has_key_required(ini, "meta", "version"); + ini_has_key_required(ini, "meta", "rc"); + ini_has_key_required(ini, "meta", "mission"); + ini_has_key_required(ini, "meta", "python"); + } else { + SYSERROR("%s", "[meta] configuration section is required"); + exit(1); + } + + if (ini_section_search(&ini, INI_SEARCH_EXACT, "conda")) { + ini_has_key_required(ini, "conda", "installer_name"); + ini_has_key_required(ini, "conda", "installer_version"); + ini_has_key_required(ini, "conda", "installer_platform"); + ini_has_key_required(ini, "conda", "installer_arch"); + } else { + SYSERROR("%s", "[conda] configuration section is required"); + exit(1); + } + + for (size_t i = 0; i < ini->section_count; i++) { + struct INISection *section = ini->section[i]; + if (section && startswith(section->key, "test:")) { + char *name = strstr(section->key, ":"); + if (name && strlen(name) > 1) { + name = &name[1]; + } + ini_has_key_required(ini, section->key, "version"); + ini_has_key_required(ini, section->key, "repository"); + ini_has_key_required(ini, section->key, "script"); + } + } + + if (ini_section_search(&ini, INI_SEARCH_EXACT, "deploy:docker")) { + // yeah? 
+ } + + for (size_t i = 0; i < ini->section_count; i++) { + struct INISection *section = ini->section[i]; + if (section && startswith(section->key, "deploy:artifactory")) { + ini_has_key_required(ini, section->key, "files"); + ini_has_key_required(ini, section->key, "dest"); + } + } +} + +static int populate_delivery_ini(struct Delivery *ctx) { + union INIVal val; + struct INIFILE *ini = ctx->_stasis_ini_fp.delivery; + struct INIData *rtdata; + RuntimeEnv *rt; + + validate_delivery_ini(ini); + // Populate runtime variables first they may be interpreted by other + // keys in the configuration + rt = runtime_copy(__environ); + while ((rtdata = ini_getall(ini, "runtime")) != NULL) { + char rec[STASIS_BUFSIZ]; + sprintf(rec, "%s=%s", lstrip(strip(rtdata->key)), lstrip(strip(rtdata->value))); + runtime_set(rt, rtdata->key, rtdata->value); + } + runtime_apply(rt); + ctx->runtime.environ = rt; + + ini_getval_required(ini, "meta", "mission", INIVAL_TYPE_STR, &val); + conv_str(&ctx->meta.mission, val); + + if (!strcasecmp(ctx->meta.mission, "hst")) { + ini_getval(ini, "meta", "codename", INIVAL_TYPE_STR, &val); + conv_str(&ctx->meta.codename, val); + } else { + ctx->meta.codename = NULL; + } + + /* + if (!strcasecmp(ctx->meta.mission, "jwst")) { + ini_getval(ini, "meta", "version", INIVAL_TYPE_STR, &val); + conv_str(&ctx->meta.version, val); + + } else { + ctx->meta.version = NULL; + } + */ + ini_getval(ini, "meta", "version", INIVAL_TYPE_STR, &val); + conv_str(&ctx->meta.version, val); + + ini_getval_required(ini, "meta", "name", INIVAL_TYPE_STR, &val); + conv_str(&ctx->meta.name, val); + + ini_getval(ini, "meta", "rc", INIVAL_TYPE_INT, &val); + conv_int(&ctx->meta.rc, val); + + ini_getval(ini, "meta", "final", INIVAL_TYPE_BOOL, &val); + conv_bool(&ctx->meta.final, val); + + ini_getval(ini, "meta", "based_on", INIVAL_TYPE_STR, &val); + conv_str(&ctx->meta.based_on, val); + + if (!ctx->meta.python) { + ini_getval(ini, "meta", "python", INIVAL_TYPE_STR, &val); + 
conv_str(&ctx->meta.python, val); + guard_free(ctx->meta.python_compact); + ctx->meta.python_compact = to_short_version(ctx->meta.python); + } else { + ini_setval(&ini, INI_SETVAL_REPLACE, "meta", "python", ctx->meta.python); + } + + ini_getval_required(ini, "conda", "installer_name", INIVAL_TYPE_STR, &val); + conv_str(&ctx->conda.installer_name, val); + + ini_getval_required(ini, "conda", "installer_version", INIVAL_TYPE_STR, &val); + conv_str(&ctx->conda.installer_version, val); + + ini_getval_required(ini, "conda", "installer_platform", INIVAL_TYPE_STR, &val); + conv_str(&ctx->conda.installer_platform, val); + + ini_getval_required(ini, "conda", "installer_arch", INIVAL_TYPE_STR, &val); + conv_str(&ctx->conda.installer_arch, val); + + ini_getval_required(ini, "conda", "installer_baseurl", INIVAL_TYPE_STR, &val); + conv_str(&ctx->conda.installer_baseurl, val); + + ini_getval(ini, "conda", "conda_packages", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&ctx->conda.conda_packages, LINE_SEP, val); + + for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) { + char *pkg = strlist_item(ctx->conda.conda_packages, i); + if (strpbrk(pkg, ";#")) { + strlist_remove(ctx->conda.conda_packages, i); + } + } + + ini_getval(ini, "conda", "pip_packages", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&ctx->conda.pip_packages, LINE_SEP, val); + + for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) { + char *pkg = strlist_item(ctx->conda.pip_packages, i); + if (strpbrk(pkg, ";#")) { + strlist_remove(ctx->conda.pip_packages, i); + } + } + + // Delivery metadata consumed + populate_mission_ini(&ctx); + + if (ctx->info.release_name) { + guard_free(ctx->info.release_name); + guard_free(ctx->info.build_name); + guard_free(ctx->info.build_number); + } + + if (delivery_format_str(ctx, &ctx->info.release_name, ctx->rules.release_fmt)) { + fprintf(stderr, "Failed to generate release name. 
Format used: %s\n", ctx->rules.release_fmt); + return -1; + } + + if (!ctx->info.build_name) { + delivery_format_str(ctx, &ctx->info.build_name, ctx->rules.build_name_fmt); + } + if (!ctx->info.build_number) { + delivery_format_str(ctx, &ctx->info.build_number, ctx->rules.build_number_fmt); + } + + // Best I can do to make output directories unique. Annoying. + delivery_init_dirs_stage2(ctx); + + if (!ctx->conda.conda_packages_defer) { + ctx->conda.conda_packages_defer = strlist_init(); + } + if (!ctx->conda.pip_packages_defer) { + ctx->conda.pip_packages_defer = strlist_init(); + } + + for (size_t z = 0, i = 0; i < ini->section_count; i++) { + if (startswith(ini->section[i]->key, "test:")) { + val.as_char_p = strchr(ini->section[i]->key, ':') + 1; + if (val.as_char_p && isempty(val.as_char_p)) { + return 1; + } + conv_str(&ctx->tests[z].name, val); + + ini_getval_required(ini, ini->section[i]->key, "version", INIVAL_TYPE_STR, &val); + conv_str(&ctx->tests[z].version, val); + + ini_getval_required(ini, ini->section[i]->key, "repository", INIVAL_TYPE_STR, &val); + conv_str(&ctx->tests[z].repository, val); + + ini_getval_required(ini, ini->section[i]->key, "script", INIVAL_TYPE_STR, &val); + conv_str_noexpand(&ctx->tests[z].script, val); + + ini_getval(ini, ini->section[i]->key, "repository_remove_tags", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&ctx->tests[z].repository_remove_tags, LINE_SEP, val); + + ini_getval(ini, ini->section[i]->key, "build_recipe", INIVAL_TYPE_STR, &val); + conv_str(&ctx->tests[z].build_recipe, val); + + ini_getval(ini, ini->section[i]->key, "runtime", INIVAL_TO_LIST, &val); + conv_strlist(&ctx->tests[z].runtime.environ, LINE_SEP, val); + z++; + } + } + + for (size_t z = 0, i = 0; i < ini->section_count; i++) { + if (startswith(ini->section[i]->key, "deploy:artifactory")) { + // Artifactory base configuration + ini_getval(ini, ini->section[i]->key, "workaround_parent_only", INIVAL_TYPE_BOOL, &val); + 
conv_bool(&ctx->deploy.jfrog[z].upload_ctx.workaround_parent_only, val); + + ini_getval(ini, ini->section[i]->key, "exclusions", INIVAL_TYPE_STR, &val); + conv_str(&ctx->deploy.jfrog[z].upload_ctx.exclusions, val); + + ini_getval(ini, ini->section[i]->key, "explode", INIVAL_TYPE_BOOL, &val); + conv_bool(&ctx->deploy.jfrog[z].upload_ctx.explode, val); + + ini_getval(ini, ini->section[i]->key, "recursive", INIVAL_TYPE_BOOL, &val); + conv_bool(&ctx->deploy.jfrog[z].upload_ctx.recursive, val); + + ini_getval(ini, ini->section[i]->key, "retries", INIVAL_TYPE_INT, &val); + conv_int(&ctx->deploy.jfrog[z].upload_ctx.retries, val); + + ini_getval(ini, ini->section[i]->key, "retry_wait_time", INIVAL_TYPE_INT, &val); + conv_int(&ctx->deploy.jfrog[z].upload_ctx.retry_wait_time, val); + + ini_getval(ini, ini->section[i]->key, "detailed_summary", INIVAL_TYPE_BOOL, &val); + conv_bool(&ctx->deploy.jfrog[z].upload_ctx.detailed_summary, val); + + ini_getval(ini, ini->section[i]->key, "quiet", INIVAL_TYPE_BOOL, &val); + conv_bool(&ctx->deploy.jfrog[z].upload_ctx.quiet, val); + + ini_getval(ini, ini->section[i]->key, "regexp", INIVAL_TYPE_BOOL, &val); + conv_bool(&ctx->deploy.jfrog[z].upload_ctx.regexp, val); + + ini_getval(ini, ini->section[i]->key, "spec", INIVAL_TYPE_STR, &val); + conv_str(&ctx->deploy.jfrog[z].upload_ctx.spec, val); + + ini_getval(ini, ini->section[i]->key, "flat", INIVAL_TYPE_BOOL, &val); + conv_bool(&ctx->deploy.jfrog[z].upload_ctx.flat, val); + + ini_getval(ini, ini->section[i]->key, "repo", INIVAL_TYPE_STR, &val); + conv_str(&ctx->deploy.jfrog[z].repo, val); + + ini_getval(ini, ini->section[i]->key, "dest", INIVAL_TYPE_STR, &val); + conv_str(&ctx->deploy.jfrog[z].dest, val); + + ini_getval(ini, ini->section[i]->key, "files", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&ctx->deploy.jfrog[z].files, LINE_SEP, val); + z++; + } + } + + for (size_t i = 0; i < ini->section_count; i++) { + if (startswith(ini->section[i]->key, "deploy:docker")) { + ini_getval(ini, 
ini->section[i]->key, "registry", INIVAL_TYPE_STR, &val); + conv_str(&ctx->deploy.docker.registry, val); + + ini_getval(ini, ini->section[i]->key, "image_compression", INIVAL_TYPE_STR, &val); + conv_str(&ctx->deploy.docker.image_compression, val); + + ini_getval(ini, ini->section[i]->key, "test_script", INIVAL_TYPE_STR, &val); + conv_str(&ctx->deploy.docker.test_script, val); + + ini_getval(ini, ini->section[i]->key, "build_args", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&ctx->deploy.docker.build_args, LINE_SEP, val); + + ini_getval(ini, ini->section[i]->key, "tags", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&ctx->deploy.docker.tags, LINE_SEP, val); + } + } + return 0; +} + +static int populate_delivery_cfg(struct Delivery *ctx) { + union INIVal val; + struct INIFILE *cfg = ctx->_stasis_ini_fp.cfg; + if (!cfg) { + return -1; + } + ini_getval(cfg, "default", "conda_staging_dir", INIVAL_TYPE_STR, &val); + conv_str(&ctx->storage.conda_staging_dir, val); + ini_getval(cfg, "default", "conda_staging_url", INIVAL_TYPE_STR, &val); + conv_str(&ctx->storage.conda_staging_url, val); + ini_getval(cfg, "default", "wheel_staging_dir", INIVAL_TYPE_STR, &val); + conv_str(&ctx->storage.wheel_staging_dir, val); + ini_getval(cfg, "default", "wheel_staging_url", INIVAL_TYPE_STR, &val); + conv_str(&ctx->storage.wheel_staging_url, val); + ini_getval(cfg, "default", "conda_fresh_start", INIVAL_TYPE_BOOL, &val); + conv_bool(&globals.conda_fresh_start, val); + // Below can also be toggled by command-line arguments + if (!globals.continue_on_error) { + ini_getval(cfg, "default", "continue_on_error", INIVAL_TYPE_BOOL, &val); + conv_bool(&globals.continue_on_error, val); + } + // Below can also be toggled by command-line arguments + if (!globals.always_update_base_environment) { + ini_getval(cfg, "default", "always_update_base_environment", INIVAL_TYPE_BOOL, &val); + conv_bool(&globals.always_update_base_environment, val); + } + ini_getval(cfg, "default", "conda_install_prefix", 
INIVAL_TYPE_STR, &val); + conv_str(&globals.conda_install_prefix, val); + ini_getval(cfg, "default", "conda_packages", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&globals.conda_packages, LINE_SEP, val); + ini_getval(cfg, "default", "pip_packages", INIVAL_TYPE_STR_ARRAY, &val); + conv_strlist(&globals.pip_packages, LINE_SEP, val); + // Configure jfrog cli downloader + ini_getval(cfg, "jfrog_cli_download", "url", INIVAL_TYPE_STR, &val); + conv_str(&globals.jfrog.jfrog_artifactory_base_url, val); + ini_getval(cfg, "jfrog_cli_download", "product", INIVAL_TYPE_STR, &val); + conv_str(&globals.jfrog.jfrog_artifactory_product, val); + ini_getval(cfg, "jfrog_cli_download", "version_series", INIVAL_TYPE_STR, &val); + conv_str(&globals.jfrog.cli_major_ver, val); + ini_getval(cfg, "jfrog_cli_download", "version", INIVAL_TYPE_STR, &val); + conv_str(&globals.jfrog.version, val); + ini_getval(cfg, "jfrog_cli_download", "filename", INIVAL_TYPE_STR, &val); + conv_str(&globals.jfrog.remote_filename, val); + ini_getval(cfg, "deploy:artifactory", "url", INIVAL_TYPE_STR, &val); + conv_str(&globals.jfrog.url, val); + ini_getval(cfg, "deploy:artifactory", "repo", INIVAL_TYPE_STR, &val); + conv_str(&globals.jfrog.repo, val); + return 0; +} + +static int populate_info(struct Delivery *ctx) { + if (!ctx->info.time_str_epoch) { + // Record timestamp used for release + time(&ctx->info.time_now); + ctx->info.time_info = localtime(&ctx->info.time_now); + + ctx->info.time_str_epoch = calloc(STASIS_TIME_STR_MAX, sizeof(*ctx->info.time_str_epoch)); + if (!ctx->info.time_str_epoch) { + msg(STASIS_MSG_ERROR, "Unable to allocate memory for Unix epoch string\n"); + return -1; + } + snprintf(ctx->info.time_str_epoch, STASIS_TIME_STR_MAX - 1, "%li", ctx->info.time_now); + } + return 0; +} + +int *bootstrap_build_info(struct Delivery *ctx) { + struct Delivery local; + memset(&local, 0, sizeof(local)); + local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path); + local._stasis_ini_fp.delivery = 
ini_open(ctx->_stasis_ini_fp.delivery_path); + delivery_init_platform(&local); + populate_delivery_cfg(&local); + populate_delivery_ini(&local); + populate_info(&local); + ctx->info.build_name = strdup(local.info.build_name); + ctx->info.build_number = strdup(local.info.build_number); + ctx->info.release_name = strdup(local.info.release_name); + memcpy(&ctx->info.time_info, &local.info.time_info, sizeof(ctx->info.time_info)); + ctx->info.time_now = local.info.time_now; + ctx->info.time_str_epoch = strdup(local.info.time_str_epoch); + delivery_free(&local); + return 0; +} + +int delivery_init(struct Delivery *ctx) { + populate_info(ctx); + populate_delivery_cfg(ctx); + + // Set artifactory URL via environment variable if possible + char *jfurl = getenv("STASIS_JF_ARTIFACTORY_URL"); + if (jfurl) { + if (globals.jfrog.url) { + guard_free(globals.jfrog.url); + } + globals.jfrog.url = strdup(jfurl); + } + + // Set artifactory repository via environment if possible + char *jfrepo = getenv("STASIS_JF_REPO"); + if (jfrepo) { + if (globals.jfrog.repo) { + guard_free(globals.jfrog.repo); + } + globals.jfrog.repo = strdup(jfrepo); + } + + // Configure architecture and platform information + delivery_init_platform(ctx); + + // Create STASIS directory structure + delivery_init_dirs_stage1(ctx); + + char config_local[PATH_MAX]; + sprintf(config_local, "%s/%s", ctx->storage.tmpdir, "config"); + setenv("XDG_CONFIG_HOME", config_local, 1); + + char cache_local[PATH_MAX]; + sprintf(cache_local, "%s/%s", ctx->storage.tmpdir, "cache"); + setenv("XDG_CACHE_HOME", ctx->storage.tmpdir, 1); + + // add tools to PATH + char pathvar_tmp[STASIS_BUFSIZ]; + sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.tools_dir, getenv("PATH")); + setenv("PATH", pathvar_tmp, 1); + + // Prevent git from paginating output + setenv("GIT_PAGER", "", 1); + + populate_delivery_ini(ctx); + + if (ctx->deploy.docker.tags) { + for (size_t i = 0; i < strlist_count(ctx->deploy.docker.tags); i++) { + char *item = 
strlist_item(ctx->deploy.docker.tags, i); + tolower_s(item); + } + } + + if (ctx->deploy.docker.image_compression) { + if (docker_validate_compression_program(ctx->deploy.docker.image_compression)) { + SYSERROR("[deploy:docker].image_compression - invalid command / program is not installed: %s", ctx->deploy.docker.image_compression); + return -1; + } + } + return 0; +} + +int delivery_format_str(struct Delivery *ctx, char **dest, const char *fmt) { + size_t fmt_len = strlen(fmt); + + if (!*dest) { + *dest = calloc(STASIS_NAME_MAX, sizeof(**dest)); + if (!*dest) { + return -1; + } + } + + for (size_t i = 0; i < fmt_len; i++) { + if (fmt[i] == '%' && strlen(&fmt[i])) { + i++; + switch (fmt[i]) { + case 'n': // name + strcat(*dest, ctx->meta.name); + break; + case 'c': // codename + strcat(*dest, ctx->meta.codename); + break; + case 'm': // mission + strcat(*dest, ctx->meta.mission); + break; + case 'r': // revision + sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc); + break; + case 'R': // "final"-aware revision + if (ctx->meta.final) + strcat(*dest, "final"); + else + sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc); + break; + case 'v': // version + strcat(*dest, ctx->meta.version); + break; + case 'P': // python version + strcat(*dest, ctx->meta.python); + break; + case 'p': // python version major/minor + strcat(*dest, ctx->meta.python_compact); + break; + case 'a': // system architecture name + strcat(*dest, ctx->system.arch); + break; + case 'o': // system platform (OS) name + strcat(*dest, ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); + break; + case 't': // unix epoch + sprintf(*dest + strlen(*dest), "%ld", ctx->info.time_now); + break; + default: // unknown formatter, write as-is + sprintf(*dest + strlen(*dest), "%c%c", fmt[i - 1], fmt[i]); + break; + } + } else { // write non-format text + sprintf(*dest + strlen(*dest), "%c", fmt[i]); + } + } + return 0; +} + +void delivery_debug_show(struct Delivery *ctx) { + printf("\n====DEBUG====\n"); + 
printf("%-20s %-10s\n", "System configuration directory:", globals.sysconfdir); + printf("%-20s %-10s\n", "Mission directory:", ctx->storage.mission_dir); + printf("%-20s %-10s\n", "Testing enabled:", globals.enable_testing ? "Yes" : "No"); + printf("%-20s %-10s\n", "Docker image builds enabled:", globals.enable_docker ? "Yes" : "No"); + printf("%-20s %-10s\n", "Artifact uploading enabled:", globals.enable_artifactory ? "Yes" : "No"); +} + +void delivery_meta_show(struct Delivery *ctx) { + if (globals.verbose) { + delivery_debug_show(ctx); + } + + printf("\n====DELIVERY====\n"); + printf("%-20s %-10s\n", "Target Python:", ctx->meta.python); + printf("%-20s %-10s\n", "Name:", ctx->meta.name); + printf("%-20s %-10s\n", "Mission:", ctx->meta.mission); + if (ctx->meta.codename) { + printf("%-20s %-10s\n", "Codename:", ctx->meta.codename); + } + if (ctx->meta.version) { + printf("%-20s %-10s\n", "Version", ctx->meta.version); + } + if (!ctx->meta.final) { + printf("%-20s %-10d\n", "RC Level:", ctx->meta.rc); + } + printf("%-20s %-10s\n", "Final Release:", ctx->meta.final ? "Yes" : "No"); + printf("%-20s %-10s\n", "Based On:", ctx->meta.based_on ? 
ctx->meta.based_on : "New"); +} + +void delivery_conda_show(struct Delivery *ctx) { + printf("\n====CONDA====\n"); + printf("%-20s %-10s\n", "Prefix:", ctx->storage.conda_install_prefix); + + puts("Native Packages:"); + if (strlist_count(ctx->conda.conda_packages)) { + for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) { + char *token = strlist_item(ctx->conda.conda_packages, i); + if (isempty(token) || isblank(*token) || startswith(token, "-")) { + continue; + } + printf("%21s%s\n", "", token); + } + } else { + printf("%21s%s\n", "", "N/A"); + } + + puts("Python Packages:"); + if (strlist_count(ctx->conda.pip_packages)) { + for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) { + char *token = strlist_item(ctx->conda.pip_packages, i); + if (isempty(token) || isblank(*token) || startswith(token, "-")) { + continue; + } + printf("%21s%s\n", "", token); + } + } else { + printf("%21s%s\n", "", "N/A"); + } +} + +void delivery_tests_show(struct Delivery *ctx) { + printf("\n====TESTS====\n"); + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + if (!ctx->tests[i].name) { + continue; + } + printf("%-20s %-20s %s\n", ctx->tests[i].name, + ctx->tests[i].version, + ctx->tests[i].repository); + } +} + +void delivery_runtime_show(struct Delivery *ctx) { + printf("\n====RUNTIME====\n"); + struct StrList *rt = NULL; + rt = strlist_copy(ctx->runtime.environ); + if (!rt) { + // no data + return; + } + strlist_sort(rt, STASIS_SORT_ALPHA); + size_t total = strlist_count(rt); + for (size_t i = 0; i < total; i++) { + char *item = strlist_item(rt, i); + if (!item) { + // not supposed to occur + msg(STASIS_MSG_WARN | STASIS_MSG_L1, "Encountered unexpected NULL at record %zu of %zu of runtime array.\n", i); + return; + } + printf("%s\n", item); + } +} + +int delivery_build_recipes(struct Delivery *ctx) { + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + char *recipe_dir = NULL; + if 
(ctx->tests[i].build_recipe) { // build a conda recipe + int recipe_type; + int status; + if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests[i].build_recipe, NULL, &recipe_dir)) { + fprintf(stderr, "Encountered an issue while cloning recipe for: %s\n", ctx->tests[i].name); + return -1; + } + recipe_type = recipe_get_type(recipe_dir); + pushd(recipe_dir); + { + if (RECIPE_TYPE_ASTROCONDA == recipe_type) { + pushd(path_basename(ctx->tests[i].repository)); + } else if (RECIPE_TYPE_CONDA_FORGE == recipe_type) { + pushd("recipe"); + } + + char recipe_version[100]; + char recipe_buildno[100]; + char recipe_git_url[PATH_MAX]; + char recipe_git_rev[PATH_MAX]; + + //sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}"); + //sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository); + //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version); + // TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^ + sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? ctx->tests[i].repository_info_tag : ctx->tests[i].version); + sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository); + strcpy(recipe_git_rev, ""); + sprintf(recipe_buildno, " number: 0"); + + unsigned flags = REPLACE_TRUNCATE_AFTER_MATCH; + //file_replace_text("meta.yaml", "{% set version = ", recipe_version); + if (ctx->meta.final) { + sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].version); + // TODO: replace sha256 of tagged archive + // TODO: leave the recipe unchanged otherwise. in theory this should produce the same conda package hash as conda forge. 
+ // For now, remove the sha256 requirement + file_replace_text("meta.yaml", "sha256:", "\n", flags); + } else { + file_replace_text("meta.yaml", "{% set version = ", recipe_version, flags); + file_replace_text("meta.yaml", " url:", recipe_git_url, flags); + //file_replace_text("meta.yaml", "sha256:", recipe_git_rev); + file_replace_text("meta.yaml", " sha256:", "\n", flags); + file_replace_text("meta.yaml", " number:", recipe_buildno, flags); + } + + char command[PATH_MAX]; + sprintf(command, "mambabuild --python=%s .", ctx->meta.python); + status = conda_exec(command); + if (status) { + return -1; + } + + if (RECIPE_TYPE_GENERIC != recipe_type) { + popd(); + } + popd(); + } + } + if (recipe_dir) { + guard_free(recipe_dir); + } + } + return 0; +} + +static int filter_repo_tags(char *repo, struct StrList *patterns) { + int result = 0; + + if (!pushd(repo)) { + int list_status = 0; + char *tags_raw = shell_output("git tag -l", &list_status); + struct StrList *tags = strlist_init(); + strlist_append_tokenize(tags, tags_raw, LINE_SEP); + + for (size_t i = 0; tags && i < strlist_count(tags); i++) { + char *tag = strlist_item(tags, i); + for (size_t p = 0; p < strlist_count(patterns); p++) { + char *pattern = strlist_item(patterns, p); + int match = fnmatch(pattern, tag, 0); + if (!match) { + char cmd[PATH_MAX] = {0}; + sprintf(cmd, "git tag -d %s", tag); + result += system(cmd); + break; + } + } + } + guard_strlist_free(&tags); + guard_free(tags_raw); + popd(); + } else { + result = -1; + } + return result; +} + +struct StrList *delivery_build_wheels(struct Delivery *ctx) { + struct StrList *result = NULL; + struct Process proc; + memset(&proc, 0, sizeof(proc)); + + result = strlist_init(); + if (!result) { + perror("unable to allocate memory for string list"); + return NULL; + } + + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + if (!ctx->tests[i].build_recipe && ctx->tests[i].repository) { // build from source + char srcdir[PATH_MAX]; + 
char wheeldir[PATH_MAX]; + memset(srcdir, 0, sizeof(srcdir)); + memset(wheeldir, 0, sizeof(wheeldir)); + + sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests[i].name); + git_clone(&proc, ctx->tests[i].repository, srcdir, ctx->tests[i].version); + + if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { + filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags); + } + + pushd(srcdir); + { + char dname[NAME_MAX]; + char outdir[PATH_MAX]; + char cmd[PATH_MAX * 2]; + memset(dname, 0, sizeof(dname)); + memset(outdir, 0, sizeof(outdir)); + memset(cmd, 0, sizeof(outdir)); + + strcpy(dname, ctx->tests[i].name); + tolower_s(dname); + sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname); + if (mkdirs(outdir, 0755)) { + fprintf(stderr, "failed to create output directory: %s\n", outdir); + } + + sprintf(cmd, "-m build -w -o %s", outdir); + if (python_exec(cmd)) { + fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name, ctx->tests[i].version); + strlist_free(&result); + return NULL; + } + popd(); + } + } + } + return result; +} + +static const struct Test *requirement_from_test(struct Delivery *ctx, const char *name) { + struct Test *result; + + result = NULL; + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + if (strstr(name, ctx->tests[i].name)) { + result = &ctx->tests[i]; + break; + } + } + return result; +} + +int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, char *env_name, int type, struct StrList **manifest) { + char cmd[PATH_MAX]; + char pkgs[STASIS_BUFSIZ]; + char *env_current = getenv("CONDA_DEFAULT_ENV"); + + if (env_current) { + // The requested environment is not the current environment + if (strcmp(env_current, env_name) != 0) { + // Activate the requested environment + printf("Activating: %s\n", env_name); + conda_activate(conda_install_dir, env_name); + runtime_replace(&ctx->runtime.environ, 
__environ); + } + } + + memset(cmd, 0, sizeof(cmd)); + memset(pkgs, 0, sizeof(pkgs)); + strcat(cmd, "install"); + + typedef int (*Runner)(const char *); + Runner runner = NULL; + if (INSTALL_PKG_CONDA & type) { + runner = conda_exec; + } else if (INSTALL_PKG_PIP & type) { + runner = pip_exec; + } + + if (INSTALL_PKG_CONDA_DEFERRED & type) { + strcat(cmd, " --use-local"); + } else if (INSTALL_PKG_PIP_DEFERRED & type) { + // Don't change the baseline package set unless we're working with a + // new build. Release candidates will need to keep packages as stable + // as possible between releases. + if (!ctx->meta.based_on) { + strcat(cmd, " --upgrade"); + } + sprintf(cmd + strlen(cmd), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir); + } + + for (size_t x = 0; manifest[x] != NULL; x++) { + char *name = NULL; + for (size_t p = 0; p < strlist_count(manifest[x]); p++) { + name = strlist_item(manifest[x], p); + strip(name); + if (!strlen(name)) { + continue; + } + if (INSTALL_PKG_PIP_DEFERRED & type) { + const struct Test *info = requirement_from_test(ctx, name); + if (info) { + sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, info->version); + } else { + fprintf(stderr, "Deferred package '%s' is not present in the tested package list!\n", name); + return -1; + } + } else { + if (startswith(name, "--") || startswith(name, "-")) { + sprintf(cmd + strlen(cmd), " %s", name); + } else { + sprintf(cmd + strlen(cmd), " '%s'", name); + } + } + } + int status = runner(cmd); + if (status) { + return status; + } + } + return 0; +} + +void delivery_get_installer_url(struct Delivery *ctx, char *result) { + if (ctx->conda.installer_version) { + // Use version specified by configuration file + sprintf(result, "%s/%s-%s-%s-%s.sh", ctx->conda.installer_baseurl, + ctx->conda.installer_name, + ctx->conda.installer_version, + ctx->conda.installer_platform, + ctx->conda.installer_arch); + } else { + // Use latest installer + sprintf(result, "%s/%s-%s-%s.sh", 
ctx->conda.installer_baseurl, + ctx->conda.installer_name, + ctx->conda.installer_platform, + ctx->conda.installer_arch); + } + +} + +int delivery_get_installer(struct Delivery *ctx, char *installer_url) { + char script_path[PATH_MAX]; + char *installer = path_basename(installer_url); + + memset(script_path, 0, sizeof(script_path)); + sprintf(script_path, "%s/%s", ctx->storage.tmpdir, installer); + if (access(script_path, F_OK)) { + // Script doesn't exist + int fetch_status = download(installer_url, script_path, NULL); + if (HTTP_ERROR(fetch_status) || fetch_status < 0) { + // download failed + return -1; + } + } else { + msg(STASIS_MSG_RESTRICT | STASIS_MSG_L3, "Skipped, installer already exists\n", script_path); + } + + ctx->conda.installer_path = strdup(script_path); + if (!ctx->conda.installer_path) { + SYSERROR("Unable to duplicate script_path: '%s'", script_path); + return -1; + } + + return 0; +} + +int delivery_copy_conda_artifacts(struct Delivery *ctx) { + char cmd[STASIS_BUFSIZ]; + char conda_build_dir[PATH_MAX]; + char subdir[PATH_MAX]; + memset(cmd, 0, sizeof(cmd)); + memset(conda_build_dir, 0, sizeof(conda_build_dir)); + memset(subdir, 0, sizeof(subdir)); + + sprintf(conda_build_dir, "%s/%s", ctx->storage.conda_install_prefix, "conda-bld"); + // One must run conda build at least once to create the "conda-bld" directory. + // When this directory is missing there can be no build artifacts. 
+ if (access(conda_build_dir, F_OK) < 0) { + msg(STASIS_MSG_RESTRICT | STASIS_MSG_WARN | STASIS_MSG_L3, + "Skipped: 'conda build' has never been executed.\n"); + return 0; + } + + snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/%s %s", + conda_build_dir, + ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], + ctx->storage.conda_artifact_dir); + + return system(cmd); +} + +int delivery_copy_wheel_artifacts(struct Delivery *ctx) { + char cmd[PATH_MAX]; + memset(cmd, 0, sizeof(cmd)); + snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/*/dist/*.whl %s", + ctx->storage.build_sources_dir, + ctx->storage.wheel_artifact_dir); + return system(cmd); +} + +int delivery_index_wheel_artifacts(struct Delivery *ctx) { + struct dirent *rec; + DIR *dp; + FILE *top_fp; + + dp = opendir(ctx->storage.wheel_artifact_dir); + if (!dp) { + return -1; + } + + // Generate a "dumb" local pypi index that is compatible with: + // pip install --extra-index-url + char top_index[PATH_MAX]; + memset(top_index, 0, sizeof(top_index)); + sprintf(top_index, "%s/index.html", ctx->storage.wheel_artifact_dir); + top_fp = fopen(top_index, "w+"); + if (!top_fp) { + return -2; + } + + while ((rec = readdir(dp)) != NULL) { + // skip directories + if (DT_REG == rec->d_type || !strcmp(rec->d_name, "..") || !strcmp(rec->d_name, ".")) { + continue; + } + + FILE *bottom_fp; + char bottom_index[PATH_MAX * 2]; + memset(bottom_index, 0, sizeof(bottom_index)); + sprintf(bottom_index, "%s/%s/index.html", ctx->storage.wheel_artifact_dir, rec->d_name); + bottom_fp = fopen(bottom_index, "w+"); + if (!bottom_fp) { + return -3; + } + + if (globals.verbose) { + printf("+ %s\n", rec->d_name); + } + // Add record to top level index + fprintf(top_fp, "%s
\n", rec->d_name, rec->d_name); + + char dpath[PATH_MAX * 2]; + memset(dpath, 0, sizeof(dpath)); + sprintf(dpath, "%s/%s", ctx->storage.wheel_artifact_dir, rec->d_name); + struct StrList *packages = listdir(dpath); + if (!packages) { + fclose(top_fp); + fclose(bottom_fp); + return -4; + } + + for (size_t i = 0; i < strlist_count(packages); i++) { + char *package = strlist_item(packages, i); + if (!endswith(package, ".whl")) { + continue; + } + if (globals.verbose) { + printf("`- %s\n", package); + } + // Write record to bottom level index + fprintf(bottom_fp, "%s
\n", package, package); + } + fclose(bottom_fp); + + guard_strlist_free(&packages); + } + closedir(dp); + fclose(top_fp); + return 0; +} + +void delivery_install_conda(char *install_script, char *conda_install_dir) { + struct Process proc; + memset(&proc, 0, sizeof(proc)); + + if (globals.conda_fresh_start) { + if (!access(conda_install_dir, F_OK)) { + // directory exists so remove it + if (rmtree(conda_install_dir)) { + perror("unable to remove previous installation"); + exit(1); + } + + // Proceed with the installation + // -b = batch mode (non-interactive) + char cmd[255] = {0}; + snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s", + find_program("bash"), + install_script, + conda_install_dir); + if (shell_safe(&proc, cmd)) { + fprintf(stderr, "conda installation failed\n"); + exit(1); + } + } + } else { + msg(STASIS_MSG_L3, "Conda removal disabled by configuration\n"); + } +} + +void delivery_conda_enable(struct Delivery *ctx, char *conda_install_dir) { + if (conda_activate(conda_install_dir, "base")) { + fprintf(stderr, "conda activation failed\n"); + exit(1); + } + + // Setting the CONDARC environment variable appears to be the only consistent + // way to make sure the file is used. Not setting this variable leads to strange + // behavior, especially if a conda environment is already active when STASIS is loaded. 
+ char rcpath[PATH_MAX]; + sprintf(rcpath, "%s/%s", conda_install_dir, ".condarc"); + setenv("CONDARC", rcpath, 1); + if (runtime_replace(&ctx->runtime.environ, __environ)) { + perror("unable to replace runtime environment after activating conda"); + exit(1); + } + + conda_setup_headless(); +} + +void delivery_defer_packages(struct Delivery *ctx, int type) { + struct StrList *dataptr = NULL; + struct StrList *deferred = NULL; + char *name = NULL; + char cmd[PATH_MAX]; + + memset(cmd, 0, sizeof(cmd)); + + char mode[10]; + if (DEFER_CONDA == type) { + dataptr = ctx->conda.conda_packages; + deferred = ctx->conda.conda_packages_defer; + strcpy(mode, "conda"); + } else if (DEFER_PIP == type) { + dataptr = ctx->conda.pip_packages; + deferred = ctx->conda.pip_packages_defer; + strcpy(mode, "pip"); + } + msg(STASIS_MSG_L2, "Filtering %s packages by test definition...\n", mode); + + struct StrList *filtered = NULL; + filtered = strlist_init(); + for (size_t i = 0, z = 0; i < strlist_count(dataptr); i++) { + int ignore_pkg = 0; + + name = strlist_item(dataptr, i); + if (!strlen(name) || isblank(*name) || isspace(*name)) { + // no data + continue; + } + msg(STASIS_MSG_L3, "package '%s': ", name); + + // Compile a list of packages that are *also* to be tested. 
+ char *version; + for (size_t x = 0; x < sizeof(ctx->tests) / sizeof(ctx->tests[0]); x++) { + version = NULL; + if (ctx->tests[x].name) { + if (strstr(name, ctx->tests[x].name)) { + version = ctx->tests[x].version; + ignore_pkg = 1; + z++; + break; + } + } + } + + if (ignore_pkg) { + char build_at[PATH_MAX]; + if (DEFER_CONDA == type) { + sprintf(build_at, "%s=%s", name, version); + name = build_at; + } + + printf("BUILD FOR HOST\n"); + strlist_append(&deferred, name); + } else { + printf("USE EXISTING\n"); + strlist_append(&filtered, name); + } + } + + if (!strlist_count(deferred)) { + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No %s packages were filtered by test definitions\n", mode); + } else { + if (DEFER_CONDA == type) { + strlist_free(&ctx->conda.conda_packages); + ctx->conda.conda_packages = strlist_copy(filtered); + } else if (DEFER_PIP == type) { + strlist_free(&ctx->conda.pip_packages); + ctx->conda.pip_packages = strlist_copy(filtered); + } + } + if (filtered) { + strlist_free(&filtered); + } +} + +const char *release_header = "# delivery_name: %s\n" + "# delivery_fmt: %s\n" + "# creation_time: %s\n" + "# conda_ident: %s\n" + "# conda_build_ident: %s\n"; + +char *delivery_get_release_header(struct Delivery *ctx) { + char output[STASIS_BUFSIZ]; + char stamp[100]; + strftime(stamp, sizeof(stamp) - 1, "%c", ctx->info.time_info); + sprintf(output, release_header, + ctx->info.release_name, + ctx->rules.release_fmt, + stamp, + ctx->conda.tool_version, + ctx->conda.tool_build_version); + return strdup(output); +} + +int delivery_dump_metadata(struct Delivery *ctx) { + FILE *fp; + char filename[PATH_MAX]; + sprintf(filename, "%s/meta-%s.stasis", ctx->storage.meta_dir, ctx->info.release_name); + fp = fopen(filename, "w+"); + if (!fp) { + return -1; + } + if (globals.verbose) { + printf("%s\n", filename); + } + fprintf(fp, "name %s\n", ctx->meta.name); + fprintf(fp, "version %s\n", ctx->meta.version); + fprintf(fp, "rc %d\n", ctx->meta.rc); + fprintf(fp, "python 
%s\n", ctx->meta.python); + fprintf(fp, "python_compact %s\n", ctx->meta.python_compact); + fprintf(fp, "mission %s\n", ctx->meta.mission); + fprintf(fp, "codename %s\n", ctx->meta.codename ? ctx->meta.codename : ""); + fprintf(fp, "platform %s %s %s %s\n", + ctx->system.platform[DELIVERY_PLATFORM], + ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], + ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], + ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); + fprintf(fp, "arch %s\n", ctx->system.arch); + fprintf(fp, "time %s\n", ctx->info.time_str_epoch); + fprintf(fp, "release_fmt %s\n", ctx->rules.release_fmt); + fprintf(fp, "release_name %s\n", ctx->info.release_name); + fprintf(fp, "build_name_fmt %s\n", ctx->rules.build_name_fmt); + fprintf(fp, "build_name %s\n", ctx->info.build_name); + fprintf(fp, "build_number_fmt %s\n", ctx->rules.build_number_fmt); + fprintf(fp, "build_number %s\n", ctx->info.build_number); + fprintf(fp, "conda_installer_baseurl %s\n", ctx->conda.installer_baseurl); + fprintf(fp, "conda_installer_name %s\n", ctx->conda.installer_name); + fprintf(fp, "conda_installer_version %s\n", ctx->conda.installer_version); + fprintf(fp, "conda_installer_platform %s\n", ctx->conda.installer_platform); + fprintf(fp, "conda_installer_arch %s\n", ctx->conda.installer_arch); + + fclose(fp); + return 0; +} + +void delivery_rewrite_spec(struct Delivery *ctx, char *filename, unsigned stage) { + char output[PATH_MAX]; + char *header = NULL; + char *tempfile = NULL; + FILE *tp = NULL; + + if (stage == DELIVERY_REWRITE_SPEC_STAGE_1) { + header = delivery_get_release_header(ctx); + if (!header) { + msg(STASIS_MSG_ERROR, "failed to generate release header string\n", filename); + exit(1); + } + tempfile = xmkstemp(&tp, "w+"); + if (!tempfile || !tp) { + msg(STASIS_MSG_ERROR, "%s: unable to create temporary file\n", strerror(errno)); + exit(1); + } + fprintf(tp, "%s", header); + + // Read the original file + char **contents = file_readlines(filename, 0, 0, 
NULL);
+        if (!contents) {
+            msg(STASIS_MSG_ERROR, "unable to read %s\n", filename);
+            exit(1);
+        }
+
+        // Write temporary data
+        for (size_t i = 0; contents[i] != NULL; i++) {
+            if (startswith(contents[i], "channels:")) {
+                // Allow for additional conda channel injection
+                if (ctx->conda.conda_packages_defer && strlist_count(ctx->conda.conda_packages_defer)) {
+                    fprintf(tp, "%s - @CONDA_CHANNEL@\n", contents[i]);
+                    continue;
+                }
+            } else if (strstr(contents[i], "- pip:")) {
+                if (ctx->conda.pip_packages_defer && strlist_count(ctx->conda.pip_packages_defer)) {
+                    // Allow for additional pip argument injection
+                    fprintf(tp, "%s - @PIP_ARGUMENTS@\n", contents[i]);
+                    continue;
+                }
+            } else if (startswith(contents[i], "prefix:")) {
+                // Remove the prefix key
+                if (strstr(contents[i], "/") || strstr(contents[i], "\\")) {
+                    // path is on the same line as the key
+                    continue;
+                } else {
+                    // path is on the next line?
+                    if (contents[i + 1] && (strstr(contents[i + 1], "/") || strstr(contents[i + 1], "\\"))) {
+                        i++;
+                    }
+                    continue;
+                }
+            }
+            fprintf(tp, "%s", contents[i]);
+        }
+        GENERIC_ARRAY_FREE(contents);
+        guard_free(header);
+        fflush(tp);
+        fclose(tp);
+
+        // Replace the original file with our temporary data
+        if (copy2(tempfile, filename, CT_PERM) < 0) {
+            fprintf(stderr, "%s: could not rename '%s' to '%s'\n", strerror(errno), tempfile, filename);
+            exit(1);
+        }
+        remove(tempfile);
+        guard_free(tempfile);
+    } else if (stage == DELIVERY_REWRITE_SPEC_STAGE_2) {
+        // Replace "local" channel with the staging URL
+        if (ctx->storage.conda_staging_url) {
+            file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_staging_url, 0);
+        } else if (globals.jfrog.repo) {
+            sprintf(output, "%s/%s/%s/%s/packages/conda", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name);
+            file_replace_text(filename, "@CONDA_CHANNEL@", output, 0);
+        } else {
+            msg(STASIS_MSG_WARN, "conda_staging_url is not configured\n");
+            file_replace_text(filename, " - 
@CONDA_CHANNEL@", "", 0); + } + + if (ctx->storage.wheel_staging_url) { + file_replace_text(filename, "@PIP_ARGUMENTS@", ctx->storage.wheel_staging_url, 0); + } else if (globals.jfrog.repo) { + sprintf(output, "--extra-index-url %s/%s/%s/%s/packages/wheels", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name); + file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0); + } + } +} + +int delivery_index_conda_artifacts(struct Delivery *ctx) { + return conda_index(ctx->storage.conda_artifact_dir); +} + +void delivery_tests_run(struct Delivery *ctx) { + struct Process proc; + memset(&proc, 0, sizeof(proc)); + + if (!ctx->tests[0].name) { + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "no tests are defined!\n"); + } else { + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + if (!ctx->tests[i].name && !ctx->tests[i].repository && !ctx->tests[i].script) { + // skip unused test records + continue; + } + msg(STASIS_MSG_L2, "Executing tests for %s %s\n", ctx->tests[i].name, ctx->tests[i].version); + if (!ctx->tests[i].script || !strlen(ctx->tests[i].script)) { + msg(STASIS_MSG_WARN | STASIS_MSG_L3, "Nothing to do. 
To fix, declare a 'script' in section: [test:%s]\n", + ctx->tests[i].name); + continue; + } + + char destdir[PATH_MAX]; + sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(ctx->tests[i].repository)); + + if (!access(destdir, F_OK)) { + msg(STASIS_MSG_L3, "Purging repository %s\n", destdir); + if (rmtree(destdir)) { + COE_CHECK_ABORT(1, "Unable to remove repository\n"); + } + } + msg(STASIS_MSG_L3, "Cloning repository %s\n", ctx->tests[i].repository); + if (!git_clone(&proc, ctx->tests[i].repository, destdir, ctx->tests[i].version)) { + ctx->tests[i].repository_info_tag = strdup(git_describe(destdir)); + ctx->tests[i].repository_info_ref = strdup(git_rev_parse(destdir, "HEAD")); + } else { + COE_CHECK_ABORT(1, "Unable to clone repository\n"); + } + + if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { + filter_repo_tags(destdir, ctx->tests[i].repository_remove_tags); + } + + if (pushd(destdir)) { + COE_CHECK_ABORT(1, "Unable to enter repository directory\n"); + } else { +#if 1 + int status; + char cmd[PATH_MAX]; + + msg(STASIS_MSG_L3, "Testing %s\n", ctx->tests[i].name); + memset(&proc, 0, sizeof(proc)); + + // Apply workaround for tox positional arguments + char *toxconf = NULL; + if (!access("tox.ini", F_OK)) { + if (!fix_tox_conf("tox.ini", &toxconf)) { + msg(STASIS_MSG_L3, "Fixing tox positional arguments\n"); + if (!globals.workaround.tox_posargs) { + globals.workaround.tox_posargs = calloc(PATH_MAX, sizeof(*globals.workaround.tox_posargs)); + } else { + memset(globals.workaround.tox_posargs, 0, PATH_MAX); + } + snprintf(globals.workaround.tox_posargs, PATH_MAX - 1, "-c %s --root .", toxconf); + } + } + + // enable trace mode before executing each test script + memset(cmd, 0, sizeof(cmd)); + sprintf(cmd, "set -x ; %s", ctx->tests[i].script); + + char *cmd_rendered = tpl_render(cmd); + if (cmd_rendered) { + if (strcmp(cmd_rendered, cmd) != 0) { + strcpy(cmd, cmd_rendered); + } + 
guard_free(cmd_rendered);
+                }
+
+                status = shell(&proc, cmd);
+                if (status) {
+                    msg(STASIS_MSG_ERROR, "Script failure: %s\n%s\n\nExit code: %d\n", ctx->tests[i].name, ctx->tests[i].script, status);
+                    COE_CHECK_ABORT(1, "Test failure");
+                }
+
+                if (toxconf) {
+                    remove(toxconf);
+                    guard_free(toxconf);
+                }
+                popd();
+#else
+                msg(STASIS_MSG_WARN | STASIS_MSG_L3, "TESTING DISABLED BY CODE!\n");
+#endif
+            }
+        }
+    }
+}
+
+void delivery_gather_tool_versions(struct Delivery *ctx) {
+    int status = 0;
+
+    // Extract version from tool output
+    ctx->conda.tool_version = shell_output("conda --version", &status);
+    if (ctx->conda.tool_version)
+        strip(ctx->conda.tool_version);
+
+    ctx->conda.tool_build_version = shell_output("conda build --version", &status);
+    if (ctx->conda.tool_build_version)
+        strip(ctx->conda.tool_build_version);
+}
+
+int delivery_init_artifactory(struct Delivery *ctx) {
+    int status = 0;
+    char dest[PATH_MAX] = {0};
+    char filepath[PATH_MAX] = {0};
+    snprintf(dest, sizeof(dest) - 1, "%s/bin", ctx->storage.tools_dir);
+    snprintf(filepath, sizeof(filepath) - 1, "%s/bin/jf", ctx->storage.tools_dir);
+
+    if (!access(filepath, F_OK)) {
+        // already have it
+        msg(STASIS_MSG_L3, "Skipped download, %s already exists\n", filepath);
+        goto delivery_init_artifactory_envsetup;
+    }
+
+    char *platform = ctx->system.platform[DELIVERY_PLATFORM];
+    msg(STASIS_MSG_L3, "Downloading %s for %s %s\n", globals.jfrog.remote_filename, platform, ctx->system.arch);
+    if ((status = artifactory_download_cli(dest,
+                                           globals.jfrog.jfrog_artifactory_base_url,
+                                           globals.jfrog.jfrog_artifactory_product,
+                                           globals.jfrog.cli_major_ver,
+                                           globals.jfrog.version,
+                                           platform,
+                                           ctx->system.arch,
+                                           globals.jfrog.remote_filename))) {
+        remove(filepath);
+    }
+
+    delivery_init_artifactory_envsetup:
+    // CI (ridiculously generic, why?) 
disables interactive prompts and progress bar output + setenv("CI", "1", 1); + + // JFROG_CLI_HOME_DIR is where .jfrog is stored + char path[PATH_MAX] = {0}; + snprintf(path, sizeof(path) - 1, "%s/.jfrog", ctx->storage.build_dir); + setenv("JFROG_CLI_HOME_DIR", path, 1); + + // JFROG_CLI_TEMP_DIR is where the obvious is stored + setenv("JFROG_CLI_TEMP_DIR", ctx->storage.tmpdir, 1); + return status; +} + +int delivery_artifact_upload(struct Delivery *ctx) { + int status = 0; + + if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) { + fprintf(stderr, "Failed to initialize Artifactory authentication context\n"); + return -1; + } + + for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(*ctx->deploy.jfrog); i++) { + if (!ctx->deploy.jfrog[i].files || !ctx->deploy.jfrog[i].dest) { + break; + } + jfrt_upload_init(&ctx->deploy.jfrog[i].upload_ctx); + + if (!globals.jfrog.repo) { + msg(STASIS_MSG_WARN, "Artifactory repository path is not configured!\n"); + fprintf(stderr, "set STASIS_JF_REPO environment variable...\nOr append to configuration file:\n\n"); + fprintf(stderr, "[deploy:artifactory]\nrepo = example/generic/repo/path\n\n"); + status++; + break; + } else if (!ctx->deploy.jfrog[i].repo) { + ctx->deploy.jfrog[i].repo = strdup(globals.jfrog.repo); + } + + if (!ctx->deploy.jfrog[i].repo || isempty(ctx->deploy.jfrog[i].repo) || !strlen(ctx->deploy.jfrog[i].repo)) { + // Unlikely to trigger if the config parser is working correctly + msg(STASIS_MSG_ERROR, "Artifactory repository path is empty. 
Cannot continue.\n"); + status++; + break; + } + + ctx->deploy.jfrog[i].upload_ctx.workaround_parent_only = true; + ctx->deploy.jfrog[i].upload_ctx.build_name = ctx->info.build_name; + ctx->deploy.jfrog[i].upload_ctx.build_number = ctx->info.build_number; + + char files[PATH_MAX]; + char dest[PATH_MAX]; // repo + remote dir + + if (jfrog_cli_rt_ping(&ctx->deploy.jfrog_auth)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to contact artifactory server: %s\n", ctx->deploy.jfrog_auth.url); + return -1; + } + + if (strlist_count(ctx->deploy.jfrog[i].files)) { + for (size_t f = 0; f < strlist_count(ctx->deploy.jfrog[i].files); f++) { + memset(dest, 0, sizeof(dest)); + memset(files, 0, sizeof(files)); + snprintf(dest, sizeof(dest) - 1, "%s/%s", ctx->deploy.jfrog[i].repo, ctx->deploy.jfrog[i].dest); + snprintf(files, sizeof(files) - 1, "%s", strlist_item(ctx->deploy.jfrog[i].files, f)); + status += jfrog_cli_rt_upload(&ctx->deploy.jfrog_auth, &ctx->deploy.jfrog[i].upload_ctx, files, dest); + } + } + } + + if (!status && ctx->deploy.jfrog[0].files && ctx->deploy.jfrog[0].dest) { + jfrog_cli_rt_build_collect_env(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, ctx->deploy.jfrog[0].upload_ctx.build_number); + jfrog_cli_rt_build_publish(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, ctx->deploy.jfrog[0].upload_ctx.build_number); + } + + return status; +} + +int delivery_mission_render_files(struct Delivery *ctx) { + if (!ctx->storage.mission_dir) { + fprintf(stderr, "Mission directory is not configured. 
Context not initialized?\n"); + return -1; + } + struct Data { + char *src; + char *dest; + } data; + struct INIFILE *cfg = ctx->_stasis_ini_fp.mission; + union INIVal val; + + memset(&data, 0, sizeof(data)); + data.src = calloc(PATH_MAX, sizeof(*data.src)); + if (!data.src) { + perror("data.src"); + return -1; + } + + for (size_t i = 0; i < cfg->section_count; i++) { + char *section_name = cfg->section[i]->key; + if (!startswith(section_name, "template:")) { + continue; + } + val.as_char_p = strchr(section_name, ':') + 1; + if (val.as_char_p && isempty(val.as_char_p)) { + guard_free(data.src); + return 1; + } + sprintf(data.src, "%s/%s/%s", ctx->storage.mission_dir, ctx->meta.mission, val.as_char_p); + msg(STASIS_MSG_L2, "%s\n", data.src); + + ini_getval_required(cfg, section_name, "destination", INIVAL_TYPE_STR, &val); + conv_str(&data.dest, val); + + char *contents; + struct stat st; + if (lstat(data.src, &st)) { + perror(data.src); + guard_free(data.dest); + continue; + } + + contents = calloc(st.st_size + 1, sizeof(*contents)); + if (!contents) { + perror("template file contents"); + guard_free(data.dest); + continue; + } + + FILE *fp; + fp = fopen(data.src, "rb"); + if (!fp) { + perror(data.src); + guard_free(contents); + guard_free(data.dest); + continue; + } + + if (fread(contents, st.st_size, sizeof(*contents), fp) < 1) { + perror("while reading template file"); + guard_free(contents); + guard_free(data.dest); + fclose(fp); + continue; + } + fclose(fp); + + msg(STASIS_MSG_L3, "Writing %s\n", data.dest); + if (tpl_render_to_file(contents, data.dest)) { + guard_free(contents); + guard_free(data.dest); + continue; + } + guard_free(contents); + guard_free(data.dest); + } + + guard_free(data.src); + return 0; +} + +int delivery_docker(struct Delivery *ctx) { + if (!docker_capable(&ctx->deploy.docker.capabilities)) { + return -1; + } + char tag[STASIS_NAME_MAX]; + char args[PATH_MAX]; + int has_registry = ctx->deploy.docker.registry != NULL; + size_t total_tags 
= strlist_count(ctx->deploy.docker.tags); + size_t total_build_args = strlist_count(ctx->deploy.docker.build_args); + + if (!has_registry) { + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker registry defined. You will need to manually retag the resulting image.\n"); + } + + if (!total_tags) { + char default_tag[PATH_MAX]; + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker tags defined by configuration. Generating default tag(s).\n"); + // generate local tag + memset(default_tag, 0, sizeof(default_tag)); + sprintf(default_tag, "%s:%s-py%s", ctx->meta.name, ctx->info.build_name, ctx->meta.python_compact); + tolower_s(default_tag); + + // Add tag + ctx->deploy.docker.tags = strlist_init(); + strlist_append(&ctx->deploy.docker.tags, default_tag); + + if (has_registry) { + // generate tag for target registry + memset(default_tag, 0, sizeof(default_tag)); + sprintf(default_tag, "%s/%s:%s-py%s", ctx->deploy.docker.registry, ctx->meta.name, ctx->info.build_number, ctx->meta.python_compact); + tolower_s(default_tag); + + // Add tag + strlist_append(&ctx->deploy.docker.tags, default_tag); + } + // regenerate total tag available + total_tags = strlist_count(ctx->deploy.docker.tags); + } + + memset(args, 0, sizeof(args)); + + // Append image tags to command + for (size_t i = 0; i < total_tags; i++) { + char *tag_orig = strlist_item(ctx->deploy.docker.tags, i); + strcpy(tag, tag_orig); + docker_sanitize_tag(tag); + sprintf(args + strlen(args), " -t \"%s\" ", tag); + } + + // Append build arguments to command (i.e. 
--build-arg "key=value" + for (size_t i = 0; i < total_build_args; i++) { + char *build_arg = strlist_item(ctx->deploy.docker.build_args, i); + if (!build_arg) { + break; + } + sprintf(args + strlen(args), " --build-arg \"%s\" ", build_arg); + } + + // Build the image + char delivery_file[PATH_MAX]; + char dest[PATH_MAX]; + char rsync_cmd[PATH_MAX * 2]; + memset(delivery_file, 0, sizeof(delivery_file)); + memset(dest, 0, sizeof(dest)); + + sprintf(delivery_file, "%s/%s.yml", ctx->storage.delivery_dir, ctx->info.release_name); + if (access(delivery_file, F_OK) < 0) { + fprintf(stderr, "docker build cannot proceed without delivery file: %s\n", delivery_file); + return -1; + } + + sprintf(dest, "%s/%s.yml", ctx->storage.build_docker_dir, ctx->info.release_name); + if (copy2(delivery_file, dest, CT_PERM)) { + fprintf(stderr, "Failed to copy delivery file to %s: %s\n", dest, strerror(errno)); + return -1; + } + + memset(dest, 0, sizeof(dest)); + sprintf(dest, "%s/packages", ctx->storage.build_docker_dir); + + msg(STASIS_MSG_L2, "Copying conda packages\n"); + memset(rsync_cmd, 0, sizeof(rsync_cmd)); + sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.conda_artifact_dir, dest); + if (system(rsync_cmd)) { + fprintf(stderr, "Failed to copy conda artifacts to docker build directory\n"); + return -1; + } + + msg(STASIS_MSG_L2, "Copying wheel packages\n"); + memset(rsync_cmd, 0, sizeof(rsync_cmd)); + sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.wheel_artifact_dir, dest); + if (system(rsync_cmd)) { + fprintf(stderr, "Failed to copy wheel artifactory to docker build directory\n"); + } + + if (docker_build(ctx->storage.build_docker_dir, args, ctx->deploy.docker.capabilities.build)) { + return -1; + } + + // Test the image + // All tags point back to the same image so test the first one we see + // regardless of how many are defined + strcpy(tag, strlist_item(ctx->deploy.docker.tags, 0)); + docker_sanitize_tag(tag); + + msg(STASIS_MSG_L2, 
"Executing image test script for %s\n", tag); + if (ctx->deploy.docker.test_script) { + if (isempty(ctx->deploy.docker.test_script)) { + msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Image test script has no content\n"); + } else { + int state; + if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) { + msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Non-zero exit (%d) from test script. %s image archive will not be generated.\n", state >> 8, tag); + // test failed -- don't save the image + return -1; + } + } + } else { + msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "No image test script defined\n"); + } + + // Test successful, save image + if (docker_save(path_basename(tag), ctx->storage.docker_artifact_dir, ctx->deploy.docker.image_compression)) { + // save failed + return -1; + } + + return 0; +} + +int delivery_fixup_test_results(struct Delivery *ctx) { + struct dirent *rec; + DIR *dp; + + dp = opendir(ctx->storage.results_dir); + if (!dp) { + perror(ctx->storage.results_dir); + return -1; + } + + while ((rec = readdir(dp)) != NULL) { + char path[PATH_MAX]; + memset(path, 0, sizeof(path)); + + if (!strcmp(rec->d_name, ".") || !strcmp(rec->d_name, "..")) { + continue; + } else if (!endswith(rec->d_name, ".xml")) { + continue; + } + + sprintf(path, "%s/%s", ctx->storage.results_dir, rec->d_name); + msg(STASIS_MSG_L2, "%s\n", rec->d_name); + if (xml_pretty_print_in_place(path, STASIS_XML_PRETTY_PRINT_PROG, STASIS_XML_PRETTY_PRINT_ARGS)) { + msg(STASIS_MSG_L3 | STASIS_MSG_WARN, "Failed to rewrite file '%s'\n", rec->d_name); + } + } + + closedir(dp); + return 0; +} \ No newline at end of file diff --git a/src/docker.c b/src/docker.c index 308fbc7..da7c1ce 100644 --- a/src/docker.c +++ b/src/docker.c @@ -1,4 +1,4 @@ -#include "omc.h" +#include "core.h" #include "docker.h" @@ -9,11 +9,11 @@ int docker_exec(const char *args, unsigned flags) { memset(&proc, 0, sizeof(proc)); memset(cmd, 0, sizeof(cmd)); snprintf(cmd, sizeof(cmd) - 1, "docker %s", args); - if (flags & 
OMC_DOCKER_QUIET) { + if (flags & STASIS_DOCKER_QUIET) { strcpy(proc.f_stdout, "/dev/null"); strcpy(proc.f_stderr, "/dev/null"); } else { - msg(OMC_MSG_L2, "Executing: %s\n", cmd); + msg(STASIS_MSG_L2, "Executing: %s\n", cmd); } shell(&proc, cmd); @@ -25,7 +25,7 @@ int docker_script(const char *image, char *data, unsigned flags) { FILE *infile; FILE *outfile; char cmd[PATH_MAX]; - char buffer[OMC_BUFSIZ]; + char buffer[STASIS_BUFSIZ]; memset(cmd, 0, sizeof(cmd)); snprintf(cmd, sizeof(cmd) - 1, "docker run --rm -i %s /bin/sh -", image); @@ -59,10 +59,10 @@ int docker_build(const char *dirpath, const char *args, int engine) { memset(build, 0, sizeof(build)); memset(cmd, 0, sizeof(cmd)); - if (engine & OMC_DOCKER_BUILD) { + if (engine & STASIS_DOCKER_BUILD) { strcpy(build, "build"); } - if (engine & OMC_DOCKER_BUILD_X) { + if (engine & STASIS_DOCKER_BUILD_X) { strcpy(build, "buildx build"); } snprintf(cmd, sizeof(cmd) - 1, "%s %s %s", build, args, dirpath); @@ -149,7 +149,7 @@ int docker_capable(struct DockerCapabilities *result) { } result->available = true; - if (docker_exec("ps", OMC_DOCKER_QUIET)) { + if (docker_exec("ps", STASIS_DOCKER_QUIET)) { // user cannot connect to the socket return false; } @@ -160,11 +160,11 @@ int docker_capable(struct DockerCapabilities *result) { } guard_free(version); - if (!docker_exec("buildx build --help", OMC_DOCKER_QUIET)) { - result->build |= OMC_DOCKER_BUILD_X; + if (!docker_exec("buildx build --help", STASIS_DOCKER_QUIET)) { + result->build |= STASIS_DOCKER_BUILD_X; } - if (!docker_exec("build --help", OMC_DOCKER_QUIET)) { - result->build |= OMC_DOCKER_BUILD; + if (!docker_exec("build --help", STASIS_DOCKER_QUIET)) { + result->build |= STASIS_DOCKER_BUILD; } if (!result->build) { // can't use docker without a build plugin diff --git a/src/download.c b/src/download.c index cbb617f..1623560 100644 --- a/src/download.c +++ b/src/download.c @@ -17,7 +17,7 @@ long download(char *url, const char *filename, char **errmsg) { long 
http_code = -1; FILE *fp; char user_agent[20]; - sprintf(user_agent, "omc/%s", VERSION); + sprintf(user_agent, "stasis/%s", VERSION); curl_global_init(CURL_GLOBAL_ALL); c = curl_easy_init(); diff --git a/src/environment.c b/src/environment.c index 824b447..924fbf8 100644 --- a/src/environment.c +++ b/src/environment.c @@ -70,7 +70,7 @@ void runtime_export(RuntimeEnv *env, char **keys) { NULL, }; - char output[OMC_BUFSIZ]; + char output[STASIS_BUFSIZ]; char export_command[7]; // export=6 and setenv=6... convenient char *_sh = getenv("SHELL"); char *sh = path_basename(_sh); @@ -290,9 +290,9 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) { return input; } - expanded = calloc(OMC_BUFSIZ, sizeof(char)); + expanded = calloc(STASIS_BUFSIZ, sizeof(char)); if (expanded == NULL) { - SYSERROR("could not allocate %d bytes for runtime_expand_var buffer", OMC_BUFSIZ); + SYSERROR("could not allocate %d bytes for runtime_expand_var buffer", STASIS_BUFSIZ); return NULL; } diff --git a/src/globals.c b/src/globals.c index 27c8aff..7ed7c3c 100644 --- a/src/globals.c +++ b/src/globals.c @@ -1,29 +1,30 @@ #include #include -#include "omc.h" +#include "core.h" const char *VERSION = "1.0.0"; const char *AUTHOR = "Joseph Hunkeler"; const char *BANNER = - "---------------------------------------------------------------------\n" -#if defined(OMC_DUMB_TERMINAL) - " OH MY CAL \n" + "------------------------------------------------------------------------\n" +#if defined(STASIS_DUMB_TERMINAL) + " STASIS \n" #else - " ██████╗ ██╗ ██╗ ███╗ ███╗██╗ ██╗ ██████╗ █████╗ ██╗ \n" - "██╔═══██╗██║ ██║ ████╗ ████║╚██╗ ██╔╝ ██╔════╝██╔══██╗██║ \n" - "██║ ██║███████║ ██╔████╔██║ ╚████╔╝ ██║ ███████║██║ \n" - "██║ ██║██╔══██║ ██║╚██╔╝██║ ╚██╔╝ ██║ ██╔══██║██║ \n" - "╚██████╔╝██║ ██║ ██║ ╚═╝ ██║ ██║ ╚██████╗██║ ██║███████╗\n" - " ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚══════╝\n" + " _____ _______ _____ _____ _____ \n" + " / ____|__ __|/\\ / ____|_ _|/ ____| \n" + " | (___ | | / \\ | (___ | | | 
(___ \n" + " \\___ \\ | | / /\\ \\ \\___ \\ | | \\___ \\ \n" + " ____) | | |/ ____ \\ ____) |_| |_ ____) | \n" + " |_____/ |_/_/ \\_\\_____/|_____|_____/ \n" + "\n" #endif - "---------------------------------------------------------------------\n" - " Delivery Generator \n" - " v%s \n" - "---------------------------------------------------------------------\n" + "------------------------------------------------------------------------\n" + " Delivery Generator \n" + " v%s \n" + "------------------------------------------------------------------------\n" "Copyright (C) 2023-2024 %s,\n" "Association of Universities for Research in Astronomy (AURA)\n"; -struct OMC_GLOBAL globals = { +struct STASIS_GLOBAL globals = { .verbose = false, .continue_on_error = false, .always_update_base_environment = false, diff --git a/src/ini.c b/src/ini.c index decd29f..d0757e6 100644 --- a/src/ini.c +++ b/src/ini.c @@ -2,7 +2,7 @@ #include #include #include -#include "omc.h" +#include "core.h" #include "ini.h" struct INIFILE *ini_init() { @@ -114,7 +114,7 @@ struct INIData *ini_getall(struct INIFILE *ini, char *section_name) { int ini_getval(struct INIFILE *ini, char *section_name, char *key, int type, union INIVal *result) { char *token = NULL; - char tbuf[OMC_BUFSIZ]; + char tbuf[STASIS_BUFSIZ]; char *tbufp = tbuf; struct INIData *data; data = ini_data_get(ini, section_name, key); @@ -282,7 +282,7 @@ int ini_write(struct INIFILE *ini, FILE **stream, unsigned mode) { for (size_t x = 0; x < ini->section_count; x++) { fprintf(*stream, "[%s]" LINE_SEP, ini->section[x]->key); for (size_t y = 0; y < ini->section[x]->data_count; y++) { - char outvalue[OMC_BUFSIZ]; + char outvalue[STASIS_BUFSIZ]; memset(outvalue, 0, sizeof(outvalue)); if (ini->section[x]->data[y]->value) { char **parts = split(ini->section[x]->data[y]->value, LINE_SEP, 0); @@ -354,8 +354,8 @@ void ini_free(struct INIFILE **ini) { struct INIFILE *ini_open(const char *filename) { FILE *fp; - char line[OMC_BUFSIZ] = {0}; - char 
current_section[OMC_BUFSIZ] = {0}; + char line[STASIS_BUFSIZ] = {0}; + char current_section[STASIS_BUFSIZ] = {0}; char reading_value = 0; struct INIFILE *ini = ini_init(); @@ -382,7 +382,7 @@ struct INIFILE *ini_open(const char *filename) { char inikey[2][255]; char *key = inikey[0]; char *key_last = inikey[1]; - char value[OMC_BUFSIZ]; + char value[STASIS_BUFSIZ]; memset(value, 0, sizeof(value)); memset(inikey, 0, sizeof(inikey)); diff --git a/src/omc_indexer.c b/src/omc_indexer.c deleted file mode 100644 index 971f389..0000000 --- a/src/omc_indexer.c +++ /dev/null @@ -1,753 +0,0 @@ -#include -#include -#include "omc.h" - -static struct option long_options[] = { - {"help", no_argument, 0, 'h'}, - {"destdir", required_argument, 0, 'd'}, - {"verbose", no_argument, 0, 'v'}, - {"unbuffered", no_argument, 0, 'U'}, - {"web", no_argument, 0, 'w'}, - {0, 0, 0, 0}, -}; - -const char *long_options_help[] = { - "Display this usage statement", - "Destination directory", - "Increase output verbosity", - "Disable line buffering", - "Generate HTML indexes (requires pandoc)", - NULL, -}; - -static void usage(char *name) { - int maxopts = sizeof(long_options) / sizeof(long_options[0]); - unsigned char *opts = calloc(maxopts + 1, sizeof(char)); - for (int i = 0; i < maxopts; i++) { - opts[i] = long_options[i].val; - } - printf("usage: %s [-%s] {{OMC_ROOT}...}\n", name, opts); - guard_free(opts); - - for (int i = 0; i < maxopts - 1; i++) { - char line[255]; - sprintf(line, " --%s -%c %-20s", long_options[i].name, long_options[i].val, long_options_help[i]); - puts(line); - } - -} - -int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t rootdirs_total) { - char cmd[PATH_MAX]; - - memset(cmd, 0, sizeof(cmd)); - sprintf(cmd, "rsync -ah%s --delete --exclude 'tools/' --exclude 'tmp/' --exclude 'build/' ", globals.verbose ? 
"v" : "q"); - for (size_t i = 0; i < rootdirs_total; i++) { - sprintf(cmd + strlen(cmd), "'%s'/ ", rootdirs[i]); - } - sprintf(cmd + strlen(cmd), "%s/", dest); - - if (globals.verbose) { - puts(cmd); - } - - if (system(cmd)) { - return -1; - } - return 0; -} - -int indexer_wheels(struct Delivery *ctx) { - return delivery_index_wheel_artifacts(ctx); -} - -int indexer_load_metadata(struct Delivery *ctx, const char *filename) { - char line[OMC_NAME_MAX] = {0}; - FILE *fp; - - fp = fopen(filename, "r"); - if (!fp) { - return -1; - } - - while (fgets(line, sizeof(line) - 1, fp) != NULL) { - char **parts = split(line, " ", 1); - char *name = parts[0]; - char *value = parts[1]; - strip(value); - if (!strcmp(name, "name")) { - ctx->meta.name = strdup(value); - } else if (!strcmp(name, "version")) { - ctx->meta.version = strdup(value); - } else if (!strcmp(name, "rc")) { - ctx->meta.rc = (int) strtol(value, NULL, 10); - } else if (!strcmp(name, "python")) { - ctx->meta.python = strdup(value); - } else if (!strcmp(name, "python_compact")) { - ctx->meta.python_compact = strdup(value); - } else if (!strcmp(name, "mission")) { - ctx->meta.mission = strdup(value); - } else if (!strcmp(name, "codename")) { - ctx->meta.codename = strdup(value); - } else if (!strcmp(name, "platform")) { - ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX, sizeof(*ctx->system.platform)); - char **platform = split(value, " ", 0); - ctx->system.platform[DELIVERY_PLATFORM] = platform[DELIVERY_PLATFORM]; - ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR] = platform[DELIVERY_PLATFORM_CONDA_SUBDIR]; - ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER] = platform[DELIVERY_PLATFORM_CONDA_INSTALLER]; - ctx->system.platform[DELIVERY_PLATFORM_RELEASE] = platform[DELIVERY_PLATFORM_RELEASE]; - } else if (!strcmp(name, "arch")) { - ctx->system.arch = strdup(value); - } else if (!strcmp(name, "time")) { - ctx->info.time_str_epoch = strdup(value); - } else if (!strcmp(name, "release_fmt")) { - 
ctx->rules.release_fmt = strdup(value); - } else if (!strcmp(name, "release_name")) { - ctx->info.release_name = strdup(value); - } else if (!strcmp(name, "build_name_fmt")) { - ctx->rules.build_name_fmt = strdup(value); - } else if (!strcmp(name, "build_name")) { - ctx->info.build_name = strdup(value); - } else if (!strcmp(name, "build_number_fmt")) { - ctx->rules.build_number_fmt = strdup(value); - } else if (!strcmp(name, "build_number")) { - ctx->info.build_number = strdup(value); - } else if (!strcmp(name, "conda_installer_baseurl")) { - ctx->conda.installer_baseurl = strdup(value); - } else if (!strcmp(name, "conda_installer_name")) { - ctx->conda.installer_name = strdup(value); - } else if (!strcmp(name, "conda_installer_version")) { - ctx->conda.installer_version = strdup(value); - } else if (!strcmp(name, "conda_installer_platform")) { - ctx->conda.installer_platform = strdup(value); - } else if (!strcmp(name, "conda_installer_arch")) { - ctx->conda.installer_arch = strdup(value); - } - GENERIC_ARRAY_FREE(parts); - } - fclose(fp); - - return 0; -} - -int indexer_get_files(struct StrList **out, const char *path, const char *pattern, ...) 
{ - va_list args; - va_start(args, pattern); - char userpattern[PATH_MAX] = {0}; - vsprintf(userpattern, pattern, args); - va_end(args); - struct StrList *list = listdir(path); - if (!list) { - return -1; - } - - if (!(*out)) { - (*out) = strlist_init(); - if (!(*out)) { - guard_strlist_free(&list); - return -1; - } - } - - size_t no_match = 0; - for (size_t i = 0; i < strlist_count(list); i++) { - char *item = strlist_item(list, i); - if (fnmatch(userpattern, item, 0)) { - no_match++; - continue; - } else { - strlist_append(&(*out), item); - } - } - if (no_match >= strlist_count(list)) { - fprintf(stderr, "no files matching the pattern: %s\n", userpattern); - guard_strlist_free(&list); - return -1; - } - guard_strlist_free(&list); - return 0; -} - -int get_latest_rc(struct Delivery ctx[], size_t nelem) { - int result = 0; - for (size_t i = 0; i < nelem; i++) { - if (ctx[i].meta.rc > result) { - result = ctx[i].meta.rc; - } - } - return result; -} - -struct Delivery **get_latest_deliveries(struct Delivery ctx[], size_t nelem) { - struct Delivery **result = NULL; - int latest = 0; - size_t n = 0; - - result = calloc(nelem + 1, sizeof(result)); - if (!result) { - fprintf(stderr, "Unable to allocate %zu bytes for result delivery array: %s\n", nelem * sizeof(result), strerror(errno)); - return NULL; - } - - latest = get_latest_rc(ctx, nelem); - for (size_t i = 0; i < nelem; i++) { - if (ctx[i].meta.rc == latest) { - result[n] = &ctx[i]; - n++; - } - } - - return result; -} - -int micromamba(const char *write_to, const char *prefix, char *command, ...) 
{ - struct utsname sys; - uname(&sys); - - tolower_s(sys.sysname); - if (!strcmp(sys.sysname, "darwin")) { - strcpy(sys.sysname, "osx"); - } - - if (!strcmp(sys.machine, "x86_64")) { - strcpy(sys.machine, "64"); - } - - char url[PATH_MAX]; - sprintf(url, "https://micro.mamba.pm/api/micromamba/%s-%s/latest", sys.sysname, sys.machine); - if (access("latest", F_OK)) { - download(url, "latest", NULL); - } - - char mmbin[PATH_MAX]; - sprintf(mmbin, "%s/micromamba", write_to); - - if (access(mmbin, F_OK)) { - char untarcmd[PATH_MAX]; - mkdirs(write_to, 0755); - sprintf(untarcmd, "tar -xvf latest -C %s --strip-components=1 bin/micromamba 1>/dev/null", write_to); - system(untarcmd); - } - - char cmd[OMC_BUFSIZ]; - memset(cmd, 0, sizeof(cmd)); - sprintf(cmd, "%s -r %s -p %s ", mmbin, prefix, prefix); - va_list args; - va_start(args, command); - vsprintf(cmd + strlen(cmd), command, args); - va_end(args); - - mkdirs(prefix, 0755); - - char rcpath[PATH_MAX]; - sprintf(rcpath, "%s/.condarc", prefix); - touch(rcpath); - - setenv("CONDARC", rcpath, 1); - setenv("MAMBA_ROOT_PREFIX", prefix, 1); - int status = system(cmd); - unsetenv("MAMBA_ROOT_PREFIX"); - - return status; -} - -int indexer_make_website(struct Delivery *ctx) { - char cmd[PATH_MAX]; - char *inputs[] = { - ctx->storage.delivery_dir, "/README.md", - ctx->storage.results_dir, "/README.md", - }; - - if (!find_program("pandoc")) { - return 0; - } - - for (size_t i = 0; i < sizeof(inputs) / sizeof(*inputs); i += 2) { - char fullpath[PATH_MAX]; - memset(fullpath, 0, sizeof(fullpath)); - sprintf(fullpath, "%s/%s", inputs[i], inputs[i + 1]); - if (access(fullpath, F_OK)) { - continue; - } - // Converts a markdown file to html - strcpy(cmd, "pandoc "); - strcat(cmd, fullpath); - strcat(cmd, " "); - strcat(cmd, "-o "); - strcat(cmd, inputs[i]); - strcat(cmd, "/index.html"); - if (globals.verbose) { - puts(cmd); - } - // This might be negative when killed by a signal. - // Otherwise, the return code is not critical to us. 
- if (system(cmd) < 0) { - return 1; - } - } - - return 0; -} - -int indexer_conda(struct Delivery *ctx) { - int status = 0; - char prefix[PATH_MAX]; - sprintf(prefix, "%s/%s", ctx->storage.tmpdir, "indexer"); - - status += micromamba(ctx->storage.tmpdir, prefix, "config prepend --env channels conda-forge"); - if (!globals.verbose) { - status += micromamba(ctx->storage.tmpdir, prefix, "config set --env quiet true"); - } - status += micromamba(ctx->storage.tmpdir, prefix, "config set --env always_yes true"); - status += micromamba(ctx->storage.tmpdir, prefix, "install conda-build"); - status += micromamba(ctx->storage.tmpdir, prefix, "run conda index %s", ctx->storage.conda_artifact_dir); - return status; -} - -static struct StrList *get_architectures(struct Delivery ctx[], size_t nelem) { - struct StrList *architectures = strlist_init(); - for (size_t i = 0; i < nelem; i++) { - if (!strstr_array(architectures->data, ctx[i].system.arch)) { - strlist_append(&architectures, ctx[i].system.arch); - } - } - return architectures; -} - -static struct StrList *get_platforms(struct Delivery ctx[], size_t nelem) { - struct StrList *platforms = strlist_init(); - for (size_t i = 0; i < nelem; i++) { - if (!strstr_array(platforms->data, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE])) { - strlist_append(&platforms, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE]); - } - } - return platforms; -} - -int indexer_symlinks(struct Delivery ctx[], size_t nelem) { - struct Delivery **data = NULL; - data = get_latest_deliveries(ctx, nelem); - //int latest = get_latest_rc(ctx, nelem); - - if (!pushd(ctx->storage.delivery_dir)) { - for (size_t i = 0; i < nelem; i++) { - char link_name_spec[PATH_MAX]; - char link_name_readme[PATH_MAX]; - - char file_name_spec[PATH_MAX]; - char file_name_readme[PATH_MAX]; - - if (!data[i]) { - continue; - } - sprintf(link_name_spec, "latest-py%s-%s-%s.yml", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], 
data[i]->system.arch); - sprintf(file_name_spec, "%s.yml", data[i]->info.release_name); - - sprintf(link_name_readme, "README-py%s-%s-%s.md", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], data[i]->system.arch); - sprintf(file_name_readme, "README-%s.md", data[i]->info.release_name); - - if (!access(link_name_spec, F_OK)) { - if (unlink(link_name_spec)) { - fprintf(stderr, "Unable to remove spec link: %s\n", link_name_spec); - } - } - if (!access(link_name_readme, F_OK)) { - if (unlink(link_name_readme)) { - fprintf(stderr, "Unable to remove readme link: %s\n", link_name_readme); - } - } - - if (globals.verbose) { - printf("%s -> %s\n", file_name_spec, link_name_spec); - } - if (symlink(file_name_spec, link_name_spec)) { - fprintf(stderr, "Unable to link %s as %s\n", file_name_spec, link_name_spec); - } - - if (globals.verbose) { - printf("%s -> %s\n", file_name_readme, link_name_readme); - } - if (symlink(file_name_readme, link_name_readme)) { - fprintf(stderr, "Unable to link %s as %s\n", file_name_readme, link_name_readme); - } - } - popd(); - } else { - fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); - guard_free(data); - return -1; - } - - // "latest" is an array of pointers to ctx[]. Do not free the contents of the array. 
- guard_free(data); - return 0; -} - -int indexer_readmes(struct Delivery ctx[], size_t nelem) { - struct Delivery **latest = NULL; - latest = get_latest_deliveries(ctx, nelem); - - char indexfile[PATH_MAX] = {0}; - sprintf(indexfile, "%s/README.md", ctx->storage.delivery_dir); - - if (!pushd(ctx->storage.delivery_dir)) { - FILE *indexfp; - indexfp = fopen(indexfile, "w+"); - if (!indexfp) { - fprintf(stderr, "Unable to open %s for writing\n", indexfile); - return -1; - } - struct StrList *archs = get_architectures(*latest, nelem); - struct StrList *platforms = get_platforms(*latest, nelem); - - fprintf(indexfp, "# %s-%s\n\n", ctx->meta.name, ctx->meta.version); - fprintf(indexfp, "## Current Release\n\n"); - for (size_t p = 0; p < strlist_count(platforms); p++) { - char *platform = strlist_item(platforms, p); - for (size_t a = 0; a < strlist_count(archs); a++) { - char *arch = strlist_item(archs, a); - int have_combo = 0; - for (size_t i = 0; i < nelem; i++) { - if (strstr(latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], platform) && strstr(latest[i]->system.arch, arch)) { - have_combo = 1; - } - } - if (!have_combo) { - continue; - } - fprintf(indexfp, "### %s-%s\n\n", platform, arch); - - fprintf(indexfp, "|Release|Info|Receipt|\n"); - fprintf(indexfp, "|:----:|:----:|:----:|\n"); - for (size_t i = 0; i < nelem; i++) { - char link_name[PATH_MAX]; - char readme_name[PATH_MAX]; - char conf_name[PATH_MAX]; - char conf_name_relative[PATH_MAX]; - if (!latest[i]) { - continue; - } - sprintf(link_name, "latest-py%s-%s-%s.yml", latest[i]->meta.python_compact, latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest[i]->system.arch); - sprintf(readme_name, "README-py%s-%s-%s.md", latest[i]->meta.python_compact, latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest[i]->system.arch); - sprintf(conf_name, "%s.ini", latest[i]->info.release_name); - sprintf(conf_name_relative, "../config/%s-rendered.ini", latest[i]->info.release_name); - if 
(strstr(link_name, platform) && strstr(link_name, arch)) { - fprintf(indexfp, "|[%s](%s)|[%s](%s)|[%s](%s)|\n", link_name, link_name, readme_name, readme_name, conf_name, conf_name_relative); - } - } - fprintf(indexfp, "\n"); - } - fprintf(indexfp, "\n"); - } - guard_strlist_free(&archs); - guard_strlist_free(&platforms); - fclose(indexfp); - popd(); - } else { - fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); - guard_free(latest); - return -1; - } - - // "latest" is an array of pointers to ctxs[]. Do not free the contents of the array. - guard_free(latest); - return 0; -} - -int indexer_junitxml_report(struct Delivery ctx[], size_t nelem) { - struct Delivery **latest = NULL; - latest = get_latest_deliveries(ctx, nelem); - - char indexfile[PATH_MAX] = {0}; - sprintf(indexfile, "%s/README.md", ctx->storage.results_dir); - - struct StrList *file_listing = listdir(ctx->storage.results_dir); - if (!file_listing) { - // no test results to process - return 0; - } - - if (!pushd(ctx->storage.results_dir)) { - FILE *indexfp; - indexfp = fopen(indexfile, "w+"); - if (!indexfp) { - fprintf(stderr, "Unable to open %s for writing\n", indexfile); - return -1; - } - struct StrList *archs = get_architectures(*latest, nelem); - struct StrList *platforms = get_platforms(*latest, nelem); - - fprintf(indexfp, "# %s-%s Test Report\n\n", ctx->meta.name, ctx->meta.version); - fprintf(indexfp, "## Current Release\n\n"); - for (size_t p = 0; p < strlist_count(platforms); p++) { - char *platform = strlist_item(platforms, p); - for (size_t a = 0; a < strlist_count(archs); a++) { - char *arch = strlist_item(archs, a); - int have_combo = 0; - for (size_t i = 0; i < nelem; i++) { - if (strstr(latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], platform) && strstr(latest[i]->system.arch, arch)) { - have_combo = 1; - break; - } - } - if (!have_combo) { - continue; - } - fprintf(indexfp, "### %s-%s\n\n", platform, arch); - - fprintf(indexfp, 
"|Suite|Duration|Fail |Skip |Error |\n"); - fprintf(indexfp, "|:----|:------:|:------:|:---:|:----:|\n"); - for (size_t f = 0; f < strlist_count(file_listing); f++) { - char *filename = strlist_item(file_listing, f); - if (!endswith(filename, ".xml")) { - continue; - } - - if (strstr(filename, platform) && strstr(filename, arch)) { - struct JUNIT_Testsuite *testsuite = junitxml_testsuite_read(filename); - if (testsuite) { - if (globals.verbose) { - printf("%s: duration: %0.4f, failed: %d, skipped: %d, errors: %d\n", filename, testsuite->time, testsuite->failures, testsuite->skipped, testsuite->errors); - } - fprintf(indexfp, "|[%s](%s)|%0.4f|%d|%d|%d|\n", filename, filename, testsuite->time, testsuite->failures, testsuite->skipped, testsuite->errors); - /* - * TODO: Display failure/skip/error output. - * - for (size_t i = 0; i < testsuite->_tc_inuse; i++) { - if (testsuite->testcase[i]->tc_result_state_type) { - printf("testcase: %s :: %s\n", testsuite->testcase[i]->classname, testsuite->testcase[i]->name); - if (testsuite->testcase[i]->tc_result_state_type == JUNIT_RESULT_STATE_FAILURE) { - printf("failure: %s\n", testsuite->testcase[i]->result_state.failure->message); - } else if (testsuite->testcase[i]->tc_result_state_type == JUNIT_RESULT_STATE_SKIPPED) { - printf("skipped: %s\n", testsuite->testcase[i]->result_state.skipped->message); - } - } - } - */ - junitxml_testsuite_free(&testsuite); - } else { - fprintf(stderr, "bad test suite: %s: %s\n", strerror(errno), filename); - continue; - } - } - } - fprintf(indexfp, "\n"); - } - fprintf(indexfp, "\n"); - } - guard_strlist_free(&archs); - guard_strlist_free(&platforms); - fclose(indexfp); - popd(); - } else { - fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); - guard_free(latest); - return -1; - } - - // "latest" is an array of pointers to ctxs[]. Do not free the contents of the array. 
- guard_free(latest); - return 0; -} - -void indexer_init_dirs(struct Delivery *ctx, const char *workdir) { - path_store(&ctx->storage.root, PATH_MAX, workdir, ""); - path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp"); - if (delivery_init_tmpdir(ctx)) { - fprintf(stderr, "Failed to configure temporary storage directory\n"); - exit(1); - } - path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output"); - path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config"); - path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta"); - path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery"); - path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages"); - path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results"); - path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels"); - path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda"); -} - -int main(int argc, char *argv[]) { - size_t rootdirs_total = 0; - char *destdir = NULL; - char **rootdirs = NULL; - int do_html = 0; - int c = 0; - int option_index = 0; - while ((c = getopt_long(argc, argv, "hd:vUw", long_options, &option_index)) != -1) { - switch (c) { - case 'h': - usage(path_basename(argv[0])); - exit(0); - case 'd': - destdir = strdup(optarg); - break; - case 'U': - fflush(stdout); - fflush(stderr); - setvbuf(stdout, NULL, _IONBF, 0); - setvbuf(stderr, NULL, _IONBF, 0); - break; - case 'v': - globals.verbose = 1; - break; - case 'w': - do_html = 1; - break; - case '?': - default: - exit(1); - } - } - - int current_index = optind; - if (optind < argc) { - rootdirs_total = argc - current_index; - while (optind < argc) { - // use first positional argument - rootdirs = &argv[optind++]; - break; - } - } - - if (isempty(destdir)) { - destdir = strdup("output"); - } - - if (!rootdirs || !rootdirs_total) { - 
fprintf(stderr, "You must specify at least one OMC root directory to index\n"); - exit(1); - } else { - for (size_t i = 0; i < rootdirs_total; i++) { - if (isempty(rootdirs[i]) || !strcmp(rootdirs[i], "/") || !strcmp(rootdirs[i], "\\")) { - SYSERROR("Unsafe directory: %s", rootdirs[i]); - exit(1); - } - } - } - - char *workdir; - char workdir_template[PATH_MAX]; - char *system_tmp = getenv("TMPDIR"); - if (system_tmp) { - strcat(workdir_template, system_tmp); - } else { - strcat(workdir_template, "/tmp"); - } - strcat(workdir_template, "/omc-combine.XXXXXX"); - workdir = mkdtemp(workdir_template); - if (!workdir) { - SYSERROR("Unable to create temporary directory: %s", workdir_template); - exit(1); - } else if (isempty(workdir) || !strcmp(workdir, "/") || !strcmp(workdir, "\\")) { - SYSERROR("Unsafe directory: %s", workdir); - exit(1); - } - - struct Delivery ctx; - memset(&ctx, 0, sizeof(ctx)); - - printf(BANNER, VERSION, AUTHOR); - - indexer_init_dirs(&ctx, workdir); - - msg(OMC_MSG_L1, "%s delivery root %s\n", - rootdirs_total > 1 ? "Merging" : "Indexing", - rootdirs_total > 1 ? 
"directories" : "directory"); - if (indexer_combine_rootdirs(workdir, rootdirs, rootdirs_total)) { - SYSERROR("%s", "Copy operation failed"); - rmtree(workdir); - exit(1); - } - - if (access(ctx.storage.conda_artifact_dir, F_OK)) { - mkdirs(ctx.storage.conda_artifact_dir, 0755); - } - - if (access(ctx.storage.wheel_artifact_dir, F_OK)) { - mkdirs(ctx.storage.wheel_artifact_dir, 0755); - } - - msg(OMC_MSG_L1, "Indexing conda packages\n"); - if (indexer_conda(&ctx)) { - SYSERROR("%s", "Conda package indexing operation failed"); - exit(1); - } - - msg(OMC_MSG_L1, "Indexing wheel packages\n"); - if (indexer_wheels(&ctx)) { - SYSERROR("%s", "Python package indexing operation failed"); - exit(1); - } - - msg(OMC_MSG_L1, "Loading metadata\n"); - struct StrList *metafiles = NULL; - indexer_get_files(&metafiles, ctx.storage.meta_dir, "*.omc"); - strlist_sort(metafiles, OMC_SORT_LEN_ASCENDING); - struct Delivery local[strlist_count(metafiles)]; - - for (size_t i = 0; i < strlist_count(metafiles); i++) { - char *item = strlist_item(metafiles, i); - memset(&local[i], 0, sizeof(ctx)); - memcpy(&local[i], &ctx, sizeof(ctx)); - char path[PATH_MAX]; - sprintf(path, "%s/%s", ctx.storage.meta_dir, item); - if (globals.verbose) { - puts(path); - } - indexer_load_metadata(&local[i], path); - } - - msg(OMC_MSG_L1, "Generating links to latest release iteration\n"); - if (indexer_symlinks(local, strlist_count(metafiles))) { - SYSERROR("%s", "Link generation failed"); - exit(1); - } - - msg(OMC_MSG_L1, "Generating README.md\n"); - if (indexer_readmes(local, strlist_count(metafiles))) { - SYSERROR("%s", "README indexing operation failed"); - exit(1); - } - - msg(OMC_MSG_L1, "Indexing test results\n"); - if (indexer_junitxml_report(local, strlist_count(metafiles))) { - SYSERROR("%s", "Test result indexing operation failed"); - exit(1); - } - - if (do_html) { - msg(OMC_MSG_L1, "Generating HTML indexes\n"); - if (indexer_make_website(local)) { - SYSERROR("%s", "Site creation failed"); - 
exit(1); - } - } - - msg(OMC_MSG_L1, "Copying indexed delivery to '%s'\n", destdir); - char cmd[PATH_MAX]; - memset(cmd, 0, sizeof(cmd)); - sprintf(cmd, "rsync -ah%s --delete --exclude 'tmp/' --exclude 'tools/' '%s/' '%s/'", globals.verbose ? "v" : "q", workdir, destdir); - guard_free(destdir); - - if (globals.verbose) { - puts(cmd); - } - - if (system(cmd)) { - SYSERROR("%s", "Copy operation failed"); - rmtree(workdir); - exit(1); - } - - msg(OMC_MSG_L1, "Removing work directory: %s\n", workdir); - if (rmtree(workdir)) { - SYSERROR("Failed to remove work directory: %s", strerror(errno)); - } - - guard_strlist_free(&metafiles); - delivery_free(&ctx); - globals_free(); - msg(OMC_MSG_L1, "Done!\n"); - return 0; -} diff --git a/src/omc_main.c b/src/omc_main.c deleted file mode 100644 index 90460ee..0000000 --- a/src/omc_main.c +++ /dev/null @@ -1,612 +0,0 @@ -#include -#include -#include -#include -#include -#include "omc.h" - -#define OPT_ALWAYS_UPDATE_BASE 1000 -#define OPT_NO_DOCKER 1001 -#define OPT_NO_ARTIFACTORY 1002 -#define OPT_NO_TESTING 1003 -static struct option long_options[] = { - {"help", no_argument, 0, 'h'}, - {"version", no_argument, 0, 'V'}, - {"continue-on-error", no_argument, 0, 'C'}, - {"config", required_argument, 0, 'c'}, - {"python", required_argument, 0, 'p'}, - {"verbose", no_argument, 0, 'v'}, - {"unbuffered", no_argument, 0, 'U'}, - {"update-base", no_argument, 0, OPT_ALWAYS_UPDATE_BASE}, - {"no-docker", no_argument, 0, OPT_NO_DOCKER}, - {"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY}, - {"no-testing", no_argument, 0, OPT_NO_TESTING}, - {0, 0, 0, 0}, -}; - -const char *long_options_help[] = { - "Display this usage statement", - "Display program version", - "Allow tests to fail", - "Read configuration file", - "Override version of Python in configuration", - "Increase output verbosity", - "Disable line buffering", - "Update conda installation prior to OMC environment creation", - "Do not build docker images", - "Do not upload 
artifacts to Artifactory", - "Do not execute test scripts", - NULL, -}; - -static int get_option_max_width(struct option option[]) { - int i = 0; - int max = 0; - const int indent = 4; - while (option[i].name != 0) { - int len = (int) strlen(option[i].name); - if (option[i].has_arg) { - len += indent; - } - if (len > max) { - max = len; - } - i++; - } - return max; -} - -static void usage(char *progname) { - printf("usage: %s ", progname); - printf("[-"); - for (int x = 0; long_options[x].val != 0; x++) { - if (long_options[x].has_arg == no_argument && long_options[x].val <= 'z') { - putchar(long_options[x].val); - } - } - printf("] {DELIVERY_FILE}\n"); - - int width = get_option_max_width(long_options); - for (int x = 0; long_options[x].name != 0; x++) { - char tmp[OMC_NAME_MAX] = {0}; - char output[sizeof(tmp)] = {0}; - char opt_long[50] = {0}; // --? [ARG]? - char opt_short[50] = {0}; // -? [ARG]? - - strcat(opt_long, "--"); - strcat(opt_long, long_options[x].name); - if (long_options[x].has_arg) { - strcat(opt_long, " ARG"); - } - - if (long_options[x].val <= 'z') { - strcat(opt_short, "-"); - opt_short[1] = (char) long_options[x].val; - if (long_options[x].has_arg) { - strcat(opt_short, " ARG"); - } - } else { - strcat(opt_short, " "); - } - - sprintf(tmp, " %%-%ds\t%%s\t\t%%s", width + 4); - sprintf(output, tmp, opt_long, opt_short, long_options_help[x]); - puts(output); - } -} - - - -static void check_system_requirements(struct Delivery *ctx) { - const char *tools_required[] = { - "rsync", - NULL, - }; - - msg(OMC_MSG_L1, "Checking system requirements\n"); - for (size_t i = 0; tools_required[i] != NULL; i++) { - if (!find_program(tools_required[i])) { - msg(OMC_MSG_L2 | OMC_MSG_ERROR, "'%s' must be installed.\n", tools_required[i]); - exit(1); - } - } - - if (!globals.tmpdir && !ctx->storage.tmpdir) { - delivery_init_tmpdir(ctx); - } - - struct DockerCapabilities dcap; - if (!docker_capable(&dcap)) { - msg(OMC_MSG_L2 | OMC_MSG_WARN, "Docker is broken\n"); - 
msg(OMC_MSG_L3, "Available: %s\n", dcap.available ? "Yes" : "No"); - msg(OMC_MSG_L3, "Usable: %s\n", dcap.usable ? "Yes" : "No"); - msg(OMC_MSG_L3, "Podman [Docker Emulation]: %s\n", dcap.podman ? "Yes" : "No"); - msg(OMC_MSG_L3, "Build plugin(s): "); - if (dcap.usable) { - if (dcap.build & OMC_DOCKER_BUILD) { - printf("build "); - } - if (dcap.build & OMC_DOCKER_BUILD_X) { - printf("buildx "); - } - puts(""); - } else { - printf("N/A\n"); - } - - // disable docker builds - globals.enable_docker = false; - } -} - -int main(int argc, char *argv[]) { - struct Delivery ctx; - struct Process proc = { - .f_stdout = "", - .f_stderr = "", - .redirect_stderr = 0, - }; - char env_name[OMC_NAME_MAX] = {0}; - char env_name_testing[OMC_NAME_MAX] = {0}; - char *delivery_input = NULL; - char *config_input = NULL; - char installer_url[PATH_MAX]; - char python_override_version[OMC_NAME_MAX]; - int user_disabled_docker = false; - - memset(env_name, 0, sizeof(env_name)); - memset(env_name_testing, 0, sizeof(env_name_testing)); - memset(installer_url, 0, sizeof(installer_url)); - memset(python_override_version, 0, sizeof(python_override_version)); - memset(&proc, 0, sizeof(proc)); - memset(&ctx, 0, sizeof(ctx)); - - int c; - int option_index = 0; - while ((c = getopt_long(argc, argv, "hVCc:p:vU", long_options, &option_index)) != -1) { - switch (c) { - case 'h': - usage(path_basename(argv[0])); - exit(0); - case 'V': - puts(VERSION); - exit(0); - case 'c': - config_input = strdup(optarg); - break; - case 'C': - globals.continue_on_error = true; - break; - case 'p': - strcpy(python_override_version, optarg); - break; - case OPT_ALWAYS_UPDATE_BASE: - globals.always_update_base_environment = true; - break; - case 'U': - setenv("PYTHONUNBUFFERED", "1", 1); - fflush(stdout); - fflush(stderr); - setvbuf(stdout, NULL, _IONBF, 0); - setvbuf(stderr, NULL, _IONBF, 0); - break; - case 'v': - globals.verbose = true; - break; - case OPT_NO_DOCKER: - globals.enable_docker = false; - 
user_disabled_docker = true; - break; - case OPT_NO_ARTIFACTORY: - globals.enable_artifactory = false; - break; - case OPT_NO_TESTING: - globals.enable_testing = false; - break; - case '?': - default: - exit(1); - } - } - - if (optind < argc) { - while (optind < argc) { - // use first positional argument - delivery_input = argv[optind++]; - break; - } - } - - if (!delivery_input) { - fprintf(stderr, "error: a DELIVERY_FILE is required\n"); - usage(path_basename(argv[0])); - exit(1); - } - - printf(BANNER, VERSION, AUTHOR); - - msg(OMC_MSG_L1, "Setup\n"); - - // Expose variables for use with the template engine - // NOTE: These pointers are populated by delivery_init() so please avoid using - // tpl_render() until then. - tpl_register("meta.name", &ctx.meta.name); - tpl_register("meta.version", &ctx.meta.version); - tpl_register("meta.codename", &ctx.meta.codename); - tpl_register("meta.mission", &ctx.meta.mission); - tpl_register("meta.python", &ctx.meta.python); - tpl_register("meta.python_compact", &ctx.meta.python_compact); - tpl_register("info.time_str_epoch", &ctx.info.time_str_epoch); - tpl_register("info.release_name", &ctx.info.release_name); - tpl_register("info.build_name", &ctx.info.build_name); - tpl_register("info.build_number", &ctx.info.build_number); - tpl_register("storage.tmpdir", &ctx.storage.tmpdir); - tpl_register("storage.output_dir", &ctx.storage.output_dir); - tpl_register("storage.delivery_dir", &ctx.storage.delivery_dir); - tpl_register("storage.conda_artifact_dir", &ctx.storage.conda_artifact_dir); - tpl_register("storage.wheel_artifact_dir", &ctx.storage.wheel_artifact_dir); - tpl_register("storage.build_sources_dir", &ctx.storage.build_sources_dir); - tpl_register("storage.build_docker_dir", &ctx.storage.build_docker_dir); - tpl_register("storage.results_dir", &ctx.storage.results_dir); - tpl_register("conda.installer_baseurl", &ctx.conda.installer_baseurl); - tpl_register("conda.installer_name", &ctx.conda.installer_name); - 
tpl_register("conda.installer_version", &ctx.conda.installer_version); - tpl_register("conda.installer_arch", &ctx.conda.installer_arch); - tpl_register("conda.installer_platform", &ctx.conda.installer_platform); - tpl_register("deploy.jfrog.repo", &globals.jfrog.repo); - tpl_register("deploy.jfrog.url", &globals.jfrog.url); - tpl_register("deploy.docker.registry", &ctx.deploy.docker.registry); - tpl_register("workaround.tox_posargs", &globals.workaround.tox_posargs); - - // Set up PREFIX/etc directory information - // The user may manipulate the base directory path with OMC_SYSCONFDIR - // environment variable - char omc_sysconfdir_tmp[PATH_MAX]; - if (getenv("OMC_SYSCONFDIR")) { - strncpy(omc_sysconfdir_tmp, getenv("OMC_SYSCONFDIR"), sizeof(omc_sysconfdir_tmp) - 1); - } else { - strncpy(omc_sysconfdir_tmp, OMC_SYSCONFDIR, sizeof(omc_sysconfdir_tmp) - 1); - } - - globals.sysconfdir = realpath(omc_sysconfdir_tmp, NULL); - if (!globals.sysconfdir) { - msg(OMC_MSG_ERROR | OMC_MSG_L1, "Unable to resolve path to configuration directory: %s\n", omc_sysconfdir_tmp); - exit(1); - } - - // Override Python version from command-line, if any - if (strlen(python_override_version)) { - guard_free(ctx.meta.python); - ctx.meta.python = strdup(python_override_version); - guard_free(ctx.meta.python_compact); - ctx.meta.python_compact = to_short_version(ctx.meta.python); - } - - if (!config_input) { - // no configuration passed by argument. use basic config. 
- char cfgfile[PATH_MAX * 2]; - sprintf(cfgfile, "%s/%s", globals.sysconfdir, "omc.ini"); - if (!access(cfgfile, F_OK | R_OK)) { - config_input = strdup(cfgfile); - } else { - msg(OMC_MSG_WARN, "OMC global configuration is not readable, or does not exist: %s", cfgfile); - } - } - - if (config_input) { - msg(OMC_MSG_L2, "Reading OMC global configuration: %s\n", config_input); - ctx._omc_ini_fp.cfg = ini_open(config_input); - if (!ctx._omc_ini_fp.cfg) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Failed to read config file: %s, %s\n", delivery_input, strerror(errno)); - exit(1); - } - ctx._omc_ini_fp.cfg_path = strdup(config_input); - guard_free(config_input); - } - - msg(OMC_MSG_L2, "Reading OMC delivery configuration: %s\n", delivery_input); - ctx._omc_ini_fp.delivery = ini_open(delivery_input); - if (!ctx._omc_ini_fp.delivery) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Failed to read delivery file: %s, %s\n", delivery_input, strerror(errno)); - exit(1); - } - ctx._omc_ini_fp.delivery_path = strdup(delivery_input); - - msg(OMC_MSG_L2, "Bootstrapping delivery context\n"); - if (bootstrap_build_info(&ctx)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Failed to bootstrap delivery context\n"); - exit(1); - } - - msg(OMC_MSG_L2, "Initializing delivery context\n"); - if (delivery_init(&ctx)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Failed to initialize delivery context\n"); - exit(1); - } - check_system_requirements(&ctx); - - msg(OMC_MSG_L2, "Configuring JFrog CLI\n"); - if (delivery_init_artifactory(&ctx)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "JFrog CLI configuration failed\n"); - exit(1); - } - - runtime_apply(ctx.runtime.environ); - strcpy(env_name, ctx.info.release_name); - strcpy(env_name_testing, env_name); - strcat(env_name_testing, "-test"); - - msg(OMC_MSG_L1, "Overview\n"); - delivery_meta_show(&ctx); - delivery_conda_show(&ctx); - delivery_tests_show(&ctx); - if (globals.verbose) { - //delivery_runtime_show(&ctx); - } - - msg(OMC_MSG_L1, "Conda setup\n"); - 
delivery_get_installer_url(&ctx, installer_url); - msg(OMC_MSG_L2, "Downloading: %s\n", installer_url); - if (delivery_get_installer(&ctx, installer_url)) { - msg(OMC_MSG_ERROR, "download failed: %s\n", installer_url); - exit(1); - } - - // Unlikely to occur: this should help prevent rmtree() from destroying your entire filesystem - // if path is "/" then, die - // or if empty string, die - if (!strcmp(ctx.storage.conda_install_prefix, DIR_SEP) || !strlen(ctx.storage.conda_install_prefix)) { - fprintf(stderr, "error: ctx.storage.conda_install_prefix is malformed!\n"); - exit(1); - } - - msg(OMC_MSG_L2, "Installing: %s\n", ctx.conda.installer_name); - delivery_install_conda(ctx.conda.installer_path, ctx.storage.conda_install_prefix); - - msg(OMC_MSG_L2, "Configuring: %s\n", ctx.storage.conda_install_prefix); - delivery_conda_enable(&ctx, ctx.storage.conda_install_prefix); - - char *pathvar = NULL; - pathvar = getenv("PATH"); - if (!pathvar) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "PATH variable is not set. 
Cannot continue.\n"); - exit(1); - } else { - char pathvar_tmp[OMC_BUFSIZ]; - sprintf(pathvar_tmp, "%s/bin:%s", ctx.storage.conda_install_prefix, pathvar); - setenv("PATH", pathvar_tmp, 1); - pathvar = NULL; - } - - msg(OMC_MSG_L1, "Creating release environment(s)\n"); - if (ctx.meta.based_on && strlen(ctx.meta.based_on)) { - if (conda_env_remove(env_name)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed to remove release environment: %s\n", env_name_testing); - exit(1); - } - msg(OMC_MSG_L2, "Based on release: %s\n", ctx.meta.based_on); - if (conda_env_create_from_uri(env_name, ctx.meta.based_on)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "unable to install release environment using configuration file\n"); - exit(1); - } - - if (conda_env_remove(env_name_testing)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed to remove testing environment\n"); - exit(1); - } - if (conda_env_create_from_uri(env_name_testing, ctx.meta.based_on)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "unable to install testing environment using configuration file\n"); - exit(1); - } - } else { - if (conda_env_create(env_name, ctx.meta.python, NULL)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed to create release environment\n"); - exit(1); - } - if (conda_env_create(env_name_testing, ctx.meta.python, NULL)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed to create release environment\n"); - exit(1); - } - } - - // Activate test environment - msg(OMC_MSG_L1, "Activating test environment\n"); - if (conda_activate(ctx.storage.conda_install_prefix, env_name_testing)) { - fprintf(stderr, "failed to activate test environment\n"); - exit(1); - } - - delivery_gather_tool_versions(&ctx); - if (!ctx.conda.tool_version) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Could not determine conda version\n"); - exit(1); - } - if (!ctx.conda.tool_build_version) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "Could not determine conda-build version\n"); - exit(1); - } - - if (pip_exec("install build")) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "'build' tool 
installation failed"); - exit(1); - } - - // Execute configuration-defined tests - if (globals.enable_testing) { - msg(OMC_MSG_L1, "Begin test execution\n"); - delivery_tests_run(&ctx); - msg(OMC_MSG_L1, "Rewriting test results\n"); - delivery_fixup_test_results(&ctx); - } else { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "Test execution is disabled\n"); - } - - msg(OMC_MSG_L1, "Generating deferred package listing\n"); - // Test succeeded so move on to producing package artifacts - delivery_defer_packages(&ctx, DEFER_CONDA); - delivery_defer_packages(&ctx, DEFER_PIP); - - if (ctx.conda.conda_packages_defer && strlist_count(ctx.conda.conda_packages_defer)) { - msg(OMC_MSG_L2, "Building Conda recipe(s)\n"); - if (delivery_build_recipes(&ctx)) { - exit(1); - } - msg(OMC_MSG_L3, "Copying artifacts\n"); - if (delivery_copy_conda_artifacts(&ctx)) { - exit(1); - } - msg(OMC_MSG_L3, "Indexing artifacts\n"); - if (delivery_index_conda_artifacts(&ctx)) { - exit(1); - } - } - - if (strlist_count(ctx.conda.pip_packages_defer)) { - if (!(ctx.conda.wheels_packages = delivery_build_wheels(&ctx))) { - exit(1); - } - if (delivery_index_wheel_artifacts(&ctx)) { - exit(1); - } - - } - - // Populate the release environment - msg(OMC_MSG_L1, "Populating release environment\n"); - msg(OMC_MSG_L2, "Installing conda packages\n"); - if (strlist_count(ctx.conda.conda_packages)) { - if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA, (struct StrList *[]) {ctx.conda.conda_packages, NULL})) { - exit(1); - } - } - if (strlist_count(ctx.conda.conda_packages_defer)) { - msg(OMC_MSG_L3, "Installing deferred conda packages\n"); - if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA | INSTALL_PKG_CONDA_DEFERRED, (struct StrList *[]) {ctx.conda.conda_packages_defer, NULL})) { - exit(1); - } - } else { - msg(OMC_MSG_L3, "No deferred conda packages\n"); - } - - msg(OMC_MSG_L2, "Installing pip packages\n"); - if 
(strlist_count(ctx.conda.pip_packages)) { - if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP, (struct StrList *[]) {ctx.conda.pip_packages, NULL})) { - exit(1); - } - } - - if (strlist_count(ctx.conda.pip_packages_defer)) { - msg(OMC_MSG_L3, "Installing deferred pip packages\n"); - if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP | INSTALL_PKG_PIP_DEFERRED, (struct StrList *[]) {ctx.conda.pip_packages_defer, NULL})) { - exit(1); - } - } else { - msg(OMC_MSG_L3, "No deferred pip packages\n"); - } - - conda_exec("list"); - - msg(OMC_MSG_L1, "Creating release\n"); - msg(OMC_MSG_L2, "Exporting delivery configuration\n"); - if (!pushd(ctx.storage.cfgdump_dir)) { - char filename[PATH_MAX] = {0}; - sprintf(filename, "%s.ini", ctx.info.release_name); - FILE *spec = fopen(filename, "w+"); - if (!spec) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed %s\n", filename); - exit(1); - } - ini_write(ctx._omc_ini_fp.delivery, &spec, INI_WRITE_RAW); - fclose(spec); - - memset(filename, 0, sizeof(filename)); - sprintf(filename, "%s-rendered.ini", ctx.info.release_name); - spec = fopen(filename, "w+"); - if (!spec) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed %s\n", filename); - exit(1); - } - ini_write(ctx._omc_ini_fp.delivery, &spec, INI_WRITE_PRESERVE); - fclose(spec); - popd(); - } else { - SYSERROR("Failed to enter directory: %s", ctx.storage.delivery_dir); - exit(1); - } - - msg(OMC_MSG_L2, "Exporting %s\n", env_name_testing); - if (conda_env_export(env_name_testing, ctx.storage.delivery_dir, env_name_testing)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed %s\n", env_name_testing); - exit(1); - } - - msg(OMC_MSG_L2, "Exporting %s\n", env_name); - if (conda_env_export(env_name, ctx.storage.delivery_dir, env_name)) { - msg(OMC_MSG_ERROR | OMC_MSG_L2, "failed %s\n", env_name); - exit(1); - } - - // Rewrite release environment output (i.e. 
set package origin(s) to point to the deployment server, etc.) - char specfile[PATH_MAX]; - sprintf(specfile, "%s/%s.yml", ctx.storage.delivery_dir, env_name); - msg(OMC_MSG_L3, "Rewriting release spec file (stage 1): %s\n", path_basename(specfile)); - delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_1); - - msg(OMC_MSG_L1, "Rendering mission templates\n"); - delivery_mission_render_files(&ctx); - - int want_docker = ini_section_search(&ctx._omc_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:docker") ? true : false; - int want_artifactory = ini_section_search(&ctx._omc_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:artifactory") ? true : false; - - if (want_docker) { - if (user_disabled_docker) { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "Docker image building is disabled by CLI argument\n"); - } else { - char dockerfile[PATH_MAX] = {0}; - sprintf(dockerfile, "%s/%s", ctx.storage.build_docker_dir, "Dockerfile"); - if (globals.enable_docker) { - if (!access(dockerfile, F_OK)) { - msg(OMC_MSG_L1, "Building Docker image\n"); - if (delivery_docker(&ctx)) { - msg(OMC_MSG_L1 | OMC_MSG_ERROR, "Failed to build docker image!\n"); - COE_CHECK_ABORT(1, "Failed to build docker image"); - } - } else { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "Docker image building is disabled. No Dockerfile found in %s\n", ctx.storage.build_docker_dir); - } - } else { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "Docker image building is disabled. System configuration error\n"); - } - } - } else { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "Docker image building is disabled. 
deploy:docker is not configured\n"); - } - - msg(OMC_MSG_L3, "Rewriting release spec file (stage 2): %s\n", path_basename(specfile)); - delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_2); - - msg(OMC_MSG_L1, "Dumping metadata\n"); - if (delivery_dump_metadata(&ctx)) { - msg(OMC_MSG_L1 | OMC_MSG_ERROR, "Metadata dump failed\n"); - } - - if (want_artifactory) { - if (globals.enable_artifactory) { - msg(OMC_MSG_L1, "Uploading artifacts\n"); - delivery_artifact_upload(&ctx); - } else { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "Artifact uploading is disabled\n"); - } - } else { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "Artifact uploading is disabled. deploy:artifactory is not configured\n"); - } - - msg(OMC_MSG_L1, "Cleaning up\n"); - delivery_free(&ctx); - globals_free(); - tpl_free(); - - msg(OMC_MSG_L1, "Done!\n"); - return 0; -} - diff --git a/src/recipe.c b/src/recipe.c index 43899b8..e51fde6 100644 --- a/src/recipe.c +++ b/src/recipe.c @@ -20,7 +20,7 @@ int recipe_clone(char *recipe_dir, char *url, char *gitref, char **result) { if (!access(destdir, F_OK)) { if (!strcmp(destdir, "/")) { - fprintf(stderr, "OMC is misconfigured. Please check your output path(s) immediately.\n"); + fprintf(stderr, "STASIS is misconfigured. 
Please check your output path(s) immediately.\n"); fprintf(stderr, "recipe_dir = '%s'\nreponame = '%s'\ndestdir = '%s'\n", recipe_dir, reponame, destdir); exit(1); diff --git a/src/relocation.c b/src/relocation.c index 586d1ba..a86530d 100644 --- a/src/relocation.c +++ b/src/relocation.c @@ -24,7 +24,7 @@ * @return 0 on success, -1 on error */ int replace_text(char *original, const char *target, const char *replacement, unsigned flags) { - char buffer[OMC_BUFSIZ]; + char buffer[STASIS_BUFSIZ]; char *pos = original; char *match = NULL; size_t original_len = strlen(original); @@ -104,7 +104,7 @@ int replace_text(char *original, const char *target, const char *replacement, un */ int file_replace_text(const char* filename, const char* target, const char* replacement, unsigned flags) { int result; - char buffer[OMC_BUFSIZ]; + char buffer[STASIS_BUFSIZ]; char tempfilename[] = "tempfileXXXXXX"; FILE *fp; FILE *tfp; diff --git a/src/stasis_indexer.c b/src/stasis_indexer.c new file mode 100644 index 0000000..fb231e0 --- /dev/null +++ b/src/stasis_indexer.c @@ -0,0 +1,753 @@ +#include +#include +#include "core.h" + +static struct option long_options[] = { + {"help", no_argument, 0, 'h'}, + {"destdir", required_argument, 0, 'd'}, + {"verbose", no_argument, 0, 'v'}, + {"unbuffered", no_argument, 0, 'U'}, + {"web", no_argument, 0, 'w'}, + {0, 0, 0, 0}, +}; + +const char *long_options_help[] = { + "Display this usage statement", + "Destination directory", + "Increase output verbosity", + "Disable line buffering", + "Generate HTML indexes (requires pandoc)", + NULL, +}; + +static void usage(char *name) { + int maxopts = sizeof(long_options) / sizeof(long_options[0]); + unsigned char *opts = calloc(maxopts + 1, sizeof(char)); + for (int i = 0; i < maxopts; i++) { + opts[i] = long_options[i].val; + } + printf("usage: %s [-%s] {{STASIS_ROOT}...}\n", name, opts); + guard_free(opts); + + for (int i = 0; i < maxopts - 1; i++) { + char line[255]; + sprintf(line, " --%s -%c %-20s", 
long_options[i].name, long_options[i].val, long_options_help[i]); + puts(line); + } + +} + +int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t rootdirs_total) { + char cmd[PATH_MAX]; + + memset(cmd, 0, sizeof(cmd)); + sprintf(cmd, "rsync -ah%s --delete --exclude 'tools/' --exclude 'tmp/' --exclude 'build/' ", globals.verbose ? "v" : "q"); + for (size_t i = 0; i < rootdirs_total; i++) { + sprintf(cmd + strlen(cmd), "'%s'/ ", rootdirs[i]); + } + sprintf(cmd + strlen(cmd), "%s/", dest); + + if (globals.verbose) { + puts(cmd); + } + + if (system(cmd)) { + return -1; + } + return 0; +} + +int indexer_wheels(struct Delivery *ctx) { + return delivery_index_wheel_artifacts(ctx); +} + +int indexer_load_metadata(struct Delivery *ctx, const char *filename) { + char line[STASIS_NAME_MAX] = {0}; + FILE *fp; + + fp = fopen(filename, "r"); + if (!fp) { + return -1; + } + + while (fgets(line, sizeof(line) - 1, fp) != NULL) { + char **parts = split(line, " ", 1); + char *name = parts[0]; + char *value = parts[1]; + strip(value); + if (!strcmp(name, "name")) { + ctx->meta.name = strdup(value); + } else if (!strcmp(name, "version")) { + ctx->meta.version = strdup(value); + } else if (!strcmp(name, "rc")) { + ctx->meta.rc = (int) strtol(value, NULL, 10); + } else if (!strcmp(name, "python")) { + ctx->meta.python = strdup(value); + } else if (!strcmp(name, "python_compact")) { + ctx->meta.python_compact = strdup(value); + } else if (!strcmp(name, "mission")) { + ctx->meta.mission = strdup(value); + } else if (!strcmp(name, "codename")) { + ctx->meta.codename = strdup(value); + } else if (!strcmp(name, "platform")) { + ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX, sizeof(*ctx->system.platform)); + char **platform = split(value, " ", 0); + ctx->system.platform[DELIVERY_PLATFORM] = platform[DELIVERY_PLATFORM]; + ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR] = platform[DELIVERY_PLATFORM_CONDA_SUBDIR]; + 
ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER] = platform[DELIVERY_PLATFORM_CONDA_INSTALLER]; + ctx->system.platform[DELIVERY_PLATFORM_RELEASE] = platform[DELIVERY_PLATFORM_RELEASE]; + } else if (!strcmp(name, "arch")) { + ctx->system.arch = strdup(value); + } else if (!strcmp(name, "time")) { + ctx->info.time_str_epoch = strdup(value); + } else if (!strcmp(name, "release_fmt")) { + ctx->rules.release_fmt = strdup(value); + } else if (!strcmp(name, "release_name")) { + ctx->info.release_name = strdup(value); + } else if (!strcmp(name, "build_name_fmt")) { + ctx->rules.build_name_fmt = strdup(value); + } else if (!strcmp(name, "build_name")) { + ctx->info.build_name = strdup(value); + } else if (!strcmp(name, "build_number_fmt")) { + ctx->rules.build_number_fmt = strdup(value); + } else if (!strcmp(name, "build_number")) { + ctx->info.build_number = strdup(value); + } else if (!strcmp(name, "conda_installer_baseurl")) { + ctx->conda.installer_baseurl = strdup(value); + } else if (!strcmp(name, "conda_installer_name")) { + ctx->conda.installer_name = strdup(value); + } else if (!strcmp(name, "conda_installer_version")) { + ctx->conda.installer_version = strdup(value); + } else if (!strcmp(name, "conda_installer_platform")) { + ctx->conda.installer_platform = strdup(value); + } else if (!strcmp(name, "conda_installer_arch")) { + ctx->conda.installer_arch = strdup(value); + } + GENERIC_ARRAY_FREE(parts); + } + fclose(fp); + + return 0; +} + +int indexer_get_files(struct StrList **out, const char *path, const char *pattern, ...) 
{ + va_list args; + va_start(args, pattern); + char userpattern[PATH_MAX] = {0}; + vsnprintf(userpattern, sizeof(userpattern), pattern, args); + va_end(args); + struct StrList *list = listdir(path); + if (!list) { + return -1; + } + + if (!(*out)) { + (*out) = strlist_init(); + if (!(*out)) { + guard_strlist_free(&list); + return -1; + } + } + + size_t no_match = 0; + for (size_t i = 0; i < strlist_count(list); i++) { + char *item = strlist_item(list, i); + if (fnmatch(userpattern, item, 0)) { + no_match++; + continue; + } else { + strlist_append(&(*out), item); + } + } + if (no_match >= strlist_count(list)) { + fprintf(stderr, "no files matching the pattern: %s\n", userpattern); + guard_strlist_free(&list); + return -1; + } + guard_strlist_free(&list); + return 0; +} + +int get_latest_rc(struct Delivery ctx[], size_t nelem) { + int result = 0; + for (size_t i = 0; i < nelem; i++) { + if (ctx[i].meta.rc > result) { + result = ctx[i].meta.rc; + } + } + return result; +} + +struct Delivery **get_latest_deliveries(struct Delivery ctx[], size_t nelem) { + struct Delivery **result = NULL; + int latest = 0; + size_t n = 0; + + result = calloc(nelem + 1, sizeof(*result)); + if (!result) { + fprintf(stderr, "Unable to allocate %zu bytes for result delivery array: %s\n", nelem * sizeof(*result), strerror(errno)); + return NULL; + } + + latest = get_latest_rc(ctx, nelem); + for (size_t i = 0; i < nelem; i++) { + if (ctx[i].meta.rc == latest) { + result[n] = &ctx[i]; + n++; + } + } + + return result; +} + +int micromamba(const char *write_to, const char *prefix, char *command, ...) 
{ + struct utsname sys; + uname(&sys); + + tolower_s(sys.sysname); + if (!strcmp(sys.sysname, "darwin")) { + strcpy(sys.sysname, "osx"); + } + + if (!strcmp(sys.machine, "x86_64")) { + strcpy(sys.machine, "64"); + } + + char url[PATH_MAX]; + sprintf(url, "https://micro.mamba.pm/api/micromamba/%s-%s/latest", sys.sysname, sys.machine); + if (access("latest", F_OK)) { + download(url, "latest", NULL); + } + + char mmbin[PATH_MAX]; + sprintf(mmbin, "%s/micromamba", write_to); + + if (access(mmbin, F_OK)) { + char untarcmd[PATH_MAX]; + mkdirs(write_to, 0755); + sprintf(untarcmd, "tar -xvf latest -C %s --strip-components=1 bin/micromamba 1>/dev/null", write_to); + system(untarcmd); + } + + char cmd[STASIS_BUFSIZ]; + memset(cmd, 0, sizeof(cmd)); + sprintf(cmd, "%s -r %s -p %s ", mmbin, prefix, prefix); + va_list args; + va_start(args, command); + vsprintf(cmd + strlen(cmd), command, args); + va_end(args); + + mkdirs(prefix, 0755); + + char rcpath[PATH_MAX]; + sprintf(rcpath, "%s/.condarc", prefix); + touch(rcpath); + + setenv("CONDARC", rcpath, 1); + setenv("MAMBA_ROOT_PREFIX", prefix, 1); + int status = system(cmd); + unsetenv("MAMBA_ROOT_PREFIX"); + + return status; +} + +int indexer_make_website(struct Delivery *ctx) { + char cmd[PATH_MAX]; + char *inputs[] = { + ctx->storage.delivery_dir, "/README.md", + ctx->storage.results_dir, "/README.md", + }; + + if (!find_program("pandoc")) { + return 0; + } + + for (size_t i = 0; i < sizeof(inputs) / sizeof(*inputs); i += 2) { + char fullpath[PATH_MAX]; + memset(fullpath, 0, sizeof(fullpath)); + sprintf(fullpath, "%s/%s", inputs[i], inputs[i + 1]); + if (access(fullpath, F_OK)) { + continue; + } + // Converts a markdown file to html + strcpy(cmd, "pandoc "); + strcat(cmd, fullpath); + strcat(cmd, " "); + strcat(cmd, "-o "); + strcat(cmd, inputs[i]); + strcat(cmd, "/index.html"); + if (globals.verbose) { + puts(cmd); + } + // This might be negative when killed by a signal. + // Otherwise, the return code is not critical to us. 
+ if (system(cmd) < 0) { + return 1; + } + } + + return 0; +} + +int indexer_conda(struct Delivery *ctx) { + int status = 0; + char prefix[PATH_MAX]; + sprintf(prefix, "%s/%s", ctx->storage.tmpdir, "indexer"); + + status += micromamba(ctx->storage.tmpdir, prefix, "config prepend --env channels conda-forge"); + if (!globals.verbose) { + status += micromamba(ctx->storage.tmpdir, prefix, "config set --env quiet true"); + } + status += micromamba(ctx->storage.tmpdir, prefix, "config set --env always_yes true"); + status += micromamba(ctx->storage.tmpdir, prefix, "install conda-build"); + status += micromamba(ctx->storage.tmpdir, prefix, "run conda index %s", ctx->storage.conda_artifact_dir); + return status; +} + +static struct StrList *get_architectures(struct Delivery ctx[], size_t nelem) { + struct StrList *architectures = strlist_init(); + for (size_t i = 0; i < nelem; i++) { + if (!strstr_array(architectures->data, ctx[i].system.arch)) { + strlist_append(&architectures, ctx[i].system.arch); + } + } + return architectures; +} + +static struct StrList *get_platforms(struct Delivery ctx[], size_t nelem) { + struct StrList *platforms = strlist_init(); + for (size_t i = 0; i < nelem; i++) { + if (!strstr_array(platforms->data, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE])) { + strlist_append(&platforms, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE]); + } + } + return platforms; +} + +int indexer_symlinks(struct Delivery ctx[], size_t nelem) { + struct Delivery **data = NULL; + data = get_latest_deliveries(ctx, nelem); + //int latest = get_latest_rc(ctx, nelem); + + if (!pushd(ctx->storage.delivery_dir)) { + for (size_t i = 0; i < nelem; i++) { + char link_name_spec[PATH_MAX]; + char link_name_readme[PATH_MAX]; + + char file_name_spec[PATH_MAX]; + char file_name_readme[PATH_MAX]; + + if (!data[i]) { + continue; + } + sprintf(link_name_spec, "latest-py%s-%s-%s.yml", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], 
data[i]->system.arch); + sprintf(file_name_spec, "%s.yml", data[i]->info.release_name); + + sprintf(link_name_readme, "README-py%s-%s-%s.md", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], data[i]->system.arch); + sprintf(file_name_readme, "README-%s.md", data[i]->info.release_name); + + if (!access(link_name_spec, F_OK)) { + if (unlink(link_name_spec)) { + fprintf(stderr, "Unable to remove spec link: %s\n", link_name_spec); + } + } + if (!access(link_name_readme, F_OK)) { + if (unlink(link_name_readme)) { + fprintf(stderr, "Unable to remove readme link: %s\n", link_name_readme); + } + } + + if (globals.verbose) { + printf("%s -> %s\n", file_name_spec, link_name_spec); + } + if (symlink(file_name_spec, link_name_spec)) { + fprintf(stderr, "Unable to link %s as %s\n", file_name_spec, link_name_spec); + } + + if (globals.verbose) { + printf("%s -> %s\n", file_name_readme, link_name_readme); + } + if (symlink(file_name_readme, link_name_readme)) { + fprintf(stderr, "Unable to link %s as %s\n", file_name_readme, link_name_readme); + } + } + popd(); + } else { + fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); + guard_free(data); + return -1; + } + + // "latest" is an array of pointers to ctx[]. Do not free the contents of the array. 
+ guard_free(data); + return 0; +} + +int indexer_readmes(struct Delivery ctx[], size_t nelem) { + struct Delivery **latest = NULL; + latest = get_latest_deliveries(ctx, nelem); + + char indexfile[PATH_MAX] = {0}; + sprintf(indexfile, "%s/README.md", ctx->storage.delivery_dir); + + if (!pushd(ctx->storage.delivery_dir)) { + FILE *indexfp; + indexfp = fopen(indexfile, "w+"); + if (!indexfp) { + fprintf(stderr, "Unable to open %s for writing\n", indexfile); + return -1; + } + struct StrList *archs = get_architectures(*latest, nelem); + struct StrList *platforms = get_platforms(*latest, nelem); + + fprintf(indexfp, "# %s-%s\n\n", ctx->meta.name, ctx->meta.version); + fprintf(indexfp, "## Current Release\n\n"); + for (size_t p = 0; p < strlist_count(platforms); p++) { + char *platform = strlist_item(platforms, p); + for (size_t a = 0; a < strlist_count(archs); a++) { + char *arch = strlist_item(archs, a); + int have_combo = 0; + for (size_t i = 0; i < nelem; i++) { + if (strstr(latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], platform) && strstr(latest[i]->system.arch, arch)) { + have_combo = 1; + } + } + if (!have_combo) { + continue; + } + fprintf(indexfp, "### %s-%s\n\n", platform, arch); + + fprintf(indexfp, "|Release|Info|Receipt|\n"); + fprintf(indexfp, "|:----:|:----:|:----:|\n"); + for (size_t i = 0; i < nelem; i++) { + char link_name[PATH_MAX]; + char readme_name[PATH_MAX]; + char conf_name[PATH_MAX]; + char conf_name_relative[PATH_MAX]; + if (!latest[i]) { + continue; + } + sprintf(link_name, "latest-py%s-%s-%s.yml", latest[i]->meta.python_compact, latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest[i]->system.arch); + sprintf(readme_name, "README-py%s-%s-%s.md", latest[i]->meta.python_compact, latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest[i]->system.arch); + sprintf(conf_name, "%s.ini", latest[i]->info.release_name); + sprintf(conf_name_relative, "../config/%s-rendered.ini", latest[i]->info.release_name); + if 
(strstr(link_name, platform) && strstr(link_name, arch)) { + fprintf(indexfp, "|[%s](%s)|[%s](%s)|[%s](%s)|\n", link_name, link_name, readme_name, readme_name, conf_name, conf_name_relative); + } + } + fprintf(indexfp, "\n"); + } + fprintf(indexfp, "\n"); + } + guard_strlist_free(&archs); + guard_strlist_free(&platforms); + fclose(indexfp); + popd(); + } else { + fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); + guard_free(latest); + return -1; + } + + // "latest" is an array of pointers to ctxs[]. Do not free the contents of the array. + guard_free(latest); + return 0; +} + +int indexer_junitxml_report(struct Delivery ctx[], size_t nelem) { + struct Delivery **latest = NULL; + latest = get_latest_deliveries(ctx, nelem); + + char indexfile[PATH_MAX] = {0}; + sprintf(indexfile, "%s/README.md", ctx->storage.results_dir); + + struct StrList *file_listing = listdir(ctx->storage.results_dir); + if (!file_listing) { + // no test results to process + return 0; + } + + if (!pushd(ctx->storage.results_dir)) { + FILE *indexfp; + indexfp = fopen(indexfile, "w+"); + if (!indexfp) { + fprintf(stderr, "Unable to open %s for writing\n", indexfile); + return -1; + } + struct StrList *archs = get_architectures(*latest, nelem); + struct StrList *platforms = get_platforms(*latest, nelem); + + fprintf(indexfp, "# %s-%s Test Report\n\n", ctx->meta.name, ctx->meta.version); + fprintf(indexfp, "## Current Release\n\n"); + for (size_t p = 0; p < strlist_count(platforms); p++) { + char *platform = strlist_item(platforms, p); + for (size_t a = 0; a < strlist_count(archs); a++) { + char *arch = strlist_item(archs, a); + int have_combo = 0; + for (size_t i = 0; i < nelem; i++) { + if (strstr(latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], platform) && strstr(latest[i]->system.arch, arch)) { + have_combo = 1; + break; + } + } + if (!have_combo) { + continue; + } + fprintf(indexfp, "### %s-%s\n\n", platform, arch); + + fprintf(indexfp, 
"|Suite|Duration|Fail |Skip |Error |\n"); + fprintf(indexfp, "|:----|:------:|:------:|:---:|:----:|\n"); + for (size_t f = 0; f < strlist_count(file_listing); f++) { + char *filename = strlist_item(file_listing, f); + if (!endswith(filename, ".xml")) { + continue; + } + + if (strstr(filename, platform) && strstr(filename, arch)) { + struct JUNIT_Testsuite *testsuite = junitxml_testsuite_read(filename); + if (testsuite) { + if (globals.verbose) { + printf("%s: duration: %0.4f, failed: %d, skipped: %d, errors: %d\n", filename, testsuite->time, testsuite->failures, testsuite->skipped, testsuite->errors); + } + fprintf(indexfp, "|[%s](%s)|%0.4f|%d|%d|%d|\n", filename, filename, testsuite->time, testsuite->failures, testsuite->skipped, testsuite->errors); + /* + * TODO: Display failure/skip/error output. + * + for (size_t i = 0; i < testsuite->_tc_inuse; i++) { + if (testsuite->testcase[i]->tc_result_state_type) { + printf("testcase: %s :: %s\n", testsuite->testcase[i]->classname, testsuite->testcase[i]->name); + if (testsuite->testcase[i]->tc_result_state_type == JUNIT_RESULT_STATE_FAILURE) { + printf("failure: %s\n", testsuite->testcase[i]->result_state.failure->message); + } else if (testsuite->testcase[i]->tc_result_state_type == JUNIT_RESULT_STATE_SKIPPED) { + printf("skipped: %s\n", testsuite->testcase[i]->result_state.skipped->message); + } + } + } + */ + junitxml_testsuite_free(&testsuite); + } else { + fprintf(stderr, "bad test suite: %s: %s\n", strerror(errno), filename); + continue; + } + } + } + fprintf(indexfp, "\n"); + } + fprintf(indexfp, "\n"); + } + guard_strlist_free(&archs); + guard_strlist_free(&platforms); + fclose(indexfp); + popd(); + } else { + fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); + guard_free(latest); + return -1; + } + + // "latest" is an array of pointers to ctxs[]. Do not free the contents of the array. 
+ guard_free(latest); + return 0; +} + +void indexer_init_dirs(struct Delivery *ctx, const char *workdir) { + path_store(&ctx->storage.root, PATH_MAX, workdir, ""); + path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp"); + if (delivery_init_tmpdir(ctx)) { + fprintf(stderr, "Failed to configure temporary storage directory\n"); + exit(1); + } + path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output"); + path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config"); + path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta"); + path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery"); + path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages"); + path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results"); + path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels"); + path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda"); +} + +int main(int argc, char *argv[]) { + size_t rootdirs_total = 0; + char *destdir = NULL; + char **rootdirs = NULL; + int do_html = 0; + int c = 0; + int option_index = 0; + while ((c = getopt_long(argc, argv, "hd:vUw", long_options, &option_index)) != -1) { + switch (c) { + case 'h': + usage(path_basename(argv[0])); + exit(0); + case 'd': + destdir = strdup(optarg); + break; + case 'U': + fflush(stdout); + fflush(stderr); + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + break; + case 'v': + globals.verbose = 1; + break; + case 'w': + do_html = 1; + break; + case '?': + default: + exit(1); + } + } + + int current_index = optind; + if (optind < argc) { + rootdirs_total = argc - current_index; + while (optind < argc) { + // use first positional argument + rootdirs = &argv[optind++]; + break; + } + } + + if (isempty(destdir)) { + destdir = strdup("output"); + } + + if (!rootdirs || !rootdirs_total) { + 
 fprintf(stderr, "You must specify at least one STASIS root directory to index\n"); + exit(1); + } else { + for (size_t i = 0; i < rootdirs_total; i++) { + if (isempty(rootdirs[i]) || !strcmp(rootdirs[i], "/") || !strcmp(rootdirs[i], "\\")) { + SYSERROR("Unsafe directory: %s", rootdirs[i]); + exit(1); + } + } + } + + char *workdir; + char workdir_template[PATH_MAX] = {0}; + char *system_tmp = getenv("TMPDIR"); + if (system_tmp) { + strcat(workdir_template, system_tmp); + } else { + strcat(workdir_template, "/tmp"); + } + strcat(workdir_template, "/stasis-combine.XXXXXX"); + workdir = mkdtemp(workdir_template); + if (!workdir) { + SYSERROR("Unable to create temporary directory: %s", workdir_template); + exit(1); + } else if (isempty(workdir) || !strcmp(workdir, "/") || !strcmp(workdir, "\\")) { + SYSERROR("Unsafe directory: %s", workdir); + exit(1); + } + + struct Delivery ctx; + memset(&ctx, 0, sizeof(ctx)); + + printf(BANNER, VERSION, AUTHOR); + + indexer_init_dirs(&ctx, workdir); + + msg(STASIS_MSG_L1, "%s delivery root %s\n", + rootdirs_total > 1 ? "Merging" : "Indexing", + rootdirs_total > 1 ? 
"directories" : "directory"); + if (indexer_combine_rootdirs(workdir, rootdirs, rootdirs_total)) { + SYSERROR("%s", "Copy operation failed"); + rmtree(workdir); + exit(1); + } + + if (access(ctx.storage.conda_artifact_dir, F_OK)) { + mkdirs(ctx.storage.conda_artifact_dir, 0755); + } + + if (access(ctx.storage.wheel_artifact_dir, F_OK)) { + mkdirs(ctx.storage.wheel_artifact_dir, 0755); + } + + msg(STASIS_MSG_L1, "Indexing conda packages\n"); + if (indexer_conda(&ctx)) { + SYSERROR("%s", "Conda package indexing operation failed"); + exit(1); + } + + msg(STASIS_MSG_L1, "Indexing wheel packages\n"); + if (indexer_wheels(&ctx)) { + SYSERROR("%s", "Python package indexing operation failed"); + exit(1); + } + + msg(STASIS_MSG_L1, "Loading metadata\n"); + struct StrList *metafiles = NULL; + indexer_get_files(&metafiles, ctx.storage.meta_dir, "*.stasis"); + strlist_sort(metafiles, STASIS_SORT_LEN_ASCENDING); + struct Delivery local[strlist_count(metafiles)]; + + for (size_t i = 0; i < strlist_count(metafiles); i++) { + char *item = strlist_item(metafiles, i); + memset(&local[i], 0, sizeof(ctx)); + memcpy(&local[i], &ctx, sizeof(ctx)); + char path[PATH_MAX]; + sprintf(path, "%s/%s", ctx.storage.meta_dir, item); + if (globals.verbose) { + puts(path); + } + indexer_load_metadata(&local[i], path); + } + + msg(STASIS_MSG_L1, "Generating links to latest release iteration\n"); + if (indexer_symlinks(local, strlist_count(metafiles))) { + SYSERROR("%s", "Link generation failed"); + exit(1); + } + + msg(STASIS_MSG_L1, "Generating README.md\n"); + if (indexer_readmes(local, strlist_count(metafiles))) { + SYSERROR("%s", "README indexing operation failed"); + exit(1); + } + + msg(STASIS_MSG_L1, "Indexing test results\n"); + if (indexer_junitxml_report(local, strlist_count(metafiles))) { + SYSERROR("%s", "Test result indexing operation failed"); + exit(1); + } + + if (do_html) { + msg(STASIS_MSG_L1, "Generating HTML indexes\n"); + if (indexer_make_website(local)) { + SYSERROR("%s", "Site 
creation failed"); + exit(1); + } + } + + msg(STASIS_MSG_L1, "Copying indexed delivery to '%s'\n", destdir); + char cmd[PATH_MAX]; + memset(cmd, 0, sizeof(cmd)); + sprintf(cmd, "rsync -ah%s --delete --exclude 'tmp/' --exclude 'tools/' '%s/' '%s/'", globals.verbose ? "v" : "q", workdir, destdir); + guard_free(destdir); + + if (globals.verbose) { + puts(cmd); + } + + if (system(cmd)) { + SYSERROR("%s", "Copy operation failed"); + rmtree(workdir); + exit(1); + } + + msg(STASIS_MSG_L1, "Removing work directory: %s\n", workdir); + if (rmtree(workdir)) { + SYSERROR("Failed to remove work directory: %s", strerror(errno)); + } + + guard_strlist_free(&metafiles); + delivery_free(&ctx); + globals_free(); + msg(STASIS_MSG_L1, "Done!\n"); + return 0; +} diff --git a/src/stasis_main.c b/src/stasis_main.c new file mode 100644 index 0000000..8b13b98 --- /dev/null +++ b/src/stasis_main.c @@ -0,0 +1,612 @@ +#include +#include +#include +#include +#include +#include "core.h" + +#define OPT_ALWAYS_UPDATE_BASE 1000 +#define OPT_NO_DOCKER 1001 +#define OPT_NO_ARTIFACTORY 1002 +#define OPT_NO_TESTING 1003 +static struct option long_options[] = { + {"help", no_argument, 0, 'h'}, + {"version", no_argument, 0, 'V'}, + {"continue-on-error", no_argument, 0, 'C'}, + {"config", required_argument, 0, 'c'}, + {"python", required_argument, 0, 'p'}, + {"verbose", no_argument, 0, 'v'}, + {"unbuffered", no_argument, 0, 'U'}, + {"update-base", no_argument, 0, OPT_ALWAYS_UPDATE_BASE}, + {"no-docker", no_argument, 0, OPT_NO_DOCKER}, + {"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY}, + {"no-testing", no_argument, 0, OPT_NO_TESTING}, + {0, 0, 0, 0}, +}; + +const char *long_options_help[] = { + "Display this usage statement", + "Display program version", + "Allow tests to fail", + "Read configuration file", + "Override version of Python in configuration", + "Increase output verbosity", + "Disable line buffering", + "Update conda installation prior to STASIS environment creation", + "Do not build 
docker images", + "Do not upload artifacts to Artifactory", + "Do not execute test scripts", + NULL, +}; + +static int get_option_max_width(struct option option[]) { + int i = 0; + int max = 0; + const int indent = 4; + while (option[i].name != 0) { + int len = (int) strlen(option[i].name); + if (option[i].has_arg) { + len += indent; + } + if (len > max) { + max = len; + } + i++; + } + return max; +} + +static void usage(char *progname) { + printf("usage: %s ", progname); + printf("[-"); + for (int x = 0; long_options[x].val != 0; x++) { + if (long_options[x].has_arg == no_argument && long_options[x].val <= 'z') { + putchar(long_options[x].val); + } + } + printf("] {DELIVERY_FILE}\n"); + + int width = get_option_max_width(long_options); + for (int x = 0; long_options[x].name != 0; x++) { + char tmp[STASIS_NAME_MAX] = {0}; + char output[sizeof(tmp)] = {0}; + char opt_long[50] = {0}; // --? [ARG]? + char opt_short[50] = {0}; // -? [ARG]? + + strcat(opt_long, "--"); + strcat(opt_long, long_options[x].name); + if (long_options[x].has_arg) { + strcat(opt_long, " ARG"); + } + + if (long_options[x].val <= 'z') { + strcat(opt_short, "-"); + opt_short[1] = (char) long_options[x].val; + if (long_options[x].has_arg) { + strcat(opt_short, " ARG"); + } + } else { + strcat(opt_short, " "); + } + + sprintf(tmp, " %%-%ds\t%%s\t\t%%s", width + 4); + sprintf(output, tmp, opt_long, opt_short, long_options_help[x]); + puts(output); + } +} + + + +static void check_system_requirements(struct Delivery *ctx) { + const char *tools_required[] = { + "rsync", + NULL, + }; + + msg(STASIS_MSG_L1, "Checking system requirements\n"); + for (size_t i = 0; tools_required[i] != NULL; i++) { + if (!find_program(tools_required[i])) { + msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "'%s' must be installed.\n", tools_required[i]); + exit(1); + } + } + + if (!globals.tmpdir && !ctx->storage.tmpdir) { + delivery_init_tmpdir(ctx); + } + + struct DockerCapabilities dcap; + if (!docker_capable(&dcap)) { + 
msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Docker is broken\n"); + msg(STASIS_MSG_L3, "Available: %s\n", dcap.available ? "Yes" : "No"); + msg(STASIS_MSG_L3, "Usable: %s\n", dcap.usable ? "Yes" : "No"); + msg(STASIS_MSG_L3, "Podman [Docker Emulation]: %s\n", dcap.podman ? "Yes" : "No"); + msg(STASIS_MSG_L3, "Build plugin(s): "); + if (dcap.usable) { + if (dcap.build & STASIS_DOCKER_BUILD) { + printf("build "); + } + if (dcap.build & STASIS_DOCKER_BUILD_X) { + printf("buildx "); + } + puts(""); + } else { + printf("N/A\n"); + } + + // disable docker builds + globals.enable_docker = false; + } +} + +int main(int argc, char *argv[]) { + struct Delivery ctx; + struct Process proc = { + .f_stdout = "", + .f_stderr = "", + .redirect_stderr = 0, + }; + char env_name[STASIS_NAME_MAX] = {0}; + char env_name_testing[STASIS_NAME_MAX] = {0}; + char *delivery_input = NULL; + char *config_input = NULL; + char installer_url[PATH_MAX]; + char python_override_version[STASIS_NAME_MAX]; + int user_disabled_docker = false; + + memset(env_name, 0, sizeof(env_name)); + memset(env_name_testing, 0, sizeof(env_name_testing)); + memset(installer_url, 0, sizeof(installer_url)); + memset(python_override_version, 0, sizeof(python_override_version)); + memset(&proc, 0, sizeof(proc)); + memset(&ctx, 0, sizeof(ctx)); + + int c; + int option_index = 0; + while ((c = getopt_long(argc, argv, "hVCc:p:vU", long_options, &option_index)) != -1) { + switch (c) { + case 'h': + usage(path_basename(argv[0])); + exit(0); + case 'V': + puts(VERSION); + exit(0); + case 'c': + config_input = strdup(optarg); + break; + case 'C': + globals.continue_on_error = true; + break; + case 'p': + strcpy(python_override_version, optarg); + break; + case OPT_ALWAYS_UPDATE_BASE: + globals.always_update_base_environment = true; + break; + case 'U': + setenv("PYTHONUNBUFFERED", "1", 1); + fflush(stdout); + fflush(stderr); + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + break; + case 'v': + globals.verbose 
= true; + break; + case OPT_NO_DOCKER: + globals.enable_docker = false; + user_disabled_docker = true; + break; + case OPT_NO_ARTIFACTORY: + globals.enable_artifactory = false; + break; + case OPT_NO_TESTING: + globals.enable_testing = false; + break; + case '?': + default: + exit(1); + } + } + + if (optind < argc) { + while (optind < argc) { + // use first positional argument + delivery_input = argv[optind++]; + break; + } + } + + if (!delivery_input) { + fprintf(stderr, "error: a DELIVERY_FILE is required\n"); + usage(path_basename(argv[0])); + exit(1); + } + + printf(BANNER, VERSION, AUTHOR); + + msg(STASIS_MSG_L1, "Setup\n"); + + // Expose variables for use with the template engine + // NOTE: These pointers are populated by delivery_init() so please avoid using + // tpl_render() until then. + tpl_register("meta.name", &ctx.meta.name); + tpl_register("meta.version", &ctx.meta.version); + tpl_register("meta.codename", &ctx.meta.codename); + tpl_register("meta.mission", &ctx.meta.mission); + tpl_register("meta.python", &ctx.meta.python); + tpl_register("meta.python_compact", &ctx.meta.python_compact); + tpl_register("info.time_str_epoch", &ctx.info.time_str_epoch); + tpl_register("info.release_name", &ctx.info.release_name); + tpl_register("info.build_name", &ctx.info.build_name); + tpl_register("info.build_number", &ctx.info.build_number); + tpl_register("storage.tmpdir", &ctx.storage.tmpdir); + tpl_register("storage.output_dir", &ctx.storage.output_dir); + tpl_register("storage.delivery_dir", &ctx.storage.delivery_dir); + tpl_register("storage.conda_artifact_dir", &ctx.storage.conda_artifact_dir); + tpl_register("storage.wheel_artifact_dir", &ctx.storage.wheel_artifact_dir); + tpl_register("storage.build_sources_dir", &ctx.storage.build_sources_dir); + tpl_register("storage.build_docker_dir", &ctx.storage.build_docker_dir); + tpl_register("storage.results_dir", &ctx.storage.results_dir); + tpl_register("conda.installer_baseurl", &ctx.conda.installer_baseurl); + 
tpl_register("conda.installer_name", &ctx.conda.installer_name); + tpl_register("conda.installer_version", &ctx.conda.installer_version); + tpl_register("conda.installer_arch", &ctx.conda.installer_arch); + tpl_register("conda.installer_platform", &ctx.conda.installer_platform); + tpl_register("deploy.jfrog.repo", &globals.jfrog.repo); + tpl_register("deploy.jfrog.url", &globals.jfrog.url); + tpl_register("deploy.docker.registry", &ctx.deploy.docker.registry); + tpl_register("workaround.tox_posargs", &globals.workaround.tox_posargs); + + // Set up PREFIX/etc directory information + // The user may manipulate the base directory path with STASIS_SYSCONFDIR + // environment variable + char stasis_sysconfdir_tmp[PATH_MAX]; + if (getenv("STASIS_SYSCONFDIR")) { + strncpy(stasis_sysconfdir_tmp, getenv("STASIS_SYSCONFDIR"), sizeof(stasis_sysconfdir_tmp) - 1); + } else { + strncpy(stasis_sysconfdir_tmp, STASIS_SYSCONFDIR, sizeof(stasis_sysconfdir_tmp) - 1); + } + + globals.sysconfdir = realpath(stasis_sysconfdir_tmp, NULL); + if (!globals.sysconfdir) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to resolve path to configuration directory: %s\n", stasis_sysconfdir_tmp); + exit(1); + } + + // Override Python version from command-line, if any + if (strlen(python_override_version)) { + guard_free(ctx.meta.python); + ctx.meta.python = strdup(python_override_version); + guard_free(ctx.meta.python_compact); + ctx.meta.python_compact = to_short_version(ctx.meta.python); + } + + if (!config_input) { + // no configuration passed by argument. use basic config. 
+ char cfgfile[PATH_MAX * 2]; + sprintf(cfgfile, "%s/%s", globals.sysconfdir, "stasis.ini"); + if (!access(cfgfile, F_OK | R_OK)) { + config_input = strdup(cfgfile); + } else { + msg(STASIS_MSG_WARN, "STASIS global configuration is not readable, or does not exist: %s", cfgfile); + } + } + + if (config_input) { + msg(STASIS_MSG_L2, "Reading STASIS global configuration: %s\n", config_input); + ctx._stasis_ini_fp.cfg = ini_open(config_input); + if (!ctx._stasis_ini_fp.cfg) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read config file: %s, %s\n", delivery_input, strerror(errno)); + exit(1); + } + ctx._stasis_ini_fp.cfg_path = strdup(config_input); + guard_free(config_input); + } + + msg(STASIS_MSG_L2, "Reading STASIS delivery configuration: %s\n", delivery_input); + ctx._stasis_ini_fp.delivery = ini_open(delivery_input); + if (!ctx._stasis_ini_fp.delivery) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read delivery file: %s, %s\n", delivery_input, strerror(errno)); + exit(1); + } + ctx._stasis_ini_fp.delivery_path = strdup(delivery_input); + + msg(STASIS_MSG_L2, "Bootstrapping delivery context\n"); + if (bootstrap_build_info(&ctx)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to bootstrap delivery context\n"); + exit(1); + } + + msg(STASIS_MSG_L2, "Initializing delivery context\n"); + if (delivery_init(&ctx)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to initialize delivery context\n"); + exit(1); + } + check_system_requirements(&ctx); + + msg(STASIS_MSG_L2, "Configuring JFrog CLI\n"); + if (delivery_init_artifactory(&ctx)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "JFrog CLI configuration failed\n"); + exit(1); + } + + runtime_apply(ctx.runtime.environ); + strcpy(env_name, ctx.info.release_name); + strcpy(env_name_testing, env_name); + strcat(env_name_testing, "-test"); + + msg(STASIS_MSG_L1, "Overview\n"); + delivery_meta_show(&ctx); + delivery_conda_show(&ctx); + delivery_tests_show(&ctx); + if (globals.verbose) { + 
//delivery_runtime_show(&ctx); + } + + msg(STASIS_MSG_L1, "Conda setup\n"); + delivery_get_installer_url(&ctx, installer_url); + msg(STASIS_MSG_L2, "Downloading: %s\n", installer_url); + if (delivery_get_installer(&ctx, installer_url)) { + msg(STASIS_MSG_ERROR, "download failed: %s\n", installer_url); + exit(1); + } + + // Unlikely to occur: this should help prevent rmtree() from destroying your entire filesystem + // if path is "/" then, die + // or if empty string, die + if (!strcmp(ctx.storage.conda_install_prefix, DIR_SEP) || !strlen(ctx.storage.conda_install_prefix)) { + fprintf(stderr, "error: ctx.storage.conda_install_prefix is malformed!\n"); + exit(1); + } + + msg(STASIS_MSG_L2, "Installing: %s\n", ctx.conda.installer_name); + delivery_install_conda(ctx.conda.installer_path, ctx.storage.conda_install_prefix); + + msg(STASIS_MSG_L2, "Configuring: %s\n", ctx.storage.conda_install_prefix); + delivery_conda_enable(&ctx, ctx.storage.conda_install_prefix); + + char *pathvar = NULL; + pathvar = getenv("PATH"); + if (!pathvar) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "PATH variable is not set. 
Cannot continue.\n"); + exit(1); + } else { + char pathvar_tmp[STASIS_BUFSIZ]; + sprintf(pathvar_tmp, "%s/bin:%s", ctx.storage.conda_install_prefix, pathvar); + setenv("PATH", pathvar_tmp, 1); + pathvar = NULL; + } + + msg(STASIS_MSG_L1, "Creating release environment(s)\n"); + if (ctx.meta.based_on && strlen(ctx.meta.based_on)) { + if (conda_env_remove(env_name)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove release environment: %s\n", env_name_testing); + exit(1); + } + msg(STASIS_MSG_L2, "Based on release: %s\n", ctx.meta.based_on); + if (conda_env_create_from_uri(env_name, ctx.meta.based_on)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install release environment using configuration file\n"); + exit(1); + } + + if (conda_env_remove(env_name_testing)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove testing environment\n"); + exit(1); + } + if (conda_env_create_from_uri(env_name_testing, ctx.meta.based_on)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install testing environment using configuration file\n"); + exit(1); + } + } else { + if (conda_env_create(env_name, ctx.meta.python, NULL)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to create release environment\n"); + exit(1); + } + if (conda_env_create(env_name_testing, ctx.meta.python, NULL)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to create release environment\n"); + exit(1); + } + } + + // Activate test environment + msg(STASIS_MSG_L1, "Activating test environment\n"); + if (conda_activate(ctx.storage.conda_install_prefix, env_name_testing)) { + fprintf(stderr, "failed to activate test environment\n"); + exit(1); + } + + delivery_gather_tool_versions(&ctx); + if (!ctx.conda.tool_version) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Could not determine conda version\n"); + exit(1); + } + if (!ctx.conda.tool_build_version) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Could not determine conda-build version\n"); + exit(1); + } + + if (pip_exec("install 
build")) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "'build' tool installation failed"); + exit(1); + } + + // Execute configuration-defined tests + if (globals.enable_testing) { + msg(STASIS_MSG_L1, "Begin test execution\n"); + delivery_tests_run(&ctx); + msg(STASIS_MSG_L1, "Rewriting test results\n"); + delivery_fixup_test_results(&ctx); + } else { + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Test execution is disabled\n"); + } + + msg(STASIS_MSG_L1, "Generating deferred package listing\n"); + // Test succeeded so move on to producing package artifacts + delivery_defer_packages(&ctx, DEFER_CONDA); + delivery_defer_packages(&ctx, DEFER_PIP); + + if (ctx.conda.conda_packages_defer && strlist_count(ctx.conda.conda_packages_defer)) { + msg(STASIS_MSG_L2, "Building Conda recipe(s)\n"); + if (delivery_build_recipes(&ctx)) { + exit(1); + } + msg(STASIS_MSG_L3, "Copying artifacts\n"); + if (delivery_copy_conda_artifacts(&ctx)) { + exit(1); + } + msg(STASIS_MSG_L3, "Indexing artifacts\n"); + if (delivery_index_conda_artifacts(&ctx)) { + exit(1); + } + } + + if (strlist_count(ctx.conda.pip_packages_defer)) { + if (!(ctx.conda.wheels_packages = delivery_build_wheels(&ctx))) { + exit(1); + } + if (delivery_index_wheel_artifacts(&ctx)) { + exit(1); + } + + } + + // Populate the release environment + msg(STASIS_MSG_L1, "Populating release environment\n"); + msg(STASIS_MSG_L2, "Installing conda packages\n"); + if (strlist_count(ctx.conda.conda_packages)) { + if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA, (struct StrList *[]) {ctx.conda.conda_packages, NULL})) { + exit(1); + } + } + if (strlist_count(ctx.conda.conda_packages_defer)) { + msg(STASIS_MSG_L3, "Installing deferred conda packages\n"); + if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA | INSTALL_PKG_CONDA_DEFERRED, (struct StrList *[]) {ctx.conda.conda_packages_defer, NULL})) { + exit(1); + } + } else { + msg(STASIS_MSG_L3, 
"No deferred conda packages\n"); + } + + msg(STASIS_MSG_L2, "Installing pip packages\n"); + if (strlist_count(ctx.conda.pip_packages)) { + if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP, (struct StrList *[]) {ctx.conda.pip_packages, NULL})) { + exit(1); + } + } + + if (strlist_count(ctx.conda.pip_packages_defer)) { + msg(STASIS_MSG_L3, "Installing deferred pip packages\n"); + if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP | INSTALL_PKG_PIP_DEFERRED, (struct StrList *[]) {ctx.conda.pip_packages_defer, NULL})) { + exit(1); + } + } else { + msg(STASIS_MSG_L3, "No deferred pip packages\n"); + } + + conda_exec("list"); + + msg(STASIS_MSG_L1, "Creating release\n"); + msg(STASIS_MSG_L2, "Exporting delivery configuration\n"); + if (!pushd(ctx.storage.cfgdump_dir)) { + char filename[PATH_MAX] = {0}; + sprintf(filename, "%s.ini", ctx.info.release_name); + FILE *spec = fopen(filename, "w+"); + if (!spec) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename); + exit(1); + } + ini_write(ctx._stasis_ini_fp.delivery, &spec, INI_WRITE_RAW); + fclose(spec); + + memset(filename, 0, sizeof(filename)); + sprintf(filename, "%s-rendered.ini", ctx.info.release_name); + spec = fopen(filename, "w+"); + if (!spec) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename); + exit(1); + } + ini_write(ctx._stasis_ini_fp.delivery, &spec, INI_WRITE_PRESERVE); + fclose(spec); + popd(); + } else { + SYSERROR("Failed to enter directory: %s", ctx.storage.delivery_dir); + exit(1); + } + + msg(STASIS_MSG_L2, "Exporting %s\n", env_name_testing); + if (conda_env_export(env_name_testing, ctx.storage.delivery_dir, env_name_testing)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", env_name_testing); + exit(1); + } + + msg(STASIS_MSG_L2, "Exporting %s\n", env_name); + if (conda_env_export(env_name, ctx.storage.delivery_dir, env_name)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, 
"failed %s\n", env_name); + exit(1); + } + + // Rewrite release environment output (i.e. set package origin(s) to point to the deployment server, etc.) + char specfile[PATH_MAX]; + sprintf(specfile, "%s/%s.yml", ctx.storage.delivery_dir, env_name); + msg(STASIS_MSG_L3, "Rewriting release spec file (stage 1): %s\n", path_basename(specfile)); + delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_1); + + msg(STASIS_MSG_L1, "Rendering mission templates\n"); + delivery_mission_render_files(&ctx); + + int want_docker = ini_section_search(&ctx._stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:docker") ? true : false; + int want_artifactory = ini_section_search(&ctx._stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:artifactory") ? true : false; + + if (want_docker) { + if (user_disabled_docker) { + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled by CLI argument\n"); + } else { + char dockerfile[PATH_MAX] = {0}; + sprintf(dockerfile, "%s/%s", ctx.storage.build_docker_dir, "Dockerfile"); + if (globals.enable_docker) { + if (!access(dockerfile, F_OK)) { + msg(STASIS_MSG_L1, "Building Docker image\n"); + if (delivery_docker(&ctx)) { + msg(STASIS_MSG_L1 | STASIS_MSG_ERROR, "Failed to build docker image!\n"); + COE_CHECK_ABORT(1, "Failed to build docker image"); + } + } else { + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. No Dockerfile found in %s\n", ctx.storage.build_docker_dir); + } + } else { + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. System configuration error\n"); + } + } + } else { + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. 
deploy:docker is not configured\n"); + } + + msg(STASIS_MSG_L3, "Rewriting release spec file (stage 2): %s\n", path_basename(specfile)); + delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_2); + + msg(STASIS_MSG_L1, "Dumping metadata\n"); + if (delivery_dump_metadata(&ctx)) { + msg(STASIS_MSG_L1 | STASIS_MSG_ERROR, "Metadata dump failed\n"); + } + + if (want_artifactory) { + if (globals.enable_artifactory) { + msg(STASIS_MSG_L1, "Uploading artifacts\n"); + delivery_artifact_upload(&ctx); + } else { + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Artifact uploading is disabled\n"); + } + } else { + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Artifact uploading is disabled. deploy:artifactory is not configured\n"); + } + + msg(STASIS_MSG_L1, "Cleaning up\n"); + delivery_free(&ctx); + globals_free(); + tpl_free(); + + msg(STASIS_MSG_L1, "Done!\n"); + return 0; +} + diff --git a/src/str.c b/src/str.c index 16088b6..6afbf73 100644 --- a/src/str.c +++ b/src/str.c @@ -115,7 +115,7 @@ char** split(char *_sptr, const char* delim, size_t max) pos = token - sptr; break; } - result[i] = calloc(OMC_BUFSIZ, sizeof(char)); + result[i] = calloc(STASIS_BUFSIZ, sizeof(char)); if (!result[i]) { return NULL; } @@ -125,7 +125,7 @@ char** split(char *_sptr, const char* delim, size_t max) // pos is non-zero when maximum split is reached if (pos) { // append the remaining string contents to array - result[i] = calloc(OMC_BUFSIZ, sizeof(char)); + result[i] = calloc(STASIS_BUFSIZ, sizeof(char)); if (!result[i]) { return NULL; } @@ -319,13 +319,13 @@ void strsort(char **arr, unsigned int sort_mode) { // Default mode is alphabetic sort compar fn = _strsort_alpha_compare; - if (sort_mode == OMC_SORT_LEN_DESCENDING) { + if (sort_mode == STASIS_SORT_LEN_DESCENDING) { fn = _strsort_dsc_compare; - } else if (sort_mode == OMC_SORT_LEN_ASCENDING) { + } else if (sort_mode == STASIS_SORT_LEN_ASCENDING) { fn = _strsort_asc_compare; - } else if (sort_mode == OMC_SORT_ALPHA) { + } else if (sort_mode == 
STASIS_SORT_ALPHA) { fn = _strsort_alpha_compare; // ^ still selectable though ^ - } else if (sort_mode == OMC_SORT_NUMERIC) { + } else if (sort_mode == STASIS_SORT_NUMERIC) { fn = _strsort_numeric_compare; } diff --git a/src/system.c b/src/system.c index d5e77ce..526f0ec 100644 --- a/src/system.c +++ b/src/system.c @@ -1,5 +1,5 @@ #include "system.h" -#include "omc.h" +#include "core.h" int shell(struct Process *proc, char *args) { struct Process selfproc; @@ -106,7 +106,7 @@ int shell_safe(struct Process *proc, char *args) { char buf[1024] = {0}; int result; - char *invalid_ch = strpbrk(args, OMC_SHELL_SAFE_RESTRICT); + char *invalid_ch = strpbrk(args, STASIS_SHELL_SAFE_RESTRICT); if (invalid_ch) { args = NULL; } @@ -138,10 +138,10 @@ int shell_safe(struct Process *proc, char *args) { } char *shell_output(const char *command, int *status) { - const size_t initial_size = OMC_BUFSIZ; + const size_t initial_size = STASIS_BUFSIZ; size_t current_size = initial_size; char *result = NULL; - char line[OMC_BUFSIZ]; + char line[STASIS_BUFSIZ]; FILE *pp; errno = 0; diff --git a/src/template.c b/src/template.c index 0a57232..819fe92 100644 --- a/src/template.c +++ b/src/template.c @@ -216,7 +216,7 @@ char *tpl_render(char *str) { char *env_val = getenv(key); value = strdup(env_val ? env_val : ""); } else if (do_func) { // {{ func:NAME(a, ...) 
}} - char func_name_temp[OMC_NAME_MAX] = {0}; + char func_name_temp[STASIS_NAME_MAX] = {0}; strcpy(func_name_temp, type_stop + 1); char *param_begin = strchr(func_name_temp, '('); if (!param_begin) { diff --git a/src/utils.c b/src/utils.c index 86622ad..fb22f59 100644 --- a/src/utils.c +++ b/src/utils.c @@ -1,7 +1,7 @@ #include <stdarg.h> -#include "omc.h" +#include "core.h" -char *dirstack[OMC_DIRSTACK_MAX]; +char *dirstack[STASIS_DIRSTACK_MAX]; const ssize_t dirstack_max = sizeof(dirstack) / sizeof(dirstack[0]); ssize_t dirstack_len = 0; @@ -186,8 +186,8 @@ char **file_readlines(const char *filename, size_t start, size_t limit, ReaderFn } // Allocate buffer - if ((buffer = calloc(OMC_BUFSIZ, sizeof(char))) == NULL) { - SYSERROR("unable to allocate %d bytes for buffer", OMC_BUFSIZ); + if ((buffer = calloc(STASIS_BUFSIZ, sizeof(char))) == NULL) { + SYSERROR("unable to allocate %d bytes for buffer", STASIS_BUFSIZ); if (!use_stdin) { fclose(fp); } @@ -195,7 +195,7 @@ char **file_readlines(const char *filename, size_t start, size_t limit, ReaderFn } // count number the of lines in the file - while ((fgets(buffer, OMC_BUFSIZ - 1, fp)) != NULL) { + while ((fgets(buffer, STASIS_BUFSIZ - 1, fp)) != NULL) { lines++; } @@ -232,7 +232,7 @@ char **file_readlines(const char *filename, size_t start, size_t limit, ReaderFn continue; } - if (fgets(buffer, OMC_BUFSIZ - 1, fp) == NULL) { + if (fgets(buffer, STASIS_BUFSIZ - 1, fp) == NULL) { break; } @@ -249,7 +249,7 @@ char **file_readlines(const char *filename, size_t start, size_t limit, ReaderFn } } result[i] = strdup(buffer); - memset(buffer, '\0', OMC_BUFSIZ); + memset(buffer, '\0', STASIS_BUFSIZ); } guard_free(buffer); @@ -396,12 +396,12 @@ void msg(unsigned type, char *fmt, ...) 
{ char header[255]; char status[20]; - if (type & OMC_MSG_NOP) { + if (type & STASIS_MSG_NOP) { // quiet mode return; } - if (!globals.verbose && type & OMC_MSG_RESTRICT) { + if (!globals.verbose && type & STASIS_MSG_RESTRICT) { // Verbose mode is not active return; } @@ -413,39 +413,39 @@ void msg(unsigned type, char *fmt, ...) { va_start(args, fmt); stream = stdout; - fprintf(stream, "%s", OMC_COLOR_RESET); - if (type & OMC_MSG_ERROR) { + fprintf(stream, "%s", STASIS_COLOR_RESET); + if (type & STASIS_MSG_ERROR) { // for error output stream = stderr; - fprintf(stream, "%s", OMC_COLOR_RED); + fprintf(stream, "%s", STASIS_COLOR_RED); strcpy(status, " ERROR: "); - } else if (type & OMC_MSG_WARN) { + } else if (type & STASIS_MSG_WARN) { stream = stderr; - fprintf(stream, "%s", OMC_COLOR_YELLOW); + fprintf(stream, "%s", STASIS_COLOR_YELLOW); strcpy(status, " WARNING: "); } else { - fprintf(stream, "%s", OMC_COLOR_GREEN); + fprintf(stream, "%s", STASIS_COLOR_GREEN); strcpy(status, " "); } - if (type & OMC_MSG_L1) { - sprintf(header, "==>%s" OMC_COLOR_RESET OMC_COLOR_WHITE, status); - } else if (type & OMC_MSG_L2) { - sprintf(header, " ->%s" OMC_COLOR_RESET, status); - } else if (type & OMC_MSG_L3) { - sprintf(header, OMC_COLOR_BLUE " ->%s" OMC_COLOR_RESET, status); + if (type & STASIS_MSG_L1) { + sprintf(header, "==>%s" STASIS_COLOR_RESET STASIS_COLOR_WHITE, status); + } else if (type & STASIS_MSG_L2) { + sprintf(header, " ->%s" STASIS_COLOR_RESET, status); + } else if (type & STASIS_MSG_L3) { + sprintf(header, STASIS_COLOR_BLUE " ->%s" STASIS_COLOR_RESET, status); } fprintf(stream, "%s", header); vfprintf(stream, fmt, args); - fprintf(stream, "%s", OMC_COLOR_RESET); + fprintf(stream, "%s", STASIS_COLOR_RESET); va_end(args); } void debug_shell() { - msg(OMC_MSG_L1 | OMC_MSG_WARN, "ENTERING OMC DEBUG SHELL\n" OMC_COLOR_RESET); - system("/bin/bash -c 'PS1=\"(OMC DEBUG) \\W $ \" bash --norc --noprofile'"); - msg(OMC_MSG_L1 | OMC_MSG_WARN, "EXITING OMC DEBUG SHELL\n" 
OMC_COLOR_RESET); + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "ENTERING STASIS DEBUG SHELL\n" STASIS_COLOR_RESET); + system("/bin/bash -c 'PS1=\"(STASIS DEBUG) \\W $ \" bash --norc --noprofile'"); + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "EXITING STASIS DEBUG SHELL\n" STASIS_COLOR_RESET); exit(255); } @@ -460,7 +460,7 @@ char *xmkstemp(FILE **fp, const char *mode) { strcpy(tmpdir, "/tmp"); } memset(t_name, 0, sizeof(t_name)); - sprintf(t_name, "%s/%s", tmpdir, "OMC.XXXXXX"); + sprintf(t_name, "%s/%s", tmpdir, "STASIS.XXXXXX"); fd = mkstemp(t_name); *fp = fdopen(fd, mode); -- cgit