Diffstat (limited to 'src/cli')
-rw-r--r--  src/cli/CMakeLists.txt                        2
-rw-r--r--  src/cli/stasis/CMakeLists.txt                12
-rw-r--r--  src/cli/stasis/args.c                       102
-rw-r--r--  src/cli/stasis/args.h                        23
-rw-r--r--  src/cli/stasis/callbacks.c                   31
-rw-r--r--  src/cli/stasis/callbacks.h                   10
-rw-r--r--  src/cli/stasis/stasis_main.c                544
-rw-r--r--  src/cli/stasis/system_requirements.c         82
-rw-r--r--  src/cli/stasis/system_requirements.h         13
-rw-r--r--  src/cli/stasis/tpl.c                         46
-rw-r--r--  src/cli/stasis/tpl.h                         10
-rw-r--r--  src/cli/stasis_indexer/CMakeLists.txt         6
-rw-r--r--  src/cli/stasis_indexer/stasis_indexer.c     949
13 files changed, 1830 insertions, 0 deletions
diff --git a/src/cli/CMakeLists.txt b/src/cli/CMakeLists.txt
new file mode 100644
index 0000000..92a21b7
--- /dev/null
+++ b/src/cli/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_subdirectory(stasis)
+add_subdirectory(stasis_indexer)
\ No newline at end of file
diff --git a/src/cli/stasis/CMakeLists.txt b/src/cli/stasis/CMakeLists.txt
new file mode 100644
index 0000000..ff7fd88
--- /dev/null
+++ b/src/cli/stasis/CMakeLists.txt
@@ -0,0 +1,12 @@
+include_directories(${CMAKE_SOURCE_DIR})
+add_executable(stasis
+ stasis_main.c
+ args.c
+ callbacks.c
+ system_requirements.c
+ tpl.c
+)
+target_link_libraries(stasis PRIVATE stasis_core)
+target_link_libraries(stasis PUBLIC LibXml2::LibXml2)
+
+install(TARGETS stasis RUNTIME)
diff --git a/src/cli/stasis/args.c b/src/cli/stasis/args.c
new file mode 100644
index 0000000..ed11ab9
--- /dev/null
+++ b/src/cli/stasis/args.c
@@ -0,0 +1,102 @@
+#include "core.h"
+#include "args.h"
+
+struct option long_options[] = {
+ {"help", no_argument, 0, 'h'},
+ {"version", no_argument, 0, 'V'},
+ {"continue-on-error", no_argument, 0, 'C'},
+ {"config", required_argument, 0, 'c'},
+ {"cpu-limit", required_argument, 0, 'l'},
+ {"pool-status-interval", required_argument, 0, OPT_POOL_STATUS_INTERVAL},
+ {"python", required_argument, 0, 'p'},
+ {"verbose", no_argument, 0, 'v'},
+ {"unbuffered", no_argument, 0, 'U'},
+ {"update-base", no_argument, 0, OPT_ALWAYS_UPDATE_BASE},
+ {"fail-fast", no_argument, 0, OPT_FAIL_FAST},
+ {"overwrite", no_argument, 0, OPT_OVERWRITE},
+ {"no-docker", no_argument, 0, OPT_NO_DOCKER},
+ {"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY},
+ {"no-artifactory-build-info", no_argument, 0, OPT_NO_ARTIFACTORY_BUILD_INFO},
+ {"no-testing", no_argument, 0, OPT_NO_TESTING},
+ {"no-parallel", no_argument, 0, OPT_NO_PARALLEL},
+ {"no-rewrite", no_argument, 0, OPT_NO_REWRITE_SPEC_STAGE_2},
+ {0, 0, 0, 0},
+};
+
+const char *long_options_help[] = {
+ "Display this usage statement",
+ "Display program version",
+ "Allow tests to fail",
+ "Read configuration file",
+ "Number of processes to spawn concurrently (default: cpus - 1)",
+ "Report task status every n seconds (default: 30)",
+ "Override version of Python in configuration",
+ "Increase output verbosity",
+ "Disable line buffering",
+ "Update conda installation prior to STASIS environment creation",
+ "On error, immediately terminate all tasks",
+ "Overwrite an existing release",
+ "Do not build docker images",
+ "Do not upload artifacts to Artifactory",
+ "Do not upload build info objects to Artifactory",
+ "Do not execute test scripts",
+ "Do not execute tests in parallel",
+ "Do not rewrite paths and URLs in output files",
+ NULL,
+};
+
+static int get_option_max_width(struct option option[]) {
+ int i = 0;
+ int max = 0;
+ const int indent = 4;
+ while (option[i].name != 0) {
+ int len = (int) strlen(option[i].name);
+ if (option[i].has_arg) {
+ len += indent;
+ }
+ if (len > max) {
+ max = len;
+ }
+ i++;
+ }
+ return max;
+}
+
+void usage(char *progname) {
+ printf("usage: %s ", progname);
+ printf("[-");
+ for (int x = 0; long_options[x].val != 0; x++) {
+ if (long_options[x].has_arg == no_argument && long_options[x].val <= 'z') {
+ putchar(long_options[x].val);
+ }
+ }
+ printf("] {DELIVERY_FILE}\n");
+
+ int width = get_option_max_width(long_options);
+ for (int x = 0; long_options[x].name != 0; x++) {
+ char tmp[STASIS_NAME_MAX] = {0};
+ char output[sizeof(tmp)] = {0};
+ char opt_long[50] = {0}; // --? [ARG]?
+ char opt_short[50] = {0}; // -? [ARG]?
+
+ strcat(opt_long, "--");
+ strcat(opt_long, long_options[x].name);
+ if (long_options[x].has_arg) {
+ strcat(opt_long, " ARG");
+ }
+
+ if (long_options[x].val <= 'z') {
+ strcat(opt_short, "-");
+ opt_short[1] = (char) long_options[x].val;
+ if (long_options[x].has_arg) {
+ strcat(opt_short, " ARG");
+ }
+ } else {
+ strcat(opt_short, " ");
+ }
+
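+        // Build a width-aware row format first (e.g. " %-24s\t%s\t\t%s"), then use it to render the row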
+ sprintf(tmp, " %%-%ds\t%%s\t\t%%s", width + 4);
+ sprintf(output, tmp, opt_long, opt_short, long_options_help[x]);
+ puts(output);
+ }
+}
diff --git a/src/cli/stasis/args.h b/src/cli/stasis/args.h
new file mode 100644
index 0000000..932eac7
--- /dev/null
+++ b/src/cli/stasis/args.h
@@ -0,0 +1,23 @@
+#ifndef STASIS_ARGS_H
+#define STASIS_ARGS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+
+#define OPT_ALWAYS_UPDATE_BASE 1000
+#define OPT_NO_DOCKER 1001
+#define OPT_NO_ARTIFACTORY 1002
+#define OPT_NO_ARTIFACTORY_BUILD_INFO 1003
+#define OPT_NO_TESTING 1004
+#define OPT_OVERWRITE 1005
+#define OPT_NO_REWRITE_SPEC_STAGE_2 1006
+#define OPT_FAIL_FAST 1007
+#define OPT_NO_PARALLEL 1008
+#define OPT_POOL_STATUS_INTERVAL 1009
+
+extern struct option long_options[];
+void usage(char *progname);
+
+#endif //STASIS_ARGS_H
diff --git a/src/cli/stasis/callbacks.c b/src/cli/stasis/callbacks.c
new file mode 100644
index 0000000..aeaa25d
--- /dev/null
+++ b/src/cli/stasis/callbacks.c
@@ -0,0 +1,31 @@
+#include "callbacks.h"
+
+int callback_except_jf(const void *a, const void *b) {
+ const struct EnvCtl_Item *item = a;
+ const char *name = b;
+
+ if (!globals.enable_artifactory) {
+ return STASIS_ENVCTL_RET_IGNORE;
+ }
+
+ if (envctl_check_required(item->flags)) {
+ const char *content = getenv(name);
+ if (!content || isempty((char *) content)) {
+ return STASIS_ENVCTL_RET_FAIL;
+ }
+ }
+
+ return STASIS_ENVCTL_RET_SUCCESS;
+}
+
+int callback_except_gh(const void *a, const void *b) {
+ const struct EnvCtl_Item *item = a;
+ const char *name = b;
+ //printf("GH exception check: %s\n", name);
+ if (envctl_check_required(item->flags) && envctl_check_present(item, name)) {
+ return STASIS_ENVCTL_RET_SUCCESS;
+ }
+
+ return STASIS_ENVCTL_RET_FAIL;
+}
+
diff --git a/src/cli/stasis/callbacks.h b/src/cli/stasis/callbacks.h
new file mode 100644
index 0000000..369ce56
--- /dev/null
+++ b/src/cli/stasis/callbacks.h
@@ -0,0 +1,10 @@
+#ifndef STASIS_CALLBACKS_H
+#define STASIS_CALLBACKS_H
+
+#include "core.h"
+#include "envctl.h"
+
+int callback_except_jf(const void *a, const void *b);
+int callback_except_gh(const void *a, const void *b);
+
+#endif //STASIS_CALLBACKS_H
diff --git a/src/cli/stasis/stasis_main.c b/src/cli/stasis/stasis_main.c
new file mode 100644
index 0000000..5325892
--- /dev/null
+++ b/src/cli/stasis/stasis_main.c
@@ -0,0 +1,544 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include "core.h"
+#include "delivery.h"
+
+// local includes
+#include "args.h"
+#include "system_requirements.h"
+#include "tpl.h"
+
+
+int main(int argc, char *argv[]) {
+ struct Delivery ctx;
+ struct Process proc = {
+ .f_stdout = "",
+ .f_stderr = "",
+ .redirect_stderr = 0,
+ };
+ char env_name[STASIS_NAME_MAX] = {0};
+ char env_name_testing[STASIS_NAME_MAX] = {0};
+ char *delivery_input = NULL;
+ char *config_input = NULL;
+ char installer_url[PATH_MAX];
+ char python_override_version[STASIS_NAME_MAX];
+ int user_disabled_docker = false;
+ globals.cpu_limit = get_cpu_count();
+ if (globals.cpu_limit > 1) {
+ globals.cpu_limit--; // max - 1
+ }
+
+ memset(env_name, 0, sizeof(env_name));
+ memset(env_name_testing, 0, sizeof(env_name_testing));
+ memset(installer_url, 0, sizeof(installer_url));
+ memset(python_override_version, 0, sizeof(python_override_version));
+ memset(&proc, 0, sizeof(proc));
+ memset(&ctx, 0, sizeof(ctx));
+
+ int c;
+ int option_index = 0;
+    while ((c = getopt_long(argc, argv, "hVCc:p:l:vU", long_options, &option_index)) != -1) {
+ switch (c) {
+ case 'h':
+ usage(path_basename(argv[0]));
+ exit(0);
+ case 'V':
+ puts(VERSION);
+ exit(0);
+ case 'c':
+ config_input = strdup(optarg);
+ break;
+ case 'C':
+ globals.continue_on_error = true;
+ break;
+ case 'p':
+ strcpy(python_override_version, optarg);
+ break;
+ case 'l':
+ globals.cpu_limit = strtol(optarg, NULL, 10);
+ if (globals.cpu_limit <= 1) {
+ globals.cpu_limit = 1;
+ globals.enable_parallel = false; // No point
+ }
+ break;
+ case OPT_ALWAYS_UPDATE_BASE:
+ globals.always_update_base_environment = true;
+ break;
+ case OPT_FAIL_FAST:
+ globals.parallel_fail_fast = true;
+ break;
+ case OPT_POOL_STATUS_INTERVAL:
+ globals.pool_status_interval = (int) strtol(optarg, NULL, 10);
+ if (globals.pool_status_interval < 1) {
+ globals.pool_status_interval = 1;
+ } else if (globals.pool_status_interval > 60 * 10) {
+ // Possible poor choice alert
+ fprintf(stderr, "Caution: Excessive pausing between status updates may cause third-party CI/CD"
+ " jobs to fail if the stdout/stderr streams are idle for too long!\n");
+ }
+ break;
+ case 'U':
+ setenv("PYTHONUNBUFFERED", "1", 1);
+ fflush(stdout);
+ fflush(stderr);
+ setvbuf(stdout, NULL, _IONBF, 0);
+ setvbuf(stderr, NULL, _IONBF, 0);
+ break;
+ case 'v':
+ globals.verbose = true;
+ break;
+ case OPT_OVERWRITE:
+ globals.enable_overwrite = true;
+ break;
+ case OPT_NO_DOCKER:
+ globals.enable_docker = false;
+ user_disabled_docker = true;
+ break;
+ case OPT_NO_ARTIFACTORY:
+ globals.enable_artifactory = false;
+ break;
+ case OPT_NO_ARTIFACTORY_BUILD_INFO:
+ globals.enable_artifactory_build_info = false;
+ break;
+ case OPT_NO_TESTING:
+ globals.enable_testing = false;
+ break;
+ case OPT_NO_REWRITE_SPEC_STAGE_2:
+ globals.enable_rewrite_spec_stage_2 = false;
+ break;
+ case OPT_NO_PARALLEL:
+ globals.enable_parallel = false;
+ break;
+ case '?':
+ default:
+ exit(1);
+ }
+ }
+
+    if (optind < argc) {
+        // use the first positional argument
+        delivery_input = argv[optind];
+    }
+
+ if (!delivery_input) {
+ fprintf(stderr, "error: a DELIVERY_FILE is required\n");
+ usage(path_basename(argv[0]));
+ exit(1);
+ }
+
+ printf(BANNER, VERSION, AUTHOR);
+
+ msg(STASIS_MSG_L1, "Setup\n");
+
+ tpl_setup_vars(&ctx);
+ tpl_setup_funcs(&ctx);
+
+ // Set up PREFIX/etc directory information
+ // The user may manipulate the base directory path with STASIS_SYSCONFDIR
+ // environment variable
+    char stasis_sysconfdir_tmp[PATH_MAX] = {0};
+ if (getenv("STASIS_SYSCONFDIR")) {
+ strncpy(stasis_sysconfdir_tmp, getenv("STASIS_SYSCONFDIR"), sizeof(stasis_sysconfdir_tmp) - 1);
+ } else {
+ strncpy(stasis_sysconfdir_tmp, STASIS_SYSCONFDIR, sizeof(stasis_sysconfdir_tmp) - 1);
+ }
+
+ globals.sysconfdir = realpath(stasis_sysconfdir_tmp, NULL);
+ if (!globals.sysconfdir) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to resolve path to configuration directory: %s\n", stasis_sysconfdir_tmp);
+ exit(1);
+ }
+
+ // Override Python version from command-line, if any
+ if (strlen(python_override_version)) {
+ guard_free(ctx.meta.python);
+ ctx.meta.python = strdup(python_override_version);
+ guard_free(ctx.meta.python_compact);
+ ctx.meta.python_compact = to_short_version(ctx.meta.python);
+ }
+
+ if (!config_input) {
+ // no configuration passed by argument. use basic config.
+ char cfgfile[PATH_MAX * 2];
+ sprintf(cfgfile, "%s/%s", globals.sysconfdir, "stasis.ini");
+ if (!access(cfgfile, F_OK | R_OK)) {
+ config_input = strdup(cfgfile);
+ } else {
+            msg(STASIS_MSG_WARN, "STASIS global configuration is not readable, or does not exist: %s\n", cfgfile);
+ }
+ }
+
+ if (config_input) {
+ msg(STASIS_MSG_L2, "Reading STASIS global configuration: %s\n", config_input);
+ ctx._stasis_ini_fp.cfg = ini_open(config_input);
+ if (!ctx._stasis_ini_fp.cfg) {
+            msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read config file: %s, %s\n", config_input, strerror(errno));
+ exit(1);
+ }
+ ctx._stasis_ini_fp.cfg_path = strdup(config_input);
+ guard_free(config_input);
+ }
+
+ msg(STASIS_MSG_L2, "Reading STASIS delivery configuration: %s\n", delivery_input);
+ ctx._stasis_ini_fp.delivery = ini_open(delivery_input);
+ if (!ctx._stasis_ini_fp.delivery) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read delivery file: %s, %s\n", delivery_input, strerror(errno));
+ exit(1);
+ }
+ ctx._stasis_ini_fp.delivery_path = strdup(delivery_input);
+
+ msg(STASIS_MSG_L2, "Bootstrapping delivery context\n");
+ if (bootstrap_build_info(&ctx)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to bootstrap delivery context\n");
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L2, "Initializing delivery context\n");
+ if (delivery_init(&ctx, INI_READ_RENDER)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to initialize delivery context\n");
+ exit(1);
+ }
+ check_requirements(&ctx);
+
+ msg(STASIS_MSG_L2, "Configuring JFrog CLI\n");
+ if (delivery_init_artifactory(&ctx)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "JFrog CLI configuration failed\n");
+ exit(1);
+ }
+
+ runtime_apply(ctx.runtime.environ);
+ strcpy(env_name, ctx.info.release_name);
+ strcpy(env_name_testing, env_name);
+ strcat(env_name_testing, "-test");
+
+ // Safety gate: Avoid clobbering a delivered release unless the user wants that behavior
+ msg(STASIS_MSG_L1, "Checking release history\n");
+ if (delivery_exists(&ctx)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Refusing to overwrite release: %s\nUse --overwrite to enable release clobbering.\n", ctx.info.release_name);
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L1, "Conda setup\n");
+ delivery_get_conda_installer_url(&ctx, installer_url);
+ msg(STASIS_MSG_L2, "Downloading: %s\n", installer_url);
+ if (delivery_get_conda_installer(&ctx, installer_url)) {
+ msg(STASIS_MSG_ERROR, "download failed: %s\n", installer_url);
+ exit(1);
+ }
+
+ // Unlikely to occur: this should help prevent rmtree() from destroying your entire filesystem
+ // if path is "/" then, die
+ // or if empty string, die
+ if (!strcmp(ctx.storage.conda_install_prefix, DIR_SEP) || !strlen(ctx.storage.conda_install_prefix)) {
+ fprintf(stderr, "error: ctx.storage.conda_install_prefix is malformed!\n");
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L2, "Installing: %s\n", ctx.conda.installer_name);
+ delivery_install_conda(ctx.conda.installer_path, ctx.storage.conda_install_prefix);
+
+ msg(STASIS_MSG_L2, "Configuring: %s\n", ctx.storage.conda_install_prefix);
+ delivery_conda_enable(&ctx, ctx.storage.conda_install_prefix);
+ check_pathvar(&ctx);
+
+ //
+ // Implied environment creation modes/actions
+ //
+ // 1. No base environment config
+ // 1a. Caller is warned
+ // 1b. Caller has full control over all packages
+ // 2. Default base environment (etc/stasis/mission/[name]/base.yml)
+ // 2a. Depends on packages defined by base.yml
+ // 2b. Caller may issue a reduced package set in the INI config
+ // 2c. Caller must be vigilant to avoid incompatible packages (base.yml
+ // *should* have no version constraints)
+ // 3. External base environment (based_on=schema://[release_name].yml)
+ // 3a. Depends on a previous release or arbitrary yaml configuration
+ // 3b. Bugs, conflicts, and dependency resolution issues are inherited and
+ // must be handled in the INI config
+ msg(STASIS_MSG_L1, "Creating release environment(s)\n");
+
+ char *mission_base = NULL;
+ if (isempty(ctx.meta.based_on)) {
+ guard_free(ctx.meta.based_on);
+ char *mission_base_orig = NULL;
+
+ if (asprintf(&mission_base_orig, "%s/%s/base.yml", ctx.storage.mission_dir, ctx.meta.mission) < 0) {
+ SYSERROR("Unable to allocate bytes for %s/%s/base.yml path\n", ctx.storage.mission_dir, ctx.meta.mission);
+ exit(1);
+ }
+
+ if (access(mission_base_orig, F_OK) < 0) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Mission does not provide a base.yml configuration: %s (%s)\n",
+ ctx.meta.mission, ctx.storage.mission_dir);
+ } else {
+ msg(STASIS_MSG_L2, "Using base environment configuration: %s\n", mission_base_orig);
+            if (asprintf(&mission_base, "%s/%s-base.yml", ctx.storage.tmpdir, ctx.info.release_name) < 0) {
+                SYSERROR("%s", "Unable to allocate bytes for temporary base.yml configuration");
+                exit(1);
+            }
+ copy2(mission_base_orig, mission_base, CT_OWNER | CT_PERM);
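+            // Pin the unversioned "- python" entry in the copied base.yml to the configured interpreter version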
+ char spec[255] = {0};
+ snprintf(spec, sizeof(spec) - 1, "- python=%s\n", ctx.meta.python);
+ file_replace_text(mission_base, "- python\n", spec, 0);
+ ctx.meta.based_on = mission_base;
+ }
+ guard_free(mission_base_orig);
+ }
+
+ if (!isempty(ctx.meta.based_on)) {
+ if (conda_env_remove(env_name)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove release environment: %s\n", env_name);
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L2, "Based on: %s\n", ctx.meta.based_on);
+ if (conda_env_create_from_uri(env_name, ctx.meta.based_on)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install release environment using configuration file\n");
+ exit(1);
+ }
+
+ if (conda_env_remove(env_name_testing)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove testing environment %s\n", env_name_testing);
+ exit(1);
+ }
+ if (conda_env_create_from_uri(env_name_testing, ctx.meta.based_on)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install testing environment using configuration file\n");
+ exit(1);
+ }
+ } else {
+ if (conda_env_create(env_name, ctx.meta.python, NULL)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to create release environment\n");
+ exit(1);
+ }
+ if (conda_env_create(env_name_testing, ctx.meta.python, NULL)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to create testing environment\n");
+ exit(1);
+ }
+ }
+    // The base environment configuration is not used past this point
+    if (mission_base) {
+        remove(mission_base);
+    }
+
+ // Activate test environment
+ msg(STASIS_MSG_L1, "Activating test environment\n");
+ if (conda_activate(ctx.storage.conda_install_prefix, env_name_testing)) {
+ fprintf(stderr, "failed to activate test environment\n");
+ exit(1);
+ }
+
+ delivery_gather_tool_versions(&ctx);
+ if (!ctx.conda.tool_version) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Could not determine conda version\n");
+ exit(1);
+ }
+ if (!ctx.conda.tool_build_version) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Could not determine conda-build version\n");
+ exit(1);
+ }
+
+ if (pip_exec("install build")) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "'build' tool installation failed\n");
+ exit(1);
+ }
+
+ if (!isempty(ctx.meta.based_on)) {
+ msg(STASIS_MSG_L1, "Generating package overlay from environment: %s\n", env_name);
+ if (delivery_overlay_packages_from_env(&ctx, env_name)) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "%s", "Failed to generate package overlay. Resulting environment integrity cannot be guaranteed.\n");
+ exit(1);
+ }
+ }
+
+ msg(STASIS_MSG_L1, "Filter deliverable packages\n");
+ delivery_defer_packages(&ctx, DEFER_CONDA);
+ delivery_defer_packages(&ctx, DEFER_PIP);
+
+ msg(STASIS_MSG_L1, "Overview\n");
+ delivery_meta_show(&ctx);
+ delivery_conda_show(&ctx);
+ if (globals.verbose) {
+ //delivery_runtime_show(&ctx);
+ }
+
+ // Execute configuration-defined tests
+ if (globals.enable_testing) {
+ delivery_tests_show(&ctx);
+
+ msg(STASIS_MSG_L1, "Begin test execution\n");
+ delivery_tests_run(&ctx);
+ msg(STASIS_MSG_L2, "Rewriting test results\n");
+ delivery_fixup_test_results(&ctx);
+ } else {
+ msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Test execution is disabled\n");
+ }
+
+ if (ctx.conda.conda_packages_defer && strlist_count(ctx.conda.conda_packages_defer)) {
+ msg(STASIS_MSG_L2, "Building Conda recipe(s)\n");
+ if (delivery_build_recipes(&ctx)) {
+ exit(1);
+ }
+ msg(STASIS_MSG_L3, "Copying artifacts\n");
+ if (delivery_copy_conda_artifacts(&ctx)) {
+ exit(1);
+ }
+ msg(STASIS_MSG_L3, "Indexing artifacts\n");
+ if (delivery_index_conda_artifacts(&ctx)) {
+ exit(1);
+ }
+ }
+
+ if (strlist_count(ctx.conda.pip_packages_defer)) {
+ if (!(ctx.conda.wheels_packages = delivery_build_wheels(&ctx))) {
+ exit(1);
+ }
+ if (delivery_index_wheel_artifacts(&ctx)) {
+ exit(1);
+ }
+
+ }
+
+ // Populate the release environment
+ msg(STASIS_MSG_L1, "Populating release environment\n");
+ msg(STASIS_MSG_L2, "Installing conda packages\n");
+ if (strlist_count(ctx.conda.conda_packages)) {
+ if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA, (struct StrList *[]) {ctx.conda.conda_packages, NULL})) {
+ exit(1);
+ }
+ }
+ if (strlist_count(ctx.conda.conda_packages_defer)) {
+ msg(STASIS_MSG_L3, "Installing deferred conda packages\n");
+ if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA | INSTALL_PKG_CONDA_DEFERRED, (struct StrList *[]) {ctx.conda.conda_packages_defer, NULL})) {
+ exit(1);
+ }
+ } else {
+ msg(STASIS_MSG_L3, "No deferred conda packages\n");
+ }
+
+ msg(STASIS_MSG_L2, "Installing pip packages\n");
+ if (strlist_count(ctx.conda.pip_packages)) {
+ if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP, (struct StrList *[]) {ctx.conda.pip_packages, NULL})) {
+ exit(1);
+ }
+ }
+
+ if (strlist_count(ctx.conda.pip_packages_defer)) {
+ msg(STASIS_MSG_L3, "Installing deferred pip packages\n");
+ if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP | INSTALL_PKG_PIP_DEFERRED, (struct StrList *[]) {ctx.conda.pip_packages_defer, NULL})) {
+ exit(1);
+ }
+ } else {
+ msg(STASIS_MSG_L3, "No deferred pip packages\n");
+ }
+
+ conda_exec("list");
+
+ msg(STASIS_MSG_L1, "Creating release\n");
+ msg(STASIS_MSG_L2, "Exporting delivery configuration\n");
+ if (!pushd(ctx.storage.cfgdump_dir)) {
+ char filename[PATH_MAX] = {0};
+ sprintf(filename, "%s.ini", ctx.info.release_name);
+ FILE *spec = fopen(filename, "w+");
+ if (!spec) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename);
+ exit(1);
+ }
+ ini_write(ctx._stasis_ini_fp.delivery, &spec, INI_WRITE_RAW);
+ fclose(spec);
+
+ memset(filename, 0, sizeof(filename));
+ sprintf(filename, "%s-rendered.ini", ctx.info.release_name);
+ spec = fopen(filename, "w+");
+ if (!spec) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename);
+ exit(1);
+ }
+ ini_write(ctx._stasis_ini_fp.delivery, &spec, INI_WRITE_PRESERVE);
+ fclose(spec);
+ popd();
+ } else {
+        SYSERROR("Failed to enter directory: %s", ctx.storage.cfgdump_dir);
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L2, "Exporting %s\n", env_name_testing);
+ if (conda_env_export(env_name_testing, ctx.storage.delivery_dir, env_name_testing)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", env_name_testing);
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L2, "Exporting %s\n", env_name);
+ if (conda_env_export(env_name, ctx.storage.delivery_dir, env_name)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", env_name);
+ exit(1);
+ }
+
+ // Rewrite release environment output (i.e. set package origin(s) to point to the deployment server, etc.)
+ char specfile[PATH_MAX];
+ sprintf(specfile, "%s/%s.yml", ctx.storage.delivery_dir, env_name);
+ msg(STASIS_MSG_L3, "Rewriting release spec file (stage 1): %s\n", path_basename(specfile));
+ delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_1);
+
+ msg(STASIS_MSG_L1, "Rendering mission templates\n");
+ delivery_mission_render_files(&ctx);
+
+ int want_docker = ini_section_search(&ctx._stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:docker") ? true : false;
+ int want_artifactory = ini_section_search(&ctx._stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:artifactory") ? true : false;
+
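+    // The deployment steps below only run when their corresponding deploy:* section is present in the delivery file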
+ if (want_docker) {
+ if (user_disabled_docker) {
+ msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled by CLI argument\n");
+ } else {
+ char dockerfile[PATH_MAX] = {0};
+ sprintf(dockerfile, "%s/%s", ctx.storage.build_docker_dir, "Dockerfile");
+ if (globals.enable_docker) {
+ if (!access(dockerfile, F_OK)) {
+ msg(STASIS_MSG_L1, "Building Docker image\n");
+ if (delivery_docker(&ctx)) {
+ msg(STASIS_MSG_L1 | STASIS_MSG_ERROR, "Failed to build docker image!\n");
+ COE_CHECK_ABORT(1, "Failed to build docker image");
+ }
+ } else {
+ msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. No Dockerfile found in %s\n", ctx.storage.build_docker_dir);
+ }
+ } else {
+ msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. System configuration error\n");
+ }
+ }
+ } else {
+ msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. deploy:docker is not configured\n");
+ }
+
+ msg(STASIS_MSG_L3, "Rewriting release spec file (stage 2): %s\n", path_basename(specfile));
+ delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_2);
+
+ msg(STASIS_MSG_L1, "Dumping metadata\n");
+ if (delivery_dump_metadata(&ctx)) {
+ msg(STASIS_MSG_L1 | STASIS_MSG_ERROR, "Metadata dump failed\n");
+ }
+
+ if (want_artifactory) {
+ if (globals.enable_artifactory) {
+ msg(STASIS_MSG_L1, "Uploading artifacts\n");
+ delivery_artifact_upload(&ctx);
+ } else {
+ msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Artifactory upload is disabled by CLI argument\n");
+ }
+ } else {
+ msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Artifactory upload is disabled. deploy:artifactory is not configured\n");
+ }
+
+ msg(STASIS_MSG_L1, "Cleaning up\n");
+ delivery_free(&ctx);
+ globals_free();
+ tpl_free();
+
+ msg(STASIS_MSG_L1, "Done!\n");
+ return 0;
+}
+
diff --git a/src/cli/stasis/system_requirements.c b/src/cli/stasis/system_requirements.c
new file mode 100644
index 0000000..4554b93
--- /dev/null
+++ b/src/cli/stasis/system_requirements.c
@@ -0,0 +1,82 @@
+#include "system_requirements.h"
+
+void check_system_env_requirements(void) {
+ msg(STASIS_MSG_L1, "Checking environment\n");
+ globals.envctl = envctl_init();
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "TMPDIR");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_ROOT");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_SYSCONFDIR");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_CPU_COUNT");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED | STASIS_ENVCTL_REDACT, callback_except_gh, "STASIS_GH_TOKEN");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED, callback_except_jf, "STASIS_JF_ARTIFACTORY_URL");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_ACCESS_TOKEN");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_JF_USER");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_PASSWORD");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_SSH_KEY_PATH");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_SSH_PASSPHRASE");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_CLIENT_CERT_CERT_PATH");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_CLIENT_CERT_KEY_PATH");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED, callback_except_jf, "STASIS_JF_REPO");
+ envctl_do_required(globals.envctl, globals.verbose);
+}
+
+void check_system_requirements(struct Delivery *ctx) {
+ const char *tools_required[] = {
+ "rsync",
+ NULL,
+ };
+
+ msg(STASIS_MSG_L1, "Checking system requirements\n");
+ for (size_t i = 0; tools_required[i] != NULL; i++) {
+ if (!find_program(tools_required[i])) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "'%s' must be installed.\n", tools_required[i]);
+ exit(1);
+ }
+ }
+
+ if (!globals.tmpdir && !ctx->storage.tmpdir) {
+ delivery_init_tmpdir(ctx);
+ }
+
+ struct DockerCapabilities dcap;
+ if (!docker_capable(&dcap)) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Docker is broken\n");
+ msg(STASIS_MSG_L3, "Available: %s\n", dcap.available ? "Yes" : "No");
+ msg(STASIS_MSG_L3, "Usable: %s\n", dcap.usable ? "Yes" : "No");
+ msg(STASIS_MSG_L3, "Podman [Docker Emulation]: %s\n", dcap.podman ? "Yes" : "No");
+ msg(STASIS_MSG_L3, "Build plugin(s): ");
+ if (dcap.usable) {
+ if (dcap.build & STASIS_DOCKER_BUILD) {
+ printf("build ");
+ }
+ if (dcap.build & STASIS_DOCKER_BUILD_X) {
+ printf("buildx ");
+ }
+ puts("");
+ } else {
+ printf("N/A\n");
+ }
+
+ // disable docker builds
+ globals.enable_docker = false;
+ }
+}
+
+void check_requirements(struct Delivery *ctx) {
+ check_system_requirements(ctx);
+ check_system_env_requirements();
+}
+
+char *check_pathvar(struct Delivery *ctx) {
+    char *pathvar = NULL;
+    pathvar = getenv("PATH");
+    if (!pathvar) {
+        msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "PATH variable is not set. Cannot continue.\n");
+        exit(1);
+    } else {
+        char pathvar_tmp[STASIS_BUFSIZ];
+        sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.conda_install_prefix, pathvar);
+        setenv("PATH", pathvar_tmp, 1);
+        pathvar = NULL;
+    }
+    return pathvar;
+}
\ No newline at end of file
diff --git a/src/cli/stasis/system_requirements.h b/src/cli/stasis/system_requirements.h
new file mode 100644
index 0000000..4c2231a
--- /dev/null
+++ b/src/cli/stasis/system_requirements.h
@@ -0,0 +1,13 @@
+#ifndef STASIS_SYSTEM_REQUIREMENTS_H
+#define STASIS_SYSTEM_REQUIREMENTS_H
+
+#include "delivery.h"
+#include "callbacks.h"
+#include "envctl.h"
+
+void check_system_env_requirements(void);
+void check_system_requirements(struct Delivery *ctx);
+void check_requirements(struct Delivery *ctx);
+char *check_pathvar(struct Delivery *ctx);
+
+#endif //STASIS_SYSTEM_REQUIREMENTS_H
diff --git a/src/cli/stasis/tpl.c b/src/cli/stasis/tpl.c
new file mode 100644
index 0000000..08eb1f3
--- /dev/null
+++ b/src/cli/stasis/tpl.c
@@ -0,0 +1,46 @@
+#include "delivery.h"
+#include "tpl.h"
+
+void tpl_setup_vars(struct Delivery *ctx) {
+ // Expose variables for use with the template engine
+ // NOTE: These pointers are populated by delivery_init() so please avoid using
+ // tpl_render() until then.
+ tpl_register("meta.name", &ctx->meta.name);
+ tpl_register("meta.version", &ctx->meta.version);
+ tpl_register("meta.codename", &ctx->meta.codename);
+ tpl_register("meta.mission", &ctx->meta.mission);
+ tpl_register("meta.python", &ctx->meta.python);
+ tpl_register("meta.python_compact", &ctx->meta.python_compact);
+ tpl_register("info.time_str_epoch", &ctx->info.time_str_epoch);
+ tpl_register("info.release_name", &ctx->info.release_name);
+ tpl_register("info.build_name", &ctx->info.build_name);
+ tpl_register("info.build_number", &ctx->info.build_number);
+ tpl_register("storage.tmpdir", &ctx->storage.tmpdir);
+ tpl_register("storage.output_dir", &ctx->storage.output_dir);
+ tpl_register("storage.delivery_dir", &ctx->storage.delivery_dir);
+ tpl_register("storage.conda_artifact_dir", &ctx->storage.conda_artifact_dir);
+ tpl_register("storage.wheel_artifact_dir", &ctx->storage.wheel_artifact_dir);
+ tpl_register("storage.build_sources_dir", &ctx->storage.build_sources_dir);
+ tpl_register("storage.build_docker_dir", &ctx->storage.build_docker_dir);
+ tpl_register("storage.results_dir", &ctx->storage.results_dir);
+ tpl_register("storage.tools_dir", &ctx->storage.tools_dir);
+ tpl_register("conda.installer_baseurl", &ctx->conda.installer_baseurl);
+ tpl_register("conda.installer_name", &ctx->conda.installer_name);
+ tpl_register("conda.installer_version", &ctx->conda.installer_version);
+ tpl_register("conda.installer_arch", &ctx->conda.installer_arch);
+ tpl_register("conda.installer_platform", &ctx->conda.installer_platform);
+ tpl_register("deploy.jfrog.repo", &globals.jfrog.repo);
+ tpl_register("deploy.jfrog.url", &globals.jfrog.url);
+ tpl_register("deploy.docker.registry", &ctx->deploy.docker.registry);
+ tpl_register("workaround.conda_reactivate", &globals.workaround.conda_reactivate);
+}
+
+void tpl_setup_funcs(struct Delivery *ctx) {
+ // Expose function(s) to the template engine
+ // Prototypes can be found in template_func_proto.h
+ tpl_register_func("get_github_release_notes", &get_github_release_notes_tplfunc_entrypoint, 3, NULL);
+ tpl_register_func("get_github_release_notes_auto", &get_github_release_notes_auto_tplfunc_entrypoint, 1, ctx);
+ tpl_register_func("junitxml_file", &get_junitxml_file_entrypoint, 1, ctx);
+ tpl_register_func("basetemp_dir", &get_basetemp_dir_entrypoint, 1, ctx);
+ tpl_register_func("tox_run", &tox_run_entrypoint, 2, ctx);
+}
\ No newline at end of file
diff --git a/src/cli/stasis/tpl.h b/src/cli/stasis/tpl.h
new file mode 100644
index 0000000..398f0fe
--- /dev/null
+++ b/src/cli/stasis/tpl.h
@@ -0,0 +1,10 @@
+#ifndef STASIS_TPL_H
+#define STASIS_TPL_H
+
+#include "template.h"
+#include "template_func_proto.h"
+
+void tpl_setup_vars(struct Delivery *ctx);
+void tpl_setup_funcs(struct Delivery *ctx);
+
+#endif //STASIS_TPL_H
diff --git a/src/cli/stasis_indexer/CMakeLists.txt b/src/cli/stasis_indexer/CMakeLists.txt
new file mode 100644
index 0000000..eae1394
--- /dev/null
+++ b/src/cli/stasis_indexer/CMakeLists.txt
@@ -0,0 +1,6 @@
+add_executable(stasis_indexer
+ stasis_indexer.c
+)
+target_link_libraries(stasis_indexer PRIVATE stasis_core)
+
+install(TARGETS stasis_indexer RUNTIME)
diff --git a/src/cli/stasis_indexer/stasis_indexer.c b/src/cli/stasis_indexer/stasis_indexer.c
new file mode 100644
index 0000000..bd59920
--- /dev/null
+++ b/src/cli/stasis_indexer/stasis_indexer.c
@@ -0,0 +1,949 @@
+#include <getopt.h>
+#include <fnmatch.h>
+#include "delivery.h"
+#include "junitxml.h"
+
+static struct option long_options[] = {
+ {"help", no_argument, 0, 'h'},
+ {"destdir", required_argument, 0, 'd'},
+ {"verbose", no_argument, 0, 'v'},
+ {"unbuffered", no_argument, 0, 'U'},
+ {"web", no_argument, 0, 'w'},
+ {0, 0, 0, 0},
+};
+
+const char *long_options_help[] = {
+ "Display this usage statement",
+ "Destination directory",
+ "Increase output verbosity",
+ "Disable line buffering",
+ "Generate HTML indexes (requires pandoc)",
+ NULL,
+};
+
+static void usage(char *name) {
+ int maxopts = sizeof(long_options) / sizeof(long_options[0]);
+ unsigned char *opts = calloc(maxopts + 1, sizeof(char));
+ for (int i = 0; i < maxopts; i++) {
+ opts[i] = long_options[i].val;
+ }
+ printf("usage: %s [-%s] {{STASIS_ROOT}...}\n", name, opts);
+ guard_free(opts);
+
+ for (int i = 0; i < maxopts - 1; i++) {
+ char line[255];
+ sprintf(line, " --%s -%c %-20s", long_options[i].name, long_options[i].val, long_options_help[i]);
+ puts(line);
+ }
+
+}
+
+int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t rootdirs_total) {
+ char cmd[PATH_MAX];
+ char destdir_bare[PATH_MAX];
+ char destdir_with_output[PATH_MAX];
+ char *destdir = destdir_bare;
+
+ memset(cmd, 0, sizeof(cmd));
+ memset(destdir_bare, 0, sizeof(destdir_bare));
+    memset(destdir_with_output, 0, sizeof(destdir_with_output));
+
+ strcpy(destdir_bare, dest);
+ strcpy(destdir_with_output, dest);
+ strcat(destdir_with_output, "/output");
+
+ if (!access(destdir_with_output, F_OK)) {
+ destdir = destdir_with_output;
+ }
+
+ sprintf(cmd, "rsync -ah%s --delete --exclude 'tools/' --exclude 'tmp/' --exclude 'build/' ", globals.verbose ? "v" : "q");
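+    // Append each source root to the rsync command (preferring its output/ subdirectory, as with destdir above), then the destination path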
+ for (size_t i = 0; i < rootdirs_total; i++) {
+ char srcdir_bare[PATH_MAX] = {0};
+ char srcdir_with_output[PATH_MAX] = {0};
+ char *srcdir = srcdir_bare;
+ strcpy(srcdir_bare, rootdirs[i]);
+ strcpy(srcdir_with_output, rootdirs[i]);
+ strcat(srcdir_with_output, "/output");
+
+ if (access(srcdir_bare, F_OK)) {
+ fprintf(stderr, "%s does not exist\n", srcdir_bare);
+ continue;
+ }
+
+ if (!access(srcdir_with_output, F_OK)) {
+ srcdir = srcdir_with_output;
+ }
+        snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(cmd), "'%s'/ ", srcdir);
+ }
+    snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(cmd), " %s/", destdir);
+
+ if (globals.verbose) {
+ puts(cmd);
+ }
+
+ if (system(cmd)) {
+ return -1;
+ }
+ return 0;
+}
+
+int indexer_wheels(struct Delivery *ctx) {
+ return delivery_index_wheel_artifacts(ctx);
+}
+
+int indexer_load_metadata(struct Delivery *ctx, const char *filename) {
+ char line[STASIS_NAME_MAX] = {0};
+ FILE *fp;
+
+ fp = fopen(filename, "r");
+ if (!fp) {
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line) - 1, fp) != NULL) {
+ char **parts = split(line, " ", 1);
+ char *name = parts[0];
+ char *value = parts[1];
+ strip(value);
+ if (!strcmp(name, "name")) {
+ ctx->meta.name = strdup(value);
+ } else if (!strcmp(name, "version")) {
+ ctx->meta.version = strdup(value);
+ } else if (!strcmp(name, "rc")) {
+ ctx->meta.rc = (int) strtol(value, NULL, 10);
+ } else if (!strcmp(name, "python")) {
+ ctx->meta.python = strdup(value);
+ } else if (!strcmp(name, "python_compact")) {
+ ctx->meta.python_compact = strdup(value);
+ } else if (!strcmp(name, "mission")) {
+ ctx->meta.mission = strdup(value);
+ } else if (!strcmp(name, "codename")) {
+ ctx->meta.codename = strdup(value);
+ } else if (!strcmp(name, "platform")) {
+ ctx->system.platform = split(value, " ", 0);
+ } else if (!strcmp(name, "arch")) {
+ ctx->system.arch = strdup(value);
+ } else if (!strcmp(name, "time")) {
+ ctx->info.time_str_epoch = strdup(value);
+ } else if (!strcmp(name, "release_fmt")) {
+ ctx->rules.release_fmt = strdup(value);
+ } else if (!strcmp(name, "release_name")) {
+ ctx->info.release_name = strdup(value);
+ } else if (!strcmp(name, "build_name_fmt")) {
+ ctx->rules.build_name_fmt = strdup(value);
+ } else if (!strcmp(name, "build_name")) {
+ ctx->info.build_name = strdup(value);
+ } else if (!strcmp(name, "build_number_fmt")) {
+ ctx->rules.build_number_fmt = strdup(value);
+ } else if (!strcmp(name, "build_number")) {
+ ctx->info.build_number = strdup(value);
+ } else if (!strcmp(name, "conda_installer_baseurl")) {
+ ctx->conda.installer_baseurl = strdup(value);
+ } else if (!strcmp(name, "conda_installer_name")) {
+ ctx->conda.installer_name = strdup(value);
+ } else if (!strcmp(name, "conda_installer_version")) {
+ ctx->conda.installer_version = strdup(value);
+ } else if (!strcmp(name, "conda_installer_platform")) {
+ ctx->conda.installer_platform = strdup(value);
+ } else if (!strcmp(name, "conda_installer_arch")) {
+ ctx->conda.installer_arch = strdup(value);
+ }
+ GENERIC_ARRAY_FREE(parts);
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+int indexer_get_files(struct StrList **out, const char *path, const char *pattern, ...) {
+ va_list args;
+ va_start(args, pattern);
+ char userpattern[PATH_MAX] = {0};
+ vsprintf(userpattern, pattern, args);
+ va_end(args);
+ struct StrList *list = listdir(path);
+ if (!list) {
+ return -1;
+ }
+
+ if (!(*out)) {
+ (*out) = strlist_init();
+ if (!(*out)) {
+ guard_strlist_free(&list);
+ return -1;
+ }
+ }
+
+ size_t no_match = 0;
+ for (size_t i = 0; i < strlist_count(list); i++) {
+ char *item = strlist_item(list, i);
+ if (fnmatch(userpattern, item, 0)) {
+ no_match++;
+ continue;
+ } else {
+ strlist_append(&(*out), item);
+ }
+ }
+ if (no_match >= strlist_count(list)) {
+ fprintf(stderr, "no files matching the pattern: %s\n", userpattern);
+ guard_strlist_free(&list);
+ return -1;
+ }
+ guard_strlist_free(&list);
+ return 0;
+}
+
+int get_latest_rc(struct Delivery ctx[], size_t nelem) {
+ int result = 0;
+ for (size_t i = 0; i < nelem; i++) {
+ if (ctx[i].meta.rc > result) {
+ result = ctx[i].meta.rc;
+ }
+ }
+ return result;
+}
+
+struct Delivery **get_latest_deliveries(struct Delivery ctx[], size_t nelem) {
+ struct Delivery **result = NULL;
+ int latest = 0;
+ size_t n = 0;
+
+ result = calloc(nelem + 1, sizeof(result));
+ if (!result) {
+ fprintf(stderr, "Unable to allocate %zu bytes for result delivery array: %s\n", nelem * sizeof(result), strerror(errno));
+ return NULL;
+ }
+
+ latest = get_latest_rc(ctx, nelem);
+ for (size_t i = 0; i < nelem; i++) {
+ if (ctx[i].meta.rc == latest) {
+ result[n] = &ctx[i];
+ n++;
+ }
+ }
+
+ return result;
+}
+
+int get_pandoc_version(size_t *result) {
+ *result = 0;
+ int state = 0;
+ char *version_str = shell_output("pandoc --version", &state);
+ if (state || !version_str) {
+ // an error occurred
+ return -1;
+ }
+
+ // Verify that we're looking at pandoc
+ if (strlen(version_str) > 7 && !strncmp(version_str, "pandoc ", 7)) {
+ // we have pandoc
+ char *v_begin = &version_str[7];
+ if (!v_begin) {
+ SYSERROR("unexpected pandoc output: %s", version_str);
+ return -1;
+ }
+ char *v_end = strchr(version_str, '\n');
+ if (v_end) {
+ *v_end = 0;
+ }
+
+ char **parts = split(v_begin, ".", 0);
+ if (!parts) {
+ SYSERROR("unable to split pandoc version string, '%s': %s", version_str, strerror(errno));
+ return -1;
+ }
+
+ size_t parts_total;
+ for (parts_total = 0; parts[parts_total] != NULL; parts_total++);
+
+ // generate the version as an integer
+ // note: pandoc version scheme never exceeds four elements (or bytes in this case)
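+        // e.g. "2.19.2" packs to 0x02130200 and "1.15.0.4" to 0x010f0004, so packed
+        // values compare correctly against the hex thresholds used by the caller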
+ for (size_t i = 0; i < 4; i++) {
+ unsigned char tmp = 0;
+ if (i < parts_total) {
+ // only process version elements we have. the rest will be zeros.
+ tmp = strtoul(parts[i], NULL, 10);
+ }
+ // pack version element into result
+ *result = (*result << 8) | tmp;
+ }
+ } else {
+ // invalid version string
+ return 1;
+ }
+
+ return 0;
+}
+
+int indexer_make_website(struct Delivery *ctx) {
+ char cmd[PATH_MAX];
+ const char *pattern = "*.md";
+
+ if (!find_program("pandoc")) {
+ fprintf(stderr, "pandoc is not installed: unable to generate HTML indexes\n");
+ return 0;
+ }
+
+ char *css_filename = calloc(PATH_MAX, sizeof(*css_filename));
+ if (!css_filename) {
+ SYSERROR("unable to allocate string for CSS file path: %s", strerror(errno));
+ return -1;
+ }
+
+ sprintf(css_filename, "%s/%s", globals.sysconfdir, "stasis_pandoc.css");
+ int have_css = access(css_filename, F_OK | R_OK) == 0;
+
+ char pandoc_versioned_args[255] = {0};
+ size_t pandoc_version = 0;
+
+ if (!get_pandoc_version(&pandoc_version)) {
+ // < 2.19
+ if (pandoc_version < 0x02130000) {
+ strcat(pandoc_versioned_args, "--self-contained ");
+ } else {
+ // >= 2.19
+ strcat(pandoc_versioned_args, "--embed-resources ");
+ }
+
+ // >= 1.15.0.4
+ if (pandoc_version >= 0x010f0004) {
+ strcat(pandoc_versioned_args, "--standalone ");
+ }
+
+ // >= 1.10.0.1
+ if (pandoc_version >= 0x010a0001) {
+ strcat(pandoc_versioned_args, "-f gfm+autolink_bare_uris ");
+ }
+
+ // > 3.1.9
+ if (pandoc_version > 0x03010900) {
+ strcat(pandoc_versioned_args, "-f gfm+alerts ");
+ }
+ }
+
+ struct StrList *dirs = strlist_init();
+ strlist_append(&dirs, ctx->storage.delivery_dir);
+ strlist_append(&dirs, ctx->storage.results_dir);
+
+ struct StrList *inputs = NULL;
+    for (size_t i = 0; i < strlist_count(dirs); i++) {
+        char *root = strlist_item(dirs, i);
+        if (indexer_get_files(&inputs, root, pattern)) {
+            SYSERROR("%s does not contain files with pattern: %s", root, pattern);
+            guard_strlist_free(&inputs);
+            continue;
+        }
+ for (size_t x = 0; x < strlist_count(inputs); x++) {
+ char *filename = strlist_item(inputs, x);
+ char fullpath_src[PATH_MAX] = {0};
+ char fullpath_dest[PATH_MAX] = {0};
+ sprintf(fullpath_src, "%s/%s", root, filename);
+ if (access(fullpath_src, F_OK)) {
+ continue;
+ }
+
+ // Replace *.md extension with *.html.
+ strcpy(fullpath_dest, fullpath_src);
+ char *ext = strrchr(fullpath_dest, '.');
+ if (ext) {
+ *ext = '\0';
+ }
+ strcat(fullpath_dest, ".html");
+
+ // Converts a markdown file to html
+ strcpy(cmd, "pandoc ");
+ strcat(cmd, pandoc_versioned_args);
+ if (have_css) {
+ strcat(cmd, "--css ");
+ strcat(cmd, css_filename);
+ }
+ strcat(cmd, " ");
+ strcat(cmd, "--metadata title=\"STASIS\" ");
+ strcat(cmd, "-o ");
+ strcat(cmd, fullpath_dest);
+ strcat(cmd, " ");
+ strcat(cmd, fullpath_src);
+ if (globals.verbose) {
+ puts(cmd);
+ }
+ // This might be negative when killed by a signal.
+ // Otherwise, the return code is not critical to us.
+ if (system(cmd) < 0) {
+ guard_free(css_filename);
+ guard_strlist_free(&dirs);
+ return 1;
+ }
+ if (file_replace_text(fullpath_dest, ".md", ".html", 0)) {
+ // inform-only
+ SYSERROR("%s: failed to rewrite *.md urls with *.html extension", fullpath_dest);
+ }
+
+ // Link the nearest README.html to index.html
+ if (!strcmp(filename, "README.md")) {
+ char link_from[PATH_MAX] = {0};
+ char link_dest[PATH_MAX] = {0};
+ strcpy(link_from, "README.html");
+ sprintf(link_dest, "%s/%s", root, "index.html");
+ if (symlink(link_from, link_dest)) {
+ SYSERROR("Warning: symlink(%s, %s) failed: %s", link_from, link_dest, strerror(errno));
+ }
+ }
+ }
+ guard_strlist_free(&inputs);
+ }
+ guard_free(css_filename);
+ guard_strlist_free(&dirs);
+
+ return 0;
+}
+
+static int micromamba_configure(const struct Delivery *ctx, struct MicromambaInfo *m) {
+ int status = 0;
+ char *micromamba_prefix = NULL;
+ if (asprintf(&micromamba_prefix, "%s/bin", ctx->storage.tools_dir) < 0) {
+ return -1;
+ }
+ m->conda_prefix = globals.conda_install_prefix;
+ m->micromamba_prefix = micromamba_prefix;
+
+ size_t pathvar_len = (strlen(getenv("PATH")) + strlen(m->micromamba_prefix) + strlen(m->conda_prefix)) + 3 + 4 + 1;
+ // ^^^^^^^^^^^^^^^^^^
+ // 3 = separators
+ // 4 = chars (/bin)
+ // 1 = nul terminator
+ char *pathvar = calloc(pathvar_len, sizeof(*pathvar));
+ if (!pathvar) {
+ SYSERROR("%s", "Unable to allocate bytes for temporary path string");
+ exit(1);
+ }
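+    // Prepend the managed conda and micromamba prefixes to PATH so later tool invocations resolve to these installs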
+ snprintf(pathvar, pathvar_len, "%s/bin:%s:%s", m->conda_prefix, m->micromamba_prefix, getenv("PATH"));
+ setenv("PATH", pathvar, 1);
+ guard_free(pathvar);
+
+ status += micromamba(m, "config prepend --env channels conda-forge");
+ if (!globals.verbose) {
+ status += micromamba(m, "config set --env quiet true");
+ }
+ status += micromamba(m, "config set --env always_yes true");
+ status += micromamba(m, "install conda-build pandoc");
+
+ return status;
+}
+
+int indexer_conda(struct Delivery *ctx, struct MicromambaInfo m) {
+ int status = 0;
+
+ status += micromamba(&m, "run conda index %s", ctx->storage.conda_artifact_dir);
+ return status;
+}
+
+static struct StrList *get_architectures(struct Delivery ctx[], size_t nelem) {
+ struct StrList *architectures = strlist_init();
+ for (size_t i = 0; i < nelem; i++) {
+ if (!strstr_array(architectures->data, ctx[i].system.arch)) {
+ strlist_append(&architectures, ctx[i].system.arch);
+ }
+ }
+ return architectures;
+}
+
+static struct StrList *get_platforms(struct Delivery ctx[], size_t nelem) {
+ struct StrList *platforms = strlist_init();
+ for (size_t i = 0; i < nelem; i++) {
+ if (!strstr_array(platforms->data, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE])) {
+ strlist_append(&platforms, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE]);
+ }
+ }
+ return platforms;
+}
+
+int indexer_symlinks(struct Delivery ctx[], size_t nelem) {
+ struct Delivery **data = NULL;
+ data = get_latest_deliveries(ctx, nelem);
+ //int latest = get_latest_rc(ctx, nelem);
+
+ if (!pushd(ctx->storage.delivery_dir)) {
+ for (size_t i = 0; i < nelem; i++) {
+ char link_name_spec[PATH_MAX];
+ char link_name_readme[PATH_MAX];
+
+ char file_name_spec[PATH_MAX];
+ char file_name_readme[PATH_MAX];
+
+ if (!data[i]) {
+ continue;
+ }
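+            // Link names encode the python version, platform, and arch so each combination gets its own "latest" pointer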
+ sprintf(link_name_spec, "latest-py%s-%s-%s.yml", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], data[i]->system.arch);
+ sprintf(file_name_spec, "%s.yml", data[i]->info.release_name);
+
+ sprintf(link_name_readme, "README-py%s-%s-%s.md", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], data[i]->system.arch);
+ sprintf(file_name_readme, "README-%s.md", data[i]->info.release_name);
+
+ if (!access(link_name_spec, F_OK)) {
+ if (unlink(link_name_spec)) {
+ fprintf(stderr, "Unable to remove spec link: %s\n", link_name_spec);
+ }
+ }
+ if (!access(link_name_readme, F_OK)) {
+ if (unlink(link_name_readme)) {
+ fprintf(stderr, "Unable to remove readme link: %s\n", link_name_readme);
+ }
+ }
+
+ if (globals.verbose) {
+ printf("%s -> %s\n", file_name_spec, link_name_spec);
+ }
+ if (symlink(file_name_spec, link_name_spec)) {
+ fprintf(stderr, "Unable to link %s as %s\n", file_name_spec, link_name_spec);
+ }
+
+ if (globals.verbose) {
+ printf("%s -> %s\n", file_name_readme, link_name_readme);
+ }
+ if (symlink(file_name_readme, link_name_readme)) {
+ fprintf(stderr, "Unable to link %s as %s\n", file_name_readme, link_name_readme);
+ }
+ }
+ popd();
+ } else {
+ fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir);
+ guard_free(data);
+ return -1;
+ }
+
+    // "data" is an array of pointers into ctx[]. Do not free the contents of the array.
+ guard_free(data);
+ return 0;
+}
+
+int indexer_readmes(struct Delivery ctx[], size_t nelem) {
+ struct Delivery **latest = NULL;
+ latest = get_latest_deliveries(ctx, nelem);
+
+ char indexfile[PATH_MAX] = {0};
+ sprintf(indexfile, "%s/README.md", ctx->storage.delivery_dir);
+
+ if (!pushd(ctx->storage.delivery_dir)) {
+ FILE *indexfp;
+ indexfp = fopen(indexfile, "w+");
+ if (!indexfp) {
+ fprintf(stderr, "Unable to open %s for writing\n", indexfile);
+ return -1;
+ }
+ struct StrList *archs = get_architectures(*latest, nelem);
+ struct StrList *platforms = get_platforms(*latest, nelem);
+
+ fprintf(indexfp, "# %s-%s\n\n", ctx->meta.name, ctx->meta.version);
+ fprintf(indexfp, "## Current Release\n\n");
+ for (size_t p = 0; p < strlist_count(platforms); p++) {
+ char *platform = strlist_item(platforms, p);
+ for (size_t a = 0; a < strlist_count(archs); a++) {
+ char *arch = strlist_item(archs, a);
+ int have_combo = 0;
+ for (size_t i = 0; i < nelem; i++) {
+ if (latest[i] && latest[i]->system.platform) {
+ if (strstr(latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], platform) &&
+ strstr(latest[i]->system.arch, arch)) {
+ have_combo = 1;
+ }
+ }
+ }
+ if (!have_combo) {
+ continue;
+ }
+ fprintf(indexfp, "### %s-%s\n\n", platform, arch);
+
+ fprintf(indexfp, "|Release|Info|Receipt|\n");
+ fprintf(indexfp, "|:----:|:----:|:----:|\n");
+ for (size_t i = 0; i < nelem; i++) {
+ char link_name[PATH_MAX];
+ char readme_name[PATH_MAX];
+ char conf_name[PATH_MAX];
+ char conf_name_relative[PATH_MAX];
+ if (!latest[i]) {
+ continue;
+ }
+ sprintf(link_name, "latest-py%s-%s-%s.yml", latest[i]->meta.python_compact, latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest[i]->system.arch);
+ sprintf(readme_name, "README-py%s-%s-%s.md", latest[i]->meta.python_compact, latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest[i]->system.arch);
+ sprintf(conf_name, "%s.ini", latest[i]->info.release_name);
+ sprintf(conf_name_relative, "../config/%s-rendered.ini", latest[i]->info.release_name);
+ if (strstr(link_name, platform) && strstr(link_name, arch)) {
+ fprintf(indexfp, "|[%s](%s)|[%s](%s)|[%s](%s)|\n", link_name, link_name, readme_name, readme_name, conf_name, conf_name_relative);
+ }
+ }
+ fprintf(indexfp, "\n");
+ }
+ fprintf(indexfp, "\n");
+ }
+ guard_strlist_free(&archs);
+ guard_strlist_free(&platforms);
+ fclose(indexfp);
+ popd();
+ } else {
+ fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir);
+ guard_free(latest);
+ return -1;
+ }
+
+ // "latest" is an array of pointers to ctxs[]. Do not free the contents of the array.
+ guard_free(latest);
+ return 0;
+}
+
+int indexer_junitxml_report(struct Delivery ctx[], size_t nelem) {
+ struct Delivery **latest = NULL;
+ latest = get_latest_deliveries(ctx, nelem);
+
+ char indexfile[PATH_MAX] = {0};
+ sprintf(indexfile, "%s/README.md", ctx->storage.results_dir);
+
+ struct StrList *file_listing = listdir(ctx->storage.results_dir);
+ if (!file_listing) {
+ // no test results to process
+ return 0;
+ }
+
+ if (!pushd(ctx->storage.results_dir)) {
+ FILE *indexfp;
+ indexfp = fopen(indexfile, "w+");
+ if (!indexfp) {
+ fprintf(stderr, "Unable to open %s for writing\n", indexfile);
+ return -1;
+ }
+ struct StrList *archs = get_architectures(*latest, nelem);
+ struct StrList *platforms = get_platforms(*latest, nelem);
+
+ fprintf(indexfp, "# %s-%s Test Report\n\n", ctx->meta.name, ctx->meta.version);
+ fprintf(indexfp, "## Current Release\n\n");
+ for (size_t p = 0; p < strlist_count(platforms); p++) {
+ char *platform = strlist_item(platforms, p);
+ for (size_t a = 0; a < strlist_count(archs); a++) {
+ char *arch = strlist_item(archs, a);
+ int have_combo = 0;
+ for (size_t i = 0; i < nelem; i++) {
+ if (latest[i] && latest[i]->system.platform) {
+ if (strstr(latest[i]->system.platform[DELIVERY_PLATFORM_RELEASE], platform) &&
+ strstr(latest[i]->system.arch, arch)) {
+ have_combo = 1;
+ break;
+ }
+ }
+ }
+ if (!have_combo) {
+ continue;
+ }
+ fprintf(indexfp, "### %s-%s\n\n", platform, arch);
+
+ fprintf(indexfp, "|Suite|Duration|Fail |Skip |Error |\n");
+ fprintf(indexfp, "|:----|:------:|:------:|:---:|:----:|\n");
+ for (size_t f = 0; f < strlist_count(file_listing); f++) {
+ char *filename = strlist_item(file_listing, f);
+ if (!endswith(filename, ".xml")) {
+ continue;
+ }
+
+ if (strstr(filename, platform) && strstr(filename, arch)) {
+ struct JUNIT_Testsuite *testsuite = junitxml_testsuite_read(filename);
+ if (testsuite) {
+ if (globals.verbose) {
+ printf("%s: duration: %0.4f, failed: %d, skipped: %d, errors: %d\n", filename, testsuite->time, testsuite->failures, testsuite->skipped, testsuite->errors);
+ }
+ fprintf(indexfp, "|[%s](%s)|%0.4f|%d|%d|%d|\n", filename, filename, testsuite->time, testsuite->failures, testsuite->skipped, testsuite->errors);
+ /*
+ * TODO: Display failure/skip/error output.
+ *
+ for (size_t i = 0; i < testsuite->_tc_inuse; i++) {
+ if (testsuite->testcase[i]->tc_result_state_type) {
+ printf("testcase: %s :: %s\n", testsuite->testcase[i]->classname, testsuite->testcase[i]->name);
+ if (testsuite->testcase[i]->tc_result_state_type == JUNIT_RESULT_STATE_FAILURE) {
+ printf("failure: %s\n", testsuite->testcase[i]->result_state.failure->message);
+ } else if (testsuite->testcase[i]->tc_result_state_type == JUNIT_RESULT_STATE_SKIPPED) {
+ printf("skipped: %s\n", testsuite->testcase[i]->result_state.skipped->message);
+ }
+ }
+ }
+ */
+ junitxml_testsuite_free(&testsuite);
+ } else {
+ fprintf(stderr, "bad test suite: %s: %s\n", strerror(errno), filename);
+ continue;
+ }
+ }
+ }
+ fprintf(indexfp, "\n");
+ }
+ fprintf(indexfp, "\n");
+ }
+ guard_strlist_free(&archs);
+ guard_strlist_free(&platforms);
+ fclose(indexfp);
+ popd();
+ } else {
+        fprintf(stderr, "Unable to enter results directory: %s\n", ctx->storage.results_dir);
+ guard_free(latest);
+ return -1;
+ }
+
+ // "latest" is an array of pointers to ctxs[]. Do not free the contents of the array.
+ guard_free(latest);
+ return 0;
+}
+
+void indexer_init_dirs(struct Delivery *ctx, const char *workdir) {
+ path_store(&ctx->storage.root, PATH_MAX, workdir, "");
+ path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp");
+ if (delivery_init_tmpdir(ctx)) {
+ fprintf(stderr, "Failed to configure temporary storage directory\n");
+ exit(1);
+ }
+ path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "");
+ path_store(&ctx->storage.tools_dir, PATH_MAX, ctx->storage.output_dir, "tools");
+ path_store(&globals.conda_install_prefix, PATH_MAX, ctx->storage.tools_dir, "conda");
+ path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config");
+ path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta");
+ path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery");
+ path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages");
+ path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results");
+ path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels");
+ path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda");
+
+ char newpath[PATH_MAX] = {0};
+ if (getenv("PATH")) {
+ sprintf(newpath, "%s/bin:%s", ctx->storage.tools_dir, getenv("PATH"));
+ setenv("PATH", newpath, 1);
+ } else {
+ SYSERROR("%s", "environment variable PATH is undefined. Unable to continue.");
+ exit(1);
+ }
+}
+
+int main(int argc, char *argv[]) {
+ size_t rootdirs_total = 0;
+ char *destdir = NULL;
+ char **rootdirs = NULL;
+ int do_html = 0;
+ int c = 0;
+ int option_index = 0;
+ while ((c = getopt_long(argc, argv, "hd:vUw", long_options, &option_index)) != -1) {
+ switch (c) {
+ case 'h':
+ usage(path_basename(argv[0]));
+ exit(0);
+ case 'd':
+ if (mkdir(optarg, 0755)) {
+ if (errno != 0 && errno != EEXIST) {
+ SYSERROR("Unable to create destination directory, '%s': %s", optarg, strerror(errno));
+ exit(1);
+ }
+ }
+ destdir = realpath(optarg, NULL);
+ break;
+ case 'U':
+ fflush(stdout);
+ fflush(stderr);
+ setvbuf(stdout, NULL, _IONBF, 0);
+ setvbuf(stderr, NULL, _IONBF, 0);
+ break;
+ case 'v':
+ globals.verbose = 1;
+ break;
+ case 'w':
+ do_html = 1;
+ break;
+ case '?':
+ default:
+ exit(1);
+ }
+ }
+
+ int current_index = optind;
+ if (optind < argc) {
+ rootdirs_total = argc - current_index;
+        rootdirs = calloc(rootdirs_total + 1, sizeof(*rootdirs));
+
+        int i = 0;
+        while (optind < argc) {
+            if (argv[optind]) {
+                if (access(argv[optind], F_OK) < 0) {
+                    fprintf(stderr, "%s: %s\n", argv[optind], strerror(errno));
+                    exit(1);
+                }
+            }
+            // record each positional argument as a STASIS root directory
+            rootdirs[i] = realpath(argv[optind], NULL);
+            i++;
+            optind++;
+        }
+ }
+
+ if (isempty(destdir)) {
+ if (mkdir("output", 0755)) {
+ if (errno != 0 && errno != EEXIST) {
+ SYSERROR("Unable to create destination directory, '%s': %s", "output", strerror(errno));
+ exit(1);
+ }
+ }
+ destdir = realpath("output", NULL);
+ }
+
+ if (!rootdirs || !rootdirs_total) {
+ fprintf(stderr, "You must specify at least one STASIS root directory to index\n");
+ exit(1);
+ } else {
+ for (size_t i = 0; i < rootdirs_total; i++) {
+ if (isempty(rootdirs[i]) || !strcmp(rootdirs[i], "/") || !strcmp(rootdirs[i], "\\")) {
+ SYSERROR("Unsafe directory: %s", rootdirs[i]);
+ exit(1);
+ } else if (access(rootdirs[i], F_OK)) {
+ SYSERROR("%s: %s", rootdirs[i], strerror(errno));
+ exit(1);
+ }
+ }
+ }
+
+    char stasis_sysconfdir_tmp[PATH_MAX] = {0};
+ if (getenv("STASIS_SYSCONFDIR")) {
+ strncpy(stasis_sysconfdir_tmp, getenv("STASIS_SYSCONFDIR"), sizeof(stasis_sysconfdir_tmp) - 1);
+ } else {
+ strncpy(stasis_sysconfdir_tmp, STASIS_SYSCONFDIR, sizeof(stasis_sysconfdir_tmp) - 1);
+ }
+
+ globals.sysconfdir = realpath(stasis_sysconfdir_tmp, NULL);
+ if (!globals.sysconfdir) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to resolve path to configuration directory: %s\n", stasis_sysconfdir_tmp);
+ exit(1);
+ }
+
+ char *workdir;
+ char workdir_template[PATH_MAX] = {0};
+ char *system_tmp = getenv("TMPDIR");
+ if (system_tmp) {
+ strcat(workdir_template, system_tmp);
+ } else {
+ strcat(workdir_template, "/tmp");
+ }
+ strcat(workdir_template, "/stasis-combine.XXXXXX");
+ workdir = mkdtemp(workdir_template);
+ if (!workdir) {
+ SYSERROR("Unable to create temporary directory: %s", workdir_template);
+ exit(1);
+ } else if (isempty(workdir) || !strcmp(workdir, "/") || !strcmp(workdir, "\\")) {
+ SYSERROR("Unsafe directory: %s", workdir);
+ exit(1);
+ }
+
+ struct Delivery ctx;
+ memset(&ctx, 0, sizeof(ctx));
+
+ printf(BANNER, VERSION, AUTHOR);
+
+ indexer_init_dirs(&ctx, workdir);
+
+ msg(STASIS_MSG_L1, "%s delivery root %s\n",
+ rootdirs_total > 1 ? "Merging" : "Indexing",
+ rootdirs_total > 1 ? "directories" : "directory");
+ if (indexer_combine_rootdirs(workdir, rootdirs, rootdirs_total)) {
+ SYSERROR("%s", "Copy operation failed");
+ rmtree(workdir);
+ exit(1);
+ }
+
+ if (access(ctx.storage.conda_artifact_dir, F_OK)) {
+ mkdirs(ctx.storage.conda_artifact_dir, 0755);
+ }
+
+ if (access(ctx.storage.wheel_artifact_dir, F_OK)) {
+ mkdirs(ctx.storage.wheel_artifact_dir, 0755);
+ }
+
+ struct MicromambaInfo m;
+ if (micromamba_configure(&ctx, &m)) {
+ SYSERROR("%s", "Unable to configure micromamba");
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L1, "Indexing conda packages\n");
+ if (indexer_conda(&ctx, m)) {
+ SYSERROR("%s", "Conda package indexing operation failed");
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L1, "Indexing wheel packages\n");
+ if (indexer_wheels(&ctx)) {
+ SYSERROR("%s", "Python package indexing operation failed");
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L1, "Loading metadata\n");
+ struct StrList *metafiles = NULL;
+ indexer_get_files(&metafiles, ctx.storage.meta_dir, "*.stasis");
+ strlist_sort(metafiles, STASIS_SORT_LEN_ASCENDING);
+ struct Delivery local[strlist_count(metafiles)];
+
+ for (size_t i = 0; i < strlist_count(metafiles); i++) {
+ char *item = strlist_item(metafiles, i);
+ memset(&local[i], 0, sizeof(ctx));
+ memcpy(&local[i], &ctx, sizeof(ctx));
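+        // Seed each per-release context with the shared directory layout, then override it from the metadata file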
+ char path[PATH_MAX];
+ sprintf(path, "%s/%s", ctx.storage.meta_dir, item);
+ if (globals.verbose) {
+ puts(path);
+ }
+ indexer_load_metadata(&local[i], path);
+ }
+
+ msg(STASIS_MSG_L1, "Generating links to latest release iteration\n");
+ if (indexer_symlinks(local, strlist_count(metafiles))) {
+ SYSERROR("%s", "Link generation failed");
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L1, "Generating README.md\n");
+ if (indexer_readmes(local, strlist_count(metafiles))) {
+ SYSERROR("%s", "README indexing operation failed");
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L1, "Indexing test results\n");
+ if (indexer_junitxml_report(local, strlist_count(metafiles))) {
+ SYSERROR("%s", "Test result indexing operation failed");
+ exit(1);
+ }
+
+ if (do_html) {
+ msg(STASIS_MSG_L1, "Generating HTML indexes\n");
+ if (indexer_make_website(local)) {
+ SYSERROR("%s", "Site creation failed");
+ exit(1);
+ }
+ }
+
+ msg(STASIS_MSG_L1, "Copying indexed delivery to '%s'\n", destdir);
+ char cmd[PATH_MAX];
+ memset(cmd, 0, sizeof(cmd));
+ sprintf(cmd, "rsync -ah%s --delete --exclude 'tmp/' --exclude 'tools/' '%s/' '%s/'", globals.verbose ? "v" : "q", workdir, destdir);
+ guard_free(destdir);
+
+ if (globals.verbose) {
+ puts(cmd);
+ }
+
+ if (system(cmd)) {
+ SYSERROR("%s", "Copy operation failed");
+ rmtree(workdir);
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L1, "Removing work directory: %s\n", workdir);
+ if (rmtree(workdir)) {
+ SYSERROR("Failed to remove work directory: %s", strerror(errno));
+ }
+
+ guard_free(destdir);
+ GENERIC_ARRAY_FREE(rootdirs);
+ guard_strlist_free(&metafiles);
+ delivery_free(&ctx);
+ globals_free();
+ msg(STASIS_MSG_L1, "Done!\n");
+ return 0;
+}