author     Joseph Hunkeler <jhunkeler@users.noreply.github.com>   2024-10-04 08:40:39 -0400
committer  GitHub <noreply@github.com>                            2024-10-04 08:40:39 -0400
commit     d7e3deba72703ad36c497f5becf6772ca00a0d6d (patch)
tree       eff3b2ec3dcc31126041529c8e00a714997f2d7b
parent     9691ccf51b3efd8113e9620c4afa8b5382d7f161 (diff)
parent     f0ba8cd378a460f927644e41f49be95d0e956f81 (diff)
download   stasis-d7e3deba72703ad36c497f5becf6772ca00a0d6d.tar.gz
Merge pull request #46 from jhunkeler/split-delivery-code
Add multiprocessing / Split delivery code
40 files changed, 3128 insertions(+), 2006 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index abb700c..caed929 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,7 +2,7 @@
 cmake_minimum_required(VERSION 3.15)
 project(STASIS C)
 include(GNUInstallDirs)
-set(nix_cflags -Wall -Wextra -fPIC)
+set(nix_cflags -Wall -Wextra -fPIC -D_GNU_SOURCE)
 set(win_cflags /Wall)
 set(CMAKE_C_STANDARD 99)
 find_package(LibXml2)
@@ -11,13 +11,16 @@
 link_libraries(CURL::libcurl)
 link_libraries(LibXml2::LibXml2)
 include_directories(${LIBXML2_INCLUDE_DIR})
 
+option(FORTIFY_SOURCE OFF)
+if (FORTIFY_SOURCE)
+    set(nix_cflags ${nix_cflags} -O -D_FORTIFY_SOURCE=1)
+endif ()
+
 if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
-    message("gnu options")
     add_compile_options(${nix_cflags})
 elseif (CMAKE_C_COMPILER_ID MATCHES "Clang")
     add_compile_options(${nix_cflags})
 elseif (CMAKE_C_COMPILER_ID STREQUAL "MSVC")
-    message("microsoft visual c options")
     add_compile_options(${win_cflags})
 endif()
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -117,6 +117,8 @@ Create some test cases for packages.
 [test:our_cool_program]
 version = 1.2.3
 repository = https://github.com/org/our_cool_program
+script_setup =
+    pip install -e '.[test]'
 script =
     pytest -fEsx \
         --basetemp="{{ func:basetemp_dir() }}" \
@@ -126,6 +128,8 @@ script =
 [test:our_other_cool_program]
 version = 4.5.6
 repository = https://github.com/org/our_other_cool_program
+script_setup =
+    pip install -e '.[test]'
 script =
     pytest -fEsx \
         --basetemp="{{ func:basetemp_dir() }}" \
@@ -143,22 +147,24 @@ stasis mydelivery.ini
 
 ## Command Line Options
 
-| Long Option         | Short Option | Purpose                                                        |
-|:--------------------|:------------:|:---------------------------------------------------------------|
-| --help              | -h           | Display usage statement                                        |
-| --version           | -V           | Display program version                                        |
-| --continue-on-error | -C           | Allow tests to fail                                            |
-| --config ARG        | -c ARG       | Read STASIS configuration file                                 |
-| --python ARG        | -p ARG       | Override version of Python in configuration                    |
-| --verbose           | -v           | Increase output verbosity                                      |
-| --unbuffered        | -U           | Disable line buffering                                         |
-| --update-base       | n/a          | Update conda installation prior to STASIS environment creation |
-| --overwrite         | n/a          | Overwrite an existing release                                  |
-| --no-docker         | n/a          | Do not build docker images                                     |
-| --no-artifactory    | n/a          | Do not upload artifacts to Artifactory                         |
-| --no-testing        | n/a          | Do not execute test scripts                                    |
-| --no-rewrite        | n/a          | Do not rewrite paths and URLs in output files                  |
-| DELIVERY_FILE       | n/a          | STASIS delivery file                                           |
+| Long Option          | Short Option | Purpose                                                        |
+|:---------------------|:------------:|:----------------------------------------------------------------|
+| --help               | -h           | Display usage statement                                        |
+| --version            | -V           | Display program version                                        |
+| --continue-on-error  | -C           | Allow tests to fail                                            |
+| --config ARG         | -c ARG       | Read STASIS configuration file                                 |
+| --cpu-limit ARG      | -l ARG       | Number of processes to spawn concurrently (default: cpus - 1)  |
+| --python ARG         | -p ARG       | Override version of Python in configuration                    |
+| --verbose            | -v           | Increase output verbosity                                      |
+| --unbuffered         | -U           | Disable line buffering                                         |
+| --update-base        | n/a          | Update conda installation prior to STASIS environment creation |
+| --parallel-fail-fast | n/a          | On test error, terminate all concurrent tasks                  |
+| --overwrite          | n/a          | Overwrite an existing release                                  |
+| --no-docker          | n/a          | Do not build docker images                                     |
+| --no-artifactory     | n/a          | Do not upload artifacts to Artifactory                         |
+| --no-testing         | n/a          | Do not execute test scripts                                    |
+| --no-rewrite         | n/a          | Do not rewrite paths and URLs in output files                  |
+| DELIVERY_FILE        | n/a          | STASIS delivery file                                           |
 
 ## Environment variables
@@ -259,13 +265,16 @@ Environment variables exported are _global_ to all programs executed by stasis.
 
 Sections starting with `test:` will be used during the testing phase of the stasis pipeline. The value of `name` following the colon is arbitrary, and only used for reporting which test-run is executing. Section names must be unique.
 
-| Key          | Type   | Purpose                                                | Required |
-|--------------|--------|--------------------------------------------------------|----------|
-| build_recipe | String | Git repository path to package's conda recipe          | N        |
-| repository   | String | Git repository path or URL to clone                    | Y        |
-| version      | String | Git commit or tag to check out                         | Y        |
-| runtime      | List   | Export environment variables specific to test context  | Y        |
-| script       | List   | Body of a shell script that will execute the tests     | Y        |
+| Key          | Type    | Purpose                                                      | Required |
+|--------------|---------|--------------------------------------------------------------|----------|
+| disable      | Boolean | Disable `script` execution (`script_setup` always executes)  | N        |
+| parallel     | Boolean | Execute test block in parallel (default) or sequentially     | N        |
+| build_recipe | String  | Git repository path to package's conda recipe                | N        |
+| repository   | String  | Git repository path or URL to clone                          | Y        |
+| version      | String  | Git commit or tag to check out                               | Y        |
+| runtime      | List    | Export environment variables specific to test context        | Y        |
+| script_setup | List    | Body of a shell script that will install dependencies        | N        |
+| script       | List    | Body of a shell script that will execute the tests           | Y        |
 
 ### deploy:artifactory:_name_
@@ -320,7 +329,6 @@
 | system.platform | System Platform (OS) |
 | deploy.docker.registry | Docker registry |
 | deploy.jfrog.repo | Artifactory destination repository |
-| workaround.tox_posargs | Return populated `-c` and `--root` tox arguments.<br/>Force-enables positional arguments in tox's command line parser. |
 | workaround.conda_reactivate | Reinitialize the conda runtime environment.<br/>Use this after calling `conda install` from within a `[test:*].script`. |
 
 The template engine also provides an interface to environment variables using the `{{ env:VARIABLE_NAME }}` notation.
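The two new options map onto the globals introduced in `include/core.h` further down. A minimal sketch of that mapping, assuming a `getopt_long`-style parser with hypothetical option values and handler names (the repository's actual CLI code is not part of this diff):

```c
#include <getopt.h>   // getopt_long-style option table (assumed parser style)
#include <stdlib.h>
#include "core.h"     // struct STASIS_GLOBAL (new fields shown below)

extern struct STASIS_GLOBAL globals;

// Hypothetical option table entries for the two new flags
static const struct option parallel_opts_sketch[] = {
    {"cpu-limit",          required_argument, NULL, 'l'},
    {"parallel-fail-fast", no_argument,       NULL, 1},
    {NULL, 0, NULL, 0},
};

// Hypothetical handler: route a parsed option into the new globals
void handle_parallel_opt_sketch(int opt, const char *arg) {
    switch (opt) {
        case 'l':  // --cpu-limit ARG
            globals.cpu_limit = strtol(arg, NULL, 10);
            break;
        case 1:    // --parallel-fail-fast
            globals.parallel_fail_fast = 1;
            break;
        default:
            break;
    }
}
```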
diff --git a/examples/template/example.ini b/examples/template/example.ini
index 4a4c579..cca2089 100644
--- a/examples/template/example.ini
+++ b/examples/template/example.ini
@@ -43,11 +43,24 @@ pip_packages =
 
 ; key=value
 [test:name]  ; where test:"name" denotes the package name
+; (boolean) Do not execute "script"
+disable =
+
+; (boolean) Add to parallel task pool?
+; true = yes (default)
+; false = no (send to serial task pool)
+parallel =
+
 ; (string) Version of tested package
 version =
 
 ; (string) Git repository of tested package
 repository =
 
+; (list) Commands to execute before "script"
+; e.g. pip install -e '.[test]'
+script_setup =
+
 ; (list) Commands to execute against tested package
+; e.g. pytest
 script =
diff --git a/include/core.h b/include/core.h
index ef90e96..e09b212 100644
--- a/include/core.h
+++ b/include/core.h
@@ -21,6 +21,8 @@
 #define HTTP_ERROR(X) X >= 400
 
 #include "config.h"
+#include "core_mem.h"
+#include "multiprocessing.h"
 #include "envctl.h"
 #include "template.h"
 #include "utils.h"
@@ -42,16 +44,6 @@
 #include "github.h"
 #include "template_func_proto.h"
 
-#define guard_runtime_free(X) do { if (X) { runtime_free(X); X = NULL; } } while (0)
-#define guard_strlist_free(X) do { if ((*X)) { strlist_free(X); (*X) = NULL; } } while (0)
-#define guard_free(X) do { if (X) { free(X); X = NULL; } } while (0)
-#define GENERIC_ARRAY_FREE(ARR) do { \
-    for (size_t ARR_I = 0; ARR && ARR[ARR_I] != NULL; ARR_I++) { \
-        guard_free(ARR[ARR_I]); \
-    } \
-    guard_free(ARR); \
-} while (0)
-
 #define COE_CHECK_ABORT(COND, MSG) \
     do {\
         if (!globals.continue_on_error && COND) { \
@@ -71,6 +63,10 @@ struct STASIS_GLOBAL {
     bool enable_testing;               //!< Enable package testing
     bool enable_overwrite;             //!< Enable release file clobbering
     bool enable_rewrite_spec_stage_2;  //!< Enable automatic @STR@ replacement in output files
+    bool enable_parallel;              //!< Enable testing in parallel
+    long cpu_limit;                    //!< Limit parallel processing to n cores (default: max - 1)
+    long parallel_fail_fast;           //!< Fail immediately on error
+    int pool_status_interval;          //!< Report "Task is running" every n seconds
     struct StrList *conda_packages;    //!< Conda packages to install after initial activation
     struct StrList *pip_packages;      //!< Pip packages to install after initial activation
     char *tmpdir;                      //!< Path to temporary storage directory
diff --git a/include/core_mem.h b/include/core_mem.h
new file mode 100644
index 0000000..ef07a00
--- /dev/null
+++ b/include/core_mem.h
@@ -0,0 +1,18 @@
+//
+// Created by jhunk on 9/13/24.
+//
+
+#ifndef STASIS_CORE_MEM_H
+#define STASIS_CORE_MEM_H
+
+#define guard_runtime_free(X) do { if (X) { runtime_free(X); X = NULL; } } while (0)
+#define guard_strlist_free(X) do { if ((*X)) { strlist_free(X); (*X) = NULL; } } while (0)
+#define guard_free(X) do { if (X) { free(X); X = NULL; } } while (0)
+#define GENERIC_ARRAY_FREE(ARR) do { \
+    for (size_t ARR_I = 0; ARR && ARR[ARR_I] != NULL; ARR_I++) { \
+        guard_free(ARR[ARR_I]); \
+    } \
+    guard_free(ARR); \
+} while (0)
+
+#endif //STASIS_CORE_MEM_H
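The macros relocated into `core_mem.h` NULL their argument after freeing, which makes repeated calls safe. A hypothetical standalone usage sketch, assuming the header is on the include path (none of this code appears in the diff itself):

```c
#include <stdlib.h>
#include <string.h>
#include "core_mem.h"  // guard_free / GENERIC_ARRAY_FREE as defined above

int main(void) {
    char *s = strdup("example");
    guard_free(s);  // frees s and sets it to NULL
    guard_free(s);  // harmless no-op: s is already NULL, so no double-free

    // GENERIC_ARRAY_FREE walks a NULL-terminated array, freeing each element
    char **arr = calloc(3, sizeof(*arr));  // arr[2] stays NULL (terminator)
    arr[0] = strdup("a");
    arr[1] = strdup("b");
    GENERIC_ARRAY_FREE(arr);  // frees arr[0], arr[1], then arr itself
    return 0;
}
```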
diff --git a/include/delivery.h b/include/delivery.h
index 067cd0b..6da890a 100644
--- a/include/delivery.h
+++ b/include/delivery.h
@@ -7,6 +7,8 @@
 #include <stdbool.h>
 #include <unistd.h>
 #include <sys/utsname.h>
+#include <fnmatch.h>
+#include <sys/statvfs.h>
 #include "core.h"
 
 #define DELIVERY_PLATFORM_MAX 4
@@ -149,7 +151,10 @@
     char *name;                 ///< Name of package
     char *version;              ///< Version of package
     char *repository;           ///< Git repository of package
+    char *script_setup;         ///< Commands to execute before the main script
     char *script;               ///< Commands to execute
+    bool disable;               ///< Toggle a test block
+    bool parallel;              ///< Toggle parallel or serial execution
     char *build_recipe;         ///< Conda recipe to build (optional)
     char *repository_info_ref;  ///< Git commit hash
     char *repository_info_tag;  ///< Git tag (first parent)
@@ -286,7 +291,7 @@ int delivery_copy_conda_artifacts(struct Delivery *ctx);
  * Retrieve Conda installer
  * @param installer_url URL to installation script
  */
-int delivery_get_installer(struct Delivery *ctx, char *installer_url);
+int delivery_get_conda_installer(struct Delivery *ctx, char *installer_url);
 
 /**
  * Generate URL based on Delivery context
@@ -294,7 +299,7 @@
  * @param result pointer to char
  * @return in result
  */
-void delivery_get_installer_url(struct Delivery *ctx, char *result);
+void delivery_get_conda_installer_url(struct Delivery *ctx, char *result);
 
 /**
  * Install packages based on Delivery context
@@ -376,6 +381,12 @@ void delivery_gather_tool_versions(struct Delivery *ctx);
 
 // helper function
 int delivery_init_tmpdir(struct Delivery *ctx);
 
+void delivery_init_dirs_stage1(struct Delivery *ctx);
+
+void delivery_init_dirs_stage2(struct Delivery *ctx);
+
+int delivery_init_platform(struct Delivery *ctx);
+
 int delivery_init_artifactory(struct Delivery *ctx);
 
 int delivery_artifact_upload(struct Delivery *ctx);
@@ -386,10 +397,21 @@ int delivery_docker(struct Delivery *ctx);
 
 int delivery_fixup_test_results(struct Delivery *ctx);
 
-int *bootstrap_build_info(struct Delivery *ctx);
+int bootstrap_build_info(struct Delivery *ctx);
 
 int delivery_dump_metadata(struct Delivery *ctx);
 
+int populate_info(struct Delivery *ctx);
+
+int populate_delivery_cfg(struct Delivery *ctx, int render_mode);
+
+int populate_delivery_ini(struct Delivery *ctx, int render_mode);
+
+int populate_mission_ini(struct Delivery **ctx, int render_mode);
+
+void validate_delivery_ini(struct INIFILE *ini);
+
+int filter_repo_tags(char *repo, struct StrList *patterns);
 
 /**
  * Determine whether a release on-disk matches the release name in use
 * @param ctx Delivery context
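Taken together with the README's `[test:*]` keys, the new `disable`, `parallel`, and `script_setup` fields suggest a dispatch step that routes each test block into a concurrent or serial pool. A hedged sketch of that routing, not the repository's actual runner (the `NULL` working directory and pool arguments are assumptions):

```c
#include "delivery.h"         // struct Delivery / struct Test (fields above)
#include "multiprocessing.h"  // mp_pool_task() (declared in the next file)

void schedule_tests_sketch(struct Delivery *ctx,
                           struct MultiProcessingPool *pool_parallel,
                           struct MultiProcessingPool *pool_serial) {
    for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
        struct Test *test = &ctx->tests[i];
        if (!test->name) {
            continue;  // unused slot in the fixed-size tests array
        }
        if (test->script_setup) {
            // Per the README: script_setup always executes, even when disabled
            mp_pool_task(pool_serial, test->name, NULL, test->script_setup);
        }
        if (test->disable || !test->script) {
            continue;  // "disable" only suppresses the test script itself
        }
        // parallel defaults to true; false sends the block to the serial pool
        struct MultiProcessingPool *dest = test->parallel ? pool_parallel
                                                          : pool_serial;
        mp_pool_task(dest, test->name, NULL, test->script);
    }
}
```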
diff --git a/include/multiprocessing.h b/include/multiprocessing.h
new file mode 100644
index 0000000..5919462
--- /dev/null
+++ b/include/multiprocessing.h
@@ -0,0 +1,131 @@
+/// @file multiprocessing.h
+#ifndef STASIS_MULTIPROCESSING_H
+#define STASIS_MULTIPROCESSING_H
+
+#include "core.h"
+#include <signal.h>
+#include <sys/wait.h>
+#include <semaphore.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+struct MultiProcessingTask {
+    pid_t pid;                     ///< Program PID
+    pid_t parent_pid;              ///< Program PID (parent process)
+    int status;                    ///< Child process exit status
+    int signaled_by;               ///< Last signal received, if any
+    time_t _now;                   ///< Current time
+    time_t _seconds;               ///< Time elapsed (used by MultiProcessingPool.status_interval)
+    char ident[255];               ///< Identity of the pool task
+    char *cmd;                     ///< Shell command(s) to be executed
+    size_t cmd_len;                ///< Length of command string (for mmap/munmap)
+    char working_dir[PATH_MAX];    ///< Path to directory `cmd` should be executed in
+    char log_file[PATH_MAX];       ///< Full path to stdout/stderr log file
+    char parent_script[PATH_MAX];  ///< Path to temporary script executing the task
+    struct {
+        struct timespec t_start;
+        struct timespec t_stop;
+    } time_data;                   ///< Wall-time counters
+};
+
+struct MultiProcessingPool {
+    struct MultiProcessingTask *task;  ///< Array of tasks to execute
+    size_t num_used;                   ///< Number of tasks populated in the task array
+    size_t num_alloc;                  ///< Number of tasks allocated by the task array
+    char ident[255];                   ///< Identity of task pool
+    char log_root[PATH_MAX];           ///< Base directory to store stderr/stdout log files
+    int status_interval;               ///< Report a pooled task is "running" every n seconds
+};
+
+/// Maximum number of multiprocessing tasks STASIS can execute
+#define MP_POOL_TASK_MAX 1000
+
+/// Value signifies a process is unused or finished executing
+#define MP_POOL_PID_UNUSED 0
+
+/// Option flags for mp_pool_join()
+#define MP_POOL_FAIL_FAST 1 << 1
+
+/**
+ * Create a multiprocessing pool
+ *
+ * ```c
+ * #include "multiprocessing.h"
+ * #include "utils.h"  // for get_cpu_count()
+ *
+ * int main(int argc, char *argv[]) {
+ *     struct MultiProcessingPool *mp;
+ *     mp = mp_pool_init("mypool", "/tmp/mypool_logs");
+ *     if (mp) {
+ *         char *commands[] = {
+ *             "/bin/echo hello world",
+ *             "/bin/echo world hello",
+ *             NULL
+ *         };
+ *         for (size_t i = 0; commands[i] != NULL; i++) {
+ *             struct MultiProcessingTask *task;
+ *             char task_name[100];
+ *
+ *             sprintf(task_name, "mytask%zu", i);
+ *             // NULL working_dir: assumed to mean "run in the current directory"
+ *             task = mp_pool_task(mp, task_name, NULL, commands[i]);
+ *             if (!task) {
+ *                 // handle task creation error
+ *             }
+ *         }
+ *         if (mp_pool_join(mp, get_cpu_count(), MP_POOL_FAIL_FAST)) {
+ *             // handle pool execution error
+ *         }
+ *         mp_pool_free(&mp);
+ *     } else {
+ *         // handle pool initialization error
+ *     }
+ * }
+ * ```
+ *
+ * @param ident a name to identify the pool
+ * @param log_root the path to store program output
+ * @return pointer to initialized MultiProcessingPool
+ * @return NULL on error
+ */
+struct MultiProcessingPool *mp_pool_init(const char *ident, const char *log_root);
+
+/**
+ * Create a multiprocessing pool task
+ *
+ * @param pool a pointer to MultiProcessingPool
+ * @param ident a name to identify the task
+ * @param working_dir a directory to execute `cmd` in
+ * @param cmd a command to execute
+ * @return pointer to MultiProcessingTask structure
+ * @return NULL on error
+ */
+struct MultiProcessingTask *mp_pool_task(struct MultiProcessingPool *pool, const char *ident, char *working_dir, char *cmd);
+
+/**
+ * Execute all tasks in a pool
+ *
+ * @param pool a pointer to MultiProcessingPool
+ * @param jobs the number of processes to spawn at once (for serial execution use `1`)
+ * @param flags options to be OR'd (MP_POOL_FAIL_FAST)
+ * @return 0 on success
+ * @return >0 on failure
+ * @return <0 on error
+ */
+int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags);
+
+/**
+ * Show summary of pool tasks
+ *
+ * @param pool a pointer to MultiProcessingPool
+ */
+void mp_pool_show_summary(struct MultiProcessingPool *pool);
+
+/**
+ * Release resources allocated by mp_pool_init()
+ *
+ * @param pool a pointer to MultiProcessingPool
+ */
+void mp_pool_free(struct MultiProcessingPool **pool);
+
+
+#endif //STASIS_MULTIPROCESSING_H
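Complementing the header's embedded example: the `jobs` argument to `mp_pool_join()` also covers the serial case, so `parallel = false` test blocks can reuse the same machinery. A sketch under those assumptions (the pool names and error handling are illustrative, not repository code):

```c
#include "multiprocessing.h"  // also pulls in core.h per the header above

extern struct STASIS_GLOBAL globals;  // parallel_fail_fast / cpu_limit (see core.h)

int run_pools_sketch(struct MultiProcessingPool *pool_parallel,
                     struct MultiProcessingPool *pool_serial) {
    // --parallel-fail-fast maps onto the MP_POOL_FAIL_FAST join flag
    size_t flags = globals.parallel_fail_fast ? MP_POOL_FAIL_FAST : 0;

    // Spawn up to --cpu-limit processes at once (default: cpus - 1)
    if (mp_pool_join(pool_parallel, (size_t) globals.cpu_limit, flags)) {
        return -1;  // >0 means a task failed, <0 means a pool error
    }
    // jobs = 1 degrades the pool to strictly sequential execution
    if (mp_pool_join(pool_serial, 1, flags)) {
        return -1;
    }
    mp_pool_show_summary(pool_parallel);
    mp_pool_show_summary(pool_serial);
    return 0;
}
```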
diff --git a/include/template_func_proto.h b/include/template_func_proto.h
index 7778a11..0c8bbb7 100644
--- a/include/template_func_proto.h
+++ b/include/template_func_proto.h
@@ -7,5 +7,6 @@
 int get_github_release_notes_tplfunc_entrypoint(void *frame, void *data_out);
 int get_github_release_notes_auto_tplfunc_entrypoint(void *frame, void *data_out);
 int get_junitxml_file_entrypoint(void *frame, void *data_out);
 int get_basetemp_dir_entrypoint(void *frame, void *data_out);
+int tox_run_entrypoint(void *frame, void *data_out);
 
 #endif //TEMPLATE_FUNC_PROTO_H
\ No newline at end of file
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 2399dc5..5c5bc6e 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -12,6 +12,16 @@ add_library(stasis_core STATIC
     utils.c
     system.c
     download.c
+    delivery_postprocess.c
+    delivery_conda.c
+    delivery_docker.c
+    delivery_install.c
+    delivery_artifactory.c
+    delivery_test.c
+    delivery_build.c
+    delivery_show.c
+    delivery_populate.c
+    delivery_init.c
     delivery.c
     recipe.c
     relocation.c
@@ -25,6 +35,7 @@
     github.c
     template_func_proto.c
     envctl.c
+    multiprocessing.c
 )
 
 add_executable(stasis
diff --git a/src/conda.c b/src/conda.c
index ff55f14..37a7793 100644
--- a/src/conda.c
+++ b/src/conda.c
@@ -222,7 +222,7 @@ int conda_activate(const char *root, const char *env_name) {
 
     // Fully activate conda and record its effect on the runtime environment
     char command[PATH_MAX * 3];
-    snprintf(command, sizeof(command) - 1, "source %s; source %s; conda activate %s &>/dev/null; env -0", path_conda, path_mamba, env_name);
+    snprintf(command, sizeof(command) - 1, "set -a; source %s; source %s; conda activate %s &>/dev/null; env -0", path_conda, path_mamba, env_name);
     int retval = shell(&proc, command);
     if (retval) {
         // it didn't work; drop out for cleanup
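The one-line `conda.c` change prepends `set -a` (allexport) so that variables assigned by the sourced activation scripts are exported and therefore visible to the trailing `env -0`. A self-contained illustration of that shell behavior, unrelated to STASIS's own `shell()` helper:

```c
#include <stdio.h>

int main(void) {
    // With `set -a`, the plain assignment below is auto-exported, so it shows
    // up in `env -0`; without it, MY_VAR would stay shell-local and be lost.
    FILE *fp = popen("bash -c 'set -a; MY_VAR=hello; env -0'", "r");
    if (!fp) {
        perror("popen");
        return 1;
    }
    int c;
    while ((c = fgetc(fp)) != EOF) {
        putchar(c ? c : '\n');  // env -0 is NUL-delimited; print one per line
    }
    return pclose(fp);
}
```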
diff --git a/src/delivery.c b/src/delivery.c
index 3e99aad..a689db2 100644
--- a/src/delivery.c
+++ b/src/delivery.c
@@ -1,98 +1,5 @@
-#define _GNU_SOURCE
-
-#include <fnmatch.h>
 #include "core.h"
 
-extern struct STASIS_GLOBAL globals;
-
-static void ini_has_key_required(struct INIFILE *ini, const char *section_name, char *key) {
-    int status = ini_has_key(ini, section_name, key);
-    if (!status) {
-        SYSERROR("%s:%s key is required but not defined", section_name, key);
-        exit(1);
-    }
-}
-
-static void conv_str(char **x, union INIVal val) {
-    if (*x) {
-        guard_free(*x);
-    }
-    if (val.as_char_p) {
-        char *tplop = tpl_render(val.as_char_p);
-        if (tplop) {
-            *x = tplop;
-        } else {
-            *x = NULL;
-        }
-    } else {
-        *x = NULL;
-    }
-}
-
-int delivery_init_tmpdir(struct Delivery *ctx) {
-    char *tmpdir = NULL;
-    char *x = NULL;
-    int unusable = 0;
-    errno = 0;
-
-    x = getenv("TMPDIR");
-    if (x) {
-        guard_free(ctx->storage.tmpdir);
-        tmpdir = strdup(x);
-    } else {
-        tmpdir = ctx->storage.tmpdir;
-    }
-
-    if (!tmpdir) {
-        // memory error
-        return -1;
-    }
-
-    // If the directory doesn't exist, create it
-    if (access(tmpdir, F_OK) < 0) {
-        if (mkdirs(tmpdir, 0755) < 0) {
-            msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create temporary storage directory: %s (%s)\n", tmpdir, strerror(errno));
-            goto l_delivery_init_tmpdir_fatal;
-        }
-    }
-
-    // If we can't read, write, or execute, then die
-    if (access(tmpdir, R_OK | W_OK | X_OK) < 0) {
-        msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s requires at least 0755 permissions.\n");
-        goto l_delivery_init_tmpdir_fatal;
-    }
-
-    struct statvfs st;
-    if (statvfs(tmpdir, &st) < 0) {
-        goto l_delivery_init_tmpdir_fatal;
-    }
-
-#if defined(STASIS_OS_LINUX)
-    // If we can't execute programs, or write data to the file system at all, then die
-    if ((st.f_flag & ST_NOEXEC) != 0) {
-        msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted with noexec\n", tmpdir);
-        goto l_delivery_init_tmpdir_fatal;
-    }
-#endif
-    if ((st.f_flag & ST_RDONLY) != 0) {
-        msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted read-only\n", tmpdir);
-        goto l_delivery_init_tmpdir_fatal;
-    }
-
-    if (!globals.tmpdir) {
-        globals.tmpdir = strdup(tmpdir);
-    }
-
-    if (!ctx->storage.tmpdir) {
-        ctx->storage.tmpdir = strdup(globals.tmpdir);
-    }
-    return unusable;
-
-
l_delivery_init_tmpdir_fatal: - unusable = 1; - return unusable; -} - void delivery_free(struct Delivery *ctx) { guard_free(ctx->system.arch); GENERIC_ARRAY_FREE(ctx->system.platform); @@ -193,551 +100,6 @@ void delivery_free(struct Delivery *ctx) { guard_free(ctx->_stasis_ini_fp.mission_path); } -void delivery_init_dirs_stage2(struct Delivery *ctx) { - path_store(&ctx->storage.build_recipes_dir, PATH_MAX, ctx->storage.build_dir, "recipes"); - path_store(&ctx->storage.build_sources_dir, PATH_MAX, ctx->storage.build_dir, "sources"); - path_store(&ctx->storage.build_testing_dir, PATH_MAX, ctx->storage.build_dir, "testing"); - path_store(&ctx->storage.build_docker_dir, PATH_MAX, ctx->storage.build_dir, "docker"); - - path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery"); - path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results"); - path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages"); - path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config"); - path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta"); - - path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda"); - path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels"); - path_store(&ctx->storage.docker_artifact_dir, PATH_MAX, ctx->storage.package_dir, "docker"); -} - -void delivery_init_dirs_stage1(struct Delivery *ctx) { - char *rootdir = getenv("STASIS_ROOT"); - if (rootdir) { - if (isempty(rootdir)) { - fprintf(stderr, "STASIS_ROOT is set, but empty. Please assign a file system path to this environment variable.\n"); - exit(1); - } - path_store(&ctx->storage.root, PATH_MAX, rootdir, ctx->info.build_name); - } else { - // use "stasis" in current working directory - path_store(&ctx->storage.root, PATH_MAX, "stasis", ctx->info.build_name); - } - path_store(&ctx->storage.tools_dir, PATH_MAX, ctx->storage.root, "tools"); - path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp"); - if (delivery_init_tmpdir(ctx)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Set $TMPDIR to a location other than %s\n", globals.tmpdir); - if (globals.tmpdir) - guard_free(globals.tmpdir); - exit(1); - } - - path_store(&ctx->storage.build_dir, PATH_MAX, ctx->storage.root, "build"); - path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output"); - - if (!ctx->storage.mission_dir) { - path_store(&ctx->storage.mission_dir, PATH_MAX, globals.sysconfdir, "mission"); - } - - if (access(ctx->storage.mission_dir, F_OK)) { - msg(STASIS_MSG_L1, "%s: %s\n", ctx->storage.mission_dir, strerror(errno)); - exit(1); - } - - // Override installation prefix using global configuration key - if (globals.conda_install_prefix && strlen(globals.conda_install_prefix)) { - // user wants a specific path - globals.conda_fresh_start = false; - /* - if (mkdirs(globals.conda_install_prefix, 0755)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create directory: %s: %s\n", - strerror(errno), globals.conda_install_prefix); - exit(1); - } - */ - /* - ctx->storage.conda_install_prefix = realpath(globals.conda_install_prefix, NULL); - if (!ctx->storage.conda_install_prefix) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "realpath(): Conda installation prefix reassignment failed\n"); - exit(1); - } - ctx->storage.conda_install_prefix = strdup(globals.conda_install_prefix); - */ - path_store(&ctx->storage.conda_install_prefix, PATH_MAX, globals.conda_install_prefix, 
"conda"); - } else { - // install conda under the STASIS tree - path_store(&ctx->storage.conda_install_prefix, PATH_MAX, ctx->storage.tools_dir, "conda"); - } -} - -int delivery_init_platform(struct Delivery *ctx) { - msg(STASIS_MSG_L2, "Setting architecture\n"); - char archsuffix[20]; - struct utsname uts; - if (uname(&uts)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "uname() failed: %s\n", strerror(errno)); - return -1; - } - - ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX + 1, sizeof(*ctx->system.platform)); - if (!ctx->system.platform) { - SYSERROR("Unable to allocate %d records for platform array\n", DELIVERY_PLATFORM_MAX); - return -1; - } - for (size_t i = 0; i < DELIVERY_PLATFORM_MAX; i++) { - ctx->system.platform[i] = calloc(DELIVERY_PLATFORM_MAXLEN, sizeof(*ctx->system.platform[0])); - } - - ctx->system.arch = strdup(uts.machine); - if (!ctx->system.arch) { - // memory error - return -1; - } - - if (!strcmp(ctx->system.arch, "x86_64")) { - strcpy(archsuffix, "64"); - } else { - strcpy(archsuffix, ctx->system.arch); - } - - msg(STASIS_MSG_L2, "Setting platform\n"); - strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname); - if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) { - sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "MacOSX"); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "macos"); - } else if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux")) { - sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "linux-%s", archsuffix); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "Linux"); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "linux"); - } else { - // Not explicitly supported systems - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], ctx->system.platform[DELIVERY_PLATFORM]); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], ctx->system.platform[DELIVERY_PLATFORM]); - strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], ctx->system.platform[DELIVERY_PLATFORM]); - tolower_s(ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); - } - - long cpu_count = get_cpu_count(); - if (!cpu_count) { - fprintf(stderr, "Unable to determine CPU count. Falling back to 1.\n"); - cpu_count = 1; - } - char ncpus[100] = {0}; - sprintf(ncpus, "%ld", cpu_count); - - // Declare some important bits as environment variables - setenv("CPU_COUNT", ncpus, 1); - setenv("STASIS_CPU_COUNT", ncpus, 1); - setenv("STASIS_ARCH", ctx->system.arch, 1); - setenv("STASIS_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM], 1); - setenv("STASIS_CONDA_ARCH", ctx->system.arch, 1); - setenv("STASIS_CONDA_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], 1); - setenv("STASIS_CONDA_PLATFORM_SUBDIR", ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], 1); - - // Register template variables - // These were moved out of main() because we can't take the address of system.platform[x] - // _before_ the array has been initialized. 
- tpl_register("system.arch", &ctx->system.arch); - tpl_register("system.platform", &ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); - - return 0; -} - -static int populate_mission_ini(struct Delivery **ctx, int render_mode) { - int err = 0; - struct INIFILE *ini; - - if ((*ctx)->_stasis_ini_fp.mission) { - return 0; - } - - // Now populate the rules - char missionfile[PATH_MAX] = {0}; - if (getenv("STASIS_SYSCONFDIR")) { - sprintf(missionfile, "%s/%s/%s/%s.ini", - getenv("STASIS_SYSCONFDIR"), "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); - } else { - sprintf(missionfile, "%s/%s/%s/%s.ini", - globals.sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); - } - - msg(STASIS_MSG_L2, "Reading mission configuration: %s\n", missionfile); - (*ctx)->_stasis_ini_fp.mission = ini_open(missionfile); - ini = (*ctx)->_stasis_ini_fp.mission; - if (!ini) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read misson configuration: %s, %s\n", missionfile, strerror(errno)); - exit(1); - } - (*ctx)->_stasis_ini_fp.mission_path = strdup(missionfile); - - (*ctx)->rules.release_fmt = ini_getval_str(ini, "meta", "release_fmt", render_mode, &err); - - // Used for setting artifactory build info - (*ctx)->rules.build_name_fmt = ini_getval_str(ini, "meta", "build_name_fmt", render_mode, &err); - - // Used for setting artifactory build info - (*ctx)->rules.build_number_fmt = ini_getval_str(ini, "meta", "build_number_fmt", render_mode, &err); - return 0; -} - -void validate_delivery_ini(struct INIFILE *ini) { - if (!ini) { - SYSERROR("%s", "INIFILE is NULL!"); - exit(1); - } - if (ini_section_search(&ini, INI_SEARCH_EXACT, "meta")) { - ini_has_key_required(ini, "meta", "name"); - ini_has_key_required(ini, "meta", "version"); - ini_has_key_required(ini, "meta", "rc"); - ini_has_key_required(ini, "meta", "mission"); - ini_has_key_required(ini, "meta", "python"); - } else { - SYSERROR("%s", "[meta] configuration section is required"); - exit(1); - } - - if (ini_section_search(&ini, INI_SEARCH_EXACT, "conda")) { - ini_has_key_required(ini, "conda", "installer_name"); - ini_has_key_required(ini, "conda", "installer_version"); - ini_has_key_required(ini, "conda", "installer_platform"); - ini_has_key_required(ini, "conda", "installer_arch"); - } else { - SYSERROR("%s", "[conda] configuration section is required"); - exit(1); - } - - for (size_t i = 0; i < ini->section_count; i++) { - struct INISection *section = ini->section[i]; - if (section && startswith(section->key, "test:")) { - char *name = strstr(section->key, ":"); - if (name && strlen(name) > 1) { - name = &name[1]; - } - //ini_has_key_required(ini, section->key, "version"); - //ini_has_key_required(ini, section->key, "repository"); - if (globals.enable_testing) { - ini_has_key_required(ini, section->key, "script"); - } - } - } - - if (ini_section_search(&ini, INI_SEARCH_EXACT, "deploy:docker")) { - // yeah? 
- } - - for (size_t i = 0; i < ini->section_count; i++) { - struct INISection *section = ini->section[i]; - if (section && startswith(section->key, "deploy:artifactory")) { - ini_has_key_required(ini, section->key, "files"); - ini_has_key_required(ini, section->key, "dest"); - } - } -} - -static int populate_delivery_ini(struct Delivery *ctx, int render_mode) { - union INIVal val; - struct INIFILE *ini = ctx->_stasis_ini_fp.delivery; - struct INIData *rtdata; - RuntimeEnv *rt; - - validate_delivery_ini(ini); - // Populate runtime variables first they may be interpreted by other - // keys in the configuration - rt = runtime_copy(__environ); - while ((rtdata = ini_getall(ini, "runtime")) != NULL) { - char rec[STASIS_BUFSIZ]; - sprintf(rec, "%s=%s", lstrip(strip(rtdata->key)), lstrip(strip(rtdata->value))); - runtime_set(rt, rtdata->key, rtdata->value); - } - runtime_apply(rt); - ctx->runtime.environ = rt; - - int err = 0; - ctx->meta.mission = ini_getval_str(ini, "meta", "mission", render_mode, &err); - - if (!strcasecmp(ctx->meta.mission, "hst")) { - ctx->meta.codename = ini_getval_str(ini, "meta", "codename", render_mode, &err); - } else { - ctx->meta.codename = NULL; - } - - ctx->meta.version = ini_getval_str(ini, "meta", "version", render_mode, &err); - ctx->meta.name = ini_getval_str(ini, "meta", "name", render_mode, &err); - ctx->meta.rc = ini_getval_int(ini, "meta", "rc", render_mode, &err); - ctx->meta.final = ini_getval_bool(ini, "meta", "final", render_mode, &err); - ctx->meta.based_on = ini_getval_str(ini, "meta", "based_on", render_mode, &err); - - if (!ctx->meta.python) { - ctx->meta.python = ini_getval_str(ini, "meta", "python", render_mode, &err); - guard_free(ctx->meta.python_compact); - ctx->meta.python_compact = to_short_version(ctx->meta.python); - } else { - ini_setval(&ini, INI_SETVAL_REPLACE, "meta", "python", ctx->meta.python); - } - - ctx->conda.installer_name = ini_getval_str(ini, "conda", "installer_name", render_mode, &err); - ctx->conda.installer_version = ini_getval_str(ini, "conda", "installer_version", render_mode, &err); - ctx->conda.installer_platform = ini_getval_str(ini, "conda", "installer_platform", render_mode, &err); - ctx->conda.installer_arch = ini_getval_str(ini, "conda", "installer_arch", render_mode, &err); - ctx->conda.installer_baseurl = ini_getval_str(ini, "conda", "installer_baseurl", render_mode, &err); - ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", " "LINE_SEP, render_mode, &err); - - if (ctx->conda.conda_packages->data && ctx->conda.conda_packages->data[0] && strpbrk(ctx->conda.conda_packages->data[0], " \t")) { - normalize_space(ctx->conda.conda_packages->data[0]); - replace_text(ctx->conda.conda_packages->data[0], " ", LINE_SEP, 0); - char *pip_packages_replacement = join(ctx->conda.conda_packages->data, LINE_SEP); - ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "conda_packages", pip_packages_replacement); - guard_free(pip_packages_replacement); - guard_strlist_free(&ctx->conda.conda_packages); - ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", LINE_SEP, render_mode, &err); - } - - for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) { - char *pkg = strlist_item(ctx->conda.conda_packages, i); - if (strpbrk(pkg, ";#") || isempty(pkg)) { - strlist_remove(ctx->conda.conda_packages, i); - } - } - - ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err); - if (ctx->conda.pip_packages->data && 
ctx->conda.pip_packages->data[0] && strpbrk(ctx->conda.pip_packages->data[0], " \t")) { - normalize_space(ctx->conda.pip_packages->data[0]); - replace_text(ctx->conda.pip_packages->data[0], " ", LINE_SEP, 0); - char *pip_packages_replacement = join(ctx->conda.pip_packages->data, LINE_SEP); - ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "pip_packages", pip_packages_replacement); - guard_free(pip_packages_replacement); - guard_strlist_free(&ctx->conda.pip_packages); - ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err); - } - - for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) { - char *pkg = strlist_item(ctx->conda.pip_packages, i); - if (strpbrk(pkg, ";#") || isempty(pkg)) { - strlist_remove(ctx->conda.pip_packages, i); - } - } - - // Delivery metadata consumed - populate_mission_ini(&ctx, render_mode); - - if (ctx->info.release_name) { - guard_free(ctx->info.release_name); - guard_free(ctx->info.build_name); - guard_free(ctx->info.build_number); - } - - if (delivery_format_str(ctx, &ctx->info.release_name, ctx->rules.release_fmt)) { - fprintf(stderr, "Failed to generate release name. Format used: %s\n", ctx->rules.release_fmt); - return -1; - } - - if (!ctx->info.build_name) { - delivery_format_str(ctx, &ctx->info.build_name, ctx->rules.build_name_fmt); - } - if (!ctx->info.build_number) { - delivery_format_str(ctx, &ctx->info.build_number, ctx->rules.build_number_fmt); - } - - // Best I can do to make output directories unique. Annoying. - delivery_init_dirs_stage2(ctx); - - if (!ctx->conda.conda_packages_defer) { - ctx->conda.conda_packages_defer = strlist_init(); - } - if (!ctx->conda.pip_packages_defer) { - ctx->conda.pip_packages_defer = strlist_init(); - } - - for (size_t z = 0, i = 0; i < ini->section_count; i++) { - char *section_name = ini->section[i]->key; - if (startswith(section_name, "test:")) { - struct Test *test = &ctx->tests[z]; - val.as_char_p = strchr(ini->section[i]->key, ':') + 1; - if (val.as_char_p && isempty(val.as_char_p)) { - return 1; - } - conv_str(&test->name, val); - - test->version = ini_getval_str(ini, section_name, "version", render_mode, &err); - test->repository = ini_getval_str(ini, section_name, "repository", render_mode, &err); - test->script = ini_getval_str(ini, section_name, "script", INI_READ_RAW, &err); - test->repository_remove_tags = ini_getval_strlist(ini, section_name, "repository_remove_tags", LINE_SEP, render_mode, &err); - test->build_recipe = ini_getval_str(ini, section_name, "build_recipe", render_mode, &err); - test->runtime.environ = ini_getval_strlist(ini, section_name, "runtime", LINE_SEP, render_mode, &err); - z++; - } - } - - for (size_t z = 0, i = 0; i < ini->section_count; i++) { - char *section_name = ini->section[i]->key; - struct Deploy *deploy = &ctx->deploy; - if (startswith(section_name, "deploy:artifactory")) { - struct JFrog *jfrog = &deploy->jfrog[z]; - // Artifactory base configuration - - jfrog->upload_ctx.workaround_parent_only = ini_getval_bool(ini, section_name, "workaround_parent_only", render_mode, &err); - jfrog->upload_ctx.exclusions = ini_getval_str(ini, section_name, "exclusions", render_mode, &err); - jfrog->upload_ctx.explode = ini_getval_bool(ini, section_name, "explode", render_mode, &err); - jfrog->upload_ctx.recursive = ini_getval_bool(ini, section_name, "recursive", render_mode, &err); - jfrog->upload_ctx.retries = ini_getval_int(ini, section_name, "retries", render_mode, &err); - jfrog->upload_ctx.retry_wait_time = 
ini_getval_int(ini, section_name, "retry_wait_time", render_mode, &err); - jfrog->upload_ctx.detailed_summary = ini_getval_bool(ini, section_name, "detailed_summary", render_mode, &err); - jfrog->upload_ctx.quiet = ini_getval_bool(ini, section_name, "quiet", render_mode, &err); - jfrog->upload_ctx.regexp = ini_getval_bool(ini, section_name, "regexp", render_mode, &err); - jfrog->upload_ctx.spec = ini_getval_str(ini, section_name, "spec", render_mode, &err); - jfrog->upload_ctx.flat = ini_getval_bool(ini, section_name, "flat", render_mode, &err); - jfrog->repo = ini_getval_str(ini, section_name, "repo", render_mode, &err); - jfrog->dest = ini_getval_str(ini, section_name, "dest", render_mode, &err); - jfrog->files = ini_getval_strlist(ini, section_name, "files", LINE_SEP, render_mode, &err); - z++; - } - } - - for (size_t i = 0; i < ini->section_count; i++) { - char *section_name = ini->section[i]->key; - struct Deploy *deploy = &ctx->deploy; - if (startswith(ini->section[i]->key, "deploy:docker")) { - struct Docker *docker = &deploy->docker; - - docker->registry = ini_getval_str(ini, section_name, "registry", render_mode, &err); - docker->image_compression = ini_getval_str(ini, section_name, "image_compression", render_mode, &err); - docker->test_script = ini_getval_str(ini, section_name, "test_script", render_mode, &err); - docker->build_args = ini_getval_strlist(ini, section_name, "build_args", LINE_SEP, render_mode, &err); - docker->tags = ini_getval_strlist(ini, section_name, "tags", LINE_SEP, render_mode, &err); - } - } - return 0; -} - -int populate_delivery_cfg(struct Delivery *ctx, int render_mode) { - struct INIFILE *cfg = ctx->_stasis_ini_fp.cfg; - if (!cfg) { - return -1; - } - int err = 0; - ctx->storage.conda_staging_dir = ini_getval_str(cfg, "default", "conda_staging_dir", render_mode, &err); - ctx->storage.conda_staging_url = ini_getval_str(cfg, "default", "conda_staging_url", render_mode, &err); - ctx->storage.wheel_staging_dir = ini_getval_str(cfg, "default", "wheel_staging_dir", render_mode, &err); - ctx->storage.wheel_staging_url = ini_getval_str(cfg, "default", "wheel_staging_url", render_mode, &err); - globals.conda_fresh_start = ini_getval_bool(cfg, "default", "conda_fresh_start", render_mode, &err); - if (!globals.continue_on_error) { - globals.continue_on_error = ini_getval_bool(cfg, "default", "continue_on_error", render_mode, &err); - } - if (!globals.always_update_base_environment) { - globals.always_update_base_environment = ini_getval_bool(cfg, "default", "always_update_base_environment", render_mode, &err); - } - globals.conda_install_prefix = ini_getval_str(cfg, "default", "conda_install_prefix", render_mode, &err); - globals.conda_packages = ini_getval_strlist(cfg, "default", "conda_packages", LINE_SEP, render_mode, &err); - globals.pip_packages = ini_getval_strlist(cfg, "default", "pip_packages", LINE_SEP, render_mode, &err); - - globals.jfrog.jfrog_artifactory_base_url = ini_getval_str(cfg, "jfrog_cli_download", "url", render_mode, &err); - globals.jfrog.jfrog_artifactory_product = ini_getval_str(cfg, "jfrog_cli_download", "product", render_mode, &err); - globals.jfrog.cli_major_ver = ini_getval_str(cfg, "jfrog_cli_download", "version_series", render_mode, &err); - globals.jfrog.version = ini_getval_str(cfg, "jfrog_cli_download", "version", render_mode, &err); - globals.jfrog.remote_filename = ini_getval_str(cfg, "jfrog_cli_download", "filename", render_mode, &err); - globals.jfrog.url = ini_getval_str(cfg, "deploy:artifactory", "url", render_mode, &err); 
- globals.jfrog.repo = ini_getval_str(cfg, "deploy:artifactory", "repo", render_mode, &err); - - return 0; -} - -static int populate_info(struct Delivery *ctx) { - if (!ctx->info.time_str_epoch) { - // Record timestamp used for release - time(&ctx->info.time_now); - ctx->info.time_info = localtime(&ctx->info.time_now); - - ctx->info.time_str_epoch = calloc(STASIS_TIME_STR_MAX, sizeof(*ctx->info.time_str_epoch)); - if (!ctx->info.time_str_epoch) { - msg(STASIS_MSG_ERROR, "Unable to allocate memory for Unix epoch string\n"); - return -1; - } - snprintf(ctx->info.time_str_epoch, STASIS_TIME_STR_MAX - 1, "%li", ctx->info.time_now); - } - return 0; -} - -int *bootstrap_build_info(struct Delivery *ctx) { - struct Delivery local; - memset(&local, 0, sizeof(local)); - local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path); - local._stasis_ini_fp.delivery = ini_open(ctx->_stasis_ini_fp.delivery_path); - delivery_init_platform(&local); - populate_delivery_cfg(&local, INI_READ_RENDER); - populate_delivery_ini(&local, INI_READ_RENDER); - populate_info(&local); - ctx->info.build_name = strdup(local.info.build_name); - ctx->info.build_number = strdup(local.info.build_number); - ctx->info.release_name = strdup(local.info.release_name); - memcpy(&ctx->info.time_info, &local.info.time_info, sizeof(ctx->info.time_info)); - ctx->info.time_now = local.info.time_now; - ctx->info.time_str_epoch = strdup(local.info.time_str_epoch); - delivery_free(&local); - return 0; -} - -int delivery_init(struct Delivery *ctx, int render_mode) { - populate_info(ctx); - populate_delivery_cfg(ctx, INI_READ_RENDER); - - // Set artifactory URL via environment variable if possible - char *jfurl = getenv("STASIS_JF_ARTIFACTORY_URL"); - if (jfurl) { - if (globals.jfrog.url) { - guard_free(globals.jfrog.url); - } - globals.jfrog.url = strdup(jfurl); - } - - // Set artifactory repository via environment if possible - char *jfrepo = getenv("STASIS_JF_REPO"); - if (jfrepo) { - if (globals.jfrog.repo) { - guard_free(globals.jfrog.repo); - } - globals.jfrog.repo = strdup(jfrepo); - } - - // Configure architecture and platform information - delivery_init_platform(ctx); - - // Create STASIS directory structure - delivery_init_dirs_stage1(ctx); - - char config_local[PATH_MAX]; - sprintf(config_local, "%s/%s", ctx->storage.tmpdir, "config"); - setenv("XDG_CONFIG_HOME", config_local, 1); - - char cache_local[PATH_MAX]; - sprintf(cache_local, "%s/%s", ctx->storage.tmpdir, "cache"); - setenv("XDG_CACHE_HOME", ctx->storage.tmpdir, 1); - - // add tools to PATH - char pathvar_tmp[STASIS_BUFSIZ]; - sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.tools_dir, getenv("PATH")); - setenv("PATH", pathvar_tmp, 1); - - // Prevent git from paginating output - setenv("GIT_PAGER", "", 1); - - populate_delivery_ini(ctx, render_mode); - - if (ctx->deploy.docker.tags) { - for (size_t i = 0; i < strlist_count(ctx->deploy.docker.tags); i++) { - char *item = strlist_item(ctx->deploy.docker.tags, i); - tolower_s(item); - } - } - - if (ctx->deploy.docker.image_compression) { - if (docker_validate_compression_program(ctx->deploy.docker.image_compression)) { - SYSERROR("[deploy:docker].image_compression - invalid command / program is not installed: %s", ctx->deploy.docker.image_compression); - return -1; - } - } - return 0; -} - int delivery_format_str(struct Delivery *ctx, char **dest, const char *fmt) { size_t fmt_len = strlen(fmt); @@ -799,625 +161,6 @@ int delivery_format_str(struct Delivery *ctx, char **dest, const char *fmt) { return 0; } -void 
delivery_debug_show(struct Delivery *ctx) { - printf("\n====DEBUG====\n"); - printf("%-20s %-10s\n", "System configuration directory:", globals.sysconfdir); - printf("%-20s %-10s\n", "Mission directory:", ctx->storage.mission_dir); - printf("%-20s %-10s\n", "Testing enabled:", globals.enable_testing ? "Yes" : "No"); - printf("%-20s %-10s\n", "Docker image builds enabled:", globals.enable_docker ? "Yes" : "No"); - printf("%-20s %-10s\n", "Artifact uploading enabled:", globals.enable_artifactory ? "Yes" : "No"); -} - -void delivery_meta_show(struct Delivery *ctx) { - if (globals.verbose) { - delivery_debug_show(ctx); - } - - printf("\n====DELIVERY====\n"); - printf("%-20s %-10s\n", "Target Python:", ctx->meta.python); - printf("%-20s %-10s\n", "Name:", ctx->meta.name); - printf("%-20s %-10s\n", "Mission:", ctx->meta.mission); - if (ctx->meta.codename) { - printf("%-20s %-10s\n", "Codename:", ctx->meta.codename); - } - if (ctx->meta.version) { - printf("%-20s %-10s\n", "Version", ctx->meta.version); - } - if (!ctx->meta.final) { - printf("%-20s %-10d\n", "RC Level:", ctx->meta.rc); - } - printf("%-20s %-10s\n", "Final Release:", ctx->meta.final ? "Yes" : "No"); - printf("%-20s %-10s\n", "Based On:", ctx->meta.based_on ? ctx->meta.based_on : "New"); -} - -void delivery_conda_show(struct Delivery *ctx) { - printf("\n====CONDA====\n"); - printf("%-20s %-10s\n", "Prefix:", ctx->storage.conda_install_prefix); - - puts("Native Packages:"); - if (strlist_count(ctx->conda.conda_packages) || strlist_count(ctx->conda.conda_packages_defer)) { - struct StrList *list_conda = strlist_init(); - if (strlist_count(ctx->conda.conda_packages)) { - strlist_append_strlist(list_conda, ctx->conda.conda_packages); - } - if (strlist_count(ctx->conda.conda_packages_defer)) { - strlist_append_strlist(list_conda, ctx->conda.conda_packages_defer); - } - strlist_sort(list_conda, STASIS_SORT_ALPHA); - - for (size_t i = 0; i < strlist_count(list_conda); i++) { - char *token = strlist_item(list_conda, i); - if (isempty(token) || isblank(*token) || startswith(token, "-")) { - continue; - } - printf("%21s%s\n", "", token); - } - guard_strlist_free(&list_conda); - } else { - printf("%21s%s\n", "", "N/A"); - } - - puts("Python Packages:"); - if (strlist_count(ctx->conda.pip_packages) || strlist_count(ctx->conda.pip_packages_defer)) { - struct StrList *list_python = strlist_init(); - if (strlist_count(ctx->conda.pip_packages)) { - strlist_append_strlist(list_python, ctx->conda.pip_packages); - } - if (strlist_count(ctx->conda.pip_packages_defer)) { - strlist_append_strlist(list_python, ctx->conda.pip_packages_defer); - } - strlist_sort(list_python, STASIS_SORT_ALPHA); - - for (size_t i = 0; i < strlist_count(list_python); i++) { - char *token = strlist_item(list_python, i); - if (isempty(token) || isblank(*token) || startswith(token, "-")) { - continue; - } - printf("%21s%s\n", "", token); - } - guard_strlist_free(&list_python); - } else { - printf("%21s%s\n", "", "N/A"); - } -} - -void delivery_tests_show(struct Delivery *ctx) { - printf("\n====TESTS====\n"); - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (!ctx->tests[i].name) { - continue; - } - printf("%-20s %-20s %s\n", ctx->tests[i].name, - ctx->tests[i].version, - ctx->tests[i].repository); - } -} - -void delivery_runtime_show(struct Delivery *ctx) { - printf("\n====RUNTIME====\n"); - struct StrList *rt = NULL; - rt = strlist_copy(ctx->runtime.environ); - if (!rt) { - // no data - return; - } - strlist_sort(rt, STASIS_SORT_ALPHA); - size_t 
total = strlist_count(rt); - for (size_t i = 0; i < total; i++) { - char *item = strlist_item(rt, i); - if (!item) { - // not supposed to occur - msg(STASIS_MSG_WARN | STASIS_MSG_L1, "Encountered unexpected NULL at record %zu of %zu of runtime array.\n", i); - return; - } - printf("%s\n", item); - } -} - -int delivery_build_recipes(struct Delivery *ctx) { - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - char *recipe_dir = NULL; - if (ctx->tests[i].build_recipe) { // build a conda recipe - int recipe_type; - int status; - if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests[i].build_recipe, NULL, &recipe_dir)) { - fprintf(stderr, "Encountered an issue while cloning recipe for: %s\n", ctx->tests[i].name); - return -1; - } - recipe_type = recipe_get_type(recipe_dir); - pushd(recipe_dir); - { - if (RECIPE_TYPE_ASTROCONDA == recipe_type) { - pushd(path_basename(ctx->tests[i].repository)); - } else if (RECIPE_TYPE_CONDA_FORGE == recipe_type) { - pushd("recipe"); - } - - char recipe_version[100]; - char recipe_buildno[100]; - char recipe_git_url[PATH_MAX]; - char recipe_git_rev[PATH_MAX]; - - //sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}"); - //sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository); - //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version); - // TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^ - sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? ctx->tests[i].repository_info_tag : ctx->tests[i].version); - sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository); - strcpy(recipe_git_rev, ""); - sprintf(recipe_buildno, " number: 0"); - - unsigned flags = REPLACE_TRUNCATE_AFTER_MATCH; - //file_replace_text("meta.yaml", "{% set version = ", recipe_version); - if (ctx->meta.final) { - sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].version); - // TODO: replace sha256 of tagged archive - // TODO: leave the recipe unchanged otherwise. in theory this should produce the same conda package hash as conda forge. 
- // For now, remove the sha256 requirement - file_replace_text("meta.yaml", "sha256:", "\n", flags); - } else { - file_replace_text("meta.yaml", "{% set version = ", recipe_version, flags); - file_replace_text("meta.yaml", " url:", recipe_git_url, flags); - //file_replace_text("meta.yaml", "sha256:", recipe_git_rev); - file_replace_text("meta.yaml", " sha256:", "\n", flags); - file_replace_text("meta.yaml", " number:", recipe_buildno, flags); - } - - char command[PATH_MAX]; - if (RECIPE_TYPE_CONDA_FORGE == recipe_type) { - char arch[STASIS_NAME_MAX] = {0}; - char platform[STASIS_NAME_MAX] = {0}; - - strcpy(platform, ctx->system.platform[DELIVERY_PLATFORM]); - if (strstr(platform, "Darwin")) { - memset(platform, 0, sizeof(platform)); - strcpy(platform, "osx"); - } - tolower_s(platform); - if (strstr(ctx->system.arch, "arm64")) { - strcpy(arch, "arm64"); - } else if (strstr(ctx->system.arch, "64")) { - strcpy(arch, "64"); - } else { - strcat(arch, "32"); // blind guess - } - tolower_s(arch); - - sprintf(command, "mambabuild --python=%s -m ../.ci_support/%s_%s_.yaml .", - ctx->meta.python, platform, arch); - } else { - sprintf(command, "mambabuild --python=%s .", ctx->meta.python); - } - status = conda_exec(command); - if (status) { - return -1; - } - - if (RECIPE_TYPE_GENERIC != recipe_type) { - popd(); - } - popd(); - } - } - if (recipe_dir) { - guard_free(recipe_dir); - } - } - return 0; -} - -static int filter_repo_tags(char *repo, struct StrList *patterns) { - int result = 0; - - if (!pushd(repo)) { - int list_status = 0; - char *tags_raw = shell_output("git tag -l", &list_status); - struct StrList *tags = strlist_init(); - strlist_append_tokenize(tags, tags_raw, LINE_SEP); - - for (size_t i = 0; tags && i < strlist_count(tags); i++) { - char *tag = strlist_item(tags, i); - for (size_t p = 0; p < strlist_count(patterns); p++) { - char *pattern = strlist_item(patterns, p); - int match = fnmatch(pattern, tag, 0); - if (!match) { - char cmd[PATH_MAX] = {0}; - sprintf(cmd, "git tag -d %s", tag); - result += system(cmd); - break; - } - } - } - guard_strlist_free(&tags); - guard_free(tags_raw); - popd(); - } else { - result = -1; - } - return result; -} - -struct StrList *delivery_build_wheels(struct Delivery *ctx) { - struct StrList *result = NULL; - struct Process proc; - memset(&proc, 0, sizeof(proc)); - - result = strlist_init(); - if (!result) { - perror("unable to allocate memory for string list"); - return NULL; - } - - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (!ctx->tests[i].build_recipe && ctx->tests[i].repository) { // build from source - char srcdir[PATH_MAX]; - char wheeldir[PATH_MAX]; - memset(srcdir, 0, sizeof(srcdir)); - memset(wheeldir, 0, sizeof(wheeldir)); - - sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests[i].name); - git_clone(&proc, ctx->tests[i].repository, srcdir, ctx->tests[i].version); - - if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { - filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags); - } - - pushd(srcdir); - { - char dname[NAME_MAX]; - char outdir[PATH_MAX]; - char cmd[PATH_MAX * 2]; - memset(dname, 0, sizeof(dname)); - memset(outdir, 0, sizeof(outdir)); - memset(cmd, 0, sizeof(outdir)); - - strcpy(dname, ctx->tests[i].name); - tolower_s(dname); - sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname); - if (mkdirs(outdir, 0755)) { - fprintf(stderr, "failed to create output directory: %s\n", outdir); - } - - sprintf(cmd, "-m build -w 
-o %s", outdir); - if (python_exec(cmd)) { - fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name, ctx->tests[i].version); - strlist_free(&result); - return NULL; - } - popd(); - } - } - } - return result; -} - -static const struct Test *requirement_from_test(struct Delivery *ctx, const char *name) { - struct Test *result; - - result = NULL; - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (ctx->tests[i].name && strstr(name, ctx->tests[i].name)) { - result = &ctx->tests[i]; - break; - } - } - return result; -} - -int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, char *env_name, int type, struct StrList **manifest) { - char cmd[PATH_MAX]; - char pkgs[STASIS_BUFSIZ]; - char *env_current = getenv("CONDA_DEFAULT_ENV"); - - if (env_current) { - // The requested environment is not the current environment - if (strcmp(env_current, env_name) != 0) { - // Activate the requested environment - printf("Activating: %s\n", env_name); - conda_activate(conda_install_dir, env_name); - runtime_replace(&ctx->runtime.environ, __environ); - } - } - - memset(cmd, 0, sizeof(cmd)); - memset(pkgs, 0, sizeof(pkgs)); - strcat(cmd, "install"); - - typedef int (*Runner)(const char *); - Runner runner = NULL; - if (INSTALL_PKG_CONDA & type) { - runner = conda_exec; - } else if (INSTALL_PKG_PIP & type) { - runner = pip_exec; - } - - if (INSTALL_PKG_CONDA_DEFERRED & type) { - strcat(cmd, " --use-local"); - } else if (INSTALL_PKG_PIP_DEFERRED & type) { - // Don't change the baseline package set unless we're working with a - // new build. Release candidates will need to keep packages as stable - // as possible between releases. - if (!ctx->meta.based_on) { - strcat(cmd, " --upgrade"); - } - sprintf(cmd + strlen(cmd), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir); - } - - for (size_t x = 0; manifest[x] != NULL; x++) { - char *name = NULL; - for (size_t p = 0; p < strlist_count(manifest[x]); p++) { - name = strlist_item(manifest[x], p); - strip(name); - if (!strlen(name)) { - continue; - } - if (INSTALL_PKG_PIP_DEFERRED & type) { - struct Test *info = (struct Test *) requirement_from_test(ctx, name); - if (info) { - if (!strcmp(info->version, "HEAD")) { - struct StrList *tag_data = strlist_init(); - if (!tag_data) { - SYSERROR("%s", "Unable to allocate memory for tag data\n"); - return -1; - } - strlist_append_tokenize(tag_data, info->repository_info_tag, "-"); - - struct Wheel *whl = NULL; - char *post_commit = NULL; - char *hash = NULL; - if (strlist_count(tag_data) > 1) { - post_commit = strlist_item(tag_data, 1); - hash = strlist_item(tag_data, 2); - } - - // We can't match on version here (index 0). The wheel's version is not guaranteed to be - // equal to the tag; setuptools_scm auto-increments the value, the user can change it manually, - // etc. 
- whl = get_wheel_file(ctx->storage.wheel_artifact_dir, info->name, - (char *[]) {ctx->meta.python_compact, ctx->system.arch, - "none", "any", - post_commit, hash, - NULL}, WHEEL_MATCH_ANY); - - guard_strlist_free(&tag_data); - info->version = whl->version; - sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, whl->version); - } else { - sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, info->version); - } - } else { - fprintf(stderr, "Deferred package '%s' is not present in the tested package list!\n", name); - return -1; - } - } else { - if (startswith(name, "--") || startswith(name, "-")) { - sprintf(cmd + strlen(cmd), " %s", name); - } else { - sprintf(cmd + strlen(cmd), " '%s'", name); - } - } - } - int status = runner(cmd); - if (status) { - return status; - } - } - return 0; -} - -void delivery_get_installer_url(struct Delivery *ctx, char *result) { - if (ctx->conda.installer_version) { - // Use version specified by configuration file - sprintf(result, "%s/%s-%s-%s-%s.sh", ctx->conda.installer_baseurl, - ctx->conda.installer_name, - ctx->conda.installer_version, - ctx->conda.installer_platform, - ctx->conda.installer_arch); - } else { - // Use latest installer - sprintf(result, "%s/%s-%s-%s.sh", ctx->conda.installer_baseurl, - ctx->conda.installer_name, - ctx->conda.installer_platform, - ctx->conda.installer_arch); - } - -} - -int delivery_get_installer(struct Delivery *ctx, char *installer_url) { - char script_path[PATH_MAX]; - char *installer = path_basename(installer_url); - - memset(script_path, 0, sizeof(script_path)); - sprintf(script_path, "%s/%s", ctx->storage.tmpdir, installer); - if (access(script_path, F_OK)) { - // Script doesn't exist - long fetch_status = download(installer_url, script_path, NULL); - if (HTTP_ERROR(fetch_status) || fetch_status < 0) { - // download failed - return -1; - } - } else { - msg(STASIS_MSG_RESTRICT | STASIS_MSG_L3, "Skipped, installer already exists\n", script_path); - } - - ctx->conda.installer_path = strdup(script_path); - if (!ctx->conda.installer_path) { - SYSERROR("Unable to duplicate script_path: '%s'", script_path); - return -1; - } - - return 0; -} - -int delivery_copy_conda_artifacts(struct Delivery *ctx) { - char cmd[STASIS_BUFSIZ]; - char conda_build_dir[PATH_MAX]; - char subdir[PATH_MAX]; - memset(cmd, 0, sizeof(cmd)); - memset(conda_build_dir, 0, sizeof(conda_build_dir)); - memset(subdir, 0, sizeof(subdir)); - - sprintf(conda_build_dir, "%s/%s", ctx->storage.conda_install_prefix, "conda-bld"); - // One must run conda build at least once to create the "conda-bld" directory. - // When this directory is missing there can be no build artifacts. 
- if (access(conda_build_dir, F_OK) < 0) { - msg(STASIS_MSG_RESTRICT | STASIS_MSG_WARN | STASIS_MSG_L3, - "Skipped: 'conda build' has never been executed.\n"); - return 0; - } - - snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/%s %s", - conda_build_dir, - ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], - ctx->storage.conda_artifact_dir); - - return system(cmd); -} - -int delivery_copy_wheel_artifacts(struct Delivery *ctx) { - char cmd[PATH_MAX]; - memset(cmd, 0, sizeof(cmd)); - snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/*/dist/*.whl %s", - ctx->storage.build_sources_dir, - ctx->storage.wheel_artifact_dir); - return system(cmd); -} - -int delivery_index_wheel_artifacts(struct Delivery *ctx) { - struct dirent *rec; - DIR *dp; - FILE *top_fp; - - dp = opendir(ctx->storage.wheel_artifact_dir); - if (!dp) { - return -1; - } - - // Generate a "dumb" local pypi index that is compatible with: - // pip install --extra-index-url - char top_index[PATH_MAX]; - memset(top_index, 0, sizeof(top_index)); - sprintf(top_index, "%s/index.html", ctx->storage.wheel_artifact_dir); - top_fp = fopen(top_index, "w+"); - if (!top_fp) { - return -2; - } - - while ((rec = readdir(dp)) != NULL) { - // skip directories - if (DT_REG == rec->d_type || !strcmp(rec->d_name, "..") || !strcmp(rec->d_name, ".")) { - continue; - } - - FILE *bottom_fp; - char bottom_index[PATH_MAX * 2]; - memset(bottom_index, 0, sizeof(bottom_index)); - sprintf(bottom_index, "%s/%s/index.html", ctx->storage.wheel_artifact_dir, rec->d_name); - bottom_fp = fopen(bottom_index, "w+"); - if (!bottom_fp) { - return -3; - } - - if (globals.verbose) { - printf("+ %s\n", rec->d_name); - } - // Add record to top level index - fprintf(top_fp, "<a href=\"%s/\">%s</a><br/>\n", rec->d_name, rec->d_name); - - char dpath[PATH_MAX * 2]; - memset(dpath, 0, sizeof(dpath)); - sprintf(dpath, "%s/%s", ctx->storage.wheel_artifact_dir, rec->d_name); - struct StrList *packages = listdir(dpath); - if (!packages) { - fclose(top_fp); - fclose(bottom_fp); - return -4; - } - - for (size_t i = 0; i < strlist_count(packages); i++) { - char *package = strlist_item(packages, i); - if (!endswith(package, ".whl")) { - continue; - } - if (globals.verbose) { - printf("`- %s\n", package); - } - // Write record to bottom level index - fprintf(bottom_fp, "<a href=\"%s\">%s</a><br/>\n", package, package); - } - fclose(bottom_fp); - - guard_strlist_free(&packages); - } - closedir(dp); - fclose(top_fp); - return 0; -} - -void delivery_install_conda(char *install_script, char *conda_install_dir) { - struct Process proc; - memset(&proc, 0, sizeof(proc)); - - if (globals.conda_fresh_start) { - if (!access(conda_install_dir, F_OK)) { - // directory exists so remove it - if (rmtree(conda_install_dir)) { - perror("unable to remove previous installation"); - exit(1); - } - - // Proceed with the installation - // -b = batch mode (non-interactive) - char cmd[PATH_MAX] = {0}; - snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s", - find_program("bash"), - install_script, - conda_install_dir); - if (shell_safe(&proc, cmd)) { - fprintf(stderr, "conda installation failed\n"); - exit(1); - } - } else { - // Proceed with the installation - // -b = batch mode (non-interactive) - char cmd[PATH_MAX] = {0}; - snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s", - find_program("bash"), - install_script, - conda_install_dir); - if (shell_safe(&proc, cmd)) { - fprintf(stderr, "conda installation failed\n"); - exit(1); - } - } - } else { - msg(STASIS_MSG_L3, "Conda removal disabled by 
configuration\n"); - } -} - -void delivery_conda_enable(struct Delivery *ctx, char *conda_install_dir) { - if (conda_activate(conda_install_dir, "base")) { - fprintf(stderr, "conda activation failed\n"); - exit(1); - } - - // Setting the CONDARC environment variable appears to be the only consistent - // way to make sure the file is used. Not setting this variable leads to strange - // behavior, especially if a conda environment is already active when STASIS is loaded. - char rcpath[PATH_MAX]; - sprintf(rcpath, "%s/%s", conda_install_dir, ".condarc"); - setenv("CONDARC", rcpath, 1); - if (runtime_replace(&ctx->runtime.environ, __environ)) { - perror("unable to replace runtime environment after activating conda"); - exit(1); - } - - if (conda_setup_headless()) { - // no COE check. this call must succeed. - exit(1); - } -} - void delivery_defer_packages(struct Delivery *ctx, int type) { struct StrList *dataptr = NULL; struct StrList *deferred = NULL; @@ -1545,280 +288,6 @@ void delivery_defer_packages(struct Delivery *ctx, int type) { } } -const char *release_header = "# delivery_name: %s\n" - "# delivery_fmt: %s\n" - "# creation_time: %s\n" - "# conda_ident: %s\n" - "# conda_build_ident: %s\n"; - -char *delivery_get_release_header(struct Delivery *ctx) { - char output[STASIS_BUFSIZ]; - char stamp[100]; - strftime(stamp, sizeof(stamp) - 1, "%c", ctx->info.time_info); - sprintf(output, release_header, - ctx->info.release_name, - ctx->rules.release_fmt, - stamp, - ctx->conda.tool_version, - ctx->conda.tool_build_version); - return strdup(output); -} - -int delivery_dump_metadata(struct Delivery *ctx) { - FILE *fp; - char filename[PATH_MAX]; - sprintf(filename, "%s/meta-%s.stasis", ctx->storage.meta_dir, ctx->info.release_name); - fp = fopen(filename, "w+"); - if (!fp) { - return -1; - } - if (globals.verbose) { - printf("%s\n", filename); - } - fprintf(fp, "name %s\n", ctx->meta.name); - fprintf(fp, "version %s\n", ctx->meta.version); - fprintf(fp, "rc %d\n", ctx->meta.rc); - fprintf(fp, "python %s\n", ctx->meta.python); - fprintf(fp, "python_compact %s\n", ctx->meta.python_compact); - fprintf(fp, "mission %s\n", ctx->meta.mission); - fprintf(fp, "codename %s\n", ctx->meta.codename ? 
ctx->meta.codename : ""); - fprintf(fp, "platform %s %s %s %s\n", - ctx->system.platform[DELIVERY_PLATFORM], - ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], - ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], - ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); - fprintf(fp, "arch %s\n", ctx->system.arch); - fprintf(fp, "time %s\n", ctx->info.time_str_epoch); - fprintf(fp, "release_fmt %s\n", ctx->rules.release_fmt); - fprintf(fp, "release_name %s\n", ctx->info.release_name); - fprintf(fp, "build_name_fmt %s\n", ctx->rules.build_name_fmt); - fprintf(fp, "build_name %s\n", ctx->info.build_name); - fprintf(fp, "build_number_fmt %s\n", ctx->rules.build_number_fmt); - fprintf(fp, "build_number %s\n", ctx->info.build_number); - fprintf(fp, "conda_installer_baseurl %s\n", ctx->conda.installer_baseurl); - fprintf(fp, "conda_installer_name %s\n", ctx->conda.installer_name); - fprintf(fp, "conda_installer_version %s\n", ctx->conda.installer_version); - fprintf(fp, "conda_installer_platform %s\n", ctx->conda.installer_platform); - fprintf(fp, "conda_installer_arch %s\n", ctx->conda.installer_arch); - - fclose(fp); - return 0; -} - -void delivery_rewrite_spec(struct Delivery *ctx, char *filename, unsigned stage) { - char output[PATH_MAX]; - char *header = NULL; - char *tempfile = NULL; - FILE *tp = NULL; - - if (stage == DELIVERY_REWRITE_SPEC_STAGE_1) { - header = delivery_get_release_header(ctx); - if (!header) { - msg(STASIS_MSG_ERROR, "failed to generate release header string\n", filename); - exit(1); - } - tempfile = xmkstemp(&tp, "w+"); - if (!tempfile || !tp) { - msg(STASIS_MSG_ERROR, "%s: unable to create temporary file\n", strerror(errno)); - exit(1); - } - fprintf(tp, "%s", header); - - // Read the original file - char **contents = file_readlines(filename, 0, 0, NULL); - if (!contents) { - msg(STASIS_MSG_ERROR, "%s: unable to read %s", filename); - exit(1); - } - - // Write temporary data - for (size_t i = 0; contents[i] != NULL; i++) { - if (startswith(contents[i], "channels:")) { - // Allow for additional conda channel injection - if (ctx->conda.conda_packages_defer && strlist_count(ctx->conda.conda_packages_defer)) { - fprintf(tp, "%s - @CONDA_CHANNEL@\n", contents[i]); - continue; - } - } else if (strstr(contents[i], "- pip:")) { - if (ctx->conda.pip_packages_defer && strlist_count(ctx->conda.pip_packages_defer)) { - // Allow for additional pip argument injection - fprintf(tp, "%s - @PIP_ARGUMENTS@\n", contents[i]); - continue; - } - } else if (startswith(contents[i], "prefix:")) { - // Remove the prefix key - if (strstr(contents[i], "/") || strstr(contents[i], "\\")) { - // path is on the same line as the key - continue; - } else { - // path is on the next line? 
- if (contents[i + 1] && (strstr(contents[i + 1], "/") || strstr(contents[i + 1], "\\"))) { - i++; - } - continue; - } - } - fprintf(tp, "%s", contents[i]); - } - GENERIC_ARRAY_FREE(contents); - guard_free(header); - fflush(tp); - fclose(tp); - - // Replace the original file with our temporary data - if (copy2(tempfile, filename, CT_PERM) < 0) { - fprintf(stderr, "%s: could not rename '%s' to '%s'\n", strerror(errno), tempfile, filename); - exit(1); - } - remove(tempfile); - guard_free(tempfile); - } else if (globals.enable_rewrite_spec_stage_2 && stage == DELIVERY_REWRITE_SPEC_STAGE_2) { - // Replace "local" channel with the staging URL - if (ctx->storage.conda_staging_url) { - file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_staging_url, 0); - } else if (globals.jfrog.repo) { - sprintf(output, "%s/%s/%s/%s/packages/conda", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name); - file_replace_text(filename, "@CONDA_CHANNEL@", output, 0); - } else { - msg(STASIS_MSG_WARN, "conda_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.conda_artifact_dir); - file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_artifact_dir, 0); - } - - if (ctx->storage.wheel_staging_url) { - file_replace_text(filename, "@PIP_ARGUMENTS@", ctx->storage.wheel_staging_url, 0); - } else if (globals.enable_artifactory && globals.jfrog.url && globals.jfrog.repo) { - sprintf(output, "--extra-index-url %s/%s/%s/%s/packages/wheels", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name); - file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0); - } else { - msg(STASIS_MSG_WARN, "wheel_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.wheel_artifact_dir); - sprintf(output, "--extra-index-url file://%s", ctx->storage.wheel_artifact_dir); - file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0); - } - } -} - -int delivery_index_conda_artifacts(struct Delivery *ctx) { - return conda_index(ctx->storage.conda_artifact_dir); -} - -void delivery_tests_run(struct Delivery *ctx) { - struct Process proc; - memset(&proc, 0, sizeof(proc)); - - if (!globals.workaround.conda_reactivate) { - globals.workaround.conda_reactivate = calloc(PATH_MAX, sizeof(*globals.workaround.conda_reactivate)); - } else { - memset(globals.workaround.conda_reactivate, 0, PATH_MAX); - } - snprintf(globals.workaround.conda_reactivate, PATH_MAX - 1, "\nmamba activate ${CONDA_DEFAULT_ENV}\n"); - - if (!ctx->tests[0].name) { - msg(STASIS_MSG_WARN | STASIS_MSG_L2, "no tests are defined!\n"); - } else { - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (!ctx->tests[i].name && !ctx->tests[i].repository && !ctx->tests[i].script) { - // skip unused test records - continue; - } - msg(STASIS_MSG_L2, "Executing tests for %s %s\n", ctx->tests[i].name, ctx->tests[i].version); - if (!ctx->tests[i].script || !strlen(ctx->tests[i].script)) { - msg(STASIS_MSG_WARN | STASIS_MSG_L3, "Nothing to do. 
To fix, declare a 'script' in section: [test:%s]\n", - ctx->tests[i].name); - continue; - } - - char destdir[PATH_MAX]; - sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(ctx->tests[i].repository)); - - if (!access(destdir, F_OK)) { - msg(STASIS_MSG_L3, "Purging repository %s\n", destdir); - if (rmtree(destdir)) { - COE_CHECK_ABORT(1, "Unable to remove repository\n"); - } - } - msg(STASIS_MSG_L3, "Cloning repository %s\n", ctx->tests[i].repository); - if (!git_clone(&proc, ctx->tests[i].repository, destdir, ctx->tests[i].version)) { - ctx->tests[i].repository_info_tag = strdup(git_describe(destdir)); - ctx->tests[i].repository_info_ref = strdup(git_rev_parse(destdir, "HEAD")); - } else { - COE_CHECK_ABORT(1, "Unable to clone repository\n"); - } - - if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { - filter_repo_tags(destdir, ctx->tests[i].repository_remove_tags); - } - - if (pushd(destdir)) { - COE_CHECK_ABORT(1, "Unable to enter repository directory\n"); - } else { -#if 1 - int status; - char *cmd = calloc(strlen(ctx->tests[i].script) + STASIS_BUFSIZ, sizeof(*cmd)); - - msg(STASIS_MSG_L3, "Testing %s\n", ctx->tests[i].name); - memset(&proc, 0, sizeof(proc)); - - // Apply workaround for tox positional arguments - char *toxconf = NULL; - if (!access("tox.ini", F_OK)) { - if (!fix_tox_conf("tox.ini", &toxconf)) { - msg(STASIS_MSG_L3, "Fixing tox positional arguments\n"); - if (!globals.workaround.tox_posargs) { - globals.workaround.tox_posargs = calloc(PATH_MAX, sizeof(*globals.workaround.tox_posargs)); - } else { - memset(globals.workaround.tox_posargs, 0, PATH_MAX); - } - snprintf(globals.workaround.tox_posargs, PATH_MAX - 1, "-c %s --root .", toxconf); - } - } - - // enable trace mode before executing each test script - strcpy(cmd, ctx->tests[i].script); - char *cmd_rendered = tpl_render(cmd); - if (cmd_rendered) { - if (strcmp(cmd_rendered, cmd) != 0) { - strcpy(cmd, cmd_rendered); - cmd[strlen(cmd_rendered) ? 
strlen(cmd_rendered) - 1 : 0] = 0; - } - guard_free(cmd_rendered); - } else { - SYSERROR("An error occurred while rendering the following:\n%s", cmd); - exit(1); - } - - puts(cmd); - char runner_cmd[0xFFFF] = {0}; - sprintf(runner_cmd, "set +x\nsource %s/etc/profile.d/conda.sh\nsource %s/etc/profile.d/mamba.sh\n\nmamba activate ${CONDA_DEFAULT_ENV}\n\n%s\n", - ctx->storage.conda_install_prefix, - ctx->storage.conda_install_prefix, - cmd); - status = shell(&proc, runner_cmd); - if (status) { - msg(STASIS_MSG_ERROR, "Script failure: %s\n%s\n\nExit code: %d\n", ctx->tests[i].name, ctx->tests[i].script, status); - popd(); - guard_free(cmd); - if (!globals.continue_on_error) { - tpl_free(); - delivery_free(ctx); - globals_free(); - } - COE_CHECK_ABORT(1, "Test failure"); - } - guard_free(cmd); - - if (toxconf) { - remove(toxconf); - guard_free(toxconf); - } - popd(); -#else - msg(STASIS_MSG_WARNING | STASIS_MSG_L3, "TESTING DISABLED BY CODE!\n"); -#endif - } - } - } -} - void delivery_gather_tool_versions(struct Delivery *ctx) { int status = 0; @@ -1832,388 +301,3 @@ void delivery_gather_tool_versions(struct Delivery *ctx) { strip(ctx->conda.tool_version); } -int delivery_init_artifactory(struct Delivery *ctx) { - int status = 0; - char dest[PATH_MAX] = {0}; - char filepath[PATH_MAX] = {0}; - snprintf(dest, sizeof(dest) - 1, "%s/bin", ctx->storage.tools_dir); - snprintf(filepath, sizeof(dest) - 1, "%s/bin/jf", ctx->storage.tools_dir); - - if (!access(filepath, F_OK)) { - // already have it - msg(STASIS_MSG_L3, "Skipped download, %s already exists\n", filepath); - goto delivery_init_artifactory_envsetup; - } - - char *platform = ctx->system.platform[DELIVERY_PLATFORM]; - msg(STASIS_MSG_L3, "Downloading %s for %s %s\n", globals.jfrog.remote_filename, platform, ctx->system.arch); - if ((status = artifactory_download_cli(dest, - globals.jfrog.jfrog_artifactory_base_url, - globals.jfrog.jfrog_artifactory_product, - globals.jfrog.cli_major_ver, - globals.jfrog.version, - platform, - ctx->system.arch, - globals.jfrog.remote_filename))) { - remove(filepath); - } - - delivery_init_artifactory_envsetup: - // CI (ridiculously generic, why?) 
disables interactive prompts and progress bar output - setenv("CI", "1", 1); - - // JFROG_CLI_HOME_DIR is where .jfrog is stored - char path[PATH_MAX] = {0}; - snprintf(path, sizeof(path) - 1, "%s/.jfrog", ctx->storage.build_dir); - setenv("JFROG_CLI_HOME_DIR", path, 1); - - // JFROG_CLI_TEMP_DIR is where the obvious is stored - setenv("JFROG_CLI_TEMP_DIR", ctx->storage.tmpdir, 1); - return status; -} - -int delivery_artifact_upload(struct Delivery *ctx) { - int status = 0; - - if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) { - fprintf(stderr, "Failed to initialize Artifactory authentication context\n"); - return -1; - } - - for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(*ctx->deploy.jfrog); i++) { - if (!ctx->deploy.jfrog[i].files || !ctx->deploy.jfrog[i].dest) { - break; - } - jfrt_upload_init(&ctx->deploy.jfrog[i].upload_ctx); - - if (!globals.jfrog.repo) { - msg(STASIS_MSG_WARN, "Artifactory repository path is not configured!\n"); - fprintf(stderr, "set STASIS_JF_REPO environment variable...\nOr append to configuration file:\n\n"); - fprintf(stderr, "[deploy:artifactory]\nrepo = example/generic/repo/path\n\n"); - status++; - break; - } else if (!ctx->deploy.jfrog[i].repo) { - ctx->deploy.jfrog[i].repo = strdup(globals.jfrog.repo); - } - - if (!ctx->deploy.jfrog[i].repo || isempty(ctx->deploy.jfrog[i].repo) || !strlen(ctx->deploy.jfrog[i].repo)) { - // Unlikely to trigger if the config parser is working correctly - msg(STASIS_MSG_ERROR, "Artifactory repository path is empty. Cannot continue.\n"); - status++; - break; - } - - ctx->deploy.jfrog[i].upload_ctx.workaround_parent_only = true; - ctx->deploy.jfrog[i].upload_ctx.build_name = ctx->info.build_name; - ctx->deploy.jfrog[i].upload_ctx.build_number = ctx->info.build_number; - - char files[PATH_MAX]; - char dest[PATH_MAX]; // repo + remote dir - - if (jfrog_cli_rt_ping(&ctx->deploy.jfrog_auth)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to contact artifactory server: %s\n", ctx->deploy.jfrog_auth.url); - return -1; - } - - if (strlist_count(ctx->deploy.jfrog[i].files)) { - for (size_t f = 0; f < strlist_count(ctx->deploy.jfrog[i].files); f++) { - memset(dest, 0, sizeof(dest)); - memset(files, 0, sizeof(files)); - snprintf(dest, sizeof(dest) - 1, "%s/%s", ctx->deploy.jfrog[i].repo, ctx->deploy.jfrog[i].dest); - snprintf(files, sizeof(files) - 1, "%s", strlist_item(ctx->deploy.jfrog[i].files, f)); - status += jfrog_cli_rt_upload(&ctx->deploy.jfrog_auth, &ctx->deploy.jfrog[i].upload_ctx, files, dest); - } - } - } - - if (globals.enable_artifactory_build_info) { - if (!status && ctx->deploy.jfrog[0].files && ctx->deploy.jfrog[0].dest) { - jfrog_cli_rt_build_collect_env(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, - ctx->deploy.jfrog[0].upload_ctx.build_number); - jfrog_cli_rt_build_publish(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, - ctx->deploy.jfrog[0].upload_ctx.build_number); - } - } else { - msg(STASIS_MSG_WARN | STASIS_MSG_L2, "Artifactory build info upload is disabled by CLI argument\n"); - } - - return status; -} - -int delivery_mission_render_files(struct Delivery *ctx) { - if (!ctx->storage.mission_dir) { - fprintf(stderr, "Mission directory is not configured. 
Context not initialized?\n"); - return -1; - } - struct Data { - char *src; - char *dest; - } data; - struct INIFILE *cfg = ctx->_stasis_ini_fp.mission; - union INIVal val; - - memset(&data, 0, sizeof(data)); - data.src = calloc(PATH_MAX, sizeof(*data.src)); - if (!data.src) { - perror("data.src"); - return -1; - } - - for (size_t i = 0; i < cfg->section_count; i++) { - char *section_name = cfg->section[i]->key; - if (!startswith(section_name, "template:")) { - continue; - } - val.as_char_p = strchr(section_name, ':') + 1; - if (val.as_char_p && isempty(val.as_char_p)) { - guard_free(data.src); - return 1; - } - sprintf(data.src, "%s/%s/%s", ctx->storage.mission_dir, ctx->meta.mission, val.as_char_p); - msg(STASIS_MSG_L2, "%s\n", data.src); - - int err = 0; - data.dest = ini_getval_str(cfg, section_name, "destination", INI_READ_RENDER, &err); - - char *contents; - struct stat st; - if (lstat(data.src, &st)) { - perror(data.src); - guard_free(data.dest); - continue; - } - - contents = calloc(st.st_size + 1, sizeof(*contents)); - if (!contents) { - perror("template file contents"); - guard_free(data.dest); - continue; - } - - FILE *fp; - fp = fopen(data.src, "rb"); - if (!fp) { - perror(data.src); - guard_free(contents); - guard_free(data.dest); - continue; - } - - if (fread(contents, st.st_size, sizeof(*contents), fp) < 1) { - perror("while reading template file"); - guard_free(contents); - guard_free(data.dest); - fclose(fp); - continue; - } - fclose(fp); - - msg(STASIS_MSG_L3, "Writing %s\n", data.dest); - if (tpl_render_to_file(contents, data.dest)) { - guard_free(contents); - guard_free(data.dest); - continue; - } - guard_free(contents); - guard_free(data.dest); - } - - guard_free(data.src); - return 0; -} - -int delivery_docker(struct Delivery *ctx) { - if (!docker_capable(&ctx->deploy.docker.capabilities)) { - return -1; - } - char tag[STASIS_NAME_MAX]; - char args[PATH_MAX]; - int has_registry = ctx->deploy.docker.registry != NULL; - size_t total_tags = strlist_count(ctx->deploy.docker.tags); - size_t total_build_args = strlist_count(ctx->deploy.docker.build_args); - - if (!has_registry) { - msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker registry defined. You will need to manually retag the resulting image.\n"); - } - - if (!total_tags) { - char default_tag[PATH_MAX]; - msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker tags defined by configuration. Generating default tag(s).\n"); - // generate local tag - memset(default_tag, 0, sizeof(default_tag)); - sprintf(default_tag, "%s:%s-py%s", ctx->meta.name, ctx->info.build_name, ctx->meta.python_compact); - tolower_s(default_tag); - - // Add tag - ctx->deploy.docker.tags = strlist_init(); - strlist_append(&ctx->deploy.docker.tags, default_tag); - - if (has_registry) { - // generate tag for target registry - memset(default_tag, 0, sizeof(default_tag)); - sprintf(default_tag, "%s/%s:%s-py%s", ctx->deploy.docker.registry, ctx->meta.name, ctx->info.build_number, ctx->meta.python_compact); - tolower_s(default_tag); - - // Add tag - strlist_append(&ctx->deploy.docker.tags, default_tag); - } - // regenerate total tag available - total_tags = strlist_count(ctx->deploy.docker.tags); - } - - memset(args, 0, sizeof(args)); - - // Append image tags to command - for (size_t i = 0; i < total_tags; i++) { - char *tag_orig = strlist_item(ctx->deploy.docker.tags, i); - strcpy(tag, tag_orig); - docker_sanitize_tag(tag); - sprintf(args + strlen(args), " -t \"%s\" ", tag); - } - - // Append build arguments to command (i.e. 
--build-arg "key=value" - for (size_t i = 0; i < total_build_args; i++) { - char *build_arg = strlist_item(ctx->deploy.docker.build_args, i); - if (!build_arg) { - break; - } - sprintf(args + strlen(args), " --build-arg \"%s\" ", build_arg); - } - - // Build the image - char delivery_file[PATH_MAX]; - char dest[PATH_MAX]; - char rsync_cmd[PATH_MAX * 2]; - memset(delivery_file, 0, sizeof(delivery_file)); - memset(dest, 0, sizeof(dest)); - - sprintf(delivery_file, "%s/%s.yml", ctx->storage.delivery_dir, ctx->info.release_name); - if (access(delivery_file, F_OK) < 0) { - fprintf(stderr, "docker build cannot proceed without delivery file: %s\n", delivery_file); - return -1; - } - - sprintf(dest, "%s/%s.yml", ctx->storage.build_docker_dir, ctx->info.release_name); - if (copy2(delivery_file, dest, CT_PERM)) { - fprintf(stderr, "Failed to copy delivery file to %s: %s\n", dest, strerror(errno)); - return -1; - } - - memset(dest, 0, sizeof(dest)); - sprintf(dest, "%s/packages", ctx->storage.build_docker_dir); - - msg(STASIS_MSG_L2, "Copying conda packages\n"); - memset(rsync_cmd, 0, sizeof(rsync_cmd)); - sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.conda_artifact_dir, dest); - if (system(rsync_cmd)) { - fprintf(stderr, "Failed to copy conda artifacts to docker build directory\n"); - return -1; - } - - msg(STASIS_MSG_L2, "Copying wheel packages\n"); - memset(rsync_cmd, 0, sizeof(rsync_cmd)); - sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.wheel_artifact_dir, dest); - if (system(rsync_cmd)) { - fprintf(stderr, "Failed to copy wheel artifactory to docker build directory\n"); - } - - if (docker_build(ctx->storage.build_docker_dir, args, ctx->deploy.docker.capabilities.build)) { - return -1; - } - - // Test the image - // All tags point back to the same image so test the first one we see - // regardless of how many are defined - strcpy(tag, strlist_item(ctx->deploy.docker.tags, 0)); - docker_sanitize_tag(tag); - - msg(STASIS_MSG_L2, "Executing image test script for %s\n", tag); - if (ctx->deploy.docker.test_script) { - if (isempty(ctx->deploy.docker.test_script)) { - msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Image test script has no content\n"); - } else { - int state; - if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) { - msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Non-zero exit (%d) from test script. 
%s image archive will not be generated.\n", state >> 8, tag); - // test failed -- don't save the image - return -1; - } - } - } else { - msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "No image test script defined\n"); - } - - // Test successful, save image - if (docker_save(path_basename(tag), ctx->storage.docker_artifact_dir, ctx->deploy.docker.image_compression)) { - // save failed - return -1; - } - - return 0; -} - -int delivery_fixup_test_results(struct Delivery *ctx) { - struct dirent *rec; - DIR *dp; - - dp = opendir(ctx->storage.results_dir); - if (!dp) { - perror(ctx->storage.results_dir); - return -1; - } - - while ((rec = readdir(dp)) != NULL) { - char path[PATH_MAX]; - memset(path, 0, sizeof(path)); - - if (!strcmp(rec->d_name, ".") || !strcmp(rec->d_name, "..")) { - continue; - } else if (!endswith(rec->d_name, ".xml")) { - continue; - } - - sprintf(path, "%s/%s", ctx->storage.results_dir, rec->d_name); - msg(STASIS_MSG_L3, "%s\n", rec->d_name); - if (xml_pretty_print_in_place(path, STASIS_XML_PRETTY_PRINT_PROG, STASIS_XML_PRETTY_PRINT_ARGS)) { - msg(STASIS_MSG_L3 | STASIS_MSG_WARN, "Failed to rewrite file '%s'\n", rec->d_name); - } - } - - closedir(dp); - return 0; -} - -int delivery_exists(struct Delivery *ctx) { - int release_exists = 0; - char release_pattern[PATH_MAX] = {0}; - sprintf(release_pattern, "*%s*", ctx->info.release_name); - - if (globals.enable_artifactory) { - if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) { - fprintf(stderr, "Failed to initialize Artifactory authentication context\n"); - return -1; // error - } - - struct JFRT_Search search = {.fail_no_op = true}; - release_exists = jfrog_cli_rt_search(&ctx->deploy.jfrog_auth, &search, globals.jfrog.repo, release_pattern); - if (release_exists != 2) { - if (!globals.enable_overwrite && !release_exists) { - // --fail_no_op returns 2 on failure - // without: it returns an empty list "[]" and exit code 0 - return 1; // found - } - } - } else { - struct StrList *files = listdir(ctx->storage.delivery_dir); - for (size_t i = 0; i < strlist_count(files); i++) { - char *filename = strlist_item(files, i); - release_exists = fnmatch(release_pattern, filename, FNM_PATHNAME); - if (!globals.enable_overwrite && !release_exists) { - guard_strlist_free(&files); - return 1; // found - } - } - guard_strlist_free(&files); - } - return 0; // not found -} diff --git a/src/delivery_artifactory.c b/src/delivery_artifactory.c new file mode 100644 index 0000000..27f4823 --- /dev/null +++ b/src/delivery_artifactory.c @@ -0,0 +1,192 @@ +#include "delivery.h" + +int delivery_init_artifactory(struct Delivery *ctx) { + int status = 0; + char dest[PATH_MAX] = {0}; + char filepath[PATH_MAX] = {0}; + snprintf(dest, sizeof(dest) - 1, "%s/bin", ctx->storage.tools_dir); + snprintf(filepath, sizeof(dest) - 1, "%s/bin/jf", ctx->storage.tools_dir); + + if (!access(filepath, F_OK)) { + // already have it + msg(STASIS_MSG_L3, "Skipped download, %s already exists\n", filepath); + goto delivery_init_artifactory_envsetup; + } + + char *platform = ctx->system.platform[DELIVERY_PLATFORM]; + msg(STASIS_MSG_L3, "Downloading %s for %s %s\n", globals.jfrog.remote_filename, platform, ctx->system.arch); + if ((status = artifactory_download_cli(dest, + globals.jfrog.jfrog_artifactory_base_url, + globals.jfrog.jfrog_artifactory_product, + globals.jfrog.cli_major_ver, + globals.jfrog.version, + platform, + ctx->system.arch, + globals.jfrog.remote_filename))) { + remove(filepath); + } + + delivery_init_artifactory_envsetup: + // CI (ridiculously generic, why?) 
disables interactive prompts and progress bar output + setenv("CI", "1", 1); + + // JFROG_CLI_HOME_DIR is where .jfrog is stored + char path[PATH_MAX] = {0}; + snprintf(path, sizeof(path) - 1, "%s/.jfrog", ctx->storage.build_dir); + setenv("JFROG_CLI_HOME_DIR", path, 1); + + // JFROG_CLI_TEMP_DIR is where the obvious is stored + setenv("JFROG_CLI_TEMP_DIR", ctx->storage.tmpdir, 1); + return status; +} + +int delivery_artifact_upload(struct Delivery *ctx) { + int status = 0; + + if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) { + fprintf(stderr, "Failed to initialize Artifactory authentication context\n"); + return -1; + } + + for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(*ctx->deploy.jfrog); i++) { + if (!ctx->deploy.jfrog[i].files || !ctx->deploy.jfrog[i].dest) { + break; + } + jfrt_upload_init(&ctx->deploy.jfrog[i].upload_ctx); + + if (!globals.jfrog.repo) { + msg(STASIS_MSG_WARN, "Artifactory repository path is not configured!\n"); + fprintf(stderr, "set STASIS_JF_REPO environment variable...\nOr append to configuration file:\n\n"); + fprintf(stderr, "[deploy:artifactory]\nrepo = example/generic/repo/path\n\n"); + status++; + break; + } else if (!ctx->deploy.jfrog[i].repo) { + ctx->deploy.jfrog[i].repo = strdup(globals.jfrog.repo); + } + + if (!ctx->deploy.jfrog[i].repo || isempty(ctx->deploy.jfrog[i].repo) || !strlen(ctx->deploy.jfrog[i].repo)) { + // Unlikely to trigger if the config parser is working correctly + msg(STASIS_MSG_ERROR, "Artifactory repository path is empty. Cannot continue.\n"); + status++; + break; + } + + ctx->deploy.jfrog[i].upload_ctx.workaround_parent_only = true; + ctx->deploy.jfrog[i].upload_ctx.build_name = ctx->info.build_name; + ctx->deploy.jfrog[i].upload_ctx.build_number = ctx->info.build_number; + + char files[PATH_MAX]; + char dest[PATH_MAX]; // repo + remote dir + + if (jfrog_cli_rt_ping(&ctx->deploy.jfrog_auth)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to contact artifactory server: %s\n", ctx->deploy.jfrog_auth.url); + return -1; + } + + if (strlist_count(ctx->deploy.jfrog[i].files)) { + for (size_t f = 0; f < strlist_count(ctx->deploy.jfrog[i].files); f++) { + memset(dest, 0, sizeof(dest)); + memset(files, 0, sizeof(files)); + snprintf(dest, sizeof(dest) - 1, "%s/%s", ctx->deploy.jfrog[i].repo, ctx->deploy.jfrog[i].dest); + snprintf(files, sizeof(files) - 1, "%s", strlist_item(ctx->deploy.jfrog[i].files, f)); + status += jfrog_cli_rt_upload(&ctx->deploy.jfrog_auth, &ctx->deploy.jfrog[i].upload_ctx, files, dest); + } + } + } + + if (globals.enable_artifactory_build_info) { + if (!status && ctx->deploy.jfrog[0].files && ctx->deploy.jfrog[0].dest) { + jfrog_cli_rt_build_collect_env(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, + ctx->deploy.jfrog[0].upload_ctx.build_number); + jfrog_cli_rt_build_publish(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name, + ctx->deploy.jfrog[0].upload_ctx.build_number); + } + } else { + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "Artifactory build info upload is disabled by CLI argument\n"); + } + + return status; +} + +int delivery_mission_render_files(struct Delivery *ctx) { + if (!ctx->storage.mission_dir) { + fprintf(stderr, "Mission directory is not configured. 
Context not initialized?\n"); + return -1; + } + struct Data { + char *src; + char *dest; + } data; + struct INIFILE *cfg = ctx->_stasis_ini_fp.mission; + union INIVal val; + + memset(&data, 0, sizeof(data)); + data.src = calloc(PATH_MAX, sizeof(*data.src)); + if (!data.src) { + perror("data.src"); + return -1; + } + + for (size_t i = 0; i < cfg->section_count; i++) { + char *section_name = cfg->section[i]->key; + if (!startswith(section_name, "template:")) { + continue; + } + val.as_char_p = strchr(section_name, ':') + 1; + if (val.as_char_p && isempty(val.as_char_p)) { + guard_free(data.src); + return 1; + } + sprintf(data.src, "%s/%s/%s", ctx->storage.mission_dir, ctx->meta.mission, val.as_char_p); + msg(STASIS_MSG_L2, "%s\n", data.src); + + int err = 0; + data.dest = ini_getval_str(cfg, section_name, "destination", INI_READ_RENDER, &err); + + char *contents; + struct stat st; + if (lstat(data.src, &st)) { + perror(data.src); + guard_free(data.dest); + continue; + } + + contents = calloc(st.st_size + 1, sizeof(*contents)); + if (!contents) { + perror("template file contents"); + guard_free(data.dest); + continue; + } + + FILE *fp; + fp = fopen(data.src, "rb"); + if (!fp) { + perror(data.src); + guard_free(contents); + guard_free(data.dest); + continue; + } + + if (fread(contents, st.st_size, sizeof(*contents), fp) < 1) { + perror("while reading template file"); + guard_free(contents); + guard_free(data.dest); + fclose(fp); + continue; + } + fclose(fp); + + msg(STASIS_MSG_L3, "Writing %s\n", data.dest); + if (tpl_render_to_file(contents, data.dest)) { + guard_free(contents); + guard_free(data.dest); + continue; + } + guard_free(contents); + guard_free(data.dest); + } + + guard_free(data.src); + return 0; +} + diff --git a/src/delivery_build.c b/src/delivery_build.c new file mode 100644 index 0000000..6a8d371 --- /dev/null +++ b/src/delivery_build.c @@ -0,0 +1,180 @@ +#include <fnmatch.h> +#include "delivery.h" + +int delivery_build_recipes(struct Delivery *ctx) { + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + char *recipe_dir = NULL; + if (ctx->tests[i].build_recipe) { // build a conda recipe + int recipe_type; + int status; + if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests[i].build_recipe, NULL, &recipe_dir)) { + fprintf(stderr, "Encountered an issue while cloning recipe for: %s\n", ctx->tests[i].name); + return -1; + } + recipe_type = recipe_get_type(recipe_dir); + pushd(recipe_dir); + { + if (RECIPE_TYPE_ASTROCONDA == recipe_type) { + pushd(path_basename(ctx->tests[i].repository)); + } else if (RECIPE_TYPE_CONDA_FORGE == recipe_type) { + pushd("recipe"); + } + + char recipe_version[100]; + char recipe_buildno[100]; + char recipe_git_url[PATH_MAX]; + char recipe_git_rev[PATH_MAX]; + + //sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}"); + //sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository); + //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version); + // TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^ + sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? 
ctx->tests[i].repository_info_tag : ctx->tests[i].version); + sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository); + strcpy(recipe_git_rev, ""); + sprintf(recipe_buildno, " number: 0"); + + unsigned flags = REPLACE_TRUNCATE_AFTER_MATCH; + //file_replace_text("meta.yaml", "{% set version = ", recipe_version); + if (ctx->meta.final) { + sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].version); + // TODO: replace sha256 of tagged archive + // TODO: leave the recipe unchanged otherwise. in theory this should produce the same conda package hash as conda forge. + // For now, remove the sha256 requirement + file_replace_text("meta.yaml", "sha256:", "\n", flags); + } else { + file_replace_text("meta.yaml", "{% set version = ", recipe_version, flags); + file_replace_text("meta.yaml", " url:", recipe_git_url, flags); + //file_replace_text("meta.yaml", "sha256:", recipe_git_rev); + file_replace_text("meta.yaml", " sha256:", "\n", flags); + file_replace_text("meta.yaml", " number:", recipe_buildno, flags); + } + + char command[PATH_MAX]; + if (RECIPE_TYPE_CONDA_FORGE == recipe_type) { + char arch[STASIS_NAME_MAX] = {0}; + char platform[STASIS_NAME_MAX] = {0}; + + strcpy(platform, ctx->system.platform[DELIVERY_PLATFORM]); + if (strstr(platform, "Darwin")) { + memset(platform, 0, sizeof(platform)); + strcpy(platform, "osx"); + } + tolower_s(platform); + if (strstr(ctx->system.arch, "arm64")) { + strcpy(arch, "arm64"); + } else if (strstr(ctx->system.arch, "64")) { + strcpy(arch, "64"); + } else { + strcat(arch, "32"); // blind guess + } + tolower_s(arch); + + sprintf(command, "mambabuild --python=%s -m ../.ci_support/%s_%s_.yaml .", + ctx->meta.python, platform, arch); + } else { + sprintf(command, "mambabuild --python=%s .", ctx->meta.python); + } + status = conda_exec(command); + if (status) { + return -1; + } + + if (RECIPE_TYPE_GENERIC != recipe_type) { + popd(); + } + popd(); + } + } + if (recipe_dir) { + guard_free(recipe_dir); + } + } + return 0; +} + +int filter_repo_tags(char *repo, struct StrList *patterns) { + int result = 0; + + if (!pushd(repo)) { + int list_status = 0; + char *tags_raw = shell_output("git tag -l", &list_status); + struct StrList *tags = strlist_init(); + strlist_append_tokenize(tags, tags_raw, LINE_SEP); + + for (size_t i = 0; tags && i < strlist_count(tags); i++) { + char *tag = strlist_item(tags, i); + for (size_t p = 0; p < strlist_count(patterns); p++) { + char *pattern = strlist_item(patterns, p); + int match = fnmatch(pattern, tag, 0); + if (!match) { + char cmd[PATH_MAX] = {0}; + sprintf(cmd, "git tag -d %s", tag); + result += system(cmd); + break; + } + } + } + guard_strlist_free(&tags); + guard_free(tags_raw); + popd(); + } else { + result = -1; + } + return result; +} + +struct StrList *delivery_build_wheels(struct Delivery *ctx) { + struct StrList *result = NULL; + struct Process proc; + memset(&proc, 0, sizeof(proc)); + + result = strlist_init(); + if (!result) { + perror("unable to allocate memory for string list"); + return NULL; + } + + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + if (!ctx->tests[i].build_recipe && ctx->tests[i].repository) { // build from source + char srcdir[PATH_MAX]; + char wheeldir[PATH_MAX]; + memset(srcdir, 0, sizeof(srcdir)); + memset(wheeldir, 0, sizeof(wheeldir)); + + sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests[i].name); + git_clone(&proc, ctx->tests[i].repository, srcdir, ctx->tests[i].version); + + if 
(ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { + filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags); + } + + pushd(srcdir); + { + char dname[NAME_MAX]; + char outdir[PATH_MAX]; + char cmd[PATH_MAX * 2]; + memset(dname, 0, sizeof(dname)); + memset(outdir, 0, sizeof(outdir)); + memset(cmd, 0, sizeof(outdir)); + + strcpy(dname, ctx->tests[i].name); + tolower_s(dname); + sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname); + if (mkdirs(outdir, 0755)) { + fprintf(stderr, "failed to create output directory: %s\n", outdir); + } + + sprintf(cmd, "-m build -w -o %s", outdir); + if (python_exec(cmd)) { + fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name, ctx->tests[i].version); + strlist_free(&result); + return NULL; + } + popd(); + } + } + } + return result; +} + diff --git a/src/delivery_conda.c b/src/delivery_conda.c new file mode 100644 index 0000000..93a06fc --- /dev/null +++ b/src/delivery_conda.c @@ -0,0 +1,110 @@ +#include "delivery.h" + +void delivery_get_conda_installer_url(struct Delivery *ctx, char *result) { + if (ctx->conda.installer_version) { + // Use version specified by configuration file + sprintf(result, "%s/%s-%s-%s-%s.sh", ctx->conda.installer_baseurl, + ctx->conda.installer_name, + ctx->conda.installer_version, + ctx->conda.installer_platform, + ctx->conda.installer_arch); + } else { + // Use latest installer + sprintf(result, "%s/%s-%s-%s.sh", ctx->conda.installer_baseurl, + ctx->conda.installer_name, + ctx->conda.installer_platform, + ctx->conda.installer_arch); + } + +} + +int delivery_get_conda_installer(struct Delivery *ctx, char *installer_url) { + char script_path[PATH_MAX]; + char *installer = path_basename(installer_url); + + memset(script_path, 0, sizeof(script_path)); + sprintf(script_path, "%s/%s", ctx->storage.tmpdir, installer); + if (access(script_path, F_OK)) { + // Script doesn't exist + long fetch_status = download(installer_url, script_path, NULL); + if (HTTP_ERROR(fetch_status) || fetch_status < 0) { + // download failed + return -1; + } + } else { + msg(STASIS_MSG_RESTRICT | STASIS_MSG_L3, "Skipped, installer already exists\n", script_path); + } + + ctx->conda.installer_path = strdup(script_path); + if (!ctx->conda.installer_path) { + SYSERROR("Unable to duplicate script_path: '%s'", script_path); + return -1; + } + + return 0; +} + +void delivery_install_conda(char *install_script, char *conda_install_dir) { + struct Process proc; + memset(&proc, 0, sizeof(proc)); + + if (globals.conda_fresh_start) { + if (!access(conda_install_dir, F_OK)) { + // directory exists so remove it + if (rmtree(conda_install_dir)) { + perror("unable to remove previous installation"); + exit(1); + } + + // Proceed with the installation + // -b = batch mode (non-interactive) + char cmd[PATH_MAX] = {0}; + snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s", + find_program("bash"), + install_script, + conda_install_dir); + if (shell_safe(&proc, cmd)) { + fprintf(stderr, "conda installation failed\n"); + exit(1); + } + } else { + // Proceed with the installation + // -b = batch mode (non-interactive) + char cmd[PATH_MAX] = {0}; + snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s", + find_program("bash"), + install_script, + conda_install_dir); + if (shell_safe(&proc, cmd)) { + fprintf(stderr, "conda installation failed\n"); + exit(1); + } + } + } else { + msg(STASIS_MSG_L3, "Conda removal disabled by configuration\n"); + } +} + +void delivery_conda_enable(struct Delivery *ctx, char 
*conda_install_dir) { + if (conda_activate(conda_install_dir, "base")) { + fprintf(stderr, "conda activation failed\n"); + exit(1); + } + + // Setting the CONDARC environment variable appears to be the only consistent + // way to make sure the file is used. Not setting this variable leads to strange + // behavior, especially if a conda environment is already active when STASIS is loaded. + char rcpath[PATH_MAX]; + sprintf(rcpath, "%s/%s", conda_install_dir, ".condarc"); + setenv("CONDARC", rcpath, 1); + if (runtime_replace(&ctx->runtime.environ, __environ)) { + perror("unable to replace runtime environment after activating conda"); + exit(1); + } + + if (conda_setup_headless()) { + // no COE check. this call must succeed. + exit(1); + } +} + diff --git a/src/delivery_docker.c b/src/delivery_docker.c new file mode 100644 index 0000000..e1d7f60 --- /dev/null +++ b/src/delivery_docker.c @@ -0,0 +1,132 @@ +#include "delivery.h" + +int delivery_docker(struct Delivery *ctx) { + if (!docker_capable(&ctx->deploy.docker.capabilities)) { + return -1; + } + char tag[STASIS_NAME_MAX]; + char args[PATH_MAX]; + int has_registry = ctx->deploy.docker.registry != NULL; + size_t total_tags = strlist_count(ctx->deploy.docker.tags); + size_t total_build_args = strlist_count(ctx->deploy.docker.build_args); + + if (!has_registry) { + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker registry defined. You will need to manually retag the resulting image.\n"); + } + + if (!total_tags) { + char default_tag[PATH_MAX]; + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker tags defined by configuration. Generating default tag(s).\n"); + // generate local tag + memset(default_tag, 0, sizeof(default_tag)); + sprintf(default_tag, "%s:%s-py%s", ctx->meta.name, ctx->info.build_name, ctx->meta.python_compact); + tolower_s(default_tag); + + // Add tag + ctx->deploy.docker.tags = strlist_init(); + strlist_append(&ctx->deploy.docker.tags, default_tag); + + if (has_registry) { + // generate tag for target registry + memset(default_tag, 0, sizeof(default_tag)); + sprintf(default_tag, "%s/%s:%s-py%s", ctx->deploy.docker.registry, ctx->meta.name, ctx->info.build_number, ctx->meta.python_compact); + tolower_s(default_tag); + + // Add tag + strlist_append(&ctx->deploy.docker.tags, default_tag); + } + // regenerate total tag available + total_tags = strlist_count(ctx->deploy.docker.tags); + } + + memset(args, 0, sizeof(args)); + + // Append image tags to command + for (size_t i = 0; i < total_tags; i++) { + char *tag_orig = strlist_item(ctx->deploy.docker.tags, i); + strcpy(tag, tag_orig); + docker_sanitize_tag(tag); + sprintf(args + strlen(args), " -t \"%s\" ", tag); + } + + // Append build arguments to command (i.e. 
--build-arg "key=value" + for (size_t i = 0; i < total_build_args; i++) { + char *build_arg = strlist_item(ctx->deploy.docker.build_args, i); + if (!build_arg) { + break; + } + sprintf(args + strlen(args), " --build-arg \"%s\" ", build_arg); + } + + // Build the image + char delivery_file[PATH_MAX]; + char dest[PATH_MAX]; + char rsync_cmd[PATH_MAX * 2]; + memset(delivery_file, 0, sizeof(delivery_file)); + memset(dest, 0, sizeof(dest)); + + sprintf(delivery_file, "%s/%s.yml", ctx->storage.delivery_dir, ctx->info.release_name); + if (access(delivery_file, F_OK) < 0) { + fprintf(stderr, "docker build cannot proceed without delivery file: %s\n", delivery_file); + return -1; + } + + sprintf(dest, "%s/%s.yml", ctx->storage.build_docker_dir, ctx->info.release_name); + if (copy2(delivery_file, dest, CT_PERM)) { + fprintf(stderr, "Failed to copy delivery file to %s: %s\n", dest, strerror(errno)); + return -1; + } + + memset(dest, 0, sizeof(dest)); + sprintf(dest, "%s/packages", ctx->storage.build_docker_dir); + + msg(STASIS_MSG_L2, "Copying conda packages\n"); + memset(rsync_cmd, 0, sizeof(rsync_cmd)); + sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.conda_artifact_dir, dest); + if (system(rsync_cmd)) { + fprintf(stderr, "Failed to copy conda artifacts to docker build directory\n"); + return -1; + } + + msg(STASIS_MSG_L2, "Copying wheel packages\n"); + memset(rsync_cmd, 0, sizeof(rsync_cmd)); + sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.wheel_artifact_dir, dest); + if (system(rsync_cmd)) { + fprintf(stderr, "Failed to copy wheel artifactory to docker build directory\n"); + } + + if (docker_build(ctx->storage.build_docker_dir, args, ctx->deploy.docker.capabilities.build)) { + return -1; + } + + // Test the image + // All tags point back to the same image so test the first one we see + // regardless of how many are defined + strcpy(tag, strlist_item(ctx->deploy.docker.tags, 0)); + docker_sanitize_tag(tag); + + msg(STASIS_MSG_L2, "Executing image test script for %s\n", tag); + if (ctx->deploy.docker.test_script) { + if (isempty(ctx->deploy.docker.test_script)) { + msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Image test script has no content\n"); + } else { + int state; + if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) { + msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Non-zero exit (%d) from test script. 
%s image archive will not be generated.\n", state >> 8, tag); + // test failed -- don't save the image + return -1; + } + } + } else { + msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "No image test script defined\n"); + } + + // Test successful, save image + if (docker_save(path_basename(tag), ctx->storage.docker_artifact_dir, ctx->deploy.docker.image_compression)) { + // save failed + return -1; + } + + return 0; +} + diff --git a/src/delivery_init.c b/src/delivery_init.c new file mode 100644 index 0000000..e914f99 --- /dev/null +++ b/src/delivery_init.c @@ -0,0 +1,345 @@ +#include "delivery.h" + +int has_mount_flags(const char *mount_point, const unsigned long flags) { + struct statvfs st; + if (statvfs(mount_point, &st)) { + SYSERROR("Unable to determine mount-point flags: %s", strerror(errno)); + return -1; + } + return (st.f_flag & flags) != 0; +} + +int delivery_init_tmpdir(struct Delivery *ctx) { + char *tmpdir = NULL; + char *x = NULL; + int unusable = 0; + errno = 0; + + x = getenv("TMPDIR"); + if (x) { + guard_free(ctx->storage.tmpdir); + tmpdir = strdup(x); + } else { + tmpdir = ctx->storage.tmpdir; + } + + if (!tmpdir) { + // memory error + return -1; + } + + // If the directory doesn't exist, create it + if (access(tmpdir, F_OK) < 0) { + if (mkdirs(tmpdir, 0755) < 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create temporary storage directory: %s (%s)\n", tmpdir, strerror(errno)); + goto l_delivery_init_tmpdir_fatal; + } + } + + // If we can't read, write, or execute, then die + if (access(tmpdir, R_OK | W_OK | X_OK) < 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s requires at least 0755 permissions.\n"); + goto l_delivery_init_tmpdir_fatal; + } + + struct statvfs st; + if (statvfs(tmpdir, &st) < 0) { + goto l_delivery_init_tmpdir_fatal; + } + +#if defined(STASIS_OS_LINUX) + // If we can't execute programs, or write data to the file system at all, then die + if ((st.f_flag & ST_NOEXEC) != 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted with noexec\n", tmpdir); + goto l_delivery_init_tmpdir_fatal; + } +#endif + if ((st.f_flag & ST_RDONLY) != 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted read-only\n", tmpdir); + goto l_delivery_init_tmpdir_fatal; + } + + if (!globals.tmpdir) { + globals.tmpdir = strdup(tmpdir); + } + + if (!ctx->storage.tmpdir) { + ctx->storage.tmpdir = strdup(globals.tmpdir); + } + return unusable; + + l_delivery_init_tmpdir_fatal: + unusable = 1; + return unusable; +} + +void delivery_init_dirs_stage2(struct Delivery *ctx) { + path_store(&ctx->storage.build_recipes_dir, PATH_MAX, ctx->storage.build_dir, "recipes"); + path_store(&ctx->storage.build_sources_dir, PATH_MAX, ctx->storage.build_dir, "sources"); + path_store(&ctx->storage.build_testing_dir, PATH_MAX, ctx->storage.build_dir, "testing"); + path_store(&ctx->storage.build_docker_dir, PATH_MAX, ctx->storage.build_dir, "docker"); + + path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery"); + path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results"); + path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages"); + path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config"); + path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta"); + + path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda"); + path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels"); + 
path_store(&ctx->storage.docker_artifact_dir, PATH_MAX, ctx->storage.package_dir, "docker"); +} + +void delivery_init_dirs_stage1(struct Delivery *ctx) { + char *rootdir = getenv("STASIS_ROOT"); + if (rootdir) { + if (isempty(rootdir)) { + fprintf(stderr, "STASIS_ROOT is set, but empty. Please assign a file system path to this environment variable.\n"); + exit(1); + } + path_store(&ctx->storage.root, PATH_MAX, rootdir, ctx->info.build_name); + } else { + // use "stasis" in current working directory + path_store(&ctx->storage.root, PATH_MAX, "stasis", ctx->info.build_name); + } + path_store(&ctx->storage.tools_dir, PATH_MAX, ctx->storage.root, "tools"); + path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp"); + if (delivery_init_tmpdir(ctx)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Set $TMPDIR to a location other than %s\n", globals.tmpdir); + if (globals.tmpdir) + guard_free(globals.tmpdir); + exit(1); + } + + path_store(&ctx->storage.build_dir, PATH_MAX, ctx->storage.root, "build"); + path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output"); + + if (!ctx->storage.mission_dir) { + path_store(&ctx->storage.mission_dir, PATH_MAX, globals.sysconfdir, "mission"); + } + + if (access(ctx->storage.mission_dir, F_OK)) { + msg(STASIS_MSG_L1, "%s: %s\n", ctx->storage.mission_dir, strerror(errno)); + exit(1); + } + + // Override installation prefix using global configuration key + if (globals.conda_install_prefix && strlen(globals.conda_install_prefix)) { + // user wants a specific path + globals.conda_fresh_start = false; + /* + if (mkdirs(globals.conda_install_prefix, 0755)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create directory: %s: %s\n", + strerror(errno), globals.conda_install_prefix); + exit(1); + } + */ + /* + ctx->storage.conda_install_prefix = realpath(globals.conda_install_prefix, NULL); + if (!ctx->storage.conda_install_prefix) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "realpath(): Conda installation prefix reassignment failed\n"); + exit(1); + } + ctx->storage.conda_install_prefix = strdup(globals.conda_install_prefix); + */ + path_store(&ctx->storage.conda_install_prefix, PATH_MAX, globals.conda_install_prefix, "conda"); + } else { + // install conda under the STASIS tree + path_store(&ctx->storage.conda_install_prefix, PATH_MAX, ctx->storage.tools_dir, "conda"); + } +} + +int delivery_init_platform(struct Delivery *ctx) { + msg(STASIS_MSG_L2, "Setting architecture\n"); + char archsuffix[20]; + struct utsname uts; + if (uname(&uts)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "uname() failed: %s\n", strerror(errno)); + return -1; + } + + ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX + 1, sizeof(*ctx->system.platform)); + if (!ctx->system.platform) { + SYSERROR("Unable to allocate %d records for platform array\n", DELIVERY_PLATFORM_MAX); + return -1; + } + for (size_t i = 0; i < DELIVERY_PLATFORM_MAX; i++) { + ctx->system.platform[i] = calloc(DELIVERY_PLATFORM_MAXLEN, sizeof(*ctx->system.platform[0])); + } + + ctx->system.arch = strdup(uts.machine); + if (!ctx->system.arch) { + // memory error + return -1; + } + + if (!strcmp(ctx->system.arch, "x86_64")) { + strcpy(archsuffix, "64"); + } else { + strcpy(archsuffix, ctx->system.arch); + } + + msg(STASIS_MSG_L2, "Setting platform\n"); + strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname); + if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) { + sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix); + 
strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "MacOSX"); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "macos"); + } else if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux")) { + sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "linux-%s", archsuffix); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "Linux"); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "linux"); + } else { + // Not explicitly supported systems + strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], ctx->system.platform[DELIVERY_PLATFORM]); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], ctx->system.platform[DELIVERY_PLATFORM]); + strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], ctx->system.platform[DELIVERY_PLATFORM]); + tolower_s(ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); + } + + long cpu_count = get_cpu_count(); + if (!cpu_count) { + fprintf(stderr, "Unable to determine CPU count. Falling back to 1.\n"); + cpu_count = 1; + } + char ncpus[100] = {0}; + sprintf(ncpus, "%ld", cpu_count); + + // Declare some important bits as environment variables + setenv("CPU_COUNT", ncpus, 1); + setenv("STASIS_CPU_COUNT", ncpus, 1); + setenv("STASIS_ARCH", ctx->system.arch, 1); + setenv("STASIS_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM], 1); + setenv("STASIS_CONDA_ARCH", ctx->system.arch, 1); + setenv("STASIS_CONDA_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], 1); + setenv("STASIS_CONDA_PLATFORM_SUBDIR", ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], 1); + + // Register template variables + // These were moved out of main() because we can't take the address of system.platform[x] + // _before_ the array has been initialized. + tpl_register("system.arch", &ctx->system.arch); + tpl_register("system.platform", &ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); + + return 0; +} + +int delivery_init(struct Delivery *ctx, int render_mode) { + populate_info(ctx); + populate_delivery_cfg(ctx, INI_READ_RENDER); + + // Set artifactory URL via environment variable if possible + char *jfurl = getenv("STASIS_JF_ARTIFACTORY_URL"); + if (jfurl) { + if (globals.jfrog.url) { + guard_free(globals.jfrog.url); + } + globals.jfrog.url = strdup(jfurl); + } + + // Set artifactory repository via environment if possible + char *jfrepo = getenv("STASIS_JF_REPO"); + if (jfrepo) { + if (globals.jfrog.repo) { + guard_free(globals.jfrog.repo); + } + globals.jfrog.repo = strdup(jfrepo); + } + + // Configure architecture and platform information + delivery_init_platform(ctx); + + // Create STASIS directory structure + delivery_init_dirs_stage1(ctx); + + char config_local[PATH_MAX]; + sprintf(config_local, "%s/%s", ctx->storage.tmpdir, "config"); + setenv("XDG_CONFIG_HOME", config_local, 1); + + char cache_local[PATH_MAX]; + sprintf(cache_local, "%s/%s", ctx->storage.tmpdir, "cache"); + setenv("XDG_CACHE_HOME", ctx->storage.tmpdir, 1); + + // add tools to PATH + char pathvar_tmp[STASIS_BUFSIZ]; + sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.tools_dir, getenv("PATH")); + setenv("PATH", pathvar_tmp, 1); + + // Prevent git from paginating output + setenv("GIT_PAGER", "", 1); + + populate_delivery_ini(ctx, render_mode); + + if (ctx->deploy.docker.tags) { + for (size_t i = 0; i < strlist_count(ctx->deploy.docker.tags); i++) { + char *item = strlist_item(ctx->deploy.docker.tags, i); + tolower_s(item); + } + } + + if (ctx->deploy.docker.image_compression) { + if 
(docker_validate_compression_program(ctx->deploy.docker.image_compression)) { + SYSERROR("[deploy:docker].image_compression - invalid command / program is not installed: %s", ctx->deploy.docker.image_compression); + return -1; + } + } + return 0; +} + +int bootstrap_build_info(struct Delivery *ctx) { + struct Delivery local; + memset(&local, 0, sizeof(local)); + local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path); + local._stasis_ini_fp.delivery = ini_open(ctx->_stasis_ini_fp.delivery_path); + delivery_init_platform(&local); + populate_delivery_cfg(&local, INI_READ_RENDER); + populate_delivery_ini(&local, INI_READ_RENDER); + populate_info(&local); + ctx->info.build_name = strdup(local.info.build_name); + ctx->info.build_number = strdup(local.info.build_number); + ctx->info.release_name = strdup(local.info.release_name); + ctx->info.time_info = malloc(sizeof(*ctx->info.time_info)); + if (!ctx->info.time_info) { + SYSERROR("Unable to allocate %zu bytes for tm struct: %s", sizeof(*local.info.time_info), strerror(errno)); + return -1; + } + memcpy(ctx->info.time_info, local.info.time_info, sizeof(*local.info.time_info)); + ctx->info.time_now = local.info.time_now; + ctx->info.time_str_epoch = strdup(local.info.time_str_epoch); + delivery_free(&local); + return 0; +} + +int delivery_exists(struct Delivery *ctx) { + int release_exists = 0; + char release_pattern[PATH_MAX] = {0}; + sprintf(release_pattern, "*%s*", ctx->info.release_name); + + if (globals.enable_artifactory) { + if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) { + fprintf(stderr, "Failed to initialize Artifactory authentication context\n"); + return -1; // error + } + + struct JFRT_Search search = {.fail_no_op = true}; + release_exists = jfrog_cli_rt_search(&ctx->deploy.jfrog_auth, &search, globals.jfrog.repo, release_pattern); + if (release_exists != 2) { + if (!globals.enable_overwrite && !release_exists) { + // --fail_no_op returns 2 on failure + // without: it returns an empty list "[]" and exit code 0 + return 1; // found + } + } + } else { + struct StrList *files = listdir(ctx->storage.delivery_dir); + for (size_t i = 0; i < strlist_count(files); i++) { + char *filename = strlist_item(files, i); + release_exists = fnmatch(release_pattern, filename, FNM_PATHNAME); + if (!globals.enable_overwrite && !release_exists) { + guard_strlist_free(&files); + return 1; // found + } + } + guard_strlist_free(&files); + } + return 0; // not found +} diff --git a/src/delivery_install.c b/src/delivery_install.c new file mode 100644 index 0000000..72f2663 --- /dev/null +++ b/src/delivery_install.c @@ -0,0 +1,116 @@ +#include "delivery.h" + +static const struct Test *requirement_from_test(struct Delivery *ctx, const char *name) { + struct Test *result; + + result = NULL; + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + if (ctx->tests[i].name && strstr(name, ctx->tests[i].name)) { + result = &ctx->tests[i]; + break; + } + } + return result; +} + +int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, char *env_name, int type, struct StrList **manifest) { + char cmd[PATH_MAX]; + char pkgs[STASIS_BUFSIZ]; + char *env_current = getenv("CONDA_DEFAULT_ENV"); + + if (env_current) { + // The requested environment is not the current environment + if (strcmp(env_current, env_name) != 0) { + // Activate the requested environment + printf("Activating: %s\n", env_name); + conda_activate(conda_install_dir, env_name); + runtime_replace(&ctx->runtime.environ, __environ); + } + } + + memset(cmd, 0, 
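[note] delivery_exists() above treats a glob match in the delivery directory (or a successful jfrog search) as an existing release. A minimal local-only sketch of that check, using plain dirent instead of the listdir()/StrList helpers:

#include <dirent.h>
#include <fnmatch.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the local branch of delivery_exists(): scan a delivery
 * directory for any file matching "*<release_name>*". Returns 1 when
 * a matching file exists, 0 otherwise. Illustrative only. */
static int release_exists_local(const char *dir, const char *release_name) {
    char pattern[4096];
    snprintf(pattern, sizeof(pattern), "*%s*", release_name);

    DIR *dp = opendir(dir);
    if (!dp) {
        return 0;
    }
    struct dirent *rec;
    int found = 0;
    while ((rec = readdir(dp)) != NULL) {
        /* fnmatch() returns 0 on a match */
        if (fnmatch(pattern, rec->d_name, FNM_PATHNAME) == 0) {
            found = 1;
            break;
        }
    }
    closedir(dp);
    return found;
}

The Artifactory branch reaches the same answer remotely: with fail_no_op set, the jfrog search exits 2 when nothing matches, which is why the code above checks for that specific value.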
sizeof(cmd)); + memset(pkgs, 0, sizeof(pkgs)); + strcat(cmd, "install"); + + typedef int (*Runner)(const char *); + Runner runner = NULL; + if (INSTALL_PKG_CONDA & type) { + runner = conda_exec; + } else if (INSTALL_PKG_PIP & type) { + runner = pip_exec; + } + + if (INSTALL_PKG_CONDA_DEFERRED & type) { + strcat(cmd, " --use-local"); + } else if (INSTALL_PKG_PIP_DEFERRED & type) { + // Don't change the baseline package set unless we're working with a + // new build. Release candidates will need to keep packages as stable + // as possible between releases. + if (!ctx->meta.based_on) { + strcat(cmd, " --upgrade"); + } + sprintf(cmd + strlen(cmd), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir); + } + + for (size_t x = 0; manifest[x] != NULL; x++) { + char *name = NULL; + for (size_t p = 0; p < strlist_count(manifest[x]); p++) { + name = strlist_item(manifest[x], p); + strip(name); + if (!strlen(name)) { + continue; + } + if (INSTALL_PKG_PIP_DEFERRED & type) { + struct Test *info = (struct Test *) requirement_from_test(ctx, name); + if (info) { + if (!strcmp(info->version, "HEAD")) { + struct StrList *tag_data = strlist_init(); + if (!tag_data) { + SYSERROR("%s", "Unable to allocate memory for tag data\n"); + return -1; + } + strlist_append_tokenize(tag_data, info->repository_info_tag, "-"); + + struct Wheel *whl = NULL; + char *post_commit = NULL; + char *hash = NULL; + if (strlist_count(tag_data) > 1) { + post_commit = strlist_item(tag_data, 1); + hash = strlist_item(tag_data, 2); + } + + // We can't match on version here (index 0). The wheel's version is not guaranteed to be + // equal to the tag; setuptools_scm auto-increments the value, the user can change it manually, + // etc. + whl = get_wheel_file(ctx->storage.wheel_artifact_dir, info->name, + (char *[]) {ctx->meta.python_compact, ctx->system.arch, + "none", "any", + post_commit, hash, + NULL}, WHEEL_MATCH_ANY); + + guard_strlist_free(&tag_data); + info->version = whl->version; + sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, whl->version); + } else { + sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, info->version); + } + } else { + fprintf(stderr, "Deferred package '%s' is not present in the tested package list!\n", name); + return -1; + } + } else { + if (startswith(name, "--") || startswith(name, "-")) { + sprintf(cmd + strlen(cmd), " %s", name); + } else { + sprintf(cmd + strlen(cmd), " '%s'", name); + } + } + } + int status = runner(cmd); + if (status) { + return status; + } + } + return 0; +} + diff --git a/src/delivery_populate.c b/src/delivery_populate.c new file mode 100644 index 0000000..02ce257 --- /dev/null +++ b/src/delivery_populate.c @@ -0,0 +1,348 @@ +#include "delivery.h" + +static void ini_has_key_required(struct INIFILE *ini, const char *section_name, char *key) { + int status = ini_has_key(ini, section_name, key); + if (!status) { + SYSERROR("%s:%s key is required but not defined", section_name, key); + exit(1); + } +} + +static void conv_str(char **x, union INIVal val) { + if (*x) { + guard_free(*x); + } + if (val.as_char_p) { + char *tplop = tpl_render(val.as_char_p); + if (tplop) { + *x = tplop; + } else { + *x = NULL; + } + } else { + *x = NULL; + } +} + + + +int populate_info(struct Delivery *ctx) { + if (!ctx->info.time_str_epoch) { + // Record timestamp used for release + time(&ctx->info.time_now); + ctx->info.time_info = localtime(&ctx->info.time_now); + + ctx->info.time_str_epoch = calloc(STASIS_TIME_STR_MAX, sizeof(*ctx->info.time_str_epoch)); + if (!ctx->info.time_str_epoch) { 
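[note] The install loop above grows one long "install ..." command with repeated sprintf(cmd + strlen(cmd), ...) appends into a fixed buffer. A hedged sketch of the same append pattern with explicit truncation checking; append_spec() is illustrative, not a STASIS function:

#include <stdio.h>
#include <string.h>

/* Sketch: bounds-checked append used to grow an "install ..." command
 * line, quoting each requirement the way the loop above does. */
static int append_spec(char *cmd, size_t size, const char *name, const char *version) {
    size_t used = strlen(cmd);
    int need = snprintf(cmd + used, size - used, " '%s==%s'", name, version);
    /* snprintf reports how much space the spec wanted; detect truncation */
    return (need < 0 || (size_t) need >= size - used) ? -1 : 0;
}

int main(void) {
    char cmd[4096] = "install --upgrade";
    append_spec(cmd, sizeof(cmd), "our_cool_program", "1.2.3");
    append_spec(cmd, sizeof(cmd), "our_other_cool_program", "4.5.6");
    printf("%s\n", cmd); /* install --upgrade 'our_cool_program==1.2.3' ... */
    return 0;
}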
+ msg(STASIS_MSG_ERROR, "Unable to allocate memory for Unix epoch string\n"); + return -1; + } + snprintf(ctx->info.time_str_epoch, STASIS_TIME_STR_MAX - 1, "%li", ctx->info.time_now); + } + return 0; +} + +int populate_delivery_cfg(struct Delivery *ctx, int render_mode) { + struct INIFILE *cfg = ctx->_stasis_ini_fp.cfg; + if (!cfg) { + return -1; + } + int err = 0; + ctx->storage.conda_staging_dir = ini_getval_str(cfg, "default", "conda_staging_dir", render_mode, &err); + ctx->storage.conda_staging_url = ini_getval_str(cfg, "default", "conda_staging_url", render_mode, &err); + ctx->storage.wheel_staging_dir = ini_getval_str(cfg, "default", "wheel_staging_dir", render_mode, &err); + ctx->storage.wheel_staging_url = ini_getval_str(cfg, "default", "wheel_staging_url", render_mode, &err); + globals.conda_fresh_start = ini_getval_bool(cfg, "default", "conda_fresh_start", render_mode, &err); + if (!globals.continue_on_error) { + globals.continue_on_error = ini_getval_bool(cfg, "default", "continue_on_error", render_mode, &err); + } + if (!globals.always_update_base_environment) { + globals.always_update_base_environment = ini_getval_bool(cfg, "default", "always_update_base_environment", render_mode, &err); + } + globals.conda_install_prefix = ini_getval_str(cfg, "default", "conda_install_prefix", render_mode, &err); + globals.conda_packages = ini_getval_strlist(cfg, "default", "conda_packages", LINE_SEP, render_mode, &err); + globals.pip_packages = ini_getval_strlist(cfg, "default", "pip_packages", LINE_SEP, render_mode, &err); + + globals.jfrog.jfrog_artifactory_base_url = ini_getval_str(cfg, "jfrog_cli_download", "url", render_mode, &err); + globals.jfrog.jfrog_artifactory_product = ini_getval_str(cfg, "jfrog_cli_download", "product", render_mode, &err); + globals.jfrog.cli_major_ver = ini_getval_str(cfg, "jfrog_cli_download", "version_series", render_mode, &err); + globals.jfrog.version = ini_getval_str(cfg, "jfrog_cli_download", "version", render_mode, &err); + globals.jfrog.remote_filename = ini_getval_str(cfg, "jfrog_cli_download", "filename", render_mode, &err); + globals.jfrog.url = ini_getval_str(cfg, "deploy:artifactory", "url", render_mode, &err); + globals.jfrog.repo = ini_getval_str(cfg, "deploy:artifactory", "repo", render_mode, &err); + + return 0; +} + +int populate_delivery_ini(struct Delivery *ctx, int render_mode) { + union INIVal val; + struct INIFILE *ini = ctx->_stasis_ini_fp.delivery; + struct INIData *rtdata; + RuntimeEnv *rt; + + validate_delivery_ini(ini); + // Populate runtime variables first they may be interpreted by other + // keys in the configuration + rt = runtime_copy(__environ); + while ((rtdata = ini_getall(ini, "runtime")) != NULL) { + char rec[STASIS_BUFSIZ]; + sprintf(rec, "%s=%s", lstrip(strip(rtdata->key)), lstrip(strip(rtdata->value))); + runtime_set(rt, rtdata->key, rtdata->value); + } + runtime_apply(rt); + ctx->runtime.environ = rt; + + int err = 0; + ctx->meta.mission = ini_getval_str(ini, "meta", "mission", render_mode, &err); + + if (!strcasecmp(ctx->meta.mission, "hst")) { + ctx->meta.codename = ini_getval_str(ini, "meta", "codename", render_mode, &err); + } else { + ctx->meta.codename = NULL; + } + + ctx->meta.version = ini_getval_str(ini, "meta", "version", render_mode, &err); + ctx->meta.name = ini_getval_str(ini, "meta", "name", render_mode, &err); + ctx->meta.rc = ini_getval_int(ini, "meta", "rc", render_mode, &err); + ctx->meta.final = ini_getval_bool(ini, "meta", "final", render_mode, &err); + ctx->meta.based_on = ini_getval_str(ini, 
"meta", "based_on", render_mode, &err); + + if (!ctx->meta.python) { + ctx->meta.python = ini_getval_str(ini, "meta", "python", render_mode, &err); + guard_free(ctx->meta.python_compact); + ctx->meta.python_compact = to_short_version(ctx->meta.python); + } else { + ini_setval(&ini, INI_SETVAL_REPLACE, "meta", "python", ctx->meta.python); + } + + ctx->conda.installer_name = ini_getval_str(ini, "conda", "installer_name", render_mode, &err); + ctx->conda.installer_version = ini_getval_str(ini, "conda", "installer_version", render_mode, &err); + ctx->conda.installer_platform = ini_getval_str(ini, "conda", "installer_platform", render_mode, &err); + ctx->conda.installer_arch = ini_getval_str(ini, "conda", "installer_arch", render_mode, &err); + ctx->conda.installer_baseurl = ini_getval_str(ini, "conda", "installer_baseurl", render_mode, &err); + ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", " "LINE_SEP, render_mode, &err); + + if (ctx->conda.conda_packages->data && ctx->conda.conda_packages->data[0] && strpbrk(ctx->conda.conda_packages->data[0], " \t")) { + normalize_space(ctx->conda.conda_packages->data[0]); + replace_text(ctx->conda.conda_packages->data[0], " ", LINE_SEP, 0); + char *pip_packages_replacement = join(ctx->conda.conda_packages->data, LINE_SEP); + ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "conda_packages", pip_packages_replacement); + guard_free(pip_packages_replacement); + guard_strlist_free(&ctx->conda.conda_packages); + ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", LINE_SEP, render_mode, &err); + } + + for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) { + char *pkg = strlist_item(ctx->conda.conda_packages, i); + if (strpbrk(pkg, ";#") || isempty(pkg)) { + strlist_remove(ctx->conda.conda_packages, i); + } + } + + ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err); + if (ctx->conda.pip_packages->data && ctx->conda.pip_packages->data[0] && strpbrk(ctx->conda.pip_packages->data[0], " \t")) { + normalize_space(ctx->conda.pip_packages->data[0]); + replace_text(ctx->conda.pip_packages->data[0], " ", LINE_SEP, 0); + char *pip_packages_replacement = join(ctx->conda.pip_packages->data, LINE_SEP); + ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "pip_packages", pip_packages_replacement); + guard_free(pip_packages_replacement); + guard_strlist_free(&ctx->conda.pip_packages); + ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err); + } + + for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) { + char *pkg = strlist_item(ctx->conda.pip_packages, i); + if (strpbrk(pkg, ";#") || isempty(pkg)) { + strlist_remove(ctx->conda.pip_packages, i); + } + } + + // Delivery metadata consumed + populate_mission_ini(&ctx, render_mode); + + if (ctx->info.release_name) { + guard_free(ctx->info.release_name); + guard_free(ctx->info.build_name); + guard_free(ctx->info.build_number); + } + + if (delivery_format_str(ctx, &ctx->info.release_name, ctx->rules.release_fmt)) { + fprintf(stderr, "Failed to generate release name. Format used: %s\n", ctx->rules.release_fmt); + return -1; + } + + if (!ctx->info.build_name) { + delivery_format_str(ctx, &ctx->info.build_name, ctx->rules.build_name_fmt); + } + if (!ctx->info.build_number) { + delivery_format_str(ctx, &ctx->info.build_number, ctx->rules.build_number_fmt); + } + + // Best I can do to make output directories unique. Annoying. 
+ delivery_init_dirs_stage2(ctx); + + if (!ctx->conda.conda_packages_defer) { + ctx->conda.conda_packages_defer = strlist_init(); + } + if (!ctx->conda.pip_packages_defer) { + ctx->conda.pip_packages_defer = strlist_init(); + } + + for (size_t z = 0, i = 0; i < ini->section_count; i++) { + char *section_name = ini->section[i]->key; + if (startswith(section_name, "test:")) { + struct Test *test = &ctx->tests[z]; + val.as_char_p = strchr(ini->section[i]->key, ':') + 1; + if (val.as_char_p && isempty(val.as_char_p)) { + return 1; + } + conv_str(&test->name, val); + + test->version = ini_getval_str(ini, section_name, "version", render_mode, &err); + test->repository = ini_getval_str(ini, section_name, "repository", render_mode, &err); + test->script_setup = ini_getval_str(ini, section_name, "script_setup", INI_READ_RAW, &err); + test->script = ini_getval_str(ini, section_name, "script", INI_READ_RAW, &err); + test->disable = ini_getval_bool(ini, section_name, "disable", render_mode, &err); + test->parallel = ini_getval_bool(ini, section_name, "parallel", render_mode, &err); + if (err) { + test->parallel = true; + } + test->repository_remove_tags = ini_getval_strlist(ini, section_name, "repository_remove_tags", LINE_SEP, render_mode, &err); + test->build_recipe = ini_getval_str(ini, section_name, "build_recipe", render_mode, &err); + test->runtime.environ = ini_getval_strlist(ini, section_name, "runtime", LINE_SEP, render_mode, &err); + z++; + } + } + + for (size_t z = 0, i = 0; i < ini->section_count; i++) { + char *section_name = ini->section[i]->key; + struct Deploy *deploy = &ctx->deploy; + if (startswith(section_name, "deploy:artifactory")) { + struct JFrog *jfrog = &deploy->jfrog[z]; + // Artifactory base configuration + + jfrog->upload_ctx.workaround_parent_only = ini_getval_bool(ini, section_name, "workaround_parent_only", render_mode, &err); + jfrog->upload_ctx.exclusions = ini_getval_str(ini, section_name, "exclusions", render_mode, &err); + jfrog->upload_ctx.explode = ini_getval_bool(ini, section_name, "explode", render_mode, &err); + jfrog->upload_ctx.recursive = ini_getval_bool(ini, section_name, "recursive", render_mode, &err); + jfrog->upload_ctx.retries = ini_getval_int(ini, section_name, "retries", render_mode, &err); + jfrog->upload_ctx.retry_wait_time = ini_getval_int(ini, section_name, "retry_wait_time", render_mode, &err); + jfrog->upload_ctx.detailed_summary = ini_getval_bool(ini, section_name, "detailed_summary", render_mode, &err); + jfrog->upload_ctx.quiet = ini_getval_bool(ini, section_name, "quiet", render_mode, &err); + jfrog->upload_ctx.regexp = ini_getval_bool(ini, section_name, "regexp", render_mode, &err); + jfrog->upload_ctx.spec = ini_getval_str(ini, section_name, "spec", render_mode, &err); + jfrog->upload_ctx.flat = ini_getval_bool(ini, section_name, "flat", render_mode, &err); + jfrog->repo = ini_getval_str(ini, section_name, "repo", render_mode, &err); + jfrog->dest = ini_getval_str(ini, section_name, "dest", render_mode, &err); + jfrog->files = ini_getval_strlist(ini, section_name, "files", LINE_SEP, render_mode, &err); + z++; + } + } + + for (size_t i = 0; i < ini->section_count; i++) { + char *section_name = ini->section[i]->key; + struct Deploy *deploy = &ctx->deploy; + if (startswith(ini->section[i]->key, "deploy:docker")) { + struct Docker *docker = &deploy->docker; + + docker->registry = ini_getval_str(ini, section_name, "registry", render_mode, &err); + docker->image_compression = ini_getval_str(ini, section_name, "image_compression", render_mode, 
&err); + docker->test_script = ini_getval_str(ini, section_name, "test_script", render_mode, &err); + docker->build_args = ini_getval_strlist(ini, section_name, "build_args", LINE_SEP, render_mode, &err); + docker->tags = ini_getval_strlist(ini, section_name, "tags", LINE_SEP, render_mode, &err); + } + } + return 0; +} + +int populate_mission_ini(struct Delivery **ctx, int render_mode) { + int err = 0; + struct INIFILE *ini; + + if ((*ctx)->_stasis_ini_fp.mission) { + return 0; + } + + // Now populate the rules + char missionfile[PATH_MAX] = {0}; + if (getenv("STASIS_SYSCONFDIR")) { + sprintf(missionfile, "%s/%s/%s/%s.ini", + getenv("STASIS_SYSCONFDIR"), "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); + } else { + sprintf(missionfile, "%s/%s/%s/%s.ini", + globals.sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); + } + + msg(STASIS_MSG_L2, "Reading mission configuration: %s\n", missionfile); + (*ctx)->_stasis_ini_fp.mission = ini_open(missionfile); + ini = (*ctx)->_stasis_ini_fp.mission; + if (!ini) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read misson configuration: %s, %s\n", missionfile, strerror(errno)); + exit(1); + } + (*ctx)->_stasis_ini_fp.mission_path = strdup(missionfile); + + (*ctx)->rules.release_fmt = ini_getval_str(ini, "meta", "release_fmt", render_mode, &err); + + // Used for setting artifactory build info + (*ctx)->rules.build_name_fmt = ini_getval_str(ini, "meta", "build_name_fmt", render_mode, &err); + + // Used for setting artifactory build info + (*ctx)->rules.build_number_fmt = ini_getval_str(ini, "meta", "build_number_fmt", render_mode, &err); + return 0; +} + +void validate_delivery_ini(struct INIFILE *ini) { + if (!ini) { + SYSERROR("%s", "INIFILE is NULL!"); + exit(1); + } + if (ini_section_search(&ini, INI_SEARCH_EXACT, "meta")) { + ini_has_key_required(ini, "meta", "name"); + ini_has_key_required(ini, "meta", "version"); + ini_has_key_required(ini, "meta", "rc"); + ini_has_key_required(ini, "meta", "mission"); + ini_has_key_required(ini, "meta", "python"); + } else { + SYSERROR("%s", "[meta] configuration section is required"); + exit(1); + } + + if (ini_section_search(&ini, INI_SEARCH_EXACT, "conda")) { + ini_has_key_required(ini, "conda", "installer_name"); + ini_has_key_required(ini, "conda", "installer_version"); + ini_has_key_required(ini, "conda", "installer_platform"); + ini_has_key_required(ini, "conda", "installer_arch"); + } else { + SYSERROR("%s", "[conda] configuration section is required"); + exit(1); + } + + for (size_t i = 0; i < ini->section_count; i++) { + struct INISection *section = ini->section[i]; + if (section && startswith(section->key, "test:")) { + char *name = strstr(section->key, ":"); + if (name && strlen(name) > 1) { + name = &name[1]; + } + //ini_has_key_required(ini, section->key, "version"); + //ini_has_key_required(ini, section->key, "repository"); + if (globals.enable_testing) { + ini_has_key_required(ini, section->key, "script"); + } + } + } + + if (ini_section_search(&ini, INI_SEARCH_EXACT, "deploy:docker")) { + // yeah? 
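[note] validate_delivery_ini() here repeats the ini_has_key_required() call once per key. A table-driven variant of the same fail-fast check (illustrative only; has_key() stands in for the real ini_has_key()):

#include <stdio.h>
#include <stdlib.h>

struct required_key { const char *section; const char *key; };

static int has_key(const char *section, const char *key) {
    (void) section; (void) key;
    return 1; /* stub: the real check consults the parsed INI file */
}

int main(void) {
    static const struct required_key required[] = {
        {"meta", "name"}, {"meta", "version"}, {"meta", "rc"},
        {"meta", "mission"}, {"meta", "python"},
        {"conda", "installer_name"}, {"conda", "installer_version"},
        {"conda", "installer_platform"}, {"conda", "installer_arch"},
    };
    for (size_t i = 0; i < sizeof(required) / sizeof(required[0]); i++) {
        if (!has_key(required[i].section, required[i].key)) {
            fprintf(stderr, "%s:%s key is required but not defined\n",
                    required[i].section, required[i].key);
            exit(1);
        }
    }
    return 0;
}

Declaring the keys once keeps the error messages uniform and makes it harder to forget a check when a new required key is added.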
+ } + + for (size_t i = 0; i < ini->section_count; i++) { + struct INISection *section = ini->section[i]; + if (section && startswith(section->key, "deploy:artifactory")) { + ini_has_key_required(ini, section->key, "files"); + ini_has_key_required(ini, section->key, "dest"); + } + } +} + diff --git a/src/delivery_postprocess.c b/src/delivery_postprocess.c new file mode 100644 index 0000000..1a902e3 --- /dev/null +++ b/src/delivery_postprocess.c @@ -0,0 +1,266 @@ +#include "delivery.h" + + +const char *release_header = "# delivery_name: %s\n" + "# delivery_fmt: %s\n" + "# creation_time: %s\n" + "# conda_ident: %s\n" + "# conda_build_ident: %s\n"; + +char *delivery_get_release_header(struct Delivery *ctx) { + char output[STASIS_BUFSIZ]; + char stamp[100]; + strftime(stamp, sizeof(stamp) - 1, "%c", ctx->info.time_info); + sprintf(output, release_header, + ctx->info.release_name, + ctx->rules.release_fmt, + stamp, + ctx->conda.tool_version, + ctx->conda.tool_build_version); + return strdup(output); +} + +int delivery_dump_metadata(struct Delivery *ctx) { + FILE *fp; + char filename[PATH_MAX]; + sprintf(filename, "%s/meta-%s.stasis", ctx->storage.meta_dir, ctx->info.release_name); + fp = fopen(filename, "w+"); + if (!fp) { + return -1; + } + if (globals.verbose) { + printf("%s\n", filename); + } + fprintf(fp, "name %s\n", ctx->meta.name); + fprintf(fp, "version %s\n", ctx->meta.version); + fprintf(fp, "rc %d\n", ctx->meta.rc); + fprintf(fp, "python %s\n", ctx->meta.python); + fprintf(fp, "python_compact %s\n", ctx->meta.python_compact); + fprintf(fp, "mission %s\n", ctx->meta.mission); + fprintf(fp, "codename %s\n", ctx->meta.codename ? ctx->meta.codename : ""); + fprintf(fp, "platform %s %s %s %s\n", + ctx->system.platform[DELIVERY_PLATFORM], + ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], + ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], + ctx->system.platform[DELIVERY_PLATFORM_RELEASE]); + fprintf(fp, "arch %s\n", ctx->system.arch); + fprintf(fp, "time %s\n", ctx->info.time_str_epoch); + fprintf(fp, "release_fmt %s\n", ctx->rules.release_fmt); + fprintf(fp, "release_name %s\n", ctx->info.release_name); + fprintf(fp, "build_name_fmt %s\n", ctx->rules.build_name_fmt); + fprintf(fp, "build_name %s\n", ctx->info.build_name); + fprintf(fp, "build_number_fmt %s\n", ctx->rules.build_number_fmt); + fprintf(fp, "build_number %s\n", ctx->info.build_number); + fprintf(fp, "conda_installer_baseurl %s\n", ctx->conda.installer_baseurl); + fprintf(fp, "conda_installer_name %s\n", ctx->conda.installer_name); + fprintf(fp, "conda_installer_version %s\n", ctx->conda.installer_version); + fprintf(fp, "conda_installer_platform %s\n", ctx->conda.installer_platform); + fprintf(fp, "conda_installer_arch %s\n", ctx->conda.installer_arch); + + fclose(fp); + return 0; +} + +void delivery_rewrite_spec(struct Delivery *ctx, char *filename, unsigned stage) { + char output[PATH_MAX]; + char *header = NULL; + char *tempfile = NULL; + FILE *tp = NULL; + + if (stage == DELIVERY_REWRITE_SPEC_STAGE_1) { + header = delivery_get_release_header(ctx); + if (!header) { + msg(STASIS_MSG_ERROR, "failed to generate release header string\n", filename); + exit(1); + } + tempfile = xmkstemp(&tp, "w+"); + if (!tempfile || !tp) { + msg(STASIS_MSG_ERROR, "%s: unable to create temporary file\n", strerror(errno)); + exit(1); + } + fprintf(tp, "%s", header); + + // Read the original file + char **contents = file_readlines(filename, 0, 0, NULL); + if (!contents) { + msg(STASIS_MSG_ERROR, "%s: unable to read %s", filename); + 
exit(1); + } + + // Write temporary data + for (size_t i = 0; contents[i] != NULL; i++) { + if (startswith(contents[i], "channels:")) { + // Allow for additional conda channel injection + if (ctx->conda.conda_packages_defer && strlist_count(ctx->conda.conda_packages_defer)) { + fprintf(tp, "%s - @CONDA_CHANNEL@\n", contents[i]); + continue; + } + } else if (strstr(contents[i], "- pip:")) { + if (ctx->conda.pip_packages_defer && strlist_count(ctx->conda.pip_packages_defer)) { + // Allow for additional pip argument injection + fprintf(tp, "%s - @PIP_ARGUMENTS@\n", contents[i]); + continue; + } + } else if (startswith(contents[i], "prefix:")) { + // Remove the prefix key + if (strstr(contents[i], "/") || strstr(contents[i], "\\")) { + // path is on the same line as the key + continue; + } else { + // path is on the next line? + if (contents[i + 1] && (strstr(contents[i + 1], "/") || strstr(contents[i + 1], "\\"))) { + i++; + } + continue; + } + } + fprintf(tp, "%s", contents[i]); + } + GENERIC_ARRAY_FREE(contents); + guard_free(header); + fflush(tp); + fclose(tp); + + // Replace the original file with our temporary data + if (copy2(tempfile, filename, CT_PERM) < 0) { + fprintf(stderr, "%s: could not rename '%s' to '%s'\n", strerror(errno), tempfile, filename); + exit(1); + } + remove(tempfile); + guard_free(tempfile); + } else if (globals.enable_rewrite_spec_stage_2 && stage == DELIVERY_REWRITE_SPEC_STAGE_2) { + // Replace "local" channel with the staging URL + if (ctx->storage.conda_staging_url) { + file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_staging_url, 0); + } else if (globals.jfrog.repo) { + sprintf(output, "%s/%s/%s/%s/packages/conda", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name); + file_replace_text(filename, "@CONDA_CHANNEL@", output, 0); + } else { + msg(STASIS_MSG_WARN, "conda_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.conda_artifact_dir); + file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_artifact_dir, 0); + } + + if (ctx->storage.wheel_staging_url) { + file_replace_text(filename, "@PIP_ARGUMENTS@", ctx->storage.wheel_staging_url, 0); + } else if (globals.enable_artifactory && globals.jfrog.url && globals.jfrog.repo) { + sprintf(output, "--extra-index-url %s/%s/%s/%s/packages/wheels", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name); + file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0); + } else { + msg(STASIS_MSG_WARN, "wheel_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.wheel_artifact_dir); + sprintf(output, "--extra-index-url file://%s", ctx->storage.wheel_artifact_dir); + file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0); + } + } +} + +int delivery_copy_conda_artifacts(struct Delivery *ctx) { + char cmd[STASIS_BUFSIZ]; + char conda_build_dir[PATH_MAX]; + char subdir[PATH_MAX]; + memset(cmd, 0, sizeof(cmd)); + memset(conda_build_dir, 0, sizeof(conda_build_dir)); + memset(subdir, 0, sizeof(subdir)); + + sprintf(conda_build_dir, "%s/%s", ctx->storage.conda_install_prefix, "conda-bld"); + // One must run conda build at least once to create the "conda-bld" directory. + // When this directory is missing there can be no build artifacts. 
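[note] Stage 1 of delivery_rewrite_spec() above copies the environment spec through a temporary file, injecting the @CONDA_CHANNEL@ placeholder after "channels:" and stripping the machine-local "prefix:" key. A stream-based sketch of that filter; the real code buffers the file with file_readlines() and swaps the temp file back with copy2():

#include <stdio.h>
#include <string.h>

/* Sketch: filter a YAML spec line by line, as in stage 1 above. */
static int rewrite_spec(FILE *in, FILE *out) {
    char line[4096];
    while (fgets(line, sizeof(line), in)) {
        if (!strncmp(line, "prefix:", 7)) {
            continue; /* never publish a machine-local prefix */
        }
        fputs(line, out);
        if (!strncmp(line, "channels:", 9)) {
            fputs("  - @CONDA_CHANNEL@\n", out);
        }
    }
    return ferror(in) ? -1 : 0;
}

Stage 2 later replaces the placeholder with the staging URL, the Artifactory URL, or the local artifact directory, in that order of preference.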
+ if (access(conda_build_dir, F_OK) < 0) { + msg(STASIS_MSG_RESTRICT | STASIS_MSG_WARN | STASIS_MSG_L3, + "Skipped: 'conda build' has never been executed.\n"); + return 0; + } + + snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/%s %s", + conda_build_dir, + ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], + ctx->storage.conda_artifact_dir); + + return system(cmd); +} + +int delivery_index_conda_artifacts(struct Delivery *ctx) { + return conda_index(ctx->storage.conda_artifact_dir); +} + +int delivery_copy_wheel_artifacts(struct Delivery *ctx) { + char cmd[PATH_MAX]; + memset(cmd, 0, sizeof(cmd)); + snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/*/dist/*.whl %s", + ctx->storage.build_sources_dir, + ctx->storage.wheel_artifact_dir); + return system(cmd); +} + +int delivery_index_wheel_artifacts(struct Delivery *ctx) { + struct dirent *rec; + DIR *dp; + FILE *top_fp; + + dp = opendir(ctx->storage.wheel_artifact_dir); + if (!dp) { + return -1; + } + + // Generate a "dumb" local pypi index that is compatible with: + // pip install --extra-index-url + char top_index[PATH_MAX]; + memset(top_index, 0, sizeof(top_index)); + sprintf(top_index, "%s/index.html", ctx->storage.wheel_artifact_dir); + top_fp = fopen(top_index, "w+"); + if (!top_fp) { + closedir(dp); + return -2; + } + + while ((rec = readdir(dp)) != NULL) { + // skip directories + if (DT_REG == rec->d_type || !strcmp(rec->d_name, "..") || !strcmp(rec->d_name, ".")) { + continue; + } + + FILE *bottom_fp; + char bottom_index[PATH_MAX * 2]; + memset(bottom_index, 0, sizeof(bottom_index)); + sprintf(bottom_index, "%s/%s/index.html", ctx->storage.wheel_artifact_dir, rec->d_name); + bottom_fp = fopen(bottom_index, "w+"); + if (!bottom_fp) { + closedir(dp); + return -3; + } + + if (globals.verbose) { + printf("+ %s\n", rec->d_name); + } + // Add record to top level index + fprintf(top_fp, "<a href=\"%s/\">%s</a><br/>\n", rec->d_name, rec->d_name); + + char dpath[PATH_MAX * 2]; + memset(dpath, 0, sizeof(dpath)); + sprintf(dpath, "%s/%s", ctx->storage.wheel_artifact_dir, rec->d_name); + struct StrList *packages = listdir(dpath); + if (!packages) { + closedir(dp); + fclose(top_fp); + fclose(bottom_fp); + return -4; + } + + for (size_t i = 0; i < strlist_count(packages); i++) { + char *package = strlist_item(packages, i); + if (!endswith(package, ".whl")) { + continue; + } + if (globals.verbose) { + printf("`- %s\n", package); + } + // Write record to bottom level index + fprintf(bottom_fp, "<a href=\"%s\">%s</a><br/>\n", package, package); + } + fclose(bottom_fp); + + guard_strlist_free(&packages); + } + closedir(dp); + fclose(top_fp); + return 0; +} diff --git a/src/delivery_show.c b/src/delivery_show.c new file mode 100644 index 0000000..adfa1be --- /dev/null +++ b/src/delivery_show.c @@ -0,0 +1,117 @@ +#include "delivery.h" + +void delivery_debug_show(struct Delivery *ctx) { + printf("\n====DEBUG====\n"); + printf("%-20s %-10s\n", "System configuration directory:", globals.sysconfdir); + printf("%-20s %-10s\n", "Mission directory:", ctx->storage.mission_dir); + printf("%-20s %-10s\n", "Testing enabled:", globals.enable_testing ? "Yes" : "No"); + printf("%-20s %-10s\n", "Docker image builds enabled:", globals.enable_docker ? "Yes" : "No"); + printf("%-20s %-10s\n", "Artifact uploading enabled:", globals.enable_artifactory ? 
"Yes" : "No"); +} + +void delivery_meta_show(struct Delivery *ctx) { + if (globals.verbose) { + delivery_debug_show(ctx); + } + + printf("\n====DELIVERY====\n"); + printf("%-20s %-10s\n", "Target Python:", ctx->meta.python); + printf("%-20s %-10s\n", "Name:", ctx->meta.name); + printf("%-20s %-10s\n", "Mission:", ctx->meta.mission); + if (ctx->meta.codename) { + printf("%-20s %-10s\n", "Codename:", ctx->meta.codename); + } + if (ctx->meta.version) { + printf("%-20s %-10s\n", "Version", ctx->meta.version); + } + if (!ctx->meta.final) { + printf("%-20s %-10d\n", "RC Level:", ctx->meta.rc); + } + printf("%-20s %-10s\n", "Final Release:", ctx->meta.final ? "Yes" : "No"); + printf("%-20s %-10s\n", "Based On:", ctx->meta.based_on ? ctx->meta.based_on : "New"); +} + +void delivery_conda_show(struct Delivery *ctx) { + printf("\n====CONDA====\n"); + printf("%-20s %-10s\n", "Prefix:", ctx->storage.conda_install_prefix); + + puts("Native Packages:"); + if (strlist_count(ctx->conda.conda_packages) || strlist_count(ctx->conda.conda_packages_defer)) { + struct StrList *list_conda = strlist_init(); + if (strlist_count(ctx->conda.conda_packages)) { + strlist_append_strlist(list_conda, ctx->conda.conda_packages); + } + if (strlist_count(ctx->conda.conda_packages_defer)) { + strlist_append_strlist(list_conda, ctx->conda.conda_packages_defer); + } + strlist_sort(list_conda, STASIS_SORT_ALPHA); + + for (size_t i = 0; i < strlist_count(list_conda); i++) { + char *token = strlist_item(list_conda, i); + if (isempty(token) || isblank(*token) || startswith(token, "-")) { + continue; + } + printf("%21s%s\n", "", token); + } + guard_strlist_free(&list_conda); + } else { + printf("%21s%s\n", "", "N/A"); + } + + puts("Python Packages:"); + if (strlist_count(ctx->conda.pip_packages) || strlist_count(ctx->conda.pip_packages_defer)) { + struct StrList *list_python = strlist_init(); + if (strlist_count(ctx->conda.pip_packages)) { + strlist_append_strlist(list_python, ctx->conda.pip_packages); + } + if (strlist_count(ctx->conda.pip_packages_defer)) { + strlist_append_strlist(list_python, ctx->conda.pip_packages_defer); + } + strlist_sort(list_python, STASIS_SORT_ALPHA); + + for (size_t i = 0; i < strlist_count(list_python); i++) { + char *token = strlist_item(list_python, i); + if (isempty(token) || isblank(*token) || startswith(token, "-")) { + continue; + } + printf("%21s%s\n", "", token); + } + guard_strlist_free(&list_python); + } else { + printf("%21s%s\n", "", "N/A"); + } +} + +void delivery_tests_show(struct Delivery *ctx) { + printf("\n====TESTS====\n"); + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + if (!ctx->tests[i].name) { + continue; + } + printf("%-20s %-20s %s\n", ctx->tests[i].name, + ctx->tests[i].version, + ctx->tests[i].repository); + } +} + +void delivery_runtime_show(struct Delivery *ctx) { + printf("\n====RUNTIME====\n"); + struct StrList *rt = NULL; + rt = strlist_copy(ctx->runtime.environ); + if (!rt) { + // no data + return; + } + strlist_sort(rt, STASIS_SORT_ALPHA); + size_t total = strlist_count(rt); + for (size_t i = 0; i < total; i++) { + char *item = strlist_item(rt, i); + if (!item) { + // not supposed to occur + msg(STASIS_MSG_WARN | STASIS_MSG_L1, "Encountered unexpected NULL at record %zu of %zu of runtime array.\n", i); + return; + } + printf("%s\n", item); + } +} + diff --git a/src/delivery_test.c b/src/delivery_test.c new file mode 100644 index 0000000..8ff08cc --- /dev/null +++ b/src/delivery_test.c @@ -0,0 +1,297 @@ +#include "delivery.h" + +void 
delivery_tests_run(struct Delivery *ctx) { + static const int SETUP = 0; + static const int PARALLEL = 1; + static const int SERIAL = 2; + struct MultiProcessingPool *pool[3]; + struct Process proc; + memset(&proc, 0, sizeof(proc)); + + if (!globals.workaround.conda_reactivate) { + globals.workaround.conda_reactivate = calloc(PATH_MAX, sizeof(*globals.workaround.conda_reactivate)); + } else { + memset(globals.workaround.conda_reactivate, 0, PATH_MAX); + } + // Test blocks always run with xtrace enabled. Disable, and reenable it. Conda's wrappers produce an incredible + // amount of debug information. + snprintf(globals.workaround.conda_reactivate, PATH_MAX - 1, "\nset +x; mamba activate ${CONDA_DEFAULT_ENV}; set -x\n"); + + if (!ctx->tests[0].name) { + msg(STASIS_MSG_WARN | STASIS_MSG_L2, "no tests are defined!\n"); + } else { + pool[PARALLEL] = mp_pool_init("parallel", ctx->storage.tmpdir); + if (!pool[PARALLEL]) { + perror("mp_pool_init/parallel"); + exit(1); + } + pool[PARALLEL]->status_interval = globals.pool_status_interval; + + pool[SERIAL] = mp_pool_init("serial", ctx->storage.tmpdir); + if (!pool[SERIAL]) { + perror("mp_pool_init/serial"); + exit(1); + } + pool[SERIAL]->status_interval = globals.pool_status_interval; + + pool[SETUP] = mp_pool_init("setup", ctx->storage.tmpdir); + if (!pool[SETUP]) { + perror("mp_pool_init/setup"); + exit(1); + } + pool[SETUP]->status_interval = globals.pool_status_interval; + + // Test block scripts shall exit non-zero on error. + // This will fail a test block immediately if "string" is not found in file.txt: + // grep string file.txt + // + // And this is how to avoid that scenario: + // #1: + // if ! grep string file.txt; then + // # handle error + // fi + // + // #2: + // grep string file.txt || handle error + // + // #3: + // # Use ':' as a NO-OP if/when the result doesn't matter + // grep string file.txt || : + const char *runner_cmd_fmt = "set -e -x\n%s\n"; + + // Iterate over our test records, retrieving the source code for each package, and assigning its scripted tasks + // to the appropriate processing pool + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + struct Test *test = &ctx->tests[i]; + if (!test->name && !test->repository && !test->script) { + // skip unused test records + continue; + } + msg(STASIS_MSG_L2, "Loading tests for %s %s\n", test->name, test->version); + if (!test->script || !strlen(test->script)) { + msg(STASIS_MSG_WARN | STASIS_MSG_L3, "Nothing to do. 
To fix, declare a 'script' in section: [test:%s]\n", + test->name); + continue; + } + + char destdir[PATH_MAX]; + sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(test->repository)); + + if (!access(destdir, F_OK)) { + msg(STASIS_MSG_L3, "Purging repository %s\n", destdir); + if (rmtree(destdir)) { + COE_CHECK_ABORT(1, "Unable to remove repository\n"); + } + } + msg(STASIS_MSG_L3, "Cloning repository %s\n", test->repository); + if (!git_clone(&proc, test->repository, destdir, test->version)) { + test->repository_info_tag = strdup(git_describe(destdir)); + test->repository_info_ref = strdup(git_rev_parse(destdir, "HEAD")); + } else { + COE_CHECK_ABORT(1, "Unable to clone repository\n"); + } + + if (test->repository_remove_tags && strlist_count(test->repository_remove_tags)) { + filter_repo_tags(destdir, test->repository_remove_tags); + } + + if (pushd(destdir)) { + COE_CHECK_ABORT(1, "Unable to enter repository directory\n"); + } else { + char *cmd = calloc(strlen(test->script) + STASIS_BUFSIZ, sizeof(*cmd)); + if (!cmd) { + SYSERROR("Unable to allocate test script buffer: %s", strerror(errno)); + exit(1); + } + + msg(STASIS_MSG_L3, "Queuing task for %s\n", test->name); + memset(&proc, 0, sizeof(proc)); + + strcpy(cmd, test->script); + char *cmd_rendered = tpl_render(cmd); + if (cmd_rendered) { + if (strcmp(cmd_rendered, cmd) != 0) { + strcpy(cmd, cmd_rendered); + cmd[strlen(cmd_rendered) ? strlen(cmd_rendered) - 1 : 0] = 0; + } + guard_free(cmd_rendered); + } else { + SYSERROR("An error occurred while rendering the following:\n%s", cmd); + exit(1); + } + + if (test->disable) { + msg(STASIS_MSG_L2, "Script execution disabled by configuration\n", test->name); + guard_free(cmd); + continue; + } + + char *runner_cmd = NULL; + char pool_name[100] = "parallel"; + struct MultiProcessingTask *task = NULL; + int selected = PARALLEL; + if (!globals.enable_parallel || !test->parallel) { + selected = SERIAL; + memset(pool_name, 0, sizeof(pool_name)); + strcpy(pool_name, "serial"); + } + + if (asprintf(&runner_cmd, runner_cmd_fmt, cmd) < 0) { + SYSERROR("Unable to allocate memory for runner command: %s", strerror(errno)); + exit(1); + } + task = mp_pool_task(pool[selected], test->name, destdir, runner_cmd); + if (!task) { + SYSERROR("Failed to add task to %s pool: %s", pool_name, runner_cmd); + popd(); + if (!globals.continue_on_error) { + guard_free(runner_cmd); + tpl_free(); + delivery_free(ctx); + globals_free(); + } + exit(1); + } + guard_free(runner_cmd); + guard_free(cmd); + popd(); + + } + } + + // Configure "script_setup" tasks + // Directories should exist now, so no need to go through initializing everything all over again. + for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + struct Test *test = &ctx->tests[i]; + if (test->script_setup) { + char destdir[PATH_MAX]; + sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(test->repository)); + if (access(destdir, F_OK)) { + SYSERROR("%s: %s", destdir, strerror(errno)); + exit(1); + } + if (!pushd(destdir)) { + const size_t cmd_len = strlen(test->script_setup) + STASIS_BUFSIZ; + char *cmd = calloc(cmd_len, sizeof(*cmd)); + if (!cmd) { + SYSERROR("Unable to allocate test script_setup buffer: %s", strerror(errno)); + exit(1); + } + + strncpy(cmd, test->script_setup, cmd_len - 1); + char *cmd_rendered = tpl_render(cmd); + if (cmd_rendered) { + if (strcmp(cmd_rendered, cmd) != 0) { + strncpy(cmd, cmd_rendered, cmd_len - 1); + cmd[strlen(cmd_rendered) ? 
strlen(cmd_rendered) - 1 : 0] = 0; + } + guard_free(cmd_rendered); + } else { + SYSERROR("An error occurred while rendering the following:\n%s", cmd); + exit(1); + } + + struct MultiProcessingTask *task = NULL; + char *runner_cmd = NULL; + if (asprintf(&runner_cmd, runner_cmd_fmt, cmd) < 0) { + SYSERROR("Unable to allocate memory for runner command: %s", strerror(errno)); + exit(1); + } + + task = mp_pool_task(pool[SETUP], test->name, destdir, runner_cmd); + if (!task) { + SYSERROR("Failed to add task %s to setup pool: %s", test->name, runner_cmd); + popd(); + if (!globals.continue_on_error) { + guard_free(runner_cmd); + tpl_free(); + delivery_free(ctx); + globals_free(); + } + exit(1); + } + guard_free(runner_cmd); + guard_free(cmd); + popd(); + } else { + SYSERROR("Failed to change directory: %s\n", destdir); + exit(1); + } + } + } + + size_t opt_flags = 0; + if (globals.parallel_fail_fast) { + opt_flags |= MP_POOL_FAIL_FAST; + } + + // Execute all queued tasks + for (size_t p = 0; p < sizeof(pool) / sizeof(*pool); p++) { + int pool_status; + long jobs = globals.cpu_limit; + + if (!pool[p]->num_used) { + // Skip empty pool + continue; + } + + // Setup tasks run sequentially + if (p == (size_t) SETUP) { + jobs = 1; + } + + // Run tasks in the pool + // 1. Setup (builds) + // 2. Parallel (fast jobs) + // 3. Serial (long jobs) + pool_status = mp_pool_join(pool[p], jobs, opt_flags); + + // On error show a summary of the current pool, and die + if (pool_status != 0) { + mp_pool_show_summary(pool[p]); + COE_CHECK_ABORT(true, "Task failure"); + } + } + + // All tasks were successful + for (size_t p = 0; p < sizeof(pool) / sizeof(*pool); p++) { + if (pool[p]->num_used) { + // Only show pools that actually had jobs to run + mp_pool_show_summary(pool[p]); + } + mp_pool_free(&pool[p]); + } + } +} + +int delivery_fixup_test_results(struct Delivery *ctx) { + struct dirent *rec; + DIR *dp; + + dp = opendir(ctx->storage.results_dir); + if (!dp) { + perror(ctx->storage.results_dir); + return -1; + } + + while ((rec = readdir(dp)) != NULL) { + char path[PATH_MAX]; + memset(path, 0, sizeof(path)); + + if (!strcmp(rec->d_name, ".") || !strcmp(rec->d_name, "..")) { + continue; + } else if (!endswith(rec->d_name, ".xml")) { + continue; + } + + sprintf(path, "%s/%s", ctx->storage.results_dir, rec->d_name); + msg(STASIS_MSG_L3, "%s\n", rec->d_name); + if (xml_pretty_print_in_place(path, STASIS_XML_PRETTY_PRINT_PROG, STASIS_XML_PRETTY_PRINT_ARGS)) { + msg(STASIS_MSG_L3 | STASIS_MSG_WARN, "Failed to rewrite file '%s'\n", rec->d_name); + } + } + + closedir(dp); + return 0; +} + diff --git a/src/docker.c b/src/docker.c index da7c1ce..2d5e2cc 100644 --- a/src/docker.c +++ b/src/docker.c @@ -44,8 +44,9 @@ int docker_script(const char *image, char *data, unsigned flags) { do { memset(buffer, 0, sizeof(buffer)); - fgets(buffer, sizeof(buffer) - 1, infile); - fputs(buffer, outfile); + if (fgets(buffer, sizeof(buffer) - 1, infile) != NULL) { + fputs(buffer, outfile); + } } while (!feof(infile)); fclose(infile); diff --git a/src/environment.c b/src/environment.c index 924fbf8..580062c 100644 --- a/src/environment.c +++ b/src/environment.c @@ -305,7 +305,7 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) { // Handle literal statement "$$var" // Value becomes "$var" (unexpanded) if (strncmp(&input[i], delim_literal, strlen(delim_literal)) == 0) { - strncat(expanded, &delim, 1); + strncat(expanded, &delim, 2); i += strlen(delim_literal); // Ignore opening brace if (input[i] == '{') { @@ -349,7 +349,7 @@ char 
*runtime_expand_var(RuntimeEnv *env, char *input) { continue; } // Append expanded environment variable to output - strncat(expanded, tmp, strlen(tmp)); + strncat(expanded, tmp, STASIS_BUFSIZ - 1); if (env) { guard_free(tmp); } diff --git a/src/globals.c b/src/globals.c index 1e27959..1b682cb 100644 --- a/src/globals.c +++ b/src/globals.c @@ -25,19 +25,22 @@ const char *BANNER = "Association of Universities for Research in Astronomy (AURA)\n"; struct STASIS_GLOBAL globals = { - .verbose = false, - .continue_on_error = false, - .always_update_base_environment = false, - .conda_fresh_start = true, - .conda_install_prefix = NULL, - .conda_packages = NULL, - .pip_packages = NULL, - .tmpdir = NULL, - .enable_docker = true, - .enable_artifactory = true, - .enable_artifactory_build_info = true, - .enable_testing = true, - .enable_rewrite_spec_stage_2 = true, + .verbose = false, ///< Toggle verbose mode + .continue_on_error = false, ///< Do not stop program on error + .always_update_base_environment = false, ///< Run "conda update --all" after installing Conda + .conda_fresh_start = true, ///< Remove/reinstall Conda at startup + .conda_install_prefix = NULL, ///< Path to install Conda + .conda_packages = NULL, ///< Conda packages to install + .pip_packages = NULL, ///< Python packages to install + .tmpdir = NULL, ///< Path to store temporary data + .enable_docker = true, ///< Toggle docker usage + .enable_artifactory = true, ///< Toggle artifactory server usage + .enable_artifactory_build_info = true, ///< Toggle build-info uploads + .enable_testing = true, ///< Toggle [test] block "script" execution. "script_setup" always executes. + .enable_rewrite_spec_stage_2 = true, ///< Leave template stings in output files + .enable_parallel = true, ///< Toggle testing in parallel + .parallel_fail_fast = false, ///< Kill ALL multiprocessing tasks immediately on error + .pool_status_interval = 30, ///< Report "Task is running" }; void globals_free() { @@ -55,7 +58,6 @@ void globals_free() { guard_free(globals.jfrog.jfrog_artifactory_base_url); guard_free(globals.jfrog.jfrog_artifactory_product); guard_free(globals.jfrog.remote_filename); - guard_free(globals.workaround.tox_posargs); guard_free(globals.workaround.conda_reactivate); if (globals.envctl) { envctl_free(&globals.envctl); diff --git a/src/multiprocessing.c b/src/multiprocessing.c new file mode 100644 index 0000000..2a1b350 --- /dev/null +++ b/src/multiprocessing.c @@ -0,0 +1,448 @@ +#include "core.h" + +/// The sum of all tasks started by mp_task() +size_t mp_global_task_count = 0; + +static struct MultiProcessingTask *mp_pool_next_available(struct MultiProcessingPool *pool) { + return &pool->task[pool->num_used]; +} + +int child(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) { + FILE *fp_log = NULL; + + // The task starts inside the requested working directory + if (chdir(task->working_dir)) { + perror(task->working_dir); + exit(1); + } + + // Record the task start time + if (clock_gettime(CLOCK_REALTIME, &task->time_data.t_start) < 0) { + perror("clock_gettime"); + exit(1); + } + + // Redirect stdout and stderr to the log file + fflush(stdout); + fflush(stderr); + // Set log file name + sprintf(task->log_file + strlen(task->log_file), "task-%zu-%d.log", mp_global_task_count, task->parent_pid); + fp_log = freopen(task->log_file, "w+", stdout); + if (!fp_log) { + fprintf(stderr, "unable to open '%s' for writing: %s\n", task->log_file, strerror(errno)); + return -1; + } + dup2(fileno(stdout), fileno(stderr)); + + // Generate 
timestamp for log header + time_t t = time(NULL); + char *timebuf = ctime(&t); + if (timebuf) { + // strip line feed from timestamp + timebuf[strlen(timebuf) ? strlen(timebuf) - 1 : 0] = 0; + } + + // Generate log header + fprintf(fp_log, "# STARTED: %s\n", timebuf ? timebuf : "unknown"); + fprintf(fp_log, "# PID: %d\n", task->parent_pid); + fprintf(fp_log, "# WORKDIR: %s\n", task->working_dir); + fprintf(fp_log, "# COMMAND:\n%s\n", task->cmd); + fprintf(fp_log, "# OUTPUT:\n"); + // Commit header to log file / clean up + fflush(fp_log); + + // Execute task + fflush(stdout); + fflush(stderr); + char *args[] = {"bash", "--norc", task->parent_script, (char *) NULL}; + return execvp("/bin/bash", args); +} + +int parent(struct MultiProcessingPool *pool, struct MultiProcessingTask *task, pid_t pid, int *child_status) { + printf("[%s:%s] Task started (pid: %d)\n", pool->ident, task->ident, pid); + + // Give the child process access to our PID value + task->pid = pid; + task->parent_pid = pid; + + mp_global_task_count++; + + // Check child's status + pid_t code = waitpid(pid, child_status, WUNTRACED | WCONTINUED | WNOHANG); + if (code < 0) { + perror("waitpid failed"); + return -1; + } + return 0; +} + +static int mp_task_fork(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) { + pid_t pid = fork(); + int child_status = 0; + if (pid == -1) { + return -1; + } else if (pid == 0) { + child(pool, task); + } + return parent(pool, task, pid, &child_status); +} + +struct MultiProcessingTask *mp_pool_task(struct MultiProcessingPool *pool, const char *ident, char *working_dir, char *cmd) { + struct MultiProcessingTask *slot = mp_pool_next_available(pool); + if (pool->num_used != pool->num_alloc) { + pool->num_used++; + } else { + fprintf(stderr, "Maximum number of tasks reached\n"); + return NULL; + } + + // Set default status to "error" + slot->status = -1; + + // Set task identifier string + memset(slot->ident, 0, sizeof(slot->ident)); + strncpy(slot->ident, ident, sizeof(slot->ident) - 1); + + // Set log file path + memset(slot->log_file, 0, sizeof(*slot->log_file)); + strcat(slot->log_file, pool->log_root); + strcat(slot->log_file, "/"); + + // Set working directory + if (isempty(working_dir)) { + strcpy(slot->working_dir, "."); + } else { + strncpy(slot->working_dir, working_dir, PATH_MAX - 1); + } + + // Create a temporary file to act as our intermediate command script + FILE *tp = NULL; + char *t_name = NULL; + t_name = xmkstemp(&tp, "w"); + if (!t_name || !tp) { + return NULL; + } + + // Set the script's permissions so that only the calling user can use it + // This should help prevent eavesdropping if keys are applied in plain-text + // somewhere. + chmod(t_name, 0700); + + // Record the script path + memset(slot->parent_script, 0, sizeof(slot->parent_script)); + strncpy(slot->parent_script, t_name, PATH_MAX - 1); + guard_free(t_name); + + // Populate the script + fprintf(tp, "#!/bin/bash\n%s\n", cmd); + fflush(tp); + fclose(tp); + + // Record the command(s) + slot->cmd_len = (strlen(cmd) * sizeof(*cmd)) + 1; + slot->cmd = mmap(NULL, slot->cmd_len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); + memset(slot->cmd, 0, slot->cmd_len); + strncpy(slot->cmd, cmd, slot->cmd_len); + + return slot; +} + +static void get_task_duration(struct MultiProcessingTask *task, struct timespec *result) { + // based on the timersub() macro in time.h + // This implementation uses timespec and increases the resolution from microseconds to nanoseconds. 
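[note] The task's command buffer above is deliberately not heap-allocated: the pool is mutated on both sides of fork(), so it lives in a MAP_SHARED|MAP_ANONYMOUS mapping. A minimal demonstration of why shared anonymous memory is required (not STASIS code):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
    int *status = mmap(NULL, sizeof(*status), PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (status == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    *status = -1;               /* same "default to error" convention as the pool */
    pid_t pid = fork();
    if (pid == 0) {             /* child: record a result the parent can see */
        *status = 42;
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("child wrote: %d\n", *status); /* prints 42; a heap write would not survive */
    munmap(status, sizeof(*status));
    return 0;
}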
+ struct timespec *start = &task->time_data.t_start; + struct timespec *stop = &task->time_data.t_stop; + result->tv_sec = (stop->tv_sec - start->tv_sec); + result->tv_nsec = (stop->tv_nsec - start->tv_nsec); + if (result->tv_nsec < 0) { + --result->tv_sec; + result->tv_nsec += 1000000000L; + } +} + +void mp_pool_show_summary(struct MultiProcessingPool *pool) { + print_banner("=", 79); + printf("Pool execution summary for \"%s\"\n", pool->ident); + print_banner("=", 79); + printf("STATUS PID DURATION IDENT\n"); + for (size_t i = 0; i < pool->num_used; i++) { + struct MultiProcessingTask *task = &pool->task[i]; + char status_str[10] = {0}; + if (!task->status && !task->signaled_by) { + strcpy(status_str, "DONE"); + } else if (task->signaled_by) { + strcpy(status_str, "TERM"); + } else { + strcpy(status_str, "FAIL"); + } + + struct timespec duration; + get_task_duration(task, &duration); + long diff = duration.tv_sec + duration.tv_nsec / 1000000000L; + printf("%-4s %10d %7lds %-10s\n", status_str, task->parent_pid, diff, task->ident) ; + } + puts(""); +} + +static int show_log_contents(FILE *stream, struct MultiProcessingTask *task) { + FILE *fp = fopen(task->log_file, "r"); + if (!fp) { + return -1; + } + char buf[BUFSIZ] = {0}; + while ((fgets(buf, sizeof(buf) - 1, fp)) != NULL) { + fprintf(stream, "%s", buf); + memset(buf, 0, sizeof(buf)); + } + fprintf(stream, "\n"); + fclose(fp); + return 0; +} + +int mp_pool_kill(struct MultiProcessingPool *pool, int signum) { + printf("Sending signal %d to pool '%s'\n", signum, pool->ident); + for (size_t i = 0; i < pool->num_used; i++) { + struct MultiProcessingTask *slot = &pool->task[i]; + if (!slot) { + return -1; + } + // Kill tasks in progress + if (slot->pid > 0) { + int status; + printf("Sending signal %d to task '%s' (pid: %d)\n", signum, slot->ident, slot->pid); + status = kill(slot->pid, signum); + if (status && errno != ESRCH) { + fprintf(stderr, "Task '%s' (pid: %d) did not respond: %s\n", slot->ident, slot->pid, strerror(errno)); + } else { + // Wait for process to handle the signal, then set the status accordingly + if (waitpid(slot->pid, &status, 0) >= 0) { + slot->signaled_by = WTERMSIG(status); + // Record the task stop time + if (clock_gettime(CLOCK_REALTIME, &slot->time_data.t_stop) < 0) { + perror("clock_gettime"); + exit(1); + } + // We are short-circuiting the normal flow, and the process is now dead, so mark it as such + slot->pid = MP_POOL_PID_UNUSED; + } + } + } + if (!access(slot->log_file, F_OK)) { + remove(slot->log_file); + } + if (!access(slot->parent_script, F_OK)) { + remove(slot->parent_script); + } + } + return 0; +} + +int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) { + int status = 0; + int failures = 0; + size_t tasks_complete = 0; + size_t lower_i = 0; + size_t upper_i = jobs; + + do { + size_t hang_check = 0; + if (upper_i >= pool->num_used) { + size_t x = upper_i - pool->num_used; + upper_i -= (size_t) x; + } + + for (size_t i = lower_i; i < upper_i; i++) { + struct MultiProcessingTask *slot = &pool->task[i]; + if (slot->status == -1) { + if (mp_task_fork(pool, slot)) { + fprintf(stderr, "%s: mp_task_fork failed\n", slot->ident); + kill(0, SIGTERM); + } + } + + // Has the child been processed already? + if (slot->pid == MP_POOL_PID_UNUSED) { + // Child is already used up, skip it + hang_check++; + if (hang_check >= pool->num_used) { + // If you join a pool that's already finished it will spin + // forever. This protects the program from entering an + // infinite loop. 
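[note] The join loop being assembled here runs tasks through a sliding [lower_i, upper_i) window so that at most `jobs` processes exist at once, polling with waitpid(WNOHANG) and advancing the window only when it has drained. A toy version of that scheduler, with sleeps standing in for the forked bash scripts:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

#define NTASKS 5

int main(void) {
    const size_t jobs = 2;
    pid_t pids[NTASKS] = {0};
    size_t done = 0, lower = 0, upper = jobs;

    while (done < NTASKS) {
        if (upper > NTASKS) {
            upper = NTASKS;        /* clamp the window like mp_pool_join() */
        }
        for (size_t i = lower; i < upper; i++) {
            if (pids[i] == 0) {    /* not started yet: fork it */
                if ((pids[i] = fork()) == 0) {
                    sleep(1);
                    _exit(0);
                }
            } else if (pids[i] > 0) {
                int status;
                if (waitpid(pids[i], &status, WNOHANG) == pids[i]) {
                    printf("task %zu finished\n", i);
                    pids[i] = -1;  /* mark consumed (MP_POOL_PID_UNUSED analog) */
                    done++;
                }
            }
        }
        if (done == upper) {       /* window fully drained: advance it */
            lower += jobs;
            upper += jobs;
        }
        sleep(1);                  /* poll interval */
    }
    return 0;
}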
+                    fprintf(stderr, "%s is deadlocked\n", pool->ident);
+                    failures++;
+                    goto pool_deadlocked;
+                }
+                continue;
+            }
+
+            // Is the process finished?
+            pid_t pid = waitpid(slot->pid, &status, WNOHANG | WUNTRACED | WCONTINUED);
+            int task_ended = WIFEXITED(status);
+            int task_ended_by_signal = WIFSIGNALED(status);
+            int task_stopped = WIFSTOPPED(status);
+            int task_continued = WIFCONTINUED(status);
+            int status_exit = WEXITSTATUS(status);
+            int status_signal = WTERMSIG(status);
+            int status_stopped = WSTOPSIG(status);
+
+            // Update status
+            slot->status = status_exit;
+            slot->signaled_by = status_signal;
+
+            char progress[1024] = {0};
+            if (pid > 0) {
+                double percent = ((double) (tasks_complete + 1) / (double) pool->num_used) * 100;
+                snprintf(progress, sizeof(progress) - 1, "[%s:%s] [%3.1f%%]", pool->ident, slot->ident, percent);
+
+                // The process ended in one of the following ways
+                // Note: neither SIGSTOP nor SIGCONT increments the tasks_complete counter
+                if (task_stopped) {
+                    printf("%s Task was suspended (%d)\n", progress, status_stopped);
+                    continue;
+                } else if (task_continued) {
+                    printf("%s Task was resumed\n", progress);
+                    continue;
+                } else if (task_ended_by_signal) {
+                    printf("%s Task ended by signal %d (%s)\n", progress, status_signal, strsignal(status_signal));
+                    tasks_complete++;
+                } else if (task_ended) {
+                    printf("%s Task ended (status: %d)\n", progress, status_exit);
+                    tasks_complete++;
+                } else {
+                    fprintf(stderr, "%s Task state is unknown (0x%04X)\n", progress, status);
+                }
+
+                // Show the log (always)
+                if (show_log_contents(stdout, slot)) {
+                    perror(slot->log_file);
+                }
+
+                // Record the task stop time
+                if (clock_gettime(CLOCK_REALTIME, &slot->time_data.t_stop) < 0) {
+                    perror("clock_gettime");
+                    exit(1);
+                }
+
+                if (status >> 8 != 0 || (status & 0xff) != 0) {
+                    fprintf(stderr, "%s Task failed\n", progress);
+                    failures++;
+
+                    if (flags & MP_POOL_FAIL_FAST && pool->num_used > 1) {
+                        mp_pool_kill(pool, SIGTERM);
+                        return -2;
+                    }
+                } else {
+                    printf("%s Task finished\n", progress);
+                }
+
+                // Clean up logs and scripts left behind by the task
+                if (remove(slot->log_file)) {
+                    fprintf(stderr, "%s Unable to remove log file: '%s': %s\n", progress, slot->log_file, strerror(errno));
+                }
+                if (remove(slot->parent_script)) {
+                    fprintf(stderr, "%s Unable to remove temporary script '%s': %s\n", progress, slot->parent_script, strerror(errno));
+                }
+
+                // Update progress and tell the poller to ignore the PID. The process is gone.
+                slot->pid = MP_POOL_PID_UNUSED;
+            } else if (pid < 0) {
+                fprintf(stderr, "waitpid failed: %s\n", strerror(errno));
+                return -1;
+            } else {
+                // Track the number of seconds elapsed for each task.
+                // When a task has executed for longer than status_interval, print a status update
+                // _seconds represents the time between intervals, not the total runtime of the task
+                slot->_seconds = time(NULL) - slot->_now;
+                if (slot->_seconds > pool->status_interval) {
+                    slot->_now = time(NULL);
+                    slot->_seconds = 0;
+                }
+                if (slot->_seconds == 0) {
+                    printf("[%s:%s] Task is running (pid: %d)\n", pool->ident, slot->ident, slot->parent_pid);
+                }
+            }
+        }
+
+        if (tasks_complete == pool->num_used) {
+            break;
+        }
+
+        if (tasks_complete == upper_i) {
+            lower_i += jobs;
+            upper_i += jobs;
+        }
+
+        // Poll again after a short delay
+        sleep(1);
+    } while (1);
+
+    pool_deadlocked:
+    puts("");
+    return failures;
+}
+
+
+struct MultiProcessingPool *mp_pool_init(const char *ident, const char *log_root) {
+    struct MultiProcessingPool *pool;
+
+    if (!ident || !log_root) {
+        // Pool must have an ident string
+        // log_root must be set
+        return NULL;
+    }
+
+    // The pool is shared with children
+    pool = mmap(NULL, sizeof(*pool), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    if (pool == MAP_FAILED) {
+        perror("mmap");
+        return NULL;
+    }
+
+    // Set pool identity string
+    memset(pool->ident, 0, sizeof(pool->ident));
+    strncpy(pool->ident, ident, sizeof(pool->ident) - 1);
+
+    // Set logging base directory
+    memset(pool->log_root, 0, sizeof(pool->log_root));
+    strncpy(pool->log_root, log_root, sizeof(pool->log_root) - 1);
+    pool->num_used = 0;
+    pool->num_alloc = MP_POOL_TASK_MAX;
+
+    // Create the log directory
+    if (mkdirs(log_root, 0700) < 0) {
+        if (errno != EEXIST) {
+            perror(log_root);
+            mp_pool_free(&pool);
+            return NULL;
+        }
+    }
+
+    // Task array is shared with children
+    pool->task = mmap(NULL, (pool->num_alloc + 1) * sizeof(*pool->task), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    if (pool->task == MAP_FAILED) {
+        perror("mmap");
+        mp_pool_free(&pool);
+        return NULL;
+    }
+
+    return pool;
+}
+
+void mp_pool_free(struct MultiProcessingPool **pool) {
+    if (!pool || !(*pool)) {
+        return;
+    }
+    // Unmap all pool tasks
+    if ((*pool)->task) {
+        if ((*pool)->task->cmd) {
+            if (munmap((*pool)->task->cmd, (*pool)->task->cmd_len) < 0) {
+                perror("munmap");
+            }
+        }
+        // Length must match the mapping created in mp_pool_init (num_alloc + 1 records)
+        if (munmap((*pool)->task, sizeof(*(*pool)->task) * ((*pool)->num_alloc + 1)) < 0) {
+            perror("munmap");
+        }
+    }
+    // Unmap the pool itself
+    if (munmap((*pool), sizeof(*(*pool))) < 0) {
+        perror("munmap");
+    }
+    (*pool) = NULL;
+}
\ No newline at end of file
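A minimal usage sketch of the pool API added by this patch, modeled on the calls exercised in tests/test_multiprocessing.c further down. The header name "multiprocessing.h" is an assumption (the patch does not show where these declarations live); everything else matches the code above.

// Run three shell commands concurrently and report the results.
#include "multiprocessing.h"

int run_example(void) {
    struct MultiProcessingPool *pool = mp_pool_init("example", "mplogs");
    if (!pool) {
        return -1;
    }
    char *commands[] = {"true", "uname -a", "/bin/echo hello world"};
    for (size_t i = 0; i < sizeof(commands) / sizeof(*commands); i++) {
        char name[32] = {0};
        snprintf(name, sizeof(name) - 1, "task%zu", i);
        if (!mp_pool_task(pool, name, NULL, commands[i])) {
            mp_pool_free(&pool);
            return -1;
        }
    }
    // Join with up to get_cpu_count() concurrent processes; 0 = no flags
    int failures = mp_pool_join(pool, get_cpu_count(), 0);
    mp_pool_show_summary(pool);
    mp_pool_free(&pool);
    return failures;
}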
diff --git a/src/recipe.c b/src/recipe.c
index e51fde6..833908c 100644
--- a/src/recipe.c
+++ b/src/recipe.c
@@ -16,7 +16,7 @@ int recipe_clone(char *recipe_dir, char *url, char *gitref, char **result) {
             return -1;
         }
     }
-    strncpy(*result, destdir, PATH_MAX - 1);
+    strncpy(*result, destdir, PATH_MAX);
 
     if (!access(destdir, F_OK)) {
         if (!strcmp(destdir, "/")) {
diff --git a/src/stasis_indexer.c b/src/stasis_indexer.c
index ef6375b..b38c8d0 100644
--- a/src/stasis_indexer.c
+++ b/src/stasis_indexer.c
@@ -72,9 +72,9 @@ int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t roo
         if (!access(srcdir_with_output, F_OK)) {
             srcdir = srcdir_with_output;
         }
-        sprintf(cmd + strlen(cmd), "'%s'/ ", srcdir);
+        snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(cmd) - 1, "'%s'/ ", srcdir);
     }
-    sprintf(cmd + strlen(cmd), "%s/", destdir);
+    snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(cmd) - 1, " %s/", destdir);
 
     if (globals.verbose) {
         puts(cmd);
@@ -368,6 +368,8 @@ int indexer_make_website(struct Delivery *ctx) {
     // This might be negative when killed by a signal.
     // Otherwise, the return code is not critical to us.
     if (system(cmd) < 0) {
+        guard_free(css_filename);
+        guard_strlist_free(&dirs);
         return 1;
     }
     if (file_replace_text(fullpath_dest, ".md", ".html", 0)) {
@@ -381,11 +383,14 @@ int indexer_make_website(struct Delivery *ctx) {
             char link_dest[PATH_MAX] = {0};
             strcpy(link_from, "README.html");
             sprintf(link_dest, "%s/%s", root, "index.html");
-            symlink(link_from, link_dest);
+            if (symlink(link_from, link_dest)) {
+                SYSERROR("Warning: symlink(%s, %s) failed: %s", link_from, link_dest, strerror(errno));
+            }
         }
     }
     guard_strlist_free(&inputs);
 }
+guard_free(css_filename);
 guard_strlist_free(&dirs);
 
 return 0;
diff --git a/src/stasis_main.c b/src/stasis_main.c
index 7ea465c..b5d43e3 100644
--- a/src/stasis_main.c
+++ b/src/stasis_main.c
@@ -12,20 +12,28 @@
 #define OPT_NO_TESTING 1004
 #define OPT_OVERWRITE 1005
 #define OPT_NO_REWRITE_SPEC_STAGE_2 1006
+#define OPT_FAIL_FAST 1007
+#define OPT_NO_PARALLEL 1008
+#define OPT_POOL_STATUS_INTERVAL 1009
+
 static struct option long_options[] = {
         {"help", no_argument, 0, 'h'},
         {"version", no_argument, 0, 'V'},
         {"continue-on-error", no_argument, 0, 'C'},
         {"config", required_argument, 0, 'c'},
+        {"cpu-limit", required_argument, 0, 'l'},
+        {"pool-status-interval", required_argument, 0, OPT_POOL_STATUS_INTERVAL},
         {"python", required_argument, 0, 'p'},
         {"verbose", no_argument, 0, 'v'},
         {"unbuffered", no_argument, 0, 'U'},
         {"update-base", no_argument, 0, OPT_ALWAYS_UPDATE_BASE},
+        {"fail-fast", no_argument, 0, OPT_FAIL_FAST},
         {"overwrite", no_argument, 0, OPT_OVERWRITE},
         {"no-docker", no_argument, 0, OPT_NO_DOCKER},
         {"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY},
         {"no-artifactory-build-info", no_argument, 0, OPT_NO_ARTIFACTORY_BUILD_INFO},
         {"no-testing", no_argument, 0, OPT_NO_TESTING},
+        {"no-parallel", no_argument, 0, OPT_NO_PARALLEL},
         {"no-rewrite", no_argument, 0, OPT_NO_REWRITE_SPEC_STAGE_2},
         {0, 0, 0, 0},
 };
@@ -35,15 +43,19 @@ const char *long_options_help[] = {
         "Display program version",
         "Allow tests to fail",
         "Read configuration file",
+        "Number of processes to spawn concurrently (default: cpus - 1)",
+        "Report task status every n seconds (default: 30)",
         "Override version of Python in configuration",
         "Increase output verbosity",
         "Disable line buffering",
         "Update conda installation prior to STASIS environment creation",
+        "On error, immediately terminate all tasks",
         "Overwrite an existing release",
         "Do not build docker images",
        "Do not upload artifacts to Artifactory",
         "Do not upload build info objects to Artifactory",
         "Do not execute test scripts",
+        "Do not execute tests in parallel",
         "Do not rewrite paths and URLs in output files",
         NULL,
 };
@@ -214,6 +226,10 @@ int main(int argc, char *argv[]) {
     char installer_url[PATH_MAX];
     char python_override_version[STASIS_NAME_MAX];
     int user_disabled_docker = false;
+    globals.cpu_limit = get_cpu_count();
+    if (globals.cpu_limit > 1) {
+        globals.cpu_limit--; // max - 1
+    }
 
     memset(env_name, 0, sizeof(env_name));
     memset(env_name_testing, 0, sizeof(env_name_testing));
@@ -241,9 +257,29 @@ int main(int argc, char *argv[]) {
             case 'p':
                 strcpy(python_override_version, optarg);
                 break;
+            case 'l':
+                globals.cpu_limit = strtol(optarg, NULL, 10);
+                if (globals.cpu_limit <= 1) {
+                    globals.cpu_limit = 1;
+                    globals.enable_parallel = false; // No point
+                }
+                break;
             case OPT_ALWAYS_UPDATE_BASE:
                 globals.always_update_base_environment = true;
                 break;
+            case OPT_FAIL_FAST:
+                globals.parallel_fail_fast = true;
+                break;
+            case OPT_POOL_STATUS_INTERVAL:
+                globals.pool_status_interval = (int) strtol(optarg, NULL, 10);
+                if (globals.pool_status_interval < 1) {
+                    globals.pool_status_interval = 1;
+                } else if (globals.pool_status_interval > 60 * 10) {
+                    // Possible poor choice alert
+                    fprintf(stderr, "Caution: Excessive pausing between status updates may cause third-party CI/CD"
+                                    " jobs to fail if the stdout/stderr streams are idle for too long!\n");
+                }
+                break;
             case 'U':
                 setenv("PYTHONUNBUFFERED", "1", 1);
                 fflush(stdout);
@@ -273,6 +309,9 @@ int main(int argc, char *argv[]) {
             case OPT_NO_REWRITE_SPEC_STAGE_2:
                 globals.enable_rewrite_spec_stage_2 = false;
                 break;
+            case OPT_NO_PARALLEL:
+                globals.enable_parallel = false;
+                break;
             case '?':
             default:
                 exit(1);
@@ -327,7 +366,6 @@ int main(int argc, char *argv[]) {
     tpl_register("deploy.jfrog.repo", &globals.jfrog.repo);
     tpl_register("deploy.jfrog.url", &globals.jfrog.url);
     tpl_register("deploy.docker.registry", &ctx.deploy.docker.registry);
-    tpl_register("workaround.tox_posargs", &globals.workaround.tox_posargs);
     tpl_register("workaround.conda_reactivate", &globals.workaround.conda_reactivate);
 
     // Expose function(s) to the template engine
@@ -336,6 +374,7 @@ int main(int argc, char *argv[]) {
     tpl_register_func("get_github_release_notes_auto", &get_github_release_notes_auto_tplfunc_entrypoint, 1, &ctx);
     tpl_register_func("junitxml_file", &get_junitxml_file_entrypoint, 1, &ctx);
     tpl_register_func("basetemp_dir", &get_basetemp_dir_entrypoint, 1, &ctx);
+    tpl_register_func("tox_run", &tox_run_entrypoint, 2, &ctx);
 
     // Set up PREFIX/etc directory information
     // The user may manipulate the base directory path with STASIS_SYSCONFDIR
@@ -423,9 +462,9 @@ int main(int argc, char *argv[]) {
     }
 
     msg(STASIS_MSG_L1, "Conda setup\n");
-    delivery_get_installer_url(&ctx, installer_url);
+    delivery_get_conda_installer_url(&ctx, installer_url);
     msg(STASIS_MSG_L2, "Downloading: %s\n", installer_url);
-    if (delivery_get_installer(&ctx, installer_url)) {
+    if (delivery_get_conda_installer(&ctx, installer_url)) {
         msg(STASIS_MSG_ERROR, "download failed: %s\n", installer_url);
         exit(1);
     }
diff --git a/src/str.c b/src/str.c
--- a/src/str.c
+++ b/src/str.c
@@ -223,21 +223,35 @@ char *join_ex(char *separator, ...)
 {
 }
 
 char *substring_between(char *sptr, const char *delims) {
+    char delim_open[255] = {0};
+    char delim_close[255] = {0};
     if (sptr == NULL || delims == NULL) {
         return NULL;
     }
 
     // Ensure we have enough delimiters to continue
     size_t delim_count = strlen(delims);
-    if (delim_count < 2 || delim_count % 2) {
+    if (delim_count < 2 || delim_count % 2 || delim_count > sizeof(delim_open) - 1) {
         return NULL;
     }
+    size_t delim_take = delim_count / 2;
 
-    char delim_open[255] = {0};
-    strncpy(delim_open, delims, delim_count / 2);
+    // Copy the first and last delim_take characters manually; strncpy with a
+    // source-derived bound trips:
+    // warning: ‘__builtin___strncpy_chk’ specified bound depends on the length of the source argument
+    //strncpy(delim_open, delims, delim_take);
+    size_t i = 0;
+    while (i < delim_take && i < sizeof(delim_open)) {
+        delim_open[i] = delims[i];
+        i++;
+    }
 
-    char delim_close[255] = {0};
-    strcpy(delim_close, &delims[delim_count / 2]);
+    //strncpy(delim_close, &delims[delim_take], delim_take);
+    i = 0;
+    while (i < delim_take && i < sizeof(delim_close)) {
+        delim_close[i] = delims[i + delim_take];
+        i++;
+    }
 
     // Create pointers to the delimiters
     char *start = strstr(sptr, delim_open);
diff --git a/src/strlist.c b/src/strlist.c
index de76744..7a045f1 100644
--- a/src/strlist.c
+++ b/src/strlist.c
@@ -331,7 +331,7 @@ void strlist_set(struct StrList **pStrList, size_t index, char *value) {
         }
 
         memset((*pStrList)->data[index], '\0', strlen(value) + 1);
-        strncpy((*pStrList)->data[index], value, strlen(value));
+        strcpy((*pStrList)->data[index], value);
     }
 }
diff --git a/src/system.c b/src/system.c
index a564769..4e605ec 100644
--- a/src/system.c
+++ b/src/system.c
@@ -46,11 +46,19 @@ int shell(struct Process *proc, char *args) {
 
         if (strlen(proc->f_stdout)) {
             fp_out = freopen(proc->f_stdout, "w+", stdout);
+            if (!fp_out) {
+                fprintf(stderr, "Unable to redirect stdout to %s: %s\n", proc->f_stdout, strerror(errno));
+                exit(1);
+            }
         }
 
         if (strlen(proc->f_stderr)) {
             if (!proc->redirect_stderr) {
                 fp_err = freopen(proc->f_stderr, "w+", stderr);
+                if (!fp_err) {
+                    fprintf(stderr, "Unable to redirect stderr to %s: %s\n", proc->f_stderr, strerror(errno));
+                    exit(1);
+                }
             }
         }
 
@@ -59,7 +67,10 @@ int shell(struct Process *proc, char *args) {
             fclose(fp_err);
             fclose(stderr);
         }
-        dup2(fileno(stdout), fileno(stderr));
+        if (dup2(fileno(stdout), fileno(stderr)) < 0) {
+            fprintf(stderr, "Unable to redirect stderr to stdout: %s\n", strerror(errno));
+            exit(1);
+        }
     }
 
     return execl("/bin/bash", "bash", "--norc", t_name, (char *) NULL);
diff --git a/src/template_func_proto.c b/src/template_func_proto.c
index 3cf66e4..9c325bb 100644
--- a/src/template_func_proto.c
+++ b/src/template_func_proto.c
@@ -74,7 +74,10 @@ int get_junitxml_file_entrypoint(void *frame, void *data_out) {
     const struct Delivery *ctx = (const struct Delivery *) f->data_in;
 
     char cwd[PATH_MAX] = {0};
-    getcwd(cwd, PATH_MAX - 1);
+    if (!getcwd(cwd, PATH_MAX - 1)) {
+        SYSERROR("unable to determine current working directory: %s", strerror(errno));
+        return -1;
+    }
     char nametmp[PATH_MAX] = {0};
     strcpy(nametmp, cwd);
     char *name = path_basename(nametmp);
@@ -96,7 +99,10 @@ int get_basetemp_dir_entrypoint(void *frame, void *data_out) {
     const struct Delivery *ctx = (const struct Delivery *) f->data_in;
 
     char cwd[PATH_MAX] = {0};
-    getcwd(cwd, PATH_MAX - 1);
+    if (!getcwd(cwd, PATH_MAX - 1)) {
+        SYSERROR("unable to determine current working directory: %s", strerror(errno));
+        return -1;
+    }
     char nametmp[PATH_MAX] = {0};
     strcpy(nametmp, cwd);
     char *name = path_basename(nametmp);
@@ -109,4 +115,48 @@ int get_basetemp_dir_entrypoint(void *frame, void *data_out) {
     sprintf(*output, "%s/truth-%s-%s", ctx->storage.tmpdir, name, ctx->info.release_name);
 
     return result;
+}
+
+int tox_run_entrypoint(void *frame, void *data_out) {
+    char **output = (char **) data_out;
+    struct tplfunc_frame *f = (struct tplfunc_frame *) frame;
+    const struct Delivery *ctx = (const struct Delivery *) f->data_in;
+
+    // Apply workaround for tox positional arguments
+    char *toxconf = NULL;
+    if (!access("tox.ini", F_OK)) {
+        if (!fix_tox_conf("tox.ini", &toxconf)) {
+            msg(STASIS_MSG_L3, "Fixing tox positional arguments\n");
+            *output = calloc(STASIS_BUFSIZ, sizeof(**output));
+            if (!*output) {
+                return -1;
+            }
+            char *basetemp_path = NULL;
+            if (get_basetemp_dir_entrypoint(f, &basetemp_path)) {
+                guard_free(*output);
+                return -2;
+            }
+            char *jxml_path = NULL;
+            if (get_junitxml_file_entrypoint(f, &jxml_path)) {
+                guard_free(basetemp_path);
+                guard_free(*output);
+                return -3;
+            }
+            const char *tox_target = f->argv[0].t_char_ptr;
+            const char *pytest_args = f->argv[1].t_char_ptr;
+            if (isempty(toxconf) || !strcmp(toxconf, "/")) {
+                SYSERROR("Unsafe toxconf path: '%s'", toxconf);
+                guard_free(basetemp_path);
+                guard_free(jxml_path);
+                guard_free(*output);
+                return -4;
+            }
+            snprintf(*output, STASIS_BUFSIZ - 1, "\npip install tox && (tox -e py%s%s -c %s --root . -- --basetemp=\"%s\" --junitxml=\"%s\" %s ; rm -f '%s')\n", ctx->meta.python_compact, tox_target, toxconf, basetemp_path, jxml_path, pytest_args ? pytest_args : "", toxconf);
+
+            guard_free(jxml_path);
+            guard_free(basetemp_path);
+        }
+    }
+    guard_free(toxconf);
+    return 0;
 }
\ No newline at end of file
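For reference, with meta.python_compact set to "310", an empty tox target, and no extra pytest arguments, the snprintf above assembles a shell command of roughly this shape. The angle-bracket placeholders stand for the values computed above; the temporary config path written by fix_tox_conf is not shown in this patch, so it is left as a placeholder too:

pip install tox && (tox -e py310 -c <toxconf> --root . -- --basetemp="<basetemp_dir>" --junitxml="<junitxml_file>"  ; rm -f '<toxconf>')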
diff --git a/src/utils.c b/src/utils.c
index c0b3733..e037088 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -34,7 +34,7 @@ int popd() {
 int rmtree(char *_path) {
     int status = 0;
     char path[PATH_MAX] = {0};
-    strncpy(path, _path, sizeof(path));
+    strncpy(path, _path, sizeof(path) - 1);
     DIR *dir;
     struct dirent *d_entity;
 
@@ -122,10 +122,10 @@ char *expandpath(const char *_path) {
     }
 
     // Construct the new path
-    strncat(result, home, PATH_MAX - 1);
+    strncat(result, home, sizeof(result) - strlen(result) - 1);
     if (sep) {
-        strncat(result, DIR_SEP, PATH_MAX - 1);
-        strncat(result, ptmp, PATH_MAX - 1);
+        strncat(result, DIR_SEP, sizeof(result) - strlen(result) - 1);
+        strncat(result, ptmp, sizeof(result) - strlen(result) - 1);
     }
 
     return strdup(result);
@@ -315,7 +315,7 @@ int git_clone(struct Process *proc, char *url, char *destdir, char *gitref) {
     }
 
     static char command[PATH_MAX];
-    sprintf(command, "%s clone --recursive %s", program, url);
+    sprintf(command, "%s clone -c advice.detachedHead=false --recursive %s", program, url);
     if (destdir && access(destdir, F_OK) < 0) {
         sprintf(command + strlen(command), " %s", destdir);
         result = shell(proc, command);
@@ -444,7 +444,10 @@ void msg(unsigned type, char *fmt, ...) {
 
 void debug_shell() {
     msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "ENTERING STASIS DEBUG SHELL\n" STASIS_COLOR_RESET);
-    system("/bin/bash -c 'PS1=\"(STASIS DEBUG) \\W $ \" bash --norc --noprofile'");
+    if (system("/bin/bash -c 'PS1=\"(STASIS DEBUG) \\W $ \" bash --norc --noprofile'") < 0) {
+        SYSERROR("unable to spawn debug shell: %s", strerror(errno));
+        exit(errno);
+    }
     msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "EXITING STASIS DEBUG SHELL\n" STASIS_COLOR_RESET);
     exit(255);
 }
@@ -465,12 +468,23 @@ char *xmkstemp(FILE **fp, const char *mode) {
     fd = mkstemp(t_name);
     *fp = fdopen(fd, mode);
     if (!*fp) {
+        // unable to open, die
         if (fd > 0)
             close(fd);
         *fp = NULL;
         return NULL;
     }
 
+    char *path = strdup(t_name);
+    if (!path) {
+        // strdup failed, die
+        if (*fp) {
+            // close the file handle
+            fclose(*fp);
+            *fp = NULL;
+        }
+        // fall through. path is NULL.
+    }
     return path;
 }
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 62f58a9..f4380e0 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -5,8 +5,8 @@ include_directories(
 find_program(BASH_PROGRAM bash)
 set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/tests)
 set(CTEST_BINARY_DIRECTORY ${PROJECT_BINARY_DIR}/tests)
-set(nix_gnu_cflags -Wno-error -Wno-unused-parameter -Wno-discarded-qualifiers)
-set(nix_clang_cflags -Wno-unused-parameter -Wno-incompatible-pointer-types-discards-qualifiers)
+set(nix_gnu_cflags -Wno-format-truncation -Wno-error -Wno-unused-parameter -Wno-unused-result -Wno-discarded-qualifiers)
+set(nix_clang_cflags -Wno-format-truncation -Wno-unused-parameter -Wno-unused-result -Wno-incompatible-pointer-types-discards-qualifiers)
 set(win_msvc_cflags /Wall)
 
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/data/generic.ini ${CMAKE_CURRENT_BINARY_DIR} COPYONLY)
diff --git a/tests/data/generic.ini b/tests/data/generic.ini
index c1e5c9c..7d77edd 100644
--- a/tests/data/generic.ini
+++ b/tests/data/generic.ini
@@ -17,6 +17,7 @@ installer_baseurl = https://github.com/conda-forge/miniforge/releases/download/{
 ;conda_packages =
 pip_packages =
     firewatch==0.0.4
+    tweakwcs==0.8.8
 
 [runtime]
@@ -25,8 +26,21 @@ PYTHONUNBUFFERED = 1
 
 [test:firewatch]
 repository = https://github.com/astroconda/firewatch
-script =
+script_setup =
+    pip install -e '.'
+script =
+    firewatch -c conda-forge -p ${STASIS_CONDA_PLATFORM_SUBDIR} | grep -E ' python-[0-9]'
+
+
+[test:tweakwcs]
+repository = https://github.com/spacetelescope/tweakwcs
+script_setup =
+    pip install -e '.[test]'
+script =
+    pytest \
+        -r fEsx \
+        --basetemp="{{ func:basetemp_dir() }}" \
+        --junitxml="{{ func:junitxml_file() }}"
 
 [deploy:artifactory:delivery]
diff --git a/tests/rt_generic.sh b/tests/rt_generic.sh
index 6da953d..6e4454c 100644
--- a/tests/rt_generic.sh
+++ b/tests/rt_generic.sh
@@ -6,10 +6,16 @@ if [ -n "$GITHUB_TOKEN" ] && [ -z "$STASIS_GH_TOKEN" ]; then
 else
     export STASIS_GH_TOKEN="anonymous"
 fi
+python_versions=(
+    3.10
+    3.11
+    3.12
+)
 
 topdir=$(pwd)
 ws="rt_workspace"
+rm -rf "$ws"
 mkdir -p "$ws"
 ws="$(realpath $ws)"
 
@@ -28,9 +34,12 @@ popd
 pushd "$ws"
     type -P stasis
     type -P stasis_indexer
+
     retcode=0
-    stasis --no-docker --no-artifactory --unbuffered -v "$topdir"/generic.ini
-    retcode=$?
+    for py_version in "${python_versions[@]}"; do
+        stasis --python "$py_version" --no-docker --no-artifactory --unbuffered -v "$topdir"/generic.ini
+        retcode=$((retcode + $?))
+    done
 
     set +x
 
@@ -54,7 +63,7 @@ pushd "$ws"
         for cond in "${fail_on_main[@]}"; do
             if grep --color -H -n "$cond" "$x" >&2; then
                 echo "ERROR DETECTED IN $x!" >&2
-                retcode=2
+                retcode=$((retcode + 1))
             fi
         done
     done
@@ -94,6 +103,8 @@ pushd "$ws"
 done
 popd
 
-rm -rf "$ws"
+if [ -z "$RT_KEEP_WORKSPACE" ]; then
+    rm -rf "$ws"
+fi
 
 exit $retcode
\ No newline at end of file
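A usage note, not part of the patch: with the guard above, the regression-test workspace survives the run for post-mortem inspection when the new RT_KEEP_WORKSPACE variable is set. Assuming the script is invoked from the repository checkout, that looks like:

RT_KEEP_WORKSPACE=1 bash tests/rt_generic.sh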
diff --git a/tests/test_conda.c b/tests/test_conda.c
index 72217fc..fc762e9 100644
--- a/tests/test_conda.c
+++ b/tests/test_conda.c
@@ -38,8 +38,8 @@ struct Delivery ctx;
 void test_conda_installation() {
     char *install_url = calloc(255, sizeof(install_url));
-    delivery_get_installer_url(&ctx, install_url);
-    delivery_get_installer(&ctx, install_url);
+    delivery_get_conda_installer_url(&ctx, install_url);
+    delivery_get_conda_installer(&ctx, install_url);
     delivery_install_conda(ctx.conda.installer_path, ctx.storage.conda_install_prefix);
     STASIS_ASSERT_FATAL(access(ctx.storage.conda_install_prefix, F_OK) == 0, "conda was not installed correctly");
     STASIS_ASSERT_FATAL(conda_activate(ctx.storage.conda_install_prefix, "base") == 0, "unable to activate base environment");
diff --git a/tests/test_ini.c b/tests/test_ini.c
index 2579e21..e4a7808 100644
--- a/tests/test_ini.c
+++ b/tests/test_ini.c
@@ -86,11 +86,13 @@ void test_ini_setval_getval() {
     STASIS_ASSERT(ini_getval(ini, "default", "a", INIVAL_TYPE_STR, render_mode, &val) == 0, "failed to get value");
     STASIS_ASSERT(strcmp(val.as_char_p, "a") != 0, "unexpected value loaded from modified variable");
     STASIS_ASSERT(strcmp(val.as_char_p, "changed") == 0, "unexpected value loaded from modified variable");
+    guard_free(val.as_char_p);
 
     STASIS_ASSERT(ini_setval(&ini, INI_SETVAL_APPEND, "default", "a", " twice") == 0, "failed to set value");
     STASIS_ASSERT(ini_getval(ini, "default", "a", INIVAL_TYPE_STR, render_mode, &val) == 0, "failed to get value");
     STASIS_ASSERT(strcmp(val.as_char_p, "changed") != 0, "unexpected value loaded from modified variable");
     STASIS_ASSERT(strcmp(val.as_char_p, "changed twice") == 0, "unexpected value loaded from modified variable");
+    guard_free(val.as_char_p);
 
     ini_free(&ini);
     remove(filename);
 }
diff --git a/tests/test_multiprocessing.c b/tests/test_multiprocessing.c
new file mode 100644
index 0000000..cd037d0
--- /dev/null
+++ b/tests/test_multiprocessing.c
@@ -0,0 +1,123 @@
+#include "testing.h"
+
+static struct MultiProcessingPool *pool;
+char *commands[] = {
+    "true",
+    "uname -a",
+    "/bin/echo hello world",
+};
+
+void test_mp_pool_init() {
+    STASIS_ASSERT((pool = mp_pool_init("mypool", "mplogs")) != NULL, "Pool initialization failed");
+    STASIS_ASSERT_FATAL(pool != NULL, "Should not be NULL");
+    STASIS_ASSERT(pool->num_alloc == MP_POOL_TASK_MAX, "Wrong number of default records");
+    STASIS_ASSERT(pool->num_used == 0, "Wrong number of used records");
+    STASIS_ASSERT(strcmp(pool->log_root, "mplogs") == 0, "Wrong log root directory");
+    STASIS_ASSERT(strcmp(pool->ident, "mypool") == 0, "Wrong identity");
+
+    int data_bad_total = 0;
+    for (size_t i = 0; i < pool->num_alloc; i++) {
+        int data_bad = 0;
+        struct MultiProcessingTask *task = &pool->task[i];
+
+        data_bad += task->status == 0 ? 0 : 1;
+        data_bad += task->pid == 0 ? 0 : 1;
+        data_bad += task->parent_pid == 0 ? 0 : 1;
+        data_bad += task->signaled_by == 0 ? 0 : 1;
+        data_bad += task->time_data.t_start.tv_nsec == 0 ? 0 : 1;
+        data_bad += task->time_data.t_start.tv_sec == 0 ? 0 : 1;
+        data_bad += task->time_data.t_stop.tv_nsec == 0 ? 0 : 1;
+        data_bad += task->time_data.t_stop.tv_sec == 0 ? 0 : 1;
+        data_bad += (int) strlen(task->ident) == 0 ? 0 : 1;
+        data_bad += (int) strlen(task->parent_script) == 0 ? 0 : 1;
+        if (data_bad) {
+            SYSERROR("%s.task[%zu] has garbage values!", pool->ident, i);
+            SYSERROR("    ident: %s", task->ident);
+            SYSERROR("    status: %d", task->status);
+            SYSERROR("    pid: %d", task->pid);
+            SYSERROR("    parent_pid: %d", task->parent_pid);
+            SYSERROR("    signaled_by: %d", task->signaled_by);
+            SYSERROR("    t_start.tv_nsec: %ld", task->time_data.t_start.tv_nsec);
+            SYSERROR("    t_start.tv_sec: %ld", task->time_data.t_start.tv_sec);
+            SYSERROR("    t_stop.tv_nsec: %ld", task->time_data.t_stop.tv_nsec);
+            SYSERROR("    t_stop.tv_sec: %ld", task->time_data.t_stop.tv_sec);
+            data_bad_total++;
+        }
+    }
+    STASIS_ASSERT(data_bad_total == 0, "Task array is not pristine");
+    mp_pool_free(&pool);
+}
+
+void test_mp_task() {
+    pool = mp_pool_init("mypool", "mplogs");
+
+    if (pool) {
+        for (size_t i = 0; i < sizeof(commands) / sizeof(*commands); i++) {
+            struct MultiProcessingTask *task;
+            char task_name[100] = {0};
+            sprintf(task_name, "mytask%zu", i);
+            STASIS_ASSERT_FATAL((task = mp_pool_task(pool, task_name, NULL, commands[i])) != NULL, "Task should not be NULL");
+            STASIS_ASSERT(task->pid == MP_POOL_PID_UNUSED, "PID should be marked MP_POOL_PID_UNUSED at this point");
+            STASIS_ASSERT(task->parent_pid == MP_POOL_PID_UNUSED, "Parent PID should be marked MP_POOL_PID_UNUSED");
+            STASIS_ASSERT(task->status == -1, "Status should be -1 (not started yet)");
+            STASIS_ASSERT(strcmp(task->ident, task_name) == 0, "Wrong task identity");
+            STASIS_ASSERT(strstr(task->log_file, pool->log_root) != NULL, "Log file path must be in log_root");
+        }
+    }
+}
+
+void test_mp_pool_join() {
+    STASIS_ASSERT(mp_pool_join(pool, get_cpu_count(), 0) == 0, "Pool tasks should not have failed");
+    for (size_t i = 0; i < pool->num_used; i++) {
+        struct MultiProcessingTask *task = &pool->task[i];
+        STASIS_ASSERT(task->pid == MP_POOL_PID_UNUSED, "Task should be marked as unused");
+        STASIS_ASSERT(task->status == 0, "Task status should be zero (success)");
+    }
+}
+
+void test_mp_pool_free() {
+    mp_pool_free(&pool);
+    STASIS_ASSERT(pool == NULL, "Should be NULL");
+}
+
+void test_mp_pool_workflow() {
+    struct testcase {
+        const char *input_cmd;
+        int input_join_flags;
+        int expected_result;
+        int expected_status;
+        int expected_signal;
+    };
+    struct testcase tc[] = {
+        {.input_cmd = "true && kill $$", .input_join_flags = 0, .expected_result = 1, .expected_status = 0, .expected_signal = SIGTERM},
+        {.input_cmd = "false || kill $$", .input_join_flags = 0, .expected_result = 1, .expected_status = 0, .expected_signal = SIGTERM},
+        {.input_cmd = "true", .input_join_flags = 0, .expected_result = 0, .expected_status = 0, .expected_signal = 0},
+        {.input_cmd = "false", .input_join_flags = 0, .expected_result = 1, .expected_status = 1, .expected_signal = 0},
+    };
+    for (size_t i = 0; i < sizeof(tc) / sizeof(*tc); i++) {
+        struct testcase *test = &tc[i];
+        struct MultiProcessingPool *p;
+        struct MultiProcessingTask *task;
+        STASIS_ASSERT((p = mp_pool_init("workflow", "mplogs")) != NULL, "Failed to initialize pool");
+        STASIS_ASSERT((task = mp_pool_task(p, "task", NULL, (char *) test->input_cmd)) != NULL, "Failed to queue task");
+        STASIS_ASSERT(mp_pool_join(p, get_cpu_count(), test->input_join_flags) == test->expected_result, "Unexpected result");
+        STASIS_ASSERT(task->status == test->expected_status, "Unexpected status");
+        STASIS_ASSERT(task->signaled_by == test->expected_signal, "Unexpected signal");
+        STASIS_ASSERT(task->pid == MP_POOL_PID_UNUSED, "Unexpected PID. Should be marked UNUSED.");
+        mp_pool_show_summary(p);
+        mp_pool_free(&p);
+    }
+}
+
+int main(int argc, char *argv[]) {
+    STASIS_TEST_BEGIN_MAIN();
+    STASIS_TEST_FUNC *tests[] = {
+        test_mp_pool_init,
+        test_mp_task,
+        test_mp_pool_join,
+        test_mp_pool_free,
+        test_mp_pool_workflow,
+    };
+    STASIS_TEST_RUN(tests);
+    STASIS_TEST_END_MAIN();
+}
\ No newline at end of file
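A sketch, not part of the test suite, of what the fail-fast flag does. It follows directly from mp_pool_join() above: a nonzero wait status with MP_POOL_FAIL_FAST set and more than one queued task triggers mp_pool_kill(pool, SIGTERM) and a -2 return.

// Fail-fast joins tear down the whole pool on the first failure.
void fail_fast_example(void) {
    struct MultiProcessingPool *p = mp_pool_init("failfast", "mplogs");
    if (p) {
        mp_pool_task(p, "long", NULL, "sleep 30");
        mp_pool_task(p, "bad", NULL, "false");
        int rc = mp_pool_join(p, get_cpu_count(), MP_POOL_FAIL_FAST);
        // rc is -2 here; the "long" task receives SIGTERM before it finishes
        mp_pool_show_summary(p);
        mp_pool_free(&p);
    }
}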
diff --git a/tests/test_str.c b/tests/test_str.c
index 85c3b78..f8aa1e9 100644
--- a/tests/test_str.c
+++ b/tests/test_str.c
@@ -79,6 +79,7 @@ void test_strdup_array_and_strcmp_array() {
     for (size_t outer = 0; outer < sizeof(tc) / sizeof(*tc); outer++) {
         char **result = strdup_array((char **) tc[outer].data);
         STASIS_ASSERT(strcmp_array((const char **) result, tc[outer].expected) == 0, "array members were different");
+        GENERIC_ARRAY_FREE(result);
     }
 
     const struct testcase tc_bad[] = {
@@ -94,6 +95,7 @@ void test_strdup_array_and_strcmp_array() {
     for (size_t outer = 0; outer < sizeof(tc_bad) / sizeof(*tc_bad); outer++) {
         char **result = strdup_array((char **) tc_bad[outer].data);
         STASIS_ASSERT(strcmp_array((const char **) result, tc_bad[outer].expected) != 0, "array members were identical");
+        GENERIC_ARRAY_FREE(result);
     }
 }