diff options
Diffstat (limited to 'src')
43 files changed, 2732 insertions, 599 deletions
diff --git a/src/cli/stasis/args.c b/src/cli/stasis/args.c index f3ce823..172981a 100644 --- a/src/cli/stasis/args.c +++ b/src/cli/stasis/args.c @@ -13,6 +13,7 @@ struct option long_options[] = { {"unbuffered", no_argument, 0, 'U'}, {"update-base", no_argument, 0, OPT_ALWAYS_UPDATE_BASE}, {"fail-fast", no_argument, 0, OPT_FAIL_FAST}, + {"task-timeout", required_argument, 0, OPT_TASK_TIMEOUT}, {"overwrite", no_argument, 0, OPT_OVERWRITE}, {"no-docker", no_argument, 0, OPT_NO_DOCKER}, {"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY}, @@ -20,6 +21,7 @@ struct option long_options[] = { {"no-artifactory-upload", no_argument, 0, OPT_NO_ARTIFACTORY_UPLOAD}, {"no-testing", no_argument, 0, OPT_NO_TESTING}, {"no-parallel", no_argument, 0, OPT_NO_PARALLEL}, + {"no-task-logging", no_argument, 0, OPT_NO_TASK_LOGGING}, {"no-rewrite", no_argument, 0, OPT_NO_REWRITE_SPEC_STAGE_2}, {0, 0, 0, 0}, }; @@ -36,6 +38,7 @@ const char *long_options_help[] = { "Disable line buffering", "Update conda installation prior to STASIS environment creation", "On error, immediately terminate all tasks", + "Terminate task after timeout is reached (#s, #m, #h)", "Overwrite an existing release", "Do not build docker images", "Do not upload artifacts to Artifactory", @@ -43,6 +46,7 @@ const char *long_options_help[] = { "Do not upload artifacts to Artifactory (dry-run)", "Do not execute test scripts", "Do not execute tests in parallel", + "Do not log task output (write to stdout)", "Do not rewrite paths and URLs in output files", NULL, }; diff --git a/src/cli/stasis/include/args.h b/src/cli/stasis/include/args.h index 5bad752..5536735 100644 --- a/src/cli/stasis/include/args.h +++ b/src/cli/stasis/include/args.h @@ -17,6 +17,8 @@ #define OPT_FAIL_FAST 1009 #define OPT_NO_PARALLEL 1010 #define OPT_POOL_STATUS_INTERVAL 1011 +#define OPT_NO_TASK_LOGGING 1012 +#define OPT_TASK_TIMEOUT 1013 extern struct option long_options[]; void usage(char *progname); diff --git a/src/cli/stasis/stasis_main.c 
b/src/cli/stasis/stasis_main.c index 7f0b88a..44ee6d7 100644 --- a/src/cli/stasis/stasis_main.c +++ b/src/cli/stasis/stasis_main.c @@ -10,140 +10,7 @@ #include "system_requirements.h" #include "tpl.h" - -int main(int argc, char *argv[]) { - struct Delivery ctx; - struct Process proc = { - .f_stdout = "", - .f_stderr = "", - .redirect_stderr = 0, - }; - char env_name[STASIS_NAME_MAX] = {0}; - char env_name_testing[STASIS_NAME_MAX] = {0}; - char *delivery_input = NULL; - char *config_input = NULL; - char installer_url[PATH_MAX]; - char python_override_version[STASIS_NAME_MAX]; - int user_disabled_docker = false; - globals.cpu_limit = get_cpu_count(); - if (globals.cpu_limit > 1) { - globals.cpu_limit--; // max - 1 - } - - memset(env_name, 0, sizeof(env_name)); - memset(env_name_testing, 0, sizeof(env_name_testing)); - memset(installer_url, 0, sizeof(installer_url)); - memset(python_override_version, 0, sizeof(python_override_version)); - memset(&proc, 0, sizeof(proc)); - memset(&ctx, 0, sizeof(ctx)); - - int c; - int option_index = 0; - while ((c = getopt_long(argc, argv, "hVCc:p:vU", long_options, &option_index)) != -1) { - switch (c) { - case 'h': - usage(path_basename(argv[0])); - exit(0); - case 'V': - puts(VERSION); - exit(0); - case 'c': - config_input = strdup(optarg); - break; - case 'C': - globals.continue_on_error = true; - break; - case 'p': - strcpy(python_override_version, optarg); - break; - case 'l': - globals.cpu_limit = strtol(optarg, NULL, 10); - if (globals.cpu_limit <= 1) { - globals.cpu_limit = 1; - globals.enable_parallel = false; // No point - } - break; - case OPT_ALWAYS_UPDATE_BASE: - globals.always_update_base_environment = true; - break; - case OPT_FAIL_FAST: - globals.parallel_fail_fast = true; - break; - case OPT_POOL_STATUS_INTERVAL: - globals.pool_status_interval = (int) strtol(optarg, NULL, 10); - if (globals.pool_status_interval < 1) { - globals.pool_status_interval = 1; - } else if (globals.pool_status_interval > 60 * 10) { - // 
Possible poor choice alert - fprintf(stderr, "Caution: Excessive pausing between status updates may cause third-party CI/CD" - " jobs to fail if the stdout/stderr streams are idle for too long!\n"); - } - break; - case 'U': - setenv("PYTHONUNBUFFERED", "1", 1); - fflush(stdout); - fflush(stderr); - setvbuf(stdout, NULL, _IONBF, 0); - setvbuf(stderr, NULL, _IONBF, 0); - break; - case 'v': - globals.verbose = true; - break; - case OPT_OVERWRITE: - globals.enable_overwrite = true; - break; - case OPT_NO_DOCKER: - globals.enable_docker = false; - user_disabled_docker = true; - break; - case OPT_NO_ARTIFACTORY: - globals.enable_artifactory = false; - break; - case OPT_NO_ARTIFACTORY_BUILD_INFO: - globals.enable_artifactory_build_info = false; - break; - case OPT_NO_ARTIFACTORY_UPLOAD: - globals.enable_artifactory_build_info = false; - globals.enable_artifactory_upload = false; - break; - case OPT_NO_TESTING: - globals.enable_testing = false; - break; - case OPT_NO_REWRITE_SPEC_STAGE_2: - globals.enable_rewrite_spec_stage_2 = false; - break; - case OPT_NO_PARALLEL: - globals.enable_parallel = false; - break; - case '?': - default: - exit(1); - } - } - - if (optind < argc) { - while (optind < argc) { - // use first positional argument - delivery_input = argv[optind++]; - break; - } - } - - if (!delivery_input) { - fprintf(stderr, "error: a DELIVERY_FILE is required\n"); - usage(path_basename(argv[0])); - exit(1); - } - - printf(BANNER, VERSION, AUTHOR); - - check_system_path(); - - msg(STASIS_MSG_L1, "Setup\n"); - - tpl_setup_vars(&ctx); - tpl_setup_funcs(&ctx); - +static void setup_sysconfdir() { // Set up PREFIX/etc directory information // The user may manipulate the base directory path with STASIS_SYSCONFDIR // environment variable @@ -159,83 +26,108 @@ int main(int argc, char *argv[]) { msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to resolve path to configuration directory: %s\n", stasis_sysconfdir_tmp); exit(1); } +} +static void 
setup_python_version_override(struct Delivery *ctx, const char *version) { // Override Python version from command-line, if any - if (strlen(python_override_version)) { - guard_free(ctx.meta.python); - ctx.meta.python = strdup(python_override_version); - guard_free(ctx.meta.python_compact); - ctx.meta.python_compact = to_short_version(ctx.meta.python); + if (strlen(version)) { + guard_free(ctx->meta.python); + ctx->meta.python = strdup(version); + if (!ctx->meta.python) { + SYSERROR("%s", "Unable to allocate bytes for python version override"); + } + guard_free(ctx->meta.python_compact); + ctx->meta.python_compact = to_short_version(ctx->meta.python); } +} - if (!config_input) { - // no configuration passed by argument. use basic config. +static void configure_stasis_ini(struct Delivery *ctx, char **config_input) { + if (!*config_input) { + SYSDEBUG("%s", "No configuration passed by argument. Using basic config."); char cfgfile[PATH_MAX * 2]; sprintf(cfgfile, "%s/%s", globals.sysconfdir, "stasis.ini"); + SYSDEBUG("cfgfile: %s", cfgfile); if (!access(cfgfile, F_OK | R_OK)) { - config_input = strdup(cfgfile); + *config_input = strdup(cfgfile); } else { msg(STASIS_MSG_WARN, "STASIS global configuration is not readable, or does not exist: %s", cfgfile); } } - if (config_input) { - msg(STASIS_MSG_L2, "Reading STASIS global configuration: %s\n", config_input); - ctx._stasis_ini_fp.cfg = ini_open(config_input); - if (!ctx._stasis_ini_fp.cfg) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read config file: %s, %s\n", delivery_input, strerror(errno)); - exit(1); - } - ctx._stasis_ini_fp.cfg_path = strdup(config_input); - guard_free(config_input); + msg(STASIS_MSG_L2, "Reading STASIS global configuration: %s\n", *config_input); + ctx->_stasis_ini_fp.cfg = ini_open(*config_input); + if (!ctx->_stasis_ini_fp.cfg) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read config file: %s, %s\n", *config_input, strerror(errno)); + exit(1); + } + ctx->_stasis_ini_fp.cfg_path 
= strdup(*config_input); + if (!ctx->_stasis_ini_fp.cfg_path) { + SYSERROR("%s", "Failed to allocate memory for config file name"); + exit(1); } + guard_free(*config_input); +} - msg(STASIS_MSG_L2, "Reading STASIS delivery configuration: %s\n", delivery_input); - ctx._stasis_ini_fp.delivery = ini_open(delivery_input); - if (!ctx._stasis_ini_fp.delivery) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read delivery file: %s, %s\n", delivery_input, strerror(errno)); +static void configure_delivery_ini(struct Delivery *ctx, char **delivery_input) { + msg(STASIS_MSG_L2, "Reading STASIS delivery configuration: %s\n", *delivery_input); + ctx->_stasis_ini_fp.delivery = ini_open(*delivery_input); + if (!ctx->_stasis_ini_fp.delivery) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read delivery file: %s, %s\n", *delivery_input, strerror(errno)); exit(1); } - ctx._stasis_ini_fp.delivery_path = strdup(delivery_input); + ctx->_stasis_ini_fp.delivery_path = strdup(*delivery_input); +} +static void configure_delivery_context(struct Delivery *ctx) { msg(STASIS_MSG_L2, "Bootstrapping delivery context\n"); - if (bootstrap_build_info(&ctx)) { + if (bootstrap_build_info(ctx)) { msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to bootstrap delivery context\n"); exit(1); } msg(STASIS_MSG_L2, "Initializing delivery context\n"); - if (delivery_init(&ctx, INI_READ_RENDER)) { + if (delivery_init(ctx, INI_READ_RENDER)) { msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to initialize delivery context\n"); exit(1); } - check_requirements(&ctx); +} +static void configure_jfrog_cli(struct Delivery *ctx) { msg(STASIS_MSG_L2, "Configuring JFrog CLI\n"); - if (delivery_init_artifactory(&ctx)) { + if (delivery_init_artifactory(ctx)) { msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "JFrog CLI configuration failed\n"); exit(1); } +} - runtime_apply(ctx.runtime.environ); - strcpy(env_name, ctx.info.release_name); - strcpy(env_name_testing, env_name); - strcat(env_name_testing, "-test"); - +static void 
check_release_history(struct Delivery *ctx) { // Safety gate: Avoid clobbering a delivered release unless the user wants that behavior msg(STASIS_MSG_L1, "Checking release history\n"); - if (!globals.enable_overwrite && delivery_exists(&ctx) == DELIVERY_FOUND) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Refusing to overwrite release: %s\nUse --overwrite to enable release clobbering.\n", ctx.info.release_name); + if (!globals.enable_overwrite && delivery_exists(ctx) == DELIVERY_FOUND) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Refusing to overwrite release: %s\nUse --overwrite to enable release clobbering.\n", ctx->info.release_name); exit(1); } +} + +static void check_conda_install_prefix(const struct Delivery *ctx) { + // Unlikely to occur: this should help prevent rmtree() from destroying your entire filesystem + // if path is "/" then, die + // or if empty string, die + if (!strcmp(ctx->storage.conda_install_prefix, DIR_SEP) || !strlen(ctx->storage.conda_install_prefix)) { + fprintf(stderr, "error: ctx.storage.conda_install_prefix is malformed!\n"); + exit(1); + } +} + +static void sync_release_history(struct Delivery *ctx) { if (globals.enable_artifactory) { // We need to download previous revisions to ensure processed packages are available at build-time // This is also a docker requirement. Python wheels must exist locally. - if (ctx.meta.rc > 1) { - msg(STASIS_MSG_L1, "Syncing delivery artifacts for %s\n", ctx.info.build_name); - if (delivery_series_sync(&ctx) != 0) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to sync artifacts for %s\n", ctx.info.build_name); + if (ctx->meta.rc > 1) { + msg(STASIS_MSG_L1, "Syncing delivery artifacts for %s\n", ctx->info.build_name); + if (delivery_series_sync(ctx) != 0) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to sync artifacts for %s\n", ctx->info.build_name); msg(STASIS_MSG_L3, "Case #1:\n" "\tIf this is a new 'version', and 'rc' is greater " "than 1, then no previous deliveries exist remotely. 
" @@ -248,45 +140,43 @@ int main(int argc, char *argv[]) { } } } +} - // Unlikely to occur: this should help prevent rmtree() from destroying your entire filesystem - // if path is "/" then, die - // or if empty string, die - if (!strcmp(ctx.storage.conda_install_prefix, DIR_SEP) || !strlen(ctx.storage.conda_install_prefix)) { - fprintf(stderr, "error: ctx.storage.conda_install_prefix is malformed!\n"); - exit(1); - } - +static void check_conda_prefix_length(const struct Delivery *ctx) { // 2 = #! // 5 = /bin\n - const size_t prefix_len = strlen(ctx.storage.conda_install_prefix) + 2 + 5; + const size_t prefix_len = strlen(ctx->storage.conda_install_prefix) + 2 + 5; const size_t prefix_len_max = 127; msg(STASIS_MSG_L1, "Checking length of conda installation prefix\n"); - if (!strcmp(ctx.system.platform[DELIVERY_PLATFORM], "Linux") && prefix_len > prefix_len_max) { + if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux") && prefix_len > prefix_len_max) { msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "The shebang, '#!%s/bin/python\\n' is too long (%zu > %zu).\n", - ctx.storage.conda_install_prefix, prefix_len, prefix_len_max); + ctx->storage.conda_install_prefix, prefix_len, prefix_len_max); msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Conda's workaround to handle long path names does not work consistently within STASIS.\n"); msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Please try again from a different, \"shorter\", directory.\n"); exit(1); } +} +static void setup_conda(struct Delivery *ctx, char *installer_url) { msg(STASIS_MSG_L1, "Conda setup\n"); - delivery_get_conda_installer_url(&ctx, installer_url); + delivery_get_conda_installer_url(ctx, installer_url); msg(STASIS_MSG_L2, "Downloading: %s\n", installer_url); - if (delivery_get_conda_installer(&ctx, installer_url)) { + if (delivery_get_conda_installer(ctx, installer_url)) { msg(STASIS_MSG_ERROR, "download failed: %s\n", installer_url); exit(1); } - msg(STASIS_MSG_L2, "Installing: %s\n", ctx.conda.installer_name); - 
delivery_install_conda(ctx.conda.installer_path, ctx.storage.conda_install_prefix); + msg(STASIS_MSG_L2, "Installing: %s\n", ctx->conda.installer_name); + delivery_install_conda(ctx->conda.installer_path, ctx->storage.conda_install_prefix); - msg(STASIS_MSG_L2, "Configuring: %s\n", ctx.storage.conda_install_prefix); - delivery_conda_enable(&ctx, ctx.storage.conda_install_prefix); + msg(STASIS_MSG_L2, "Configuring: %s\n", ctx->storage.conda_install_prefix); + delivery_conda_enable(ctx, ctx->storage.conda_install_prefix); +} +static void configure_conda_base(struct Delivery *ctx, char *envs[]) { // // Implied environment creation modes/actions // @@ -303,268 +193,247 @@ int main(int argc, char *argv[]) { // 3b. Bugs, conflicts, and dependency resolution issues are inherited and // must be handled in the INI config msg(STASIS_MSG_L1, "Creating release environment(s)\n"); - char *mission_base = NULL; - if (isempty(ctx.meta.based_on)) { - guard_free(ctx.meta.based_on); + + if (isempty(ctx->meta.based_on)) { + // based_on was not set by the input file + + guard_free(ctx->meta.based_on); char *mission_base_orig = NULL; - if (asprintf(&mission_base_orig, "%s/%s/base.yml", ctx.storage.mission_dir, ctx.meta.mission) < 0) { - SYSERROR("Unable to allocate bytes for %s/%s/base.yml path\n", ctx.storage.mission_dir, ctx.meta.mission); + if (asprintf(&mission_base_orig, "%s/%s/base.yml", ctx->storage.mission_dir, ctx->meta.mission) < 0) { + SYSERROR("Unable to allocate bytes for %s/%s/base.yml path\n", ctx->storage.mission_dir, ctx->meta.mission); exit(1); } + // Does a base.yml exist in the mission directory? + // If not, do nothing. Otherwise, use the base.yml in the mission directory. 
if (access(mission_base_orig, F_OK) < 0) { - msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Mission does not provide a base.yml configuration: %s (%s)\n", - ctx.meta.mission, ctx.storage.mission_dir); + msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Mission does not provide a base.yml"); } else { msg(STASIS_MSG_L2, "Using base environment configuration: %s\n", mission_base_orig); - if (asprintf(&mission_base, "%s/%s-base.yml", ctx.storage.tmpdir, ctx.info.release_name) < 0) { + if (asprintf(&mission_base, "%s/%s-base.yml", ctx->storage.tmpdir, ctx->info.release_name) < 0) { SYSERROR("%s", "Unable to allocate bytes for temporary base.yml configuration"); remove(mission_base); exit(1); } copy2(mission_base_orig, mission_base, CT_OWNER | CT_PERM); - ctx.meta.based_on = mission_base; + ctx->meta.based_on = mission_base; } guard_free(mission_base_orig); } - if (!isempty(ctx.meta.based_on)) { - if (conda_env_exists(ctx.storage.conda_install_prefix, env_name) && conda_env_remove(env_name)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove release environment: %s\n", env_name); - exit(1); - } + msg(STASIS_MSG_L2, "Based on: %s\n", ctx->meta.based_on); - msg(STASIS_MSG_L2, "Based on: %s\n", ctx.meta.based_on); - if (conda_env_create_from_uri(env_name, ctx.meta.based_on, ctx.meta.python)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install release environment using configuration file\n"); - exit(1); - } + for (size_t i = 0; envs[i] != NULL; i += 2) { + char *title = envs[i]; + char *env = envs[i+1]; + // If based_on was populated above, or defined in the configuration: install its packages. 
+ if (!isempty(ctx->meta.based_on)) { + if (conda_env_exists(ctx->storage.conda_install_prefix, env) && conda_env_remove(env)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove %s environment: %s\n", title); + exit(1); + } - if (conda_env_exists(ctx.storage.conda_install_prefix, env_name_testing) && conda_env_remove(env_name_testing)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove testing environment %s\n", env_name_testing); - exit(1); - } - if (conda_env_create_from_uri(env_name_testing, ctx.meta.based_on, ctx.meta.python)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install testing environment using configuration file\n"); - exit(1); - } - } else { - if (conda_env_create(env_name, ctx.meta.python, NULL)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to create release environment\n"); - exit(1); - } - if (conda_env_create(env_name_testing, ctx.meta.python, NULL)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to create testing environment\n"); - exit(1); + if (conda_env_create_from_uri(env, ctx->meta.based_on, ctx->meta.python)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install %s environment using configuration file\n", title); + exit(1); + } + } else { + // Otherwise, create the environments with the requested Python version and move on + if (conda_env_create(env, ctx->meta.python, NULL)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to create %s environment\n", title); + exit(1); + } } } // The base environment configuration not used past this point remove(mission_base); +} - const char *envs[] = {env_name_testing, env_name}; - for (size_t e = 0; e < sizeof(envs) / sizeof(*envs); e++) { - const char *name = envs[e]; - if (ctx.conda.conda_packages_purge && strlist_count(ctx.conda.conda_packages_purge)) { - msg(STASIS_MSG_L2, "Purging conda packages from %s\n", name); - if (delivery_purge_packages(&ctx, name, PKG_USE_CONDA)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to purge requested conda packages 
from %s\n", name); - exit(1); - } - } - - if (ctx.conda.pip_packages_purge && strlist_count(ctx.conda.pip_packages_purge)) { - msg(STASIS_MSG_L2, "Purging pip packages from %s\n", name); - if (delivery_purge_packages(&ctx, env_name_testing, PKG_USE_PIP)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to purge requested pip packages from %s\n", - env_name_testing); - exit(1); +static void configure_conda_purge(struct Delivery *ctx, char *envs[]) { + struct StrList *purge_list[] = { + ctx->conda.conda_packages_purge, + ctx->conda.pip_packages_purge + }; + for (size_t i = 0; i < sizeof(purge_list) / sizeof(purge_list[0]); i++) { + struct StrList *to_purge = purge_list[i]; + for (size_t e = 0; envs[e] != NULL; e += 2) { + //const char *title = envs[e]; // unused + const char *env = envs[e+1]; + if (to_purge && strlist_count(to_purge)) { + const char *pkg_manager_name[] = { + "conda", + "pip" + }; + const int pkg_manager_use[] = { + PKG_USE_CONDA, + PKG_USE_PIP + }; + const char *manager_str = pkg_manager_name[i]; + const int manager_flag = pkg_manager_use[i]; + msg(STASIS_MSG_L2, "Purging %s packages from %s\n", manager_str, env); + if (delivery_purge_packages(ctx, env, manager_flag)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to purge requested %s packages from %s\n", manager_str, env); + exit(1); + } } } } +} +static void setup_activate_test_env(const struct Delivery *ctx, const char *env_name_testing) { // Activate test environment msg(STASIS_MSG_L1, "Activating test environment\n"); - if (conda_activate(ctx.storage.conda_install_prefix, env_name_testing)) { + if (conda_activate(ctx->storage.conda_install_prefix, env_name_testing)) { fprintf(stderr, "failed to activate test environment\n"); exit(1); } +} - if (delivery_gather_tool_versions(&ctx)) { - if (!ctx.conda.tool_version) { +static void configure_tool_versions(struct Delivery *ctx) { + if (delivery_gather_tool_versions(ctx)) { + if (!ctx->conda.tool_version) { msg(STASIS_MSG_ERROR | STASIS_MSG_L2, 
"Could not determine conda version\n"); exit(1); } - if (!ctx.conda.tool_build_version) { + if (!ctx->conda.tool_build_version) { msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Could not determine conda-build version\n"); exit(1); } } +} +static void install_build_package() { if (pip_exec("install build")) { msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "'build' tool installation failed\n"); exit(1); } +} - if (!isempty(ctx.meta.based_on)) { +static void configure_package_overlay(struct Delivery *ctx, const char *env_name) { + if (!isempty(ctx->meta.based_on)) { msg(STASIS_MSG_L1, "Generating package overlay from environment: %s\n", env_name); - if (delivery_overlay_packages_from_env(&ctx, env_name)) { + if (delivery_overlay_packages_from_env(ctx, env_name)) { msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "%s", "Failed to generate package overlay. Resulting environment integrity cannot be guaranteed.\n"); exit(1); } } +} +static void configure_deferred_packages(struct Delivery *ctx) { msg(STASIS_MSG_L1, "Filter deliverable packages\n"); - delivery_defer_packages(&ctx, DEFER_CONDA); - delivery_defer_packages(&ctx, DEFER_PIP); + delivery_defer_packages(ctx, DEFER_CONDA); + delivery_defer_packages(ctx, DEFER_PIP); +} + +static void show_overiew(struct Delivery *ctx) { msg(STASIS_MSG_L1, "Overview\n"); - delivery_meta_show(&ctx); - delivery_conda_show(&ctx); + delivery_meta_show(ctx); + delivery_conda_show(ctx); if (globals.verbose) { //delivery_runtime_show(&ctx); } +} +static void run_tests(struct Delivery *ctx) { // Execute configuration-defined tests if (globals.enable_testing) { - delivery_tests_show(&ctx); + delivery_tests_show(ctx); msg(STASIS_MSG_L1, "Begin test execution\n"); - delivery_tests_run(&ctx); + delivery_tests_run(ctx); msg(STASIS_MSG_L2, "Rewriting test results\n"); - delivery_fixup_test_results(&ctx); + delivery_fixup_test_results(ctx); } else { msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Test execution is disabled\n"); } +} - if (ctx.conda.conda_packages_defer && 
strlist_count(ctx.conda.conda_packages_defer)) { +static void build_conda_recipes(struct Delivery *ctx) { + if (ctx->conda.conda_packages_defer && strlist_count(ctx->conda.conda_packages_defer)) { msg(STASIS_MSG_L2, "Building Conda recipe(s)\n"); - if (delivery_build_recipes(&ctx)) { + if (delivery_build_recipes(ctx)) { exit(1); } msg(STASIS_MSG_L3, "Copying artifacts\n"); - if (delivery_copy_conda_artifacts(&ctx)) { + if (delivery_copy_conda_artifacts(ctx)) { exit(1); } msg(STASIS_MSG_L3, "Indexing artifacts\n"); - if (delivery_index_conda_artifacts(&ctx)) { + if (delivery_index_conda_artifacts(ctx)) { exit(1); } } +} - if (strlist_count(ctx.conda.pip_packages_defer)) { - if (!((ctx.conda.wheels_packages = delivery_build_wheels(&ctx)))) { +static void build_wheel_packages(struct Delivery *ctx) { + if (strlist_count(ctx->conda.pip_packages_defer)) { + msg(STASIS_MSG_L2, "Building Python wheels(s)\n"); + if (!((ctx->conda.wheels_packages = delivery_build_wheels(ctx)))) { exit(1); } - if (delivery_index_wheel_artifacts(&ctx)) { + if (delivery_index_wheel_artifacts(ctx)) { exit(1); } } +} - // Populate the release environment - msg(STASIS_MSG_L1, "Populating release environment\n"); +static void release_install_conda_packages(struct Delivery *ctx, char *env_name) { msg(STASIS_MSG_L2, "Installing conda packages\n"); - if (strlist_count(ctx.conda.conda_packages)) { - if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA, (struct StrList *[]) {ctx.conda.conda_packages, NULL})) { + if (strlist_count(ctx->conda.conda_packages)) { + if (delivery_install_packages(ctx, ctx->storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA, (struct StrList *[]) {ctx->conda.conda_packages, NULL})) { exit(1); } } - if (strlist_count(ctx.conda.conda_packages_defer)) { + if (strlist_count(ctx->conda.conda_packages_defer)) { msg(STASIS_MSG_L3, "Installing deferred conda packages\n"); - if (delivery_install_packages(&ctx, 
ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA | INSTALL_PKG_CONDA_DEFERRED, (struct StrList *[]) {ctx.conda.conda_packages_defer, NULL})) { + if (delivery_install_packages(ctx, ctx->storage.conda_install_prefix, env_name, INSTALL_PKG_CONDA | INSTALL_PKG_CONDA_DEFERRED, (struct StrList *[]) {ctx->conda.conda_packages_defer, NULL})) { exit(1); } } else { msg(STASIS_MSG_L3, "No deferred conda packages\n"); } +} +static void release_install_pip_packages(struct Delivery *ctx, char *env_name) { msg(STASIS_MSG_L2, "Installing pip packages\n"); - if (strlist_count(ctx.conda.pip_packages)) { - if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP, (struct StrList *[]) {ctx.conda.pip_packages, NULL})) { + if (strlist_count(ctx->conda.pip_packages)) { + if (delivery_install_packages(ctx, ctx->storage.conda_install_prefix, env_name, INSTALL_PKG_PIP, (struct StrList *[]) {ctx->conda.pip_packages, NULL})) { exit(1); } } - if (strlist_count(ctx.conda.pip_packages_defer)) { + if (strlist_count(ctx->conda.pip_packages_defer)) { msg(STASIS_MSG_L3, "Installing deferred pip packages\n"); - if (delivery_install_packages(&ctx, ctx.storage.conda_install_prefix, env_name, INSTALL_PKG_PIP | INSTALL_PKG_PIP_DEFERRED, (struct StrList *[]) {ctx.conda.pip_packages_defer, NULL})) { + if (delivery_install_packages(ctx, ctx->storage.conda_install_prefix, env_name, INSTALL_PKG_PIP | INSTALL_PKG_PIP_DEFERRED, (struct StrList *[]) {ctx->conda.pip_packages_defer, NULL})) { exit(1); } } else { msg(STASIS_MSG_L3, "No deferred pip packages\n"); } +} - conda_exec("list"); - - msg(STASIS_MSG_L1, "Creating release\n"); - msg(STASIS_MSG_L2, "Exporting delivery configuration\n"); - if (!pushd(ctx.storage.cfgdump_dir)) { - char filename[PATH_MAX] = {0}; - sprintf(filename, "%s.ini", ctx.info.release_name); - FILE *spec = fopen(filename, "w+"); - if (!spec) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename); - exit(1); - } - 
ini_write(ctx._stasis_ini_fp.delivery, &spec, INI_WRITE_RAW); - fclose(spec); - - memset(filename, 0, sizeof(filename)); - sprintf(filename, "%s-rendered.ini", ctx.info.release_name); - spec = fopen(filename, "w+"); - if (!spec) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename); - exit(1); - } - ini_write(ctx._stasis_ini_fp.delivery, &spec, INI_WRITE_PRESERVE); - fclose(spec); - popd(); - } else { - SYSERROR("Failed to enter directory: %s", ctx.storage.delivery_dir); - exit(1); - } - - msg(STASIS_MSG_L2, "Exporting %s\n", env_name_testing); - if (conda_env_export(env_name_testing, ctx.storage.delivery_dir, env_name_testing)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", env_name_testing); - exit(1); - } - - msg(STASIS_MSG_L2, "Exporting %s\n", env_name); - if (conda_env_export(env_name, ctx.storage.delivery_dir, env_name)) { - msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", env_name); - exit(1); - } - - // Rewrite release environment output (i.e. set package origin(s) to point to the deployment server, etc.) - char specfile[PATH_MAX]; - sprintf(specfile, "%s/%s.yml", ctx.storage.delivery_dir, env_name); - msg(STASIS_MSG_L3, "Rewriting release spec file (stage 1): %s\n", path_basename(specfile)); - delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_1); - - msg(STASIS_MSG_L1, "Rendering mission templates\n"); - delivery_mission_render_files(&ctx); - - int want_docker = ini_section_search(&ctx._stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:docker") ? true : false; - int want_artifactory = ini_section_search(&ctx._stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:artifactory") ? true : false; +static void build_docker(struct Delivery *ctx, const int disabled) { + const int want_docker = ini_section_search(&ctx->_stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:docker") ? 
true : false; if (want_docker) { - if (user_disabled_docker) { + if (disabled) { msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled by CLI argument\n"); } else { char dockerfile[PATH_MAX] = {0}; - sprintf(dockerfile, "%s/%s", ctx.storage.build_docker_dir, "Dockerfile"); + sprintf(dockerfile, "%s/%s", ctx->storage.build_docker_dir, "Dockerfile"); if (globals.enable_docker) { if (!access(dockerfile, F_OK)) { msg(STASIS_MSG_L1, "Building Docker image\n"); - if (delivery_docker(&ctx)) { + if (delivery_docker(ctx)) { msg(STASIS_MSG_L1 | STASIS_MSG_ERROR, "Failed to build docker image!\n"); COE_CHECK_ABORT(1, "Failed to build docker image"); } } else { - msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. No Dockerfile found in %s\n", ctx.storage.build_docker_dir); + msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. No Dockerfile found in %s\n", ctx->storage.build_docker_dir); } } else { msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. System configuration error\n"); @@ -573,25 +442,235 @@ int main(int argc, char *argv[]) { } else { msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Docker image building is disabled. 
deploy:docker is not configured\n"); } +} + +static void generate_release(struct Delivery *ctx, char *env_name, char *env_name_testing, const int disable_docker) { + // Populate the release environment + msg(STASIS_MSG_L1, "Populating release environment\n"); + release_install_conda_packages(ctx, env_name); + release_install_pip_packages(ctx, env_name); + + conda_exec("list"); - msg(STASIS_MSG_L3, "Rewriting release spec file (stage 2): %s\n", path_basename(specfile)); - delivery_rewrite_spec(&ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_2); + msg(STASIS_MSG_L1, "Creating release\n"); + delivery_export(ctx, (char *[]) {env_name, env_name_testing, NULL}); + + char specfile[PATH_MAX]; + sprintf(specfile, "%s/%s.yml", ctx->storage.delivery_dir, env_name); + + delivery_rewrite_stage1(ctx, specfile); + build_docker(ctx, disable_docker); + delivery_rewrite_stage2(ctx, specfile); msg(STASIS_MSG_L1, "Dumping metadata\n"); - if (delivery_dump_metadata(&ctx)) { + if (delivery_dump_metadata(ctx)) { msg(STASIS_MSG_L1 | STASIS_MSG_ERROR, "Metadata dump failed\n"); } +} +static void transfer_artifacts(struct Delivery *ctx) { + const int want_artifactory = ini_section_search(&ctx->_stasis_ini_fp.delivery, INI_SEARCH_BEGINS, "deploy:artifactory") ? true : false; if (want_artifactory) { if (globals.enable_artifactory && globals.enable_artifactory_upload) { msg(STASIS_MSG_L1, "Uploading artifacts\n"); - delivery_artifact_upload(&ctx); + delivery_artifact_upload(ctx); } else { msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Artifactory upload is disabled by CLI argument\n"); } } else { msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "Artifactory upload is disabled. 
deploy:artifactory is not configured\n"); } +} + +int main(int argc, char *argv[]) { + struct Delivery ctx; + struct Process proc = { + .f_stdout = "", + .f_stderr = "", + .redirect_stderr = 0, + }; + char env_name[STASIS_NAME_MAX] = {0}; + char env_name_testing[STASIS_NAME_MAX] = {0}; + char *delivery_input = NULL; + char *config_input = NULL; + char installer_url[PATH_MAX]; + char python_override_version[STASIS_NAME_MAX]; + int user_disabled_docker = false; + globals.cpu_limit = get_cpu_count(); + if (globals.cpu_limit > 1) { + globals.cpu_limit--; // max - 1 + } + + memset(env_name, 0, sizeof(env_name)); + memset(env_name_testing, 0, sizeof(env_name_testing)); + memset(installer_url, 0, sizeof(installer_url)); + memset(python_override_version, 0, sizeof(python_override_version)); + memset(&proc, 0, sizeof(proc)); + memset(&ctx, 0, sizeof(ctx)); + + int c; + int option_index = 0; + while ((c = getopt_long(argc, argv, "hVCc:p:vU", long_options, &option_index)) != -1) { + switch (c) { + case 'h': + usage(path_basename(argv[0])); + exit(0); + case 'V': + puts(VERSION); + exit(0); + case 'c': + config_input = strdup(optarg); + break; + case 'C': + globals.continue_on_error = true; + break; + case 'p': + strcpy(python_override_version, optarg); + break; + case 'l': + globals.cpu_limit = strtol(optarg, NULL, 10); + if (globals.cpu_limit <= 1) { + globals.cpu_limit = 1; + globals.enable_parallel = false; // No point + } + break; + case OPT_ALWAYS_UPDATE_BASE: + globals.always_update_base_environment = true; + break; + case OPT_FAIL_FAST: + globals.parallel_fail_fast = true; + break; + case OPT_TASK_TIMEOUT: + globals.task_timeout = str_to_timeout(optarg); + if (globals.task_timeout < 0) { + fprintf(stderr, "Invalid timeout: %s\n", optarg); + if (globals.task_timeout == STR_TO_TIMEOUT_INVALID_TIME_SCALE) { + fprintf(stderr, "Use format '#s' (seconds), '#m' (minutes), '#h' (hours)\n"); + } else if (globals.task_timeout == STR_TO_TIMEOUT_NEGATIVE) { + fprintf(stderr, 
"Timeout cannot be negative\n"); + } + exit(1); + } + break; + case OPT_POOL_STATUS_INTERVAL: + globals.pool_status_interval = (int) strtol(optarg, NULL, 10); + if (globals.pool_status_interval < 1) { + globals.pool_status_interval = 1; + } else if (globals.pool_status_interval > 60 * 10) { + // Possible poor choice alert + fprintf(stderr, "Caution: Excessive pausing between status updates may cause third-party CI/CD" + " jobs to fail if the stdout/stderr streams are idle for too long!\n"); + } + break; + case 'U': + setenv("PYTHONUNBUFFERED", "1", 1); + fflush(stdout); + fflush(stderr); + setvbuf(stdout, NULL, _IONBF, 0); + setvbuf(stderr, NULL, _IONBF, 0); + break; + case 'v': + globals.verbose = true; + break; + case OPT_OVERWRITE: + globals.enable_overwrite = true; + break; + case OPT_NO_DOCKER: + globals.enable_docker = false; + user_disabled_docker = true; + break; + case OPT_NO_ARTIFACTORY: + globals.enable_artifactory = false; + break; + case OPT_NO_ARTIFACTORY_BUILD_INFO: + globals.enable_artifactory_build_info = false; + break; + case OPT_NO_ARTIFACTORY_UPLOAD: + globals.enable_artifactory_build_info = false; + globals.enable_artifactory_upload = false; + break; + case OPT_NO_TESTING: + globals.enable_testing = false; + break; + case OPT_NO_REWRITE_SPEC_STAGE_2: + globals.enable_rewrite_spec_stage_2 = false; + break; + case OPT_NO_PARALLEL: + globals.enable_parallel = false; + break; + case OPT_NO_TASK_LOGGING: + globals.enable_task_logging = false; + break; + case '?': + default: + exit(1); + } + } + + if (optind < argc) { + while (optind < argc) { + // use first positional argument + delivery_input = argv[optind++]; + break; + } + } + + if (!delivery_input) { + fprintf(stderr, "error: a DELIVERY_FILE is required\n"); + usage(path_basename(argv[0])); + exit(1); + } + + printf(BANNER, VERSION, AUTHOR); + + check_system_path(); + + msg(STASIS_MSG_L1, "Setup\n"); + + tpl_setup_vars(&ctx); + tpl_setup_funcs(&ctx); + + setup_sysconfdir(); + 
setup_python_version_override(&ctx, python_override_version); + + configure_stasis_ini(&ctx, &config_input); + configure_delivery_ini(&ctx, &delivery_input); + configure_delivery_context(&ctx); + + check_requirements(&ctx); + configure_jfrog_cli(&ctx); + + runtime_apply(ctx.runtime.environ); + strcpy(env_name, ctx.info.release_name); + strcpy(env_name_testing, env_name); + strcat(env_name_testing, "-test"); + char *envs[] = { + "release", env_name, + "testing", env_name_testing, + NULL, NULL, + }; + + check_release_history(&ctx); + sync_release_history(&ctx); + + check_conda_install_prefix(&ctx); + check_conda_prefix_length(&ctx); + setup_conda(&ctx, installer_url); + configure_conda_base(&ctx, envs); + configure_conda_purge(&ctx, envs); + setup_activate_test_env(&ctx, env_name_testing); + + configure_tool_versions(&ctx); + install_build_package(); + configure_package_overlay(&ctx, env_name); + configure_deferred_packages(&ctx); + + show_overiew(&ctx); + run_tests(&ctx); + build_conda_recipes(&ctx); + build_wheel_packages(&ctx); + generate_release(&ctx, env_name, env_name_testing, user_disabled_docker); + transfer_artifacts(&ctx); msg(STASIS_MSG_L1, "Cleaning up\n"); delivery_free(&ctx); diff --git a/src/cli/stasis_indexer/callbacks.c b/src/cli/stasis_indexer/callbacks.c index 603aef9..20674f0 100644 --- a/src/cli/stasis_indexer/callbacks.c +++ b/src/cli/stasis_indexer/callbacks.c @@ -7,9 +7,9 @@ // qsort callback to sort delivery contexts by compact python version int callback_sort_deliveries_cmpfn(const void *a, const void *b) { - const struct Delivery *delivery1 = (struct Delivery *) a; + const struct Delivery *delivery1 = *(struct Delivery **) a; const size_t delivery1_python = strtoul(delivery1->meta.python_compact, NULL, 10); - const struct Delivery *delivery2 = (struct Delivery *) b; + const struct Delivery *delivery2 = *(struct Delivery **) b; const size_t delivery2_python = strtoul(delivery2->meta.python_compact, NULL, 10); if (delivery2_python > 
delivery1_python) { diff --git a/src/cli/stasis_indexer/helpers.c b/src/cli/stasis_indexer/helpers.c index 018a8f6..6dc653d 100644 --- a/src/cli/stasis_indexer/helpers.c +++ b/src/cli/stasis_indexer/helpers.c @@ -5,24 +5,24 @@ #include "core.h" #include "helpers.h" -struct StrList *get_architectures(struct Delivery ctx[], const size_t nelem) { +struct StrList *get_architectures(struct Delivery **ctx, const size_t nelem) { struct StrList *architectures = strlist_init(); for (size_t i = 0; i < nelem; i++) { - if (ctx[i].system.arch) { - if (!strstr_array(architectures->data, ctx[i].system.arch)) { - strlist_append(&architectures, ctx[i].system.arch); + if (ctx[i]->system.arch) { + if (!strstr_array(architectures->data, ctx[i]->system.arch)) { + strlist_append(&architectures, ctx[i]->system.arch); } } } return architectures; } -struct StrList *get_platforms(struct Delivery ctx[], const size_t nelem) { +struct StrList *get_platforms(struct Delivery **ctx, const size_t nelem) { struct StrList *platforms = strlist_init(); for (size_t i = 0; i < nelem; i++) { - if (ctx[i].system.platform) { - if (!strstr_array(platforms->data, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE])) { - strlist_append(&platforms, ctx[i].system.platform[DELIVERY_PLATFORM_RELEASE]); + if (ctx[i]->system.platform) { + if (!strstr_array(platforms->data, ctx[i]->system.platform[DELIVERY_PLATFORM_RELEASE])) { + strlist_append(&platforms, ctx[i]->system.platform[DELIVERY_PLATFORM_RELEASE]); } } } @@ -177,19 +177,19 @@ int micromamba_configure(const struct Delivery *ctx, struct MicromambaInfo *m) { return status; } -int get_latest_rc(struct Delivery ctx[], const size_t nelem) { +int get_latest_rc(struct Delivery **ctx, const size_t nelem) { int result = 0; for (size_t i = 0; i < nelem; i++) { - if (ctx[i].meta.rc > result) { - result = ctx[i].meta.rc; + if (ctx[i]->meta.rc > result) { + result = ctx[i]->meta.rc; } } return result; } int sort_by_latest_rc(const void *a, const void *b) { - const struct 
Delivery *aa = a; - const struct Delivery *bb = b; + const struct Delivery *aa = *(struct Delivery **) a; + const struct Delivery *bb = *(struct Delivery **) b; if (aa->meta.rc > bb->meta.rc) { return -1; } else if (aa->meta.rc < bb->meta.rc) { @@ -214,11 +214,11 @@ int sort_by_latest_rc(const void *a, const void *b) { } } -struct Delivery *get_latest_deliveries(struct Delivery ctx[], size_t nelem) { +struct Delivery **get_latest_deliveries(struct Delivery **ctx, size_t nelem, size_t *result_nelem) { int latest = 0; size_t n = 0; - struct Delivery *result = calloc(nelem + 1, sizeof(*result)); + struct Delivery **result = calloc(nelem + 1, sizeof(*result)); if (!result) { fprintf(stderr, "Unable to allocate %zu bytes for result delivery array: %s\n", nelem * sizeof(*result), strerror(errno)); return NULL; @@ -227,11 +227,15 @@ struct Delivery *get_latest_deliveries(struct Delivery ctx[], size_t nelem) { latest = get_latest_rc(ctx, nelem); qsort(ctx, nelem, sizeof(*ctx), sort_by_latest_rc); for (size_t i = 0; i < nelem; i++) { - if (ctx[i].meta.rc == latest) { + if (ctx[i]->meta.rc == latest) { result[n] = ctx[i]; n++; } } + + if (result_nelem) { + *result_nelem = n; + } return result; } diff --git a/src/cli/stasis_indexer/include/helpers.h b/src/cli/stasis_indexer/include/helpers.h index 9cefc05..6e2f93c 100644 --- a/src/cli/stasis_indexer/include/helpers.h +++ b/src/cli/stasis_indexer/include/helpers.h @@ -13,12 +13,12 @@ for ((COUNTER) = 0; (X)[COUNTER].MEMBER != NULL; (COUNTER)++) {} \ } while(0) -struct StrList *get_architectures(struct Delivery ctx[], size_t nelem); -struct StrList *get_platforms(struct Delivery ctx[], size_t nelem); +struct StrList *get_architectures(struct Delivery **ctx, size_t nelem); +struct StrList *get_platforms(struct Delivery **ctx, size_t nelem); int get_pandoc_version(size_t *result); int pandoc_exec(const char *in_file, const char *out_file, const char *css_file, const char *title); -int get_latest_rc(struct Delivery ctx[], size_t 
nelem); -struct Delivery *get_latest_deliveries(struct Delivery ctx[], size_t nelem); +int get_latest_rc(struct Delivery **ctx, size_t nelem); +struct Delivery **get_latest_deliveries(struct Delivery **ctx, size_t nelem, size_t *result_nelem); int get_files(struct StrList **out, const char *path, const char *pattern, ...); struct StrList *get_docker_images(struct Delivery *ctx, char *pattern); int load_metadata(struct Delivery *ctx, const char *filename); diff --git a/src/cli/stasis_indexer/include/junitxml_report.h b/src/cli/stasis_indexer/include/junitxml_report.h index 6d2a248..5747359 100644 --- a/src/cli/stasis_indexer/include/junitxml_report.h +++ b/src/cli/stasis_indexer/include/junitxml_report.h @@ -3,6 +3,6 @@ #include "helpers.h" -int indexer_junitxml_report(struct Delivery ctx[], size_t nelem); +int indexer_junitxml_report(struct Delivery **ctx, size_t nelem); #endif //JUNITXML_REPORT_H diff --git a/src/cli/stasis_indexer/include/readmes.h b/src/cli/stasis_indexer/include/readmes.h index d4fa7ac..e14e681 100644 --- a/src/cli/stasis_indexer/include/readmes.h +++ b/src/cli/stasis_indexer/include/readmes.h @@ -3,6 +3,6 @@ #include "helpers.h" -int indexer_readmes(struct Delivery ctx[], size_t nelem); +int indexer_readmes(struct Delivery **ctx, size_t nelem); #endif //READMES_H diff --git a/src/cli/stasis_indexer/include/website.h b/src/cli/stasis_indexer/include/website.h index e67d58b..83657a1 100644 --- a/src/cli/stasis_indexer/include/website.h +++ b/src/cli/stasis_indexer/include/website.h @@ -3,6 +3,6 @@ #include "helpers.h" -int indexer_make_website(const struct Delivery *ctx); +int indexer_make_website(struct Delivery **ctx); #endif //WEBSITE_H diff --git a/src/cli/stasis_indexer/junitxml_report.c b/src/cli/stasis_indexer/junitxml_report.c index 904a3e5..21cf729 100644 --- a/src/cli/stasis_indexer/junitxml_report.c +++ b/src/cli/stasis_indexer/junitxml_report.c @@ -96,17 +96,17 @@ static int write_report_output(struct Delivery *ctx, FILE *destfp, 
const char *x return 0; } -int indexer_junitxml_report(struct Delivery ctx[], const size_t nelem) { +int indexer_junitxml_report(struct Delivery **ctx, const size_t nelem) { char indexfile[PATH_MAX] = {0}; - sprintf(indexfile, "%s/README.md", ctx->storage.results_dir); + sprintf(indexfile, "%s/README.md", (*ctx)->storage.results_dir); - struct StrList *file_listing = listdir(ctx->storage.results_dir); + struct StrList *file_listing = listdir((*ctx)->storage.results_dir); if (!file_listing) { // no test results to process return 0; } - if (!pushd(ctx->storage.results_dir)) { + if (!pushd((*ctx)->storage.results_dir)) { FILE *indexfp = fopen(indexfile, "w+"); if (!indexfp) { fprintf(stderr, "Unable to open %s for writing\n", indexfile); @@ -114,21 +114,21 @@ int indexer_junitxml_report(struct Delivery ctx[], const size_t nelem) { } printf("Index %s opened for writing\n", indexfile); - int current_rc = ctx->meta.rc; + int current_rc = (*ctx)->meta.rc; for (size_t d = 0; d < nelem; d++) { char pattern[PATH_MAX] = {0}; - snprintf(pattern, sizeof(pattern) - 1, "*%s*", ctx[d].info.release_name); + snprintf(pattern, sizeof(pattern) - 1, "*%s*", ctx[d]->info.release_name); // if the result directory contains tests for this release name, print them if (!is_file_in_listing(file_listing, pattern)) { // no test results continue; } - if (current_rc > ctx[d].meta.rc) { - current_rc = ctx[d].meta.rc; + if (current_rc > ctx[d]->meta.rc) { + current_rc = ctx[d]->meta.rc; fprintf(indexfp, "\n\n---\n\n"); } - fprintf(indexfp, "### %s\n", ctx[d].info.release_name); + fprintf(indexfp, "### %s\n", ctx[d]->info.release_name); fprintf(indexfp, "\n|Suite|Duration|Total|Pass|Fail|Skip|Error|\n"); fprintf(indexfp, "|:----|:------:|:---:|:--:|:--:|:--:|:---:|\n"); @@ -139,7 +139,7 @@ int indexer_junitxml_report(struct Delivery ctx[], const size_t nelem) { continue; } if (!fnmatch(pattern, filename, 0)) { - if (write_report_output(&ctx[d], indexfp, filename)) { + if (write_report_output(ctx[d], 
indexfp, filename)) { // warn only SYSERROR("Unable to write xml report file using %s", filename); } @@ -150,7 +150,7 @@ int indexer_junitxml_report(struct Delivery ctx[], const size_t nelem) { fclose(indexfp); popd(); } else { - fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); + fprintf(stderr, "Unable to enter delivery directory: %s\n", (*ctx)->storage.delivery_dir); guard_strlist_free(&file_listing); return -1; } diff --git a/src/cli/stasis_indexer/readmes.c b/src/cli/stasis_indexer/readmes.c index 413a6a3..edc6312 100644 --- a/src/cli/stasis_indexer/readmes.c +++ b/src/cli/stasis_indexer/readmes.c @@ -1,8 +1,9 @@ #include "core.h" #include "readmes.h" -int indexer_readmes(struct Delivery ctx[], const size_t nelem) { - struct Delivery *latest_deliveries = get_latest_deliveries(ctx, nelem); +int indexer_readmes(struct Delivery **ctx, const size_t nelem) { + size_t nelem_real = 0; + struct Delivery **latest_deliveries = get_latest_deliveries(ctx, nelem, &nelem_real); if (!latest_deliveries) { if (errno) { return -1; @@ -11,17 +12,17 @@ int indexer_readmes(struct Delivery ctx[], const size_t nelem) { } char indexfile[PATH_MAX] = {0}; - sprintf(indexfile, "%s/README.md", ctx->storage.delivery_dir); + sprintf(indexfile, "%s/README.md", (*ctx)->storage.delivery_dir); FILE *indexfp = fopen(indexfile, "w+"); if (!indexfp) { fprintf(stderr, "Unable to open %s for writing\n", indexfile); return -1; } - struct StrList *archs = get_architectures(latest_deliveries, nelem); - struct StrList *platforms = get_platforms(latest_deliveries, nelem); + struct StrList *archs = get_architectures(latest_deliveries, nelem_real); + struct StrList *platforms = get_platforms(latest_deliveries, nelem_real); - fprintf(indexfp, "# %s-%s\n\n", ctx->meta.name, ctx->meta.version); + fprintf(indexfp, "# %s-%s\n\n", (*ctx)->meta.name, (*ctx)->meta.version); fprintf(indexfp, "## Current Release\n\n"); strlist_sort(platforms, STASIS_SORT_ALPHA); 
strlist_sort(archs, STASIS_SORT_ALPHA); @@ -31,10 +32,10 @@ int indexer_readmes(struct Delivery ctx[], const size_t nelem) { for (size_t a = 0; a < strlist_count(archs); a++) { char *arch = strlist_item(archs, a); int have_combo = 0; - for (size_t i = 0; i < nelem; i++) { - if (latest_deliveries[i].system.platform) { - if (strstr(latest_deliveries[i].system.platform[DELIVERY_PLATFORM_RELEASE], platform) && - strstr(latest_deliveries[i].system.arch, arch)) { + for (size_t i = 0; i < nelem_real; i++) { + if (latest_deliveries[i]->system.platform) { + if (strstr(latest_deliveries[i]->system.platform[DELIVERY_PLATFORM_RELEASE], platform) && + strstr(latest_deliveries[i]->system.arch, arch)) { have_combo = 1; } } @@ -43,36 +44,36 @@ int indexer_readmes(struct Delivery ctx[], const size_t nelem) { continue; } fprintf(indexfp, "### %s-%s\n\n", platform, arch); - for (size_t i = 0; i < nelem; i++) { + for (size_t i = 0; i < nelem_real; i++) { char link_name[PATH_MAX] = {0}; char readme_name[PATH_MAX] = {0}; char conf_name[PATH_MAX] = {0}; char conf_name_relative[PATH_MAX] = {0}; - if (!latest_deliveries[i].meta.name) { + if (!latest_deliveries[i]->meta.name) { continue; } - sprintf(link_name, "latest-py%s-%s-%s.yml", latest_deliveries[i].meta.python_compact, latest_deliveries[i].system.platform[DELIVERY_PLATFORM_RELEASE], latest_deliveries[i].system.arch); - sprintf(readme_name, "README-py%s-%s-%s.md", latest_deliveries[i].meta.python_compact, latest_deliveries[i].system.platform[DELIVERY_PLATFORM_RELEASE], latest_deliveries[i].system.arch); - sprintf(conf_name, "%s.ini", latest_deliveries[i].info.release_name); - sprintf(conf_name_relative, "../config/%s.ini", latest_deliveries[i].info.release_name); + sprintf(link_name, "latest-py%s-%s-%s.yml", latest_deliveries[i]->meta.python_compact, latest_deliveries[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest_deliveries[i]->system.arch); + sprintf(readme_name, "README-py%s-%s-%s.md", 
latest_deliveries[i]->meta.python_compact, latest_deliveries[i]->system.platform[DELIVERY_PLATFORM_RELEASE], latest_deliveries[i]->system.arch); + sprintf(conf_name, "%s.ini", latest_deliveries[i]->info.release_name); + sprintf(conf_name_relative, "../config/%s.ini", latest_deliveries[i]->info.release_name); if (strstr(link_name, platform) && strstr(link_name, arch)) { - fprintf(indexfp, "- Python %s\n", latest_deliveries[i].meta.python); + fprintf(indexfp, "- Python %s\n", latest_deliveries[i]->meta.python); fprintf(indexfp, " - Info: [README](%s)\n", readme_name); fprintf(indexfp, " - Release: [Conda Environment YAML](%s)\n", link_name); fprintf(indexfp, " - Receipt: [STASIS input file](%s)\n", conf_name_relative); char *pattern = NULL; asprintf(&pattern, "*%s*%s*", - latest_deliveries[i].info.build_number, - strstr(ctx->rules.release_fmt, "%p") ? latest_deliveries[i].meta.python_compact : "" ); + latest_deliveries[i]->info.build_number, + strstr((*ctx)->rules.release_fmt, "%p") ? latest_deliveries[i]->meta.python_compact : "" ); if (!pattern) { SYSERROR("%s", "Unable to allocate bytes for pattern"); return -1; } - struct StrList *docker_images = get_docker_images(&latest_deliveries[i], pattern); + struct StrList *docker_images = get_docker_images(latest_deliveries[i], pattern); if (docker_images && strlist_count(docker_images) - && !strcmp(latest_deliveries[i].system.platform[DELIVERY_PLATFORM_RELEASE], "linux")) { + && !strcmp(latest_deliveries[i]->system.platform[DELIVERY_PLATFORM_RELEASE], "linux")) { fprintf(indexfp, " - Docker: "); fprintf(indexfp, "[Archive](../packages/docker/%s)\n", path_basename(strlist_item(docker_images, 0))); } @@ -86,9 +87,9 @@ int indexer_readmes(struct Delivery ctx[], const size_t nelem) { } fprintf(indexfp, "## Releases\n"); - int current_rc = ctx->meta.rc; - for (size_t i = 0; ctx[i].meta.name != NULL; i++) { - struct Delivery *current = &ctx[i]; + int current_rc = (*ctx)->meta.rc; + for (size_t i = 0; i < nelem; i++) { + struct 
Delivery *current = ctx[i]; if (current_rc > current->meta.rc) { current_rc = current->meta.rc; fprintf(indexfp, "\n\n---\n\n"); @@ -101,7 +102,7 @@ int indexer_readmes(struct Delivery ctx[], const size_t nelem) { char *pattern = NULL; asprintf(&pattern, "*%s*%s*", current->info.build_number, - strstr(ctx->rules.release_fmt, "%p") ? current->meta.python_compact : "" ); + strstr((*ctx)->rules.release_fmt, "%p") ? current->meta.python_compact : "" ); if (!pattern) { SYSERROR("%s", "Unable to allocate bytes for pattern"); return -1; diff --git a/src/cli/stasis_indexer/stasis_indexer_main.c b/src/cli/stasis_indexer/stasis_indexer_main.c index 279af5a..840e897 100644 --- a/src/cli/stasis_indexer/stasis_indexer_main.c +++ b/src/cli/stasis_indexer/stasis_indexer_main.c @@ -8,15 +8,11 @@ #include "delivery.h" int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t rootdirs_total) { - char cmd[PATH_MAX]; - char destdir_bare[PATH_MAX]; - char destdir_with_output[PATH_MAX]; + char cmd[PATH_MAX] = {0}; + char destdir_bare[PATH_MAX] = {0}; + char destdir_with_output[PATH_MAX] = {0}; char *destdir = destdir_bare; - memset(cmd, 0, sizeof(cmd)); - memset(destdir_bare, 0, sizeof(destdir_bare)); - memset(destdir_with_output, 0, sizeof(destdir_bare)); - strcpy(destdir_bare, dest); strcpy(destdir_with_output, dest); strcat(destdir_with_output, "/output"); @@ -25,7 +21,7 @@ int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t roo destdir = destdir_with_output; } - sprintf(cmd, "rsync -ah%s --delete --exclude 'tools/' --exclude 'tmp/' --exclude 'build/' ", globals.verbose ? "v" : "q"); + snprintf(cmd, sizeof(cmd), "rsync -ah%s --delete --exclude 'tools/' --exclude 'tmp/' --exclude 'build/' ", globals.verbose ? 
"v" : "q"); for (size_t i = 0; i < rootdirs_total; i++) { char srcdir_bare[PATH_MAX] = {0}; char srcdir_with_output[PATH_MAX] = {0}; @@ -42,9 +38,9 @@ int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t roo if (!access(srcdir_with_output, F_OK)) { srcdir = srcdir_with_output; } - snprintf(cmd + strlen(cmd), sizeof(srcdir) - strlen(srcdir) + 4, "'%s'/ ", srcdir); + snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(cmd), "'%s'/ ", srcdir); } - snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(destdir) + 1, " %s/", destdir); + snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(cmd), " %s/", destdir); if (globals.verbose) { puts(cmd); @@ -67,27 +63,28 @@ int indexer_conda(const struct Delivery *ctx, struct MicromambaInfo m) { return status; } -int indexer_symlinks(struct Delivery *ctx, const size_t nelem) { - struct Delivery *data = NULL; - data = get_latest_deliveries(ctx, nelem); +int indexer_symlinks(struct Delivery **ctx, const size_t nelem) { + struct Delivery **data = NULL; + size_t nelem_real = 0; + data = get_latest_deliveries(ctx, nelem, &nelem_real); //int latest = get_latest_rc(ctx, nelem); - if (!pushd(ctx->storage.delivery_dir)) { - for (size_t i = 0; i < nelem; i++) { + if (!pushd((*ctx)->storage.delivery_dir)) { + for (size_t i = 0; i < nelem_real; i++) { char link_name_spec[PATH_MAX]; char link_name_readme[PATH_MAX]; char file_name_spec[PATH_MAX]; char file_name_readme[PATH_MAX]; - if (!data[i].meta.name) { + if (!data[i]->meta.name) { continue; } - sprintf(link_name_spec, "latest-py%s-%s-%s.yml", data[i].meta.python_compact, data[i].system.platform[DELIVERY_PLATFORM_RELEASE], data[i].system.arch); - sprintf(file_name_spec, "%s.yml", data[i].info.release_name); + sprintf(link_name_spec, "latest-py%s-%s-%s.yml", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], data[i]->system.arch); + sprintf(file_name_spec, "%s.yml", data[i]->info.release_name); - sprintf(link_name_readme, "README-py%s-%s-%s.md", 
data[i].meta.python_compact, data[i].system.platform[DELIVERY_PLATFORM_RELEASE], data[i].system.arch); - sprintf(file_name_readme, "README-%s.md", data[i].info.release_name); + sprintf(link_name_readme, "README-py%s-%s-%s.md", data[i]->meta.python_compact, data[i]->system.platform[DELIVERY_PLATFORM_RELEASE], data[i]->system.arch); + sprintf(file_name_readme, "README-%s.md", data[i]->info.release_name); if (!access(link_name_spec, F_OK)) { if (unlink(link_name_spec)) { @@ -116,7 +113,7 @@ int indexer_symlinks(struct Delivery *ctx, const size_t nelem) { } popd(); } else { - fprintf(stderr, "Unable to enter delivery directory: %s\n", ctx->storage.delivery_dir); + fprintf(stderr, "Unable to enter delivery directory: %s\n", (*ctx)->storage.delivery_dir); guard_free(data); return -1; } @@ -325,7 +322,7 @@ int main(const int argc, char *argv[]) { get_files(&metafiles, ctx.storage.meta_dir, "*.stasis"); strlist_sort(metafiles, STASIS_SORT_LEN_ASCENDING); - struct Delivery *local = calloc(strlist_count(metafiles) + 1, sizeof(*local)); + struct Delivery **local = calloc(strlist_count(metafiles) + 1, sizeof(*local)); if (!local) { SYSERROR("%s", "Unable to allocate bytes for local delivery context array"); exit(1); @@ -334,11 +331,15 @@ int main(const int argc, char *argv[]) { for (size_t i = 0; i < strlist_count(metafiles); i++) { char *item = strlist_item(metafiles, i); // Copy the pre-filled contents of the main delivery context - memcpy(&local[i], &ctx, sizeof(ctx)); + local[i] = delivery_duplicate(&ctx); + if (!local[i]) { + SYSERROR("Unable to duplicate delivery context %zu", i); + exit(1); + } if (globals.verbose) { puts(item); } - load_metadata(&local[i], item); + load_metadata(local[i], item); } qsort(local, strlist_count(metafiles), sizeof(*local), callback_sort_deliveries_cmpfn); @@ -430,10 +431,14 @@ int main(const int argc, char *argv[]) { guard_free(destdir); guard_array_free(rootdirs); - guard_strlist_free(&metafiles); guard_free(m.micromamba_prefix); 
delivery_free(&ctx); + for (size_t i = 0; i < strlist_count(metafiles); i++) { + delivery_free(local[i]); + guard_free(local[i]); + } guard_free(local); + guard_strlist_free(&metafiles); globals_free(); msg(STASIS_MSG_L1, "Done!\n"); diff --git a/src/cli/stasis_indexer/website.c b/src/cli/stasis_indexer/website.c index 55f0c45..966391e 100644 --- a/src/cli/stasis_indexer/website.c +++ b/src/cli/stasis_indexer/website.c @@ -1,7 +1,7 @@ #include "core.h" #include "website.h" -int indexer_make_website(const struct Delivery *ctx) { +int indexer_make_website(struct Delivery **ctx) { char *css_filename = calloc(PATH_MAX, sizeof(*css_filename)); if (!css_filename) { SYSERROR("unable to allocate string for CSS file path: %s", strerror(errno)); @@ -12,8 +12,8 @@ int indexer_make_website(const struct Delivery *ctx) { const int have_css = access(css_filename, F_OK | R_OK) == 0; struct StrList *dirs = strlist_init(); - strlist_append(&dirs, ctx->storage.delivery_dir); - strlist_append(&dirs, ctx->storage.results_dir); + strlist_append(&dirs, (*ctx)->storage.delivery_dir); + strlist_append(&dirs, (*ctx)->storage.results_dir); struct StrList *inputs = NULL; for (size_t i = 0; i < strlist_count(dirs); i++) { diff --git a/src/lib/core/CMakeLists.txt b/src/lib/core/CMakeLists.txt index e3e3d4b..eb7a908 100644 --- a/src/lib/core/CMakeLists.txt +++ b/src/lib/core/CMakeLists.txt @@ -1,5 +1,6 @@ add_library(stasis_core STATIC globals.c + timespec.c str.c strlist.c ini.c @@ -21,6 +22,7 @@ add_library(stasis_core STATIC template_func_proto.c envctl.c multiprocessing.c + semaphore.c ) target_include_directories(stasis_core PRIVATE ${core_INCLUDE} diff --git a/src/lib/core/conda.c b/src/lib/core/conda.c index c81e6cc..de6130f 100644 --- a/src/lib/core/conda.c +++ b/src/lib/core/conda.c @@ -4,7 +4,7 @@ #include "conda.h" -int micromamba(struct MicromambaInfo *info, char *command, ...) { +int micromamba(const struct MicromambaInfo *info, char *command, ...) 
{ struct utsname sys; uname(&sys); @@ -24,7 +24,13 @@ int micromamba(struct MicromambaInfo *info, char *command, ...) { sprintf(installer_path, "%s/latest", getenv("TMPDIR") ? getenv("TMPDIR") : "/tmp"); if (access(installer_path, F_OK)) { - download(url, installer_path, NULL); + char *errmsg = NULL; + const long http_code = download(url, installer_path, &errmsg); + if (HTTP_ERROR(http_code)) { + fprintf(stderr, "download failed: %ld: %s\n", http_code, errmsg); + guard_free(errmsg); + return -1; + } } char mmbin[PATH_MAX]; @@ -62,17 +68,40 @@ int micromamba(struct MicromambaInfo *info, char *command, ...) { } int python_exec(const char *args) { - char command[PATH_MAX] = {0}; - snprintf(command, sizeof(command) - 1, "python %s", args); + const char *command_base = "python "; + const char *command_fmt = "%s%s"; + + const int len = snprintf(NULL, 0, command_fmt, command_base, args); + char *command = calloc(len + 1, sizeof(*command)); + if (!command) { + SYSERROR("Unable to allocate %d bytes for command string", len); + return -1; + } + + snprintf(command, len + 1, command_fmt, command_base, args); msg(STASIS_MSG_L3, "Executing: %s\n", command); - return system(command); + + const int result = system(command); + guard_free(command); + return result; } int pip_exec(const char *args) { - char command[PATH_MAX] = {0}; - snprintf(command, sizeof(command) - 1, "python -m pip %s", args); + const char *command_base = "python -m pip "; + const char *command_fmt = "%s%s"; + + const int len = snprintf(NULL, 0, command_fmt, command_base, args); + char *command = calloc(len + 1, sizeof(*command)); + if (!command) { + SYSERROR("Unable to allocate %d bytes for command string", len); + return -1; + } + snprintf(command, len + 1, command_fmt, command_base, args); msg(STASIS_MSG_L3, "Executing: %s\n", command); - return system(command); + + const int result = system(command); + guard_free(command); + return result; } static const char *PKG_ERROR_STR[] = { @@ -177,7 +206,6 @@ int 
pkg_index_provides(int mode, const char *index, const char *spec) { } int conda_exec(const char *args) { - char command[PATH_MAX]; const char *mamba_commands[] = { "build", "install", @@ -202,15 +230,24 @@ int conda_exec(const char *args) { } } - snprintf(command, sizeof(command) - 1, "%s %s", conda_as, args); + const char *command_fmt = "%s %s"; + const int len = snprintf(NULL, 0, command_fmt, conda_as, args); + char *command = calloc(len + 1, sizeof(*command)); + if (!command) { + return -1; + } + + snprintf(command, len + 1, command_fmt, conda_as, args); msg(STASIS_MSG_L3, "Executing: %s\n", command); - return system(command); + const int result = system(command); + guard_free(command); + return result; } static int conda_prepend_bin(const char *root) { char conda_bin[PATH_MAX] = {0}; - snprintf(conda_bin, sizeof(conda_bin) - 1, "%s/bin", root); + snprintf(conda_bin, sizeof(conda_bin), "%s/bin", root); if (env_manipulate_pathstr("PATH", conda_bin, PM_PREPEND | PM_ONCE)) { return -1; } @@ -220,7 +257,7 @@ static int conda_prepend_bin(const char *root) { static int conda_prepend_condabin(const char *root) { char conda_condabin[PATH_MAX] = {0}; - snprintf(conda_condabin, sizeof(conda_condabin) - 1, "%s/condabin", root); + snprintf(conda_condabin, sizeof(conda_condabin), "%s/condabin", root); if (env_manipulate_pathstr("PATH", conda_condabin, PM_PREPEND | PM_ONCE)) { return -1; } @@ -329,7 +366,7 @@ int conda_activate(const char *root, const char *env_name) { return -1; } - snprintf(command, sizeof(command) - 1, + snprintf(command, sizeof(command), "set -a\n" "source %s\n" "__conda_exe() (\n" @@ -499,10 +536,8 @@ int conda_setup_headless() { } int conda_env_create_from_uri(char *name, char *uri, char *python_version) { - char env_command[PATH_MAX]; char *uri_fs = NULL; - // Convert a bare system path to a file:// path if (!strstr(uri, "://")) { uri_fs = calloc(strlen(uri) + strlen("file://") + 1, sizeof(*uri_fs)); @@ -523,25 +558,50 @@ int 
conda_env_create_from_uri(char *name, char *uri, char *python_version) { // We'll create a new file with the same random bits, ending with .yml strcat(tempfile, ".yml"); char *errmsg = NULL; - download(uri_fs ? uri_fs : uri, tempfile, &errmsg); + const long http_code = download(uri_fs ? uri_fs : uri, tempfile, &errmsg); + if (HTTP_ERROR(http_code)) { + if (errmsg) { + fprintf(stderr, "download failed: %ld: %s\n", http_code, errmsg); + guard_free(errmsg); + } + guard_free(uri_fs); + return -1; + } guard_free(uri_fs); // Rewrite python version char spec[255] = {0}; - snprintf(spec, sizeof(spec) - 1, "- python=%s\n", python_version); + snprintf(spec, sizeof(spec), "- python=%s\n", python_version); file_replace_text(tempfile, "- python\n", spec, 0); - sprintf(env_command, "env create -n '%s' --file='%s'", name, tempfile); - int status = conda_exec(env_command); + const char *fmt = "env create -n '%s' --file='%s'"; + int len = snprintf(NULL, 0, fmt, name, tempfile); + char *env_command = calloc(len + 1, sizeof(*env_command)); + if (!env_command) { + return -1; + } + + snprintf(env_command, len + 1, fmt, name, tempfile); + const int status = conda_exec(env_command); unlink(tempfile); + guard_free(env_command); return status; } int conda_env_create(char *name, char *python_version, char *packages) { - char env_command[PATH_MAX]; - sprintf(env_command, "create -n %s python=%s %s", name, python_version, packages ? packages : ""); - return conda_exec(env_command); + const char *fmt = "create -n %s python=%s %s"; + const int len = snprintf(NULL, 0, fmt, name, python_version, packages ? packages : ""); + char *env_command = calloc(len + 1, sizeof(*env_command)); + if (!env_command) { + return -1; + } + + snprintf(env_command, len + 1, fmt, name, python_version, packages ? 
packages : ""); + const int result = conda_exec(env_command); + guard_free(env_command); + + return result; } int conda_env_remove(char *name) { diff --git a/src/lib/core/download.c b/src/lib/core/download.c index c3f8dca..b021860 100644 --- a/src/lib/core/download.c +++ b/src/lib/core/download.c @@ -41,10 +41,10 @@ long download(char *url, const char *filename, char **errmsg) { CURLcode curl_code = curl_easy_perform(c); SYSDEBUG("curl status code: %d", curl_code); if (curl_code != CURLE_OK) { - if (errmsg) { - strcpy(*errmsg, curl_easy_strerror(curl_code)); + if (!errmsg) { fprintf(stderr, "\nCURL ERROR: %s\n", curl_easy_strerror(curl_code)); } else if (!*errmsg) { + *errmsg = strdup(curl_easy_strerror(curl_code)); } else { - fprintf(stderr, "\nCURL ERROR: %s\n", curl_easy_strerror(curl_code)); + strncpy(*errmsg, curl_easy_strerror(curl_code), strlen(curl_easy_strerror(curl_code)) + 1); } goto failed; } diff --git a/src/lib/core/envctl.c b/src/lib/core/envctl.c index 0be3f89..b036611 100644 --- a/src/lib/core/envctl.c +++ b/src/lib/core/envctl.c @@ -17,6 +17,8 @@ struct EnvCtl *envctl_init() { } static int callback_builtin_nop(const void *a, const void *b) { + (void) a; // Unused + (void) b; // Unused return STASIS_ENVCTL_RET_SUCCESS; } @@ -58,6 +60,7 @@ size_t envctl_get_index(const struct EnvCtl *envctl, const char *name) { } void envctl_decode_index(size_t in_i, size_t *state, size_t *out_i, size_t *name_i) { + (void) name_i; *state = ((in_i >> 63L) & 1); *out_i = in_i & 0xffffffffL; } diff --git a/src/lib/core/environment.c b/src/lib/core/environment.c index f5e8566..7ece5e6 100644 --- a/src/lib/core/environment.c +++ b/src/lib/core/environment.c @@ -106,14 +106,13 @@ void runtime_export(RuntimeEnv *env, char **keys) { if (keys != NULL) { for (size_t j = 0; keys[j] != NULL; j++) { if (strcmp(keys[j], key) == 0) { - //sprintf(output, "%s=\"%s\"\n%s %s", key, value ? value : "", export_command, key); - sprintf(output, "%s %s=\"%s\"", export_command, key, value ?
value : ""); + snprintf(output, sizeof(output), "%s %s=\"%s\"", export_command, key, value ? value : ""); puts(output); } } } else { - sprintf(output, "%s %s=\"%s\"", export_command, key, value ? value : ""); + snprintf(output, sizeof(output), "%s %s=\"%s\"", export_command, key, value ? value : ""); puts(output); } guard_free(value); @@ -178,7 +177,7 @@ int runtime_replace(RuntimeEnv **dest, char **src) { } /** - * Determine whether or not a key exists in the runtime environment + * Determine whether a key exists in the runtime environment * * Example: * @@ -245,7 +244,14 @@ char *runtime_get(RuntimeEnv *env, const char *key) { ssize_t key_offset = runtime_contains(env, key); if (key_offset != -1) { char **pair = split(strlist_item(env, key_offset), "=", 0); + if (!pair) { + return NULL; + } result = join(&pair[1], "="); + if (!result) { + guard_array_free(pair); + return NULL; + } guard_array_free(pair); } return result; @@ -285,8 +291,7 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) { // If there's no environment variables to process return the input string if (strchr(input, delim) == NULL) { - //return strdup(input); - return input; + return strdup(input); } expanded = calloc(STASIS_BUFSIZ, sizeof(char)); @@ -336,7 +341,10 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) { if (env) { tmp = runtime_get(env, var); } else { - tmp = getenv(var); + const char *v = getenv(var); + if (v) { + tmp = strdup(v); + } } if (tmp == NULL) { // This mimics shell behavior in general. 
@@ -348,9 +356,7 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) { } // Append expanded environment variable to output strncat(expanded, tmp, STASIS_BUFSIZ - 1); - if (env) { - guard_free(tmp); - } + guard_free(tmp); } // Nothing to do so append input to output @@ -400,13 +406,28 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) { * @param _value New environment variable value */ void runtime_set(RuntimeEnv *env, const char *_key, char *_value) { + const char *sep = "="; if (_key == NULL) { return; } + const ssize_t key_offset = runtime_contains(env, _key); char *key = strdup(_key); - ssize_t key_offset = runtime_contains(env, key); + if (!key) { + SYSERROR("%s", "unable to allocate memory for key"); + exit(1); + } char *value = runtime_expand_var(env, _value); - char *now = join((char *[]) {key, value, NULL}, "="); + if (!value) { + SYSERROR("%s", "unable to allocate memory for value"); + exit(1); + } + + lstrip(value); + char *now = join((char *[]) {key, value, NULL}, sep); + if (!now) { + SYSERROR("%s", "unable to allocate memory for join"); + exit(1); + } if (key_offset < 0) { strlist_append(&env, now); @@ -415,6 +436,7 @@ void runtime_set(RuntimeEnv *env, const char *_key, char *_value) { } guard_free(now); guard_free(key); + guard_free(value); } /** @@ -424,6 +446,10 @@ void runtime_set(RuntimeEnv *env, const char *_key, char *_value) { void runtime_apply(RuntimeEnv *env) { for (size_t i = 0; i < strlist_count(env); i++) { char **pair = split(strlist_item(env, i), "=", 1); + if (!pair) { + SYSERROR("%s", "unable to allocate memory for runtime_apply"); + return; + } setenv(pair[0], pair[1], 1); guard_array_free(pair); } diff --git a/src/lib/core/github.c b/src/lib/core/github.c index c195a28..f0c5199 100644 --- a/src/lib/core/github.c +++ b/src/lib/core/github.c @@ -109,9 +109,12 @@ int get_github_release_notes(const char *api_token, const char *repo, const char if (last_char == ',') { trim++; } - data_offset[strlen(data_offset) - trim] = 0; 
+ // Truncate the trimmed bytes + memset(&data_offset[strlen(data_offset) - trim], 0, trim); // Extract release notes - *output = strdup(data_offset); + *output = calloc(strlen(data_offset) + 1, sizeof(**output)); + // Copy output (including terminator) + strncpy(*output, data_offset, strlen(data_offset) + 1); } else if ((data_offset = strstr(line, field_message))) { // Skip past the message field data_offset += strlen(field_message); diff --git a/src/lib/core/globals.c b/src/lib/core/globals.c index d84e799..834213b 100644 --- a/src/lib/core/globals.c +++ b/src/lib/core/globals.c @@ -41,8 +41,10 @@ struct STASIS_GLOBAL globals = { .enable_testing = true, ///< Toggle [test] block "script" execution. "script_setup" always executes. .enable_rewrite_spec_stage_2 = true, ///< Leave template stings in output files .enable_parallel = true, ///< Toggle testing in parallel + .enable_task_logging = true, ///< Toggle logging for multiprocess tasks .parallel_fail_fast = false, ///< Kill ALL multiprocessing tasks immediately on error .pool_status_interval = 30, ///< Report "Task is running" + .task_timeout = 0, ///< Time in seconds before task is terminated }; void globals_free() { diff --git a/src/lib/core/include/conda.h b/src/lib/core/include/conda.h index ea8613f..f3d481c 100644 --- a/src/lib/core/include/conda.h +++ b/src/lib/core/include/conda.h @@ -38,7 +38,7 @@ struct MicromambaInfo { * @param ... variadic arguments * @return exit code */ -int micromamba(struct MicromambaInfo *info, char *command, ...); +int micromamba(const struct MicromambaInfo *info, char *command, ...); /** * Execute Python diff --git a/src/lib/core/include/copy.h b/src/lib/core/include/copy.h index 0f92ddd..1eb5219 100644 --- a/src/lib/core/include/copy.h +++ b/src/lib/core/include/copy.h @@ -1,5 +1,6 @@ //! 
@file copy.h #ifndef STASIS_COPY_H +#define STASIS_COPY_H #include <stdio.h> #include <stdlib.h> diff --git a/src/lib/core/include/core.h b/src/lib/core/include/core.h index 35a9506..5a3fa85 100644 --- a/src/lib/core/include/core.h +++ b/src/lib/core/include/core.h @@ -15,7 +15,7 @@ #define STASIS_NAME_MAX 255 #define STASIS_DIRSTACK_MAX 1024 #define STASIS_TIME_STR_MAX 128 -#define HTTP_ERROR(X) X >= 400 +#define HTTP_ERROR(X) (X >= 400 || X < 0) #include "config.h" #include "core_mem.h" @@ -42,6 +42,7 @@ struct STASIS_GLOBAL { bool enable_overwrite; //!< Enable release file clobbering bool enable_rewrite_spec_stage_2; //!< Enable automatic @STR@ replacement in output files bool enable_parallel; //!< Enable testing in parallel + bool enable_task_logging; //!< Enable logging task output to a file long cpu_limit; //!< Limit parallel processing to n cores (default: max - 1) long parallel_fail_fast; //!< Fail immediately on error int pool_status_interval; //!< Report "Task is running" every n seconds @@ -50,6 +51,7 @@ struct STASIS_GLOBAL { char *tmpdir; //!< Path to temporary storage directory char *conda_install_prefix; //!< Path to install conda char *sysconfdir; //!< Path where STASIS reads its configuration files (mission directory, etc) + int task_timeout; ///< Time in seconds before task is terminated struct { char *tox_posargs; char *conda_reactivate; diff --git a/src/lib/core/include/multiprocessing.h b/src/lib/core/include/multiprocessing.h index ff674e9..874777c 100644 --- a/src/lib/core/include/multiprocessing.h +++ b/src/lib/core/include/multiprocessing.h @@ -3,32 +3,36 @@ #define STASIS_MULTIPROCESSING_H #include "core.h" +#include "sem.h" +#include "timespec.h" #include <signal.h> #include <sys/wait.h> -#include <semaphore.h> #include <sys/mman.h> #include <fcntl.h> #include <sys/stat.h> +#include <math.h> + +struct MultiProcessingTimer { + struct timespec t_start; + struct timespec t_stop; + double duration; +}; struct MultiProcessingTask { pid_t pid; 
///< Program PID pid_t parent_pid; ///< Program PID (parent process) int status; ///< Child process exit status int signaled_by; ///< Last signal received, if any - time_t _now; ///< Current time - time_t _seconds; ///< Time elapsed since status interval (used by MultiprocessingPool.status_interval) + int timeout; ///< Seconds to elapse before killing the process time_t _startup; ///< Time elapsed since task started - long elapsed; ///< Total time elapsed in seconds char ident[255]; ///< Identity of the pool task char *cmd; ///< Shell command(s) to be executed size_t cmd_len; ///< Length of command string (for mmap/munmap) char working_dir[PATH_MAX]; ///< Path to directory `cmd` should be executed in char log_file[PATH_MAX]; ///< Full path to stdout/stderr log file char parent_script[PATH_MAX]; ///< Path to temporary script executing the task - struct { - struct timespec t_start; - struct timespec t_stop; - } time_data; ///< Wall-time counters + struct MultiProcessingTimer time_data; ///< Wall-time counters + struct MultiProcessingTimer interval_data; ///< Progress report counters }; struct MultiProcessingPool { @@ -38,6 +42,7 @@ struct MultiProcessingPool { char ident[255]; ///< Identity of task pool char log_root[PATH_MAX]; ///< Base directory to store stderr/stdout log files int status_interval; ///< Report a pooled task is "running" every n seconds + struct Semaphore semaphore; }; /// A multiprocessing task's initial state (i.e. "FAIL") diff --git a/src/lib/core/include/sem.h b/src/lib/core/include/sem.h new file mode 100644 index 0000000..b8f9a39 --- /dev/null +++ b/src/lib/core/include/sem.h @@ -0,0 +1,62 @@ +/** +* @file sem.h +*/ +#ifndef STASIS_SEMAPHORE_H +#define STASIS_SEMAPHORE_H + +#include "core.h" +#include <semaphore.h> +#if defined(STASIS_OS_DARWIN) +// Darwin's sem_open() limits the path length to PSEMNAMLEN +// even though it isn't used directly. 
+#include <sys/posix_sem.h> // PSEMNAMLEN +#endif + +struct Semaphore { + sem_t *sem; + char name[STASIS_NAME_MAX]; +}; + +/** + * Initialize a cross-platform semaphore (Linux/Darwin) + * + * @code c + * #include "sem.h" + * + * int main(int argc, char *argv[]) { + * struct Semaphore s; + * if (semaphore_init(&s, "mysem", 1)) { + * perror("semaphore_init failed"); + * exit(1); + * } + * if (semaphore_wait(&s)) { + * perror("semaphore_wait failed"); + * exit(1); + * } + * + * // + * // Critical section + * // CODE HERE + * // + * + * if (semaphore_post(&s)) { + * perror("semaphore_post failed"); + * exit(1); + * } + * + * semaphore_destroy(&s); + * } + * @endcode + * + * @param s a pointer to `Semaphore` + * @param name of the semaphore + * @param value initial value of the semaphore + * @return -1 on error + * @return 0 on success + */ +int semaphore_init(struct Semaphore *s, const char *name, int value); +int semaphore_wait(struct Semaphore *s); +int semaphore_post(struct Semaphore *s); +void semaphore_destroy(struct Semaphore *s); + +#endif //STASIS_SEMAPHORE_H
\ No newline at end of file diff --git a/src/lib/core/include/str.h b/src/lib/core/include/str.h index bb96db0..be497ed 100644 --- a/src/lib/core/include/str.h +++ b/src/lib/core/include/str.h @@ -293,18 +293,27 @@ int isdigit_s(const char *s); char *tolower_s(char *s); /** - * Return a copy of the input string with "." characters removed + * Reduce a version string to the major[minor] format used by Python * - * ~~~{.c} - * char *version = strdup("1.2.3"); - * char *version_short = to_short_version(str); - * // version_short is "123" - * free(version_short); + * @code{.c} + * #include <stdio.h> + * #include "str.h" * - * ~~~ + * int main(int argc, char *argv[]) { + * char python_version[] = "3.13.3" + * char *python_short_version = to_short_version(python_version); // "313" + * if (!python_short_version) { + * perror("unable to allocate memory for shortened python version"); + * return 1; + * } + * free(python_short_version); + * return 0; + * } + * @endcode * - * @param s input string - * @return pointer to new string + * @param s python version string + * @return the shortened version string + * @return NULL on error */ char *to_short_version(const char *s); diff --git a/src/lib/core/include/timespec.h b/src/lib/core/include/timespec.h new file mode 100644 index 0000000..3f4b9a7 --- /dev/null +++ b/src/lib/core/include/timespec.h @@ -0,0 +1,71 @@ +/* Functions for working with timespec structures + * Written by Daniel Collins (2017-2021) + * timespec_mod by Alex Forencich (2019) + * Various contributions by Ingo Albrecht (2021) + * + * This is free and unencumbered software released into the public domain. + * + * Anyone is free to copy, modify, publish, use, compile, sell, or + * distribute this software, either in source code form or as a compiled + * binary, for any purpose, commercial or non-commercial, and by any + * means. 
+ * + * In jurisdictions that recognize copyright laws, the author or authors + * of this software dedicate any and all copyright interest in the + * software to the public domain. We make this dedication for the benefit + * of the public at large and to the detriment of our heirs and + * successors. We intend this dedication to be an overt act of + * relinquishment in perpetuity of all present and future rights to this + * software under copyright law. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * For more information, please refer to <http://unlicense.org/> +*/ + +#ifndef DAN_TIMESPEC_H +#define DAN_TIMESPEC_H + +#include <stdbool.h> +#include <sys/time.h> +#include <time.h> + +#ifdef __cplusplus +extern "C" { +#endif + +struct timespec timespec_add(struct timespec ts1, struct timespec ts2); +struct timespec timespec_sub(struct timespec ts1, struct timespec ts2); +struct timespec timespec_mod(struct timespec ts1, struct timespec ts2); + +struct timespec timespec_min(struct timespec ts1, struct timespec ts2); +struct timespec timespec_max(struct timespec ts1, struct timespec ts2); +struct timespec timespec_clamp(struct timespec ts1, struct timespec min, struct timespec max); + +int timespec_cmp(struct timespec ts1, struct timespec ts2); +bool timespec_eq(struct timespec ts1, struct timespec ts2); +bool timespec_gt(struct timespec ts1, struct timespec ts2); +bool timespec_ge(struct timespec ts1, struct timespec ts2); +bool timespec_lt(struct timespec ts1, struct timespec ts2); +bool timespec_le(struct timespec ts1, struct timespec ts2); + 
+struct timespec timespec_from_double(double s); +double timespec_to_double(struct timespec ts); +struct timespec timespec_from_timeval(struct timeval tv); +struct timeval timespec_to_timeval(struct timespec ts); +struct timespec timespec_from_ms(long milliseconds); +long timespec_to_ms(struct timespec ts); + +struct timespec timespec_normalise(struct timespec ts); + +#ifdef __cplusplus +} +#endif + +#endif /* !DAN_TIMESPEC_H */ diff --git a/src/lib/core/include/utils.h b/src/lib/core/include/utils.h index 1906808..ea98faf 100644 --- a/src/lib/core/include/utils.h +++ b/src/lib/core/include/utils.h @@ -417,4 +417,57 @@ int gen_file_extension_str(char *filename, const char *extension); * Remove [extra]s from a spec string */ char *remove_extras(char *s); + +void debug_hexdump(char *data, int len); + +/** + * Realloc helper + * + * @code{.c} + * #include <stdio.h> + * #include <stdlib.h> + * #include <string.h> + * #include "utils.h" + * + * int main(int argc, char *argv[]) { + * size_t sz = 10; + * char *data = calloc(sz, sizeof(*data)); + * + * // populate data + * strncat(data, "/path/to/", sz - 1); + * + * // Double the allocation size for data + * if (grow(sz * 2, &sz, &data)) { + * // memory error + * } + * + * // sz is now 20 + * strncat(data, "filename", sz - 1 - strlen(data)); + * + * puts(data); + * // output: "/path/to/filename" + * } + * @endcode + * + * @param size_new increase by `size_new` bytes + * @param size_orig address of variable containing the original allocation size (modified) + * @param data address to write data + * @return 0 on success + * @return -1 on error + */ +int grow(size_t size_new, size_t *size_orig, char **data); + +int in_ascii_range(char c, char lower, char upper); + +#define GIT_HASH_LEN 40 +int is_git_sha(char const *hash); + +int check_python_package_dependencies(const char *srcdir); + +void seconds_to_human_readable(int v, char *result, size_t maxlen); + +#define STR_TO_TIMEOUT_NEGATIVE (-1) +#define 
STR_TO_TIMEOUT_INVALID_TIME_SCALE (-2) +int str_to_timeout(char *s); + #endif //STASIS_UTILS_H diff --git a/src/lib/core/multiprocessing.c b/src/lib/core/multiprocessing.c index 0cf251e..298484a 100644 --- a/src/lib/core/multiprocessing.c +++ b/src/lib/core/multiprocessing.c @@ -4,11 +4,62 @@ /// The sum of all tasks started by mp_task() size_t mp_global_task_count = 0; +static double get_duration(const struct timespec stop, const struct timespec start) { + const struct timespec result = timespec_sub(stop, start); + return timespec_to_double(result); +} + +static double get_task_duration(const struct MultiProcessingTask *task) { + const struct timespec *start = &task->time_data.t_start; + const struct timespec *stop = &task->time_data.t_stop; + return get_duration(*stop, *start); +} + +static double get_task_interval_duration(const struct MultiProcessingTask *task) { + const struct timespec *start = &task->interval_data.t_start; + const struct timespec *stop = &task->interval_data.t_stop; + return get_duration(*stop, *start); +} + +static void update_task_interval_start(struct MultiProcessingTask *task) { + // Record the task stop time + if (clock_gettime(CLOCK_REALTIME, &task->interval_data.t_start) < 0) { + perror("clock_gettime"); + exit(1); + } +} + +static void update_task_interval_elapsed(struct MultiProcessingTask *task) { + // Record the interval stop time + if (clock_gettime(CLOCK_REALTIME, &task->interval_data.t_stop) < 0) { + perror("clock_gettime"); + exit(1); + } + task->interval_data.duration = get_task_interval_duration(task); +} + +static void update_task_start(struct MultiProcessingTask *task) { + // Record the task start time + if (clock_gettime(CLOCK_REALTIME, &task->time_data.t_start) < 0) { + perror("clock_gettime"); + exit(1); + } +} +static void update_task_elapsed(struct MultiProcessingTask *task) { + // Record the task stop time + if (clock_gettime(CLOCK_REALTIME, &task->time_data.t_stop) < 0) { + perror("clock_gettime"); + exit(1); + } + 
task->time_data.duration = get_task_duration(task); +} + static struct MultiProcessingTask *mp_pool_next_available(struct MultiProcessingPool *pool) { return &pool->task[pool->num_used]; } int child(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) { + (void) pool; FILE *fp_log = NULL; // The task starts inside the requested working directory @@ -17,17 +68,14 @@ int child(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) { exit(1); } - // Record the task start time - if (clock_gettime(CLOCK_REALTIME, &task->time_data.t_start) < 0) { - perror("clock_gettime"); - exit(1); - } - // Redirect stdout and stderr to the log file fflush(stdout); fflush(stderr); + // Set log file name - sprintf(task->log_file + strlen(task->log_file), "task-%zu-%d.log", mp_global_task_count, task->parent_pid); + if (globals.enable_task_logging) { + sprintf(task->log_file + strlen(task->log_file), "task-%zu-%d.log", mp_global_task_count, task->parent_pid); + } fp_log = freopen(task->log_file, "w+", stdout); if (!fp_log) { fprintf(stderr, "unable to open '%s' for writing: %s\n", task->log_file, strerror(errno)); @@ -60,13 +108,18 @@ int child(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) { } int parent(struct MultiProcessingPool *pool, struct MultiProcessingTask *task, pid_t pid, int *child_status) { + // Record the task start time + update_task_start(task); + printf("[%s:%s] Task started (pid: %d)\n", pool->ident, task->ident, pid); // Give the child process access to our PID value task->pid = pid; task->parent_pid = pid; + semaphore_wait(&pool->semaphore); mp_global_task_count++; + semaphore_post(&pool->semaphore); // Check child's status pid_t code = waitpid(pid, child_status, WUNTRACED | WCONTINUED | WNOHANG); @@ -79,14 +132,22 @@ int parent(struct MultiProcessingPool *pool, struct MultiProcessingTask *task, p static int mp_task_fork(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) { SYSDEBUG("Preparing to fork() 
child task %s:%s", pool->ident, task->ident); + semaphore_wait(&pool->semaphore); pid_t pid = fork(); + int parent_status = 0; int child_status = 0; if (pid == -1) { return -1; - } else if (pid == 0) { + } + if (pid == 0) { + semaphore_post(&pool->semaphore); child(pool, task); + } else { + parent_status = parent(pool, task, pid, &child_status); + fflush(stdout); + fflush(stderr); } - return parent(pool, task, pid, &child_status); + return parent_status; } struct MultiProcessingTask *mp_pool_task(struct MultiProcessingPool *pool, const char *ident, char *working_dir, char *cmd) { @@ -109,8 +170,12 @@ struct MultiProcessingTask *mp_pool_task(struct MultiProcessingPool *pool, const // Set log file path memset(slot->log_file, 0, sizeof(*slot->log_file)); - strcat(slot->log_file, pool->log_root); - strcat(slot->log_file, "/"); + if (globals.enable_task_logging) { + strcat(slot->log_file, pool->log_root); + strcat(slot->log_file, "/"); + } else { + strcpy(slot->log_file, "/dev/stdout"); + } // Set working directory if (isempty(working_dir)) { @@ -151,27 +216,17 @@ struct MultiProcessingTask *mp_pool_task(struct MultiProcessingPool *pool, const memset(slot->cmd, 0, slot->cmd_len); strncpy(slot->cmd, cmd, slot->cmd_len); - return slot; -} + // Set task timeout + slot->timeout = globals.task_timeout; -static void get_task_duration(struct MultiProcessingTask *task, struct timespec *result) { - // based on the timersub() macro in time.h - // This implementation uses timespec and increases the resolution from microseconds to nanoseconds. 
- struct timespec *start = &task->time_data.t_start; - struct timespec *stop = &task->time_data.t_stop; - result->tv_sec = (stop->tv_sec - start->tv_sec); - result->tv_nsec = (stop->tv_nsec - start->tv_nsec); - if (result->tv_nsec < 0) { - --result->tv_sec; - result->tv_nsec += 1000000000L; - } + return slot; } void mp_pool_show_summary(struct MultiProcessingPool *pool) { print_banner("=", 79); printf("Pool execution summary for \"%s\"\n", pool->ident); print_banner("=", 79); - printf("STATUS PID DURATION IDENT\n"); + printf("STATUS PID DURATION IDENT\n"); for (size_t i = 0; i < pool->num_used; i++) { struct MultiProcessingTask *task = &pool->task[i]; char status_str[10] = {0}; @@ -189,10 +244,10 @@ void mp_pool_show_summary(struct MultiProcessingPool *pool) { strcpy(status_str, "FAIL"); } - struct timespec duration; - get_task_duration(task, &duration); - long diff = duration.tv_sec + duration.tv_nsec / 1000000000L; - printf("%-4s %10d %7lds %-10s\n", status_str, task->parent_pid, diff, task->ident) ; + char duration[255] = {0}; + seconds_to_human_readable(task->time_data.duration, duration, sizeof(duration)); + printf("%-4s %10d %10s %-10s\n", status_str, task->parent_pid, duration, task->ident) ; + //printf("%-4s %10d %7lds %-10s\n", status_str, task->parent_pid, task->elapsed, task->ident) ; } puts(""); } @@ -208,6 +263,7 @@ static int show_log_contents(FILE *stream, struct MultiProcessingTask *task) { memset(buf, 0, sizeof(buf)); } fprintf(stream, "\n"); + fflush(stream); fclose(fp); return 0; } @@ -223,32 +279,39 @@ int mp_pool_kill(struct MultiProcessingPool *pool, int signum) { if (slot->pid > 0) { int status; printf("Sending signal %d to task '%s' (pid: %d)\n", signum, slot->ident, slot->pid); + semaphore_wait(&pool->semaphore); status = kill(slot->pid, signum); + semaphore_post(&pool->semaphore); if (status && errno != ESRCH) { fprintf(stderr, "Task '%s' (pid: %d) did not respond: %s\n", slot->ident, slot->pid, strerror(errno)); } else { // Wait for 
process to handle the signal, then set the status accordingly if (waitpid(slot->pid, &status, 0) >= 0) { slot->signaled_by = WTERMSIG(status); - // Record the task stop time - if (clock_gettime(CLOCK_REALTIME, &slot->time_data.t_stop) < 0) { - perror("clock_gettime"); - exit(1); - } + semaphore_wait(&pool->semaphore); + update_task_elapsed(slot); + semaphore_post(&pool->semaphore); // We are short-circuiting the normal flow, and the process is now dead, so mark it as such SYSDEBUG("Marking slot %zu: UNUSED", i); slot->pid = MP_POOL_PID_UNUSED; } } } - if (!access(slot->log_file, F_OK)) { - SYSDEBUG("Removing log file: %s", slot->log_file); - remove(slot->log_file); + if (globals.enable_task_logging) { + semaphore_wait(&pool->semaphore); + if (!access(slot->log_file, F_OK)) { + SYSDEBUG("Removing log file: %s", slot->log_file); + remove(slot->log_file); + } + semaphore_post(&pool->semaphore); } + + semaphore_wait(&pool->semaphore); if (!access(slot->parent_script, F_OK)) { SYSDEBUG("Removing runner script: %s", slot->parent_script); remove(slot->parent_script); } + semaphore_post(&pool->semaphore); } return 0; } @@ -268,6 +331,7 @@ int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) { } for (size_t i = lower_i; i < upper_i; i++) { + char duration[255] = {0}; struct MultiProcessingTask *slot = &pool->task[i]; if (slot->status == MP_POOL_TASK_STATUS_INITIAL) { slot->_startup = time(NULL); @@ -295,32 +359,50 @@ int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) { // Is the process finished? 
pid_t pid = waitpid(slot->pid, &status, WNOHANG | WUNTRACED | WCONTINUED); - int task_ended = WIFEXITED(status); - int task_ended_by_signal = WIFSIGNALED(status); - int task_stopped = WIFSTOPPED(status); - int task_continued = WIFCONTINUED(status); - int status_exit = WEXITSTATUS(status); - int status_signal = WTERMSIG(status); - int status_stopped = WSTOPSIG(status); + + char progress[1024] = {0}; + const double percent = ((double) (tasks_complete + 1) / (double) pool->num_used) * 100; + snprintf(progress, sizeof(progress) - 1, "[%s:%s] [%3.1f%%]", pool->ident, slot->ident, percent); + + int task_timed_out = false; + if (slot->timeout) { + task_timed_out = slot->time_data.duration >= (double) slot->timeout; + if (task_timed_out && pid == 0 && slot->pid != 0) { + seconds_to_human_readable(slot->timeout, duration, sizeof(duration)); + printf("%s Task timed out after %s (pid: %d)\n", progress, duration, slot->pid); + if (kill(slot->pid, SIGKILL) == 0) { + status = SIGKILL; + } else { + SYSERROR("Timeout reached, however pid %d could not be killed.", slot->pid); + return -1; + } + } + } + + const int task_ended = WIFEXITED(status); + const int task_ended_by_signal = WIFSIGNALED(status); + const int task_stopped = WIFSTOPPED(status); + const int task_continued = WIFCONTINUED(status); + const int status_exit = WEXITSTATUS(status); + const int status_signal = WTERMSIG(status); + const int status_stopped = WSTOPSIG(status); // Update status slot->status = status_exit; slot->signaled_by = status_signal; - char progress[1024] = {0}; if (pid > 0) { - double percent = ((double) (tasks_complete + 1) / (double) pool->num_used) * 100; - snprintf(progress, sizeof(progress) - 1, "[%s:%s] [%3.1f%%]", pool->ident, slot->ident, percent); - // The process ended in one the following ways // Note: SIGSTOP nor SIGCONT will not increment the tasks_complete counter if (task_stopped) { printf("%s Task was suspended (%d)\n", progress, status_stopped); continue; - } else if (task_continued) { 
+ } + if (task_continued) { printf("%s Task was resumed\n", progress); continue; - } else if (task_ended_by_signal) { + } + if (task_ended_by_signal) { printf("%s Task ended by signal %d (%s)\n", progress, status_signal, strsignal(status_signal)); tasks_complete++; } else if (task_ended) { @@ -330,19 +412,19 @@ int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) { fprintf(stderr, "%s Task state is unknown (0x%04X)\n", progress, status); } - // Show the log (always) - if (show_log_contents(stdout, slot)) { - perror(slot->log_file); - } - - // Record the task stop time - if (clock_gettime(CLOCK_REALTIME, &slot->time_data.t_stop) < 0) { - perror("clock_gettime"); - exit(1); + if (globals.enable_task_logging) { + // Show the log (always) + if (show_log_contents(stdout, slot)) { + perror(slot->log_file); + } } if (status >> 8 != 0 || (status & 0xff) != 0) { - fprintf(stderr, "%s Task failed after %lus\n", progress, slot->elapsed); + semaphore_wait(&pool->semaphore); + update_task_elapsed(slot); + semaphore_post(&pool->semaphore); + seconds_to_human_readable(slot->time_data.duration, duration, sizeof(duration)); + fprintf(stderr, "%s Task failed after %s\n", progress, duration); failures++; if (flags & MP_POOL_FAIL_FAST && pool->num_used > 1) { @@ -350,12 +432,15 @@ int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) { return -2; } } else { - printf("%s Task finished after %lus\n", progress, slot->elapsed); + seconds_to_human_readable(slot->time_data.duration, duration, sizeof(duration)); + printf("%s Task finished after %s\n", progress, duration); } // Clean up logs and scripts left behind by the task - if (remove(slot->log_file)) { - fprintf(stderr, "%s Unable to remove log file: '%s': %s\n", progress, slot->parent_script, strerror(errno)); + if (globals.enable_task_logging) { + if (remove(slot->log_file)) { + fprintf(stderr, "%s Unable to remove log file: '%s': %s\n", progress, slot->parent_script, strerror(errno)); 
+ } } if (remove(slot->parent_script)) { fprintf(stderr, "%s Unable to remove temporary script '%s': %s\n", progress, slot->parent_script, strerror(errno)); @@ -369,17 +454,27 @@ int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) { } else { // Track the number of seconds elapsed for each task. // When a task has executed for longer than status_intervals, print a status update - // _seconds represents the time between intervals, not the total runtime of the task - slot->_seconds = time(NULL) - slot->_now; - if (slot->_seconds > pool->status_interval) { - slot->_now = time(NULL); - slot->_seconds = 0; + // interval_elapsed represents the time between intervals, not the total runtime of the task + semaphore_wait(&pool->semaphore); + if (fabs(slot->interval_data.duration) > pool->status_interval) { + slot->interval_data.duration = 0.0; } - if (slot->_seconds == 0) { - printf("[%s:%s] Task is running (pid: %d, elapsed: %lus)\n", pool->ident, slot->ident, slot->parent_pid, slot->elapsed); + if (slot->interval_data.duration == 0.0) { + seconds_to_human_readable(slot->time_data.duration, duration, sizeof(duration)); + printf("[%s:%s] Task is running (pid: %d, elapsed: %s)\n", + pool->ident, slot->ident, slot->parent_pid, duration); + update_task_interval_start(slot); } + + update_task_interval_elapsed(slot); + semaphore_post(&pool->semaphore); + } + + if (!task_ended || !task_ended_by_signal) { + semaphore_wait(&pool->semaphore); + update_task_elapsed(slot); + semaphore_post(&pool->semaphore); } - slot->elapsed = time(NULL) - slot->_startup; } if (tasks_complete == pool->num_used) { @@ -392,11 +487,12 @@ int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) { } // Poll again after a short delay - sleep(1); + usleep(100000); } while (1); pool_deadlocked: puts(""); + return failures; } @@ -440,12 +536,24 @@ struct MultiProcessingPool *mp_pool_init(const char *ident, const char *log_root return NULL; } + char 
semaphore_name[255] = {0}; + snprintf(semaphore_name, sizeof(semaphore_name), "stasis_mp_%s", ident); + if (semaphore_init(&pool->semaphore, semaphore_name, 1) != 0) { + fprintf(stderr, "unable to initialize semaphore\n"); + mp_pool_free(&pool); + return NULL; + } + + pool->status_interval = 3; + return pool; } void mp_pool_free(struct MultiProcessingPool **pool) { - for (size_t i = 0; i < (*pool)->num_alloc; i++) { + if (!isempty((*pool)->semaphore.name)) { + semaphore_destroy(&(*pool)->semaphore); } + // Unmap all pool tasks if ((*pool)->task) { if ((*pool)->task->cmd) { diff --git a/src/lib/core/semaphore.c b/src/lib/core/semaphore.c new file mode 100644 index 0000000..579479a --- /dev/null +++ b/src/lib/core/semaphore.c @@ -0,0 +1,71 @@ +/** +* @file semaphore.c +*/ +#include <stdio.h> +#include <fcntl.h> + +#include "core_message.h" +#include "sem.h" +#include "utils.h" + +struct Semaphore *semaphores[1000] = {0}; +bool semaphore_handle_exit_ready = false; + +void semaphore_handle_exit() { + for (size_t i = 0; i < sizeof(semaphores) / sizeof(*semaphores); ++i) { + if (semaphores[i]) { + SYSDEBUG("%s", semaphores[i]->name); + semaphore_destroy(semaphores[i]); + } + } +} + +static void register_semaphore(struct Semaphore *s) { + struct Semaphore **cur = semaphores; + size_t i = 0; + while (i < sizeof(semaphores) / sizeof(*semaphores) && cur != NULL) { + cur++; + i++; + } + cur = &s; +} + +int semaphore_init(struct Semaphore *s, const char *name, const int value) { +#if defined(STASIS_OS_DARWIN) + // see: sem_open(2) + const size_t max_namelen = PSEMNAMLEN; +#else + // see: sem_open(3) + const size_t max_namelen = STASIS_NAME_MAX; +#endif + snprintf(s->name, max_namelen, "/%s", name); + s->sem = sem_open(s->name, O_CREAT, 0644, value); + if (s->sem == SEM_FAILED) { + return -1; + } + SYSDEBUG("%s", s->name); + register_semaphore(s); + if (!semaphore_handle_exit_ready) { + atexit(semaphore_handle_exit); + } + + return 0; +} + +int semaphore_wait(struct Semaphore 
*s) { + return sem_wait(s->sem); +} + +int semaphore_post(struct Semaphore *s) { + return sem_post(s->sem); +} + +void semaphore_destroy(struct Semaphore *s) { + if (!s) { + SYSDEBUG("%s", "would have crashed"); + return; + } + SYSDEBUG("%s", s->name); + sem_close(s->sem); + sem_unlink(s->name); +}
\ No newline at end of file diff --git a/src/lib/core/str.c b/src/lib/core/str.c index 1d0b268..9524886 100644 --- a/src/lib/core/str.c +++ b/src/lib/core/str.c @@ -640,12 +640,35 @@ char *tolower_s(char *s) { } char *to_short_version(const char *s) { - char *result = strdup(s); - if (!result) { - return NULL; + char *result = NULL; + if (num_chars(s, '.') > 1) { + char **version_data = split((char *) s, ".", 1); + if (!version_data) { + goto to_short_version_failed; + } + if (version_data[1]) { + char *dot = strchr(version_data[1], '.'); + if (dot) { + *dot = '\0'; + } + } + result = join(version_data, ""); + if (!result) { + guard_array_free(version_data); + goto to_short_version_failed; + } + guard_array_free(version_data); + } else { + result = strdup(s); + if (!result) { + goto to_short_version_failed; + } + strchrdel(result, "."); } - strchrdel(result, "."); + return result; + to_short_version_failed: + return NULL; } void unindent(char *s) { diff --git a/src/lib/core/template.c b/src/lib/core/template.c index ba45a5a..dd3c7a2 100644 --- a/src/lib/core/template.c +++ b/src/lib/core/template.c @@ -137,23 +137,6 @@ struct tplfunc_frame *tpl_getfunc(char *key) { return result; } -static int grow(size_t z, size_t *output_bytes, char **output) { - if (z >= *output_bytes) { - size_t new_size = *output_bytes + z + 1; - SYSDEBUG("template output buffer new size: %zu\n", new_size); - - char *tmp = realloc(*output, new_size); - if (!tmp) { - perror("realloc failed"); - return -1; - } else if (tmp != *output) { - *output = tmp; - } - *output_bytes = new_size; - } - return 0; -} - char *tpl_render(char *str) { if (!str) { return NULL; @@ -297,8 +280,8 @@ char *tpl_render(char *str) { output[z] = pos[off]; z++; } - SYSDEBUG("template output length: %zu", strlen(output)); - SYSDEBUG("template output bytes: %zu", output_bytes); + //SYSDEBUG("template output length: %zu", strlen(output)); + //SYSDEBUG("template output bytes: %zu", output_bytes); return output; } diff --git 
a/src/lib/core/timespec.c b/src/lib/core/timespec.c new file mode 100644 index 0000000..bd33993 --- /dev/null +++ b/src/lib/core/timespec.c @@ -0,0 +1,979 @@ +/* Functions for working with timespec structures + * Written by Daniel Collins (2017-2021) + * timespec_mod by Alex Forencich (2019) + * Various contributions by Ingo Albrecht (2021) + * + * This is free and unencumbered software released into the public domain. + * + * Anyone is free to copy, modify, publish, use, compile, sell, or + * distribute this software, either in source code form or as a compiled + * binary, for any purpose, commercial or non-commercial, and by any + * means. + * + * In jurisdictions that recognize copyright laws, the author or authors + * of this software dedicate any and all copyright interest in the + * software to the public domain. We make this dedication for the benefit + * of the public at large and to the detriment of our heirs and + * successors. We intend this dedication to be an overt act of + * relinquishment in perpetuity of all present and future rights to this + * software under copyright law. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * For more information, please refer to <http://unlicense.org/> +*/ + +/** \file timespec.c + * \brief Functions for working with timespec structures. + * + * This library aims to provide a comprehensive set of functions with + * well-defined behaviour that handle all edge cases (e.g. negative values) in + * a sensible manner. 
+ * + * Negative values are allowed in the tv_sec and/or tv_usec field of timespec + * structures, tv_usec is always relative to tv_sec, so mixing positive and + * negative values will produce consistent results: + * + * <PRE> + * { tv_sec = 1, tv_nsec = 500000000 } == 1.5 seconds + * { tv_sec = 1, tv_nsec = 0 } == 1.0 seconds + * { tv_sec = 1, tv_nsec = -500000000 } == 0.5 seconds + * { tv_sec = 0, tv_nsec = 500000000 } == 0.5 seconds + * { tv_sec = 0, tv_nsec = 0 } == 0.0 seconds + * { tv_sec = 0, tv_nsec = -500000000 } == -0.5 seconds + * { tv_sec = -1, tv_nsec = 500000000 } == -0.5 seconds + * { tv_sec = -1, tv_nsec = 0 } == -1.0 seconds + * { tv_sec = -1, tv_nsec = -500000000 } == -1.5 seconds + * </PRE> + * + * Furthermore, any timespec structure processed or returned by library functions + * is normalised according to the rules in timespec_normalise(). +*/ + +#include <limits.h> +#include <stdbool.h> +#include <sys/time.h> +#include <time.h> + +#include "timespec.h" + +#define NSEC_PER_SEC 1000000000 + +/** \fn struct timespec timespec_add(struct timespec ts1, struct timespec ts2) + * \brief Returns the result of adding two timespec structures. +*/ +struct timespec timespec_add(struct timespec ts1, struct timespec ts2) +{ + /* Normalise inputs to prevent tv_nsec rollover if whole-second values + * are packed in it. + */ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + ts1.tv_sec += ts2.tv_sec; + ts1.tv_nsec += ts2.tv_nsec; + + return timespec_normalise(ts1); +} + +/** \fn struct timespec timespec_sub(struct timespec ts1, struct timespec ts2) + * \brief Returns the result of subtracting ts2 from ts1. +*/ +struct timespec timespec_sub(struct timespec ts1, struct timespec ts2) +{ + /* Normalise inputs to prevent tv_nsec rollover if whole-second values + * are packed in it. 
+ */ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + ts1.tv_sec -= ts2.tv_sec; + ts1.tv_nsec -= ts2.tv_nsec; + + return timespec_normalise(ts1); +} + +/** \fn struct timespec timespec_mod(struct timespec ts1, struct timespec ts2) + * \brief Returns the remainder left over after dividing ts1 by ts2 (ts1%ts2). +*/ +struct timespec timespec_mod(struct timespec ts1, struct timespec ts2) +{ + int i = 0; + bool neg1 = false; + bool neg2 = false; + + /* Normalise inputs to prevent tv_nsec rollover if whole-second values + * are packed in it. + */ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + /* If ts2 is zero, just return ts1 + */ + if (ts2.tv_sec == 0 && ts2.tv_nsec == 0) + { + return ts1; + } + + /* If inputs are negative, flip and record sign + */ + if (ts1.tv_sec < 0 || ts1.tv_nsec < 0) + { + neg1 = true; + ts1.tv_sec = -ts1.tv_sec; + ts1.tv_nsec = -ts1.tv_nsec; + } + + if (ts2.tv_sec < 0 || ts2.tv_nsec < 0) + { + neg2 = true; + ts2.tv_sec = -ts2.tv_sec; + ts2.tv_nsec = -ts2.tv_nsec; + } + + /* Shift ts2 until it is larger than ts1 or is about to overflow + */ + while ((ts2.tv_sec < (LONG_MAX >> 1)) && timespec_ge(ts1, ts2)) + { + i++; + ts2.tv_nsec <<= 1; + ts2.tv_sec <<= 1; + if (ts2.tv_nsec > NSEC_PER_SEC) + { + ts2.tv_nsec -= NSEC_PER_SEC; + ts2.tv_sec++; + } + } + + /* Division by repeated subtraction + */ + while (i >= 0) + { + if (timespec_ge(ts1, ts2)) + { + ts1 = timespec_sub(ts1, ts2); + } + + if (i == 0) + { + break; + } + + i--; + if (ts2.tv_sec & 1) + { + ts2.tv_nsec += NSEC_PER_SEC; + } + ts2.tv_nsec >>= 1; + ts2.tv_sec >>= 1; + } + + /* If signs differ and result is nonzero, subtract once more to cross zero + */ + if (neg1 ^ neg2 && (ts1.tv_sec != 0 || ts1.tv_nsec != 0)) + { + ts1 = timespec_sub(ts1, ts2); + } + + /* Restore sign + */ + if (neg1) + { + ts1.tv_sec = -ts1.tv_sec; + ts1.tv_nsec = -ts1.tv_nsec; + } + + return ts1; +} + +/** \fn struct timespec timespec_min(struct timespec ts1, struct timespec 
ts2) + * \brief Return the lesser one of the two given timespec values. +*/ +struct timespec timespec_min(struct timespec ts1, struct timespec ts2) { + if(timespec_le(ts1, ts2)) { + return ts1; + } else { + return ts2; + } +} + +/** \fn struct timespec timespec_max(struct timespec ts1, struct timespec ts2) + * \brief Return the greater one of the two given timespec values. +*/ +struct timespec timespec_max(struct timespec ts1, struct timespec ts2) { + if(timespec_ge(ts1, ts2)) { + return ts1; + } else { + return ts2; + } +} + +/** \fn struct timespec timespec_clamp(struct timespec ts, struct timespec min, struct timespec max) + * \brief Clamp the value of TS between MIN and MAX. +*/ +struct timespec timespec_clamp(struct timespec ts, struct timespec min, struct timespec max) { + if(timespec_gt(ts, max)) { + return max; + } + if(timespec_lt(ts, min)) { + return min; + } + return ts; +} + +/** \fn int timespec_cmp(struct timespec ts1, struct timespec ts2) + * \brief Returns (1, 0, -1) if ts1 is (greater than, equal to, less than) to ts2. +*/ +int timespec_cmp(struct timespec ts1, struct timespec ts2) +{ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + if(ts1.tv_sec == ts2.tv_sec && ts1.tv_nsec == ts2.tv_nsec) + { + return 0; + } + else if((ts1.tv_sec > ts2.tv_sec) + || (ts1.tv_sec == ts2.tv_sec && ts1.tv_nsec > ts2.tv_nsec)) + { + return 1; + } + else { + return -1; + } +} + +/** \fn bool timespec_eq(struct timespec ts1, struct timespec ts2) + * \brief Returns true if the two timespec structures are equal. +*/ +bool timespec_eq(struct timespec ts1, struct timespec ts2) +{ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + return (ts1.tv_sec == ts2.tv_sec && ts1.tv_nsec == ts2.tv_nsec); +} + +/** \fn bool timespec_gt(struct timespec ts1, struct timespec ts2) + * \brief Returns true if ts1 is greater than ts2. 
+*/ +bool timespec_gt(struct timespec ts1, struct timespec ts2) +{ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + return (ts1.tv_sec > ts2.tv_sec || (ts1.tv_sec == ts2.tv_sec && ts1.tv_nsec > ts2.tv_nsec)); +} + +/** \fn bool timespec_ge(struct timespec ts1, struct timespec ts2) + * \brief Returns true if ts1 is greater than or equal to ts2. +*/ +bool timespec_ge(struct timespec ts1, struct timespec ts2) +{ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + return (ts1.tv_sec > ts2.tv_sec || (ts1.tv_sec == ts2.tv_sec && ts1.tv_nsec >= ts2.tv_nsec)); +} + +/** \fn bool timespec_lt(struct timespec ts1, struct timespec ts2) + * \brief Returns true if ts1 is less than ts2. +*/ +bool timespec_lt(struct timespec ts1, struct timespec ts2) +{ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + return (ts1.tv_sec < ts2.tv_sec || (ts1.tv_sec == ts2.tv_sec && ts1.tv_nsec < ts2.tv_nsec)); +} + +/** \fn bool timespec_le(struct timespec ts1, struct timespec ts2) + * \brief Returns true if ts1 is less than or equal to ts2. +*/ +bool timespec_le(struct timespec ts1, struct timespec ts2) +{ + ts1 = timespec_normalise(ts1); + ts2 = timespec_normalise(ts2); + + return (ts1.tv_sec < ts2.tv_sec || (ts1.tv_sec == ts2.tv_sec && ts1.tv_nsec <= ts2.tv_nsec)); +} + +/** \fn struct timespec timespec_from_double(double s) + * \brief Converts a fractional number of seconds to a timespec. +*/ +struct timespec timespec_from_double(double s) +{ + struct timespec ts = { + .tv_sec = s, + .tv_nsec = (s - (long)(s)) * NSEC_PER_SEC, + }; + + return timespec_normalise(ts); +} + +/** \fn double timespec_to_double(struct timespec ts) + * \brief Converts a timespec to a fractional number of seconds. +*/ +double timespec_to_double(struct timespec ts) +{ + return ((double)(ts.tv_sec) + ((double)(ts.tv_nsec) / NSEC_PER_SEC)); +} + +/** \fn struct timespec timespec_from_timeval(struct timeval tv) + * \brief Converts a timeval to a timespec. 
+*/ +struct timespec timespec_from_timeval(struct timeval tv) +{ + struct timespec ts = { + .tv_sec = tv.tv_sec, + .tv_nsec = tv.tv_usec * 1000 + }; + + return timespec_normalise(ts); +} + +/** \fn struct timeval timespec_to_timeval(struct timespec ts) + * \brief Converts a timespec to a timeval. +*/ +struct timeval timespec_to_timeval(struct timespec ts) +{ + ts = timespec_normalise(ts); + + struct timeval tv = { + .tv_sec = ts.tv_sec, + .tv_usec = ts.tv_nsec / 1000, + }; + + return tv; +} + +/** \fn struct timespec timespec_from_ms(long milliseconds) + * \brief Converts an integer number of milliseconds to a timespec. +*/ +struct timespec timespec_from_ms(long milliseconds) +{ + struct timespec ts = { + .tv_sec = (milliseconds / 1000), + .tv_nsec = (milliseconds % 1000) * 1000000, + }; + + return timespec_normalise(ts); +} + +/** \fn long timespec_to_ms(struct timespec ts) + * \brief Converts a timespec to an integer number of milliseconds. +*/ +long timespec_to_ms(struct timespec ts) +{ + return (ts.tv_sec * 1000) + (ts.tv_nsec / 1000000); +} + +/** \fn struct timespec timespec_normalise(struct timespec ts) + * \brief Normalises a timespec structure. + * + * Returns a normalised version of a timespec structure, according to the + * following rules: + * + * 1) If tv_nsec is >=1,000,000,00 or <=-1,000,000,000, flatten the surplus + * nanoseconds into the tv_sec field. + * + * 2) If tv_nsec is negative, decrement tv_sec and roll tv_nsec up to represent + * the same value attainable by ADDING nanoseconds to tv_sec. +*/ +struct timespec timespec_normalise(struct timespec ts) +{ + while(ts.tv_nsec >= NSEC_PER_SEC) + { + ++(ts.tv_sec); + ts.tv_nsec -= NSEC_PER_SEC; + } + + while(ts.tv_nsec <= -NSEC_PER_SEC) + { + --(ts.tv_sec); + ts.tv_nsec += NSEC_PER_SEC; + } + + if(ts.tv_nsec < 0) + { + /* Negative nanoseconds isn't valid according to POSIX. + * Decrement tv_sec and roll tv_nsec over. 
+ */ + + --(ts.tv_sec); + ts.tv_nsec = (NSEC_PER_SEC + ts.tv_nsec); + } + + return ts; +} + +#ifdef TEST +#include <stdio.h> + +#define TEST_NORMALISE(ts_sec, ts_nsec, expect_sec, expect_nsec) { \ + struct timespec in = { .tv_sec = ts_sec, .tv_nsec = ts_nsec }; \ + struct timespec got = timespec_normalise(in); \ + if(got.tv_sec != expect_sec || got.tv_nsec != expect_nsec) \ + { \ + printf("%s:%d: timespec_normalise({%ld, %ld}) returned wrong values\n", __FILE__, __LINE__, \ + (long)(ts_sec), (long)(ts_nsec)); \ + printf(" Expected: {%ld, %ld}\n", (long)(expect_sec), (long)(expect_nsec)); \ + printf(" Got: {%ld, %ld}\n", (long)(got.tv_sec), (long)(got.tv_nsec)); \ + ++result; \ + } \ +} + +#define TEST_BINOP(func, ts1_sec, ts1_nsec, ts2_sec, ts2_nsec, expect_sec, expect_nsec) { \ + struct timespec ts1 = { .tv_sec = ts1_sec, .tv_nsec = ts1_nsec }; \ + struct timespec ts2 = { .tv_sec = ts2_sec, .tv_nsec = ts2_nsec }; \ + struct timespec got = func(ts1, ts2); \ + if(got.tv_sec != expect_sec || got.tv_nsec != expect_nsec) \ + { \ + printf(#func "({%ld, %ld}, {%ld, %ld}) returned wrong values\n", \ + (long)(ts1_sec), (long)(ts1_nsec), (long)(ts2_sec), (long)(ts2_nsec)); \ + printf(" Expected: {%ld, %ld}\n", (long)(expect_sec), (long)(expect_nsec)); \ + printf(" Got: {%ld, %ld}\n", (long)(got.tv_sec), (long)(got.tv_nsec)); \ + ++result; \ + } \ +} + +#define TEST_TRINOP(func, ts1_sec, ts1_nsec, ts2_sec, ts2_nsec, ts3_sec, ts3_nsec, expect_sec, expect_nsec) { \ + struct timespec ts1 = { .tv_sec = ts1_sec, .tv_nsec = ts1_nsec }; \ + struct timespec ts2 = { .tv_sec = ts2_sec, .tv_nsec = ts2_nsec }; \ + struct timespec ts3 = { .tv_sec = ts3_sec, .tv_nsec = ts3_nsec }; \ + struct timespec got = func(ts1, ts2, ts3); \ + if(got.tv_sec != expect_sec || got.tv_nsec != expect_nsec) \ + { \ + printf(#func "({%ld, %ld}, {%ld, %ld}, {%ld, %ld}) returned wrong values\n", \ + (long)(ts1_sec), (long)(ts1_nsec), \ + (long)(ts2_sec), (long)(ts2_nsec), \ + (long)(ts3_sec), 
(long)(ts3_nsec)); \ + printf(" Expected: {%ld, %ld}\n", (long)(expect_sec), (long)(expect_nsec)); \ + printf(" Got: {%ld, %ld}\n", (long)(got.tv_sec), (long)(got.tv_nsec)); \ + ++result; \ + } \ +} + +#define TEST_TEST_FUNC(func, ts1_sec, ts1_nsec, ts2_sec, ts2_nsec, expect) { \ + struct timespec ts1 = { .tv_sec = ts1_sec, .tv_nsec = ts1_nsec }; \ + struct timespec ts2 = { .tv_sec = ts2_sec, .tv_nsec = ts2_nsec }; \ + int got = func(ts1, ts2); \ + if(got != expect) \ + { \ + printf("%s:%d: " #func "({%ld, %ld}, {%ld, %ld}) returned %d, expected %s\n", __FILE__, __LINE__, \ + (long)(ts1_sec), (long)(ts1_nsec), (long)(ts2_sec), (long)(ts2_nsec), \ + got, #expect); \ + ++result; \ + } \ +} + +#define TEST_FROM_DOUBLE(d_secs, expect_sec, expect_nsec) { \ + struct timespec got = timespec_from_double(d_secs); \ + if(got.tv_sec != expect_sec || got.tv_nsec != expect_nsec) \ + { \ + printf("%s:%d: timespec_from_double(%f) returned wrong values\n", __FILE__, __LINE__, (double)(d_secs)); \ + printf(" Expected: {%ld, %ld}\n", (long)(expect_sec), (long)(expect_nsec)); \ + printf(" Got: {%ld, %ld}\n", (long)(got.tv_sec), (long)(got.tv_nsec)); \ + ++result; \ + } \ +} + +#define TEST_TO_DOUBLE(ts_sec, ts_nsec, expect) { \ + struct timespec ts = { .tv_sec = ts_sec, .tv_nsec = ts_nsec }; \ + double got = timespec_to_double(ts); \ + if(got != expect) { \ + printf("%s:%d: timespec_to_double({%ld, %ld}) returned wrong value\n", __FILE__, __LINE__, \ + (long)(ts_sec), (long)(ts_nsec)); \ + printf(" Expected: %f\n", (double)(expect)); \ + printf(" Got: %f\n", got); \ + ++result; \ + } \ +} + +#define TEST_FROM_TIMEVAL(in_sec, in_usec, expect_sec, expect_nsec) { \ + struct timeval tv = { .tv_sec = in_sec, .tv_usec = in_usec }; \ + struct timespec got = timespec_from_timeval(tv); \ + if(got.tv_sec != expect_sec || got.tv_nsec != expect_nsec) \ + { \ + printf("%s:%d: timespec_from_timeval({%ld, %ld}) returned wrong values\n", __FILE__, __LINE__, \ + (long)(in_sec), (long)(in_usec)); \ + 
printf(" Expected: {%ld, %ld}\n", (long)(expect_sec), (long)(expect_nsec)); \ + printf(" Got: {%ld, %ld}\n", (long)(got.tv_sec), (long)(got.tv_nsec)); \ + ++result; \ + } \ +} + +#define TEST_TO_TIMEVAL(ts_sec, ts_nsec, expect_sec, expect_usec) { \ + struct timespec ts = { .tv_sec = ts_sec, .tv_nsec = ts_nsec }; \ + struct timeval got = timespec_to_timeval(ts); \ + if(got.tv_sec != expect_sec || got.tv_usec != expect_usec) \ + { \ + printf("%s:%d: timespec_to_timeval({%ld, %ld}) returned wrong values\n", __FILE__, __LINE__, \ + (long)(ts_sec), (long)(ts_nsec)); \ + printf(" Expected: {%ld, %ld}\n", (long)(expect_sec), (long)(expect_usec)); \ + printf(" Got: {%ld, %ld}\n", (long)(got.tv_sec), (long)(got.tv_usec)); \ + ++result; \ + } \ +} + +#define TEST_FROM_MS(msecs, expect_sec, expect_nsec) { \ + struct timespec got = timespec_from_ms(msecs); \ + if(got.tv_sec != expect_sec || got.tv_nsec != expect_nsec) \ + { \ + printf("%s:%d: timespec_from_ms(%ld) returned wrong values\n", __FILE__, __LINE__, (long)(msecs)); \ + printf(" Expected: {%ld, %ld}\n", (long)(expect_sec), (long)(expect_nsec)); \ + printf(" Got: {%ld, %ld}\n", (long)(got.tv_sec), (long)(got.tv_nsec)); \ + ++result; \ + } \ +} + +#define TEST_TO_MS(ts_sec, ts_nsec, expect) { \ + struct timespec ts = { .tv_sec = ts_sec, .tv_nsec = ts_nsec }; \ + long got = timespec_to_ms(ts); \ + if(got != expect) { \ + printf("%s:%d: timespec_to_ms({%ld, %ld}) returned wrong value\n", __FILE__, __LINE__, \ + (long)(ts_sec), (long)(ts_nsec)); \ + printf(" Expected: %ld\n", (long)(expect)); \ + printf(" Got: %ld\n", got); \ + ++result; \ + } \ +} + +int main() +{ + int result = 0; + + // timespec_add + + TEST_BINOP(timespec_add, 0,0, 0,0, 0,0); + TEST_BINOP(timespec_add, 0,0, 1,0, 1,0); + TEST_BINOP(timespec_add, 1,0, 0,0, 1,0); + TEST_BINOP(timespec_add, 1,0, 1,0, 2,0); + TEST_BINOP(timespec_add, 1,500000000, 1,0, 2,500000000); + TEST_BINOP(timespec_add, 1,0, 1,500000000, 2,500000000); + TEST_BINOP(timespec_add, 
1,500000000, 1,500000000, 3,0); + TEST_BINOP(timespec_add, 1,500000000, 1,499999999, 2,999999999); + TEST_BINOP(timespec_add, 1,500000000, 1,500000000, 3,0); + TEST_BINOP(timespec_add, 1,999999999, 1,999999999, 3,999999998); + TEST_BINOP(timespec_add, 0,500000000, 1,500000000, 2,0); + TEST_BINOP(timespec_add, 1,500000000, 0,500000000, 2,0); + + // timespec_sub + + TEST_BINOP(timespec_sub, 0,0, 0,0, 0,0); + TEST_BINOP(timespec_sub, 1,0, 0,0, 1,0); + TEST_BINOP(timespec_sub, 1,0, 1,0, 0,0); + TEST_BINOP(timespec_sub, 1,500000000, 0,500000000, 1,0); + TEST_BINOP(timespec_sub, 5,500000000, 2,999999999, 2,500000001); + TEST_BINOP(timespec_sub, 0,0, 1,0, -1,0); + TEST_BINOP(timespec_sub, 0,500000000, 1,500000000, -1,0); + TEST_BINOP(timespec_sub, 0,0, 1,500000000, -2,500000000); + TEST_BINOP(timespec_sub, 1,0, 1,500000000, -1,500000000); + TEST_BINOP(timespec_sub, 1,0, 1,499999999, -1,500000001); + + // timespec_mod + + TEST_BINOP(timespec_mod, 0,0, 0,0, 0,0); + TEST_BINOP(timespec_mod, 0,0, 1,0, 0,0); + TEST_BINOP(timespec_mod, 1,0, 0,0, 1,0); + TEST_BINOP(timespec_mod, 1,0, 1,0, 0,0); + TEST_BINOP(timespec_mod, 10,0, 1,0, 0,0); + TEST_BINOP(timespec_mod, 10,0, 3,0, 1,0); + TEST_BINOP(timespec_mod, 10,0, -3,0, -2,0); + TEST_BINOP(timespec_mod, -10,0, 3,0, 2,0); + TEST_BINOP(timespec_mod, -10,0, -3,0, -1,0); + TEST_BINOP(timespec_mod, 10,0, 5,0, 0,0); + TEST_BINOP(timespec_mod, 10,0, -5,0, 0,0); + TEST_BINOP(timespec_mod, -10,0, 5,0, 0,0); + TEST_BINOP(timespec_mod, -10,0, -5,0, 0,0); + TEST_BINOP(timespec_mod, 1,500000000, 0,500000000, 0,0); + TEST_BINOP(timespec_mod, 5,500000000, 2,999999999, 2,500000001); + TEST_BINOP(timespec_mod, 0,500000000, 1,500000000, 0,500000000); + TEST_BINOP(timespec_mod, 0,0, 1,500000000, 0,0); + TEST_BINOP(timespec_mod, 1,0, 1,500000000, 1,0); + TEST_BINOP(timespec_mod, 1,0, 0,1, 0,0); + TEST_BINOP(timespec_mod, 1,123456789, 0,1000, 0,789); + TEST_BINOP(timespec_mod, 1,0, 0,9999999, 0,100); + TEST_BINOP(timespec_mod, 12345,54321, 0,100001, 
0,5555); + TEST_BINOP(timespec_mod, LONG_MAX,0, 0,1, 0,0); + TEST_BINOP(timespec_mod, LONG_MAX,0, LONG_MAX,1, LONG_MAX,0); + + // timespec_clamp + + TEST_TRINOP(timespec_clamp, 0,0, 0,0, 0,0, 0,0); + + TEST_TRINOP(timespec_clamp, 1000,0, 2000,0, 3000,0, 2000,0); + TEST_TRINOP(timespec_clamp, 1500,0, 2000,0, 3000,0, 2000,0); + TEST_TRINOP(timespec_clamp, 1999,0, 2000,0, 3000,0, 2000,0); + TEST_TRINOP(timespec_clamp, 2000,0, 2000,0, 3000,0, 2000,0); + TEST_TRINOP(timespec_clamp, 2001,0, 2000,0, 3000,0, 2001,0); + TEST_TRINOP(timespec_clamp, 2250,0, 2000,0, 3000,0, 2250,0); + TEST_TRINOP(timespec_clamp, 2500,0, 2000,0, 3000,0, 2500,0); + TEST_TRINOP(timespec_clamp, 2750,0, 2000,0, 3000,0, 2750,0); + TEST_TRINOP(timespec_clamp, 2999,0, 2000,0, 3000,0, 2999,0); + TEST_TRINOP(timespec_clamp, 3000,0, 2000,0, 3000,0, 3000,0); + TEST_TRINOP(timespec_clamp, 3001,0, 2000,0, 3000,0, 3000,0); + TEST_TRINOP(timespec_clamp, 3500,0, 2000,0, 3000,0, 3000,0); + TEST_TRINOP(timespec_clamp, 4000,0, 2000,0, 3000,0, 3000,0); + + TEST_TRINOP(timespec_clamp, 0,1000, 0,2000, 0,3000, 0,2000); + TEST_TRINOP(timespec_clamp, 0,1500, 0,2000, 0,3000, 0,2000); + TEST_TRINOP(timespec_clamp, 0,1999, 0,2000, 0,3000, 0,2000); + TEST_TRINOP(timespec_clamp, 0,2000, 0,2000, 0,3000, 0,2000); + TEST_TRINOP(timespec_clamp, 0,2001, 0,2000, 0,3000, 0,2001); + TEST_TRINOP(timespec_clamp, 0,2250, 0,2000, 0,3000, 0,2250); + TEST_TRINOP(timespec_clamp, 0,2500, 0,2000, 0,3000, 0,2500); + TEST_TRINOP(timespec_clamp, 0,2750, 0,2000, 0,3000, 0,2750); + TEST_TRINOP(timespec_clamp, 0,2999, 0,2000, 0,3000, 0,2999); + TEST_TRINOP(timespec_clamp, 0,3000, 0,2000, 0,3000, 0,3000); + TEST_TRINOP(timespec_clamp, 0,3001, 0,2000, 0,3000, 0,3000); + TEST_TRINOP(timespec_clamp, 0,3500, 0,2000, 0,3000, 0,3000); + TEST_TRINOP(timespec_clamp, 0,4000, 0,2000, 0,3000, 0,3000); + + TEST_TRINOP(timespec_clamp,0,-1000, 0,-3000, 0,-2000, 0,-2000); + TEST_TRINOP(timespec_clamp,0,-1500, 0,-3000, 0,-2000, 0,-2000); + 
TEST_TRINOP(timespec_clamp,0,-1999, 0,-3000, 0,-2000, 0,-2000); + TEST_TRINOP(timespec_clamp,0,-3000, 0,-3000, 0,-2000, 0,-3000); + TEST_TRINOP(timespec_clamp,0,-2001, 0,-3000, 0,-2000, 0,-2001); + TEST_TRINOP(timespec_clamp,0,-2250, 0,-3000, 0,-2000, 0,-2250); + TEST_TRINOP(timespec_clamp,0,-2500, 0,-3000, 0,-2000, 0,-2500); + TEST_TRINOP(timespec_clamp,0,-2750, 0,-3000, 0,-2000, 0,-2750); + TEST_TRINOP(timespec_clamp,0,-2999, 0,-3000, 0,-2000, 0,-2999); + TEST_TRINOP(timespec_clamp,0,-2000, 0,-3000, 0,-2000, 0,-2000); + TEST_TRINOP(timespec_clamp,0,-3001, 0,-3000, 0,-2000, 0,-3000); + TEST_TRINOP(timespec_clamp,0,-3500, 0,-3000, 0,-2000, 0,-3000); + TEST_TRINOP(timespec_clamp,0,-2000, 0,-3000, 0,-2000, 0,-2000); + + TEST_TRINOP(timespec_clamp,0,-4000, 0,-3000, 0,3000, 0,-3000); + TEST_TRINOP(timespec_clamp,0,-3001, 0,-3000, 0,3000, 0,-3000); + TEST_TRINOP(timespec_clamp,0,-3000, 0,-3000, 0,3000, 0,-3000); + TEST_TRINOP(timespec_clamp,0,-2999, 0,-3000, 0,3000, 0,-2999); + TEST_TRINOP(timespec_clamp,0,-1500, 0,-3000, 0,3000, 0,-1500); + TEST_TRINOP(timespec_clamp,0, -1, 0,-3000, 0,3000, 0, -1); + TEST_TRINOP(timespec_clamp,0, 0, 0,-3000, 0,3000, 0, 0); + TEST_TRINOP(timespec_clamp,0, 1, 0,-3000, 0,3000, 0, 1); + TEST_TRINOP(timespec_clamp,0, 1500, 0,-3000, 0,3000, 0, 1500); + TEST_TRINOP(timespec_clamp,0, 2999, 0,-3000, 0,3000, 0, 2999); + TEST_TRINOP(timespec_clamp,0, 3000, 0,-3000, 0,3000, 0, 3000); + TEST_TRINOP(timespec_clamp,0, 3001, 0,-3000, 0,3000, 0, 3000); + TEST_TRINOP(timespec_clamp,0, 4000, 0,-3000, 0,3000, 0, 3000); + + // timespec_min + + TEST_BINOP(timespec_min, 0,0, 0,0, 0,0); + TEST_BINOP(timespec_min, 0,0, 1,0, 0,0); + TEST_BINOP(timespec_min, 1,0, 0,0, 0,0); + TEST_BINOP(timespec_min, 1,0, 1,0, 1,0); + TEST_BINOP(timespec_min, 10,0, 1,0, 1,0); + TEST_BINOP(timespec_min, 10,0, 3,0, 3,0); + TEST_BINOP(timespec_min, 10,0, -3,0, -3,0); + TEST_BINOP(timespec_min, -10,0, 3,0, -10,0); + TEST_BINOP(timespec_min, -10,0, -3,0, -10,0); + 
TEST_BINOP(timespec_min, 10,0, 5,0, 5,0); + TEST_BINOP(timespec_min, 10,0, -5,0, -5,0); + TEST_BINOP(timespec_min, -10,0, 5,0, -10,0); + TEST_BINOP(timespec_min, -10,0, -5,0, -10,0); + TEST_BINOP(timespec_min, 1,500000000, 0,500000000, 0,500000000); + TEST_BINOP(timespec_min, 5,500000000, 2,999999999, 2,999999999); + TEST_BINOP(timespec_min, 0,500000000, 1,500000000, 0,500000000); + TEST_BINOP(timespec_min, 0,0, 1,500000000, 0,0); + TEST_BINOP(timespec_min, 1,0, 1,500000000, 1,0); + TEST_BINOP(timespec_min, 1,0, 0,1, 0,1); + TEST_BINOP(timespec_min, 1,123456789, 0,1000, 0,1000); + TEST_BINOP(timespec_min, 1,0, 0,9999999, 0,9999999); + TEST_BINOP(timespec_min, 12345,54321, 0,100001, 0,100001); + TEST_BINOP(timespec_min, LONG_MIN,0, 0,1, LONG_MIN,0); + TEST_BINOP(timespec_min, LONG_MIN,0, 0,-1, LONG_MIN,0); + TEST_BINOP(timespec_min, LONG_MIN,0, LONG_MAX,0, LONG_MIN,0); + TEST_BINOP(timespec_min, LONG_MIN,0, LONG_MIN,0, LONG_MIN,0); + TEST_BINOP(timespec_min, LONG_MAX,0, 0,1, 0,1); + TEST_BINOP(timespec_min, LONG_MAX,0, 0,-1, 0,-1); + TEST_BINOP(timespec_min, LONG_MAX,0, LONG_MAX,0, LONG_MAX,0); + TEST_BINOP(timespec_min, LONG_MAX,0, LONG_MIN,0, LONG_MIN,0); + + // timespec_max + + TEST_BINOP(timespec_max, 0,0, 0,0, 0,0); + TEST_BINOP(timespec_max, 0,0, 1,0, 1,0); + TEST_BINOP(timespec_max, 1,0, 0,0, 1,0); + TEST_BINOP(timespec_max, 1,0, 1,0, 1,0); + TEST_BINOP(timespec_max, 10,0, 1,0, 10,0); + TEST_BINOP(timespec_max, 10,0, 3,0, 10,0); + TEST_BINOP(timespec_max, 10,0, -3,0, 10,0); + TEST_BINOP(timespec_max, -10,0, 3,0, 3,0); + TEST_BINOP(timespec_max, -10,0, -3,0, -3,0); + TEST_BINOP(timespec_max, 10,0, 5,0, 10,0); + TEST_BINOP(timespec_max, 10,0, -5,0, 10,0); + TEST_BINOP(timespec_max, -10,0, 5,0, 5,0); + TEST_BINOP(timespec_max, -10,0, -5,0, -5,0); + TEST_BINOP(timespec_max, 1,500000000, 0,500000000, 1,500000000); + TEST_BINOP(timespec_max, 5,500000000, 2,999999999, 5,500000000); + TEST_BINOP(timespec_max, 0,500000000, 1,500000000, 1,500000000); + 
TEST_BINOP(timespec_max, 0,0, 1,500000000, 1,500000000); + TEST_BINOP(timespec_max, 1,0, 1,500000000, 1,500000000); + TEST_BINOP(timespec_max, 1,0, 0,1, 1,0); + TEST_BINOP(timespec_max, 1,123456789, 0,1000, 1,123456789); + TEST_BINOP(timespec_max, 1,0, 0,9999999, 1,0); + TEST_BINOP(timespec_max, 12345,54321, 0,100001, 12345,54321); + TEST_BINOP(timespec_max, LONG_MIN,0, 0,1, 0,1); + TEST_BINOP(timespec_max, LONG_MIN,0, 0,-1, 0,-1); + TEST_BINOP(timespec_max, LONG_MIN,0, LONG_MAX,0, LONG_MAX,0); + TEST_BINOP(timespec_max, LONG_MIN,0, LONG_MIN,0, LONG_MIN,0); + TEST_BINOP(timespec_max, LONG_MAX,0, 0,1, LONG_MAX,0); + TEST_BINOP(timespec_max, LONG_MAX,0, 0,-1, LONG_MAX,0); + TEST_BINOP(timespec_max, LONG_MAX,0, LONG_MAX,0, LONG_MAX,0); + TEST_BINOP(timespec_max, LONG_MAX,0, LONG_MIN,0, LONG_MAX,0); + + // timespec_cmp + + TEST_TEST_FUNC(timespec_cmp, 0,0, 0,0, 0); + TEST_TEST_FUNC(timespec_cmp, 100,0, 100,0, 0); + TEST_TEST_FUNC(timespec_cmp, -100,0, -100,0, 0); + + TEST_TEST_FUNC(timespec_cmp, 1,0, 0,0, 1); + TEST_TEST_FUNC(timespec_cmp, 0,0, 1,0, -1); + TEST_TEST_FUNC(timespec_cmp, 0,1, 0,0, 1); + TEST_TEST_FUNC(timespec_cmp, 0,0, 0,1, -1); + TEST_TEST_FUNC(timespec_cmp, 1,0, 0,100, 1); + TEST_TEST_FUNC(timespec_cmp, 0,100 , 1,0, -1); + + TEST_TEST_FUNC(timespec_cmp, -0,-0, 0,0, 0); + TEST_TEST_FUNC(timespec_cmp, -10,-500000000, -11,500000000, 0); + TEST_TEST_FUNC(timespec_cmp, -10,-500000001, -11,499999999, 0); + TEST_TEST_FUNC(timespec_cmp, -10,-500000001, -11,500000001, -1); + TEST_TEST_FUNC(timespec_cmp, -11,500000001, -10,-500000001, 1); + + // timespec_eq + + TEST_TEST_FUNC(timespec_eq, 0,0, 0,0, true); + TEST_TEST_FUNC(timespec_eq, 100,0, 100,0, true); + TEST_TEST_FUNC(timespec_eq, -200,0, -200,0, true); + TEST_TEST_FUNC(timespec_eq, 0,300, 0,300, true); + TEST_TEST_FUNC(timespec_eq, 0,-400, 0,-400, true); + + TEST_TEST_FUNC(timespec_eq, 100,1, 100,0, false); + TEST_TEST_FUNC(timespec_eq, 101,0, 100,0, false); + TEST_TEST_FUNC(timespec_eq, -100,0, 100,0, 
false); + TEST_TEST_FUNC(timespec_eq, 0,10, 0,-10, false); + + TEST_TEST_FUNC(timespec_eq, -0,-0, 0,0, true); + TEST_TEST_FUNC(timespec_eq, -10,-500000000, -11,500000000, true); + TEST_TEST_FUNC(timespec_eq, -10,-500000001, -11,499999999, true); + TEST_TEST_FUNC(timespec_eq, -10,-500000001, -11,500000001, false); + + // timespec_gt + + TEST_TEST_FUNC(timespec_gt, 1,0, 0,0, true); + TEST_TEST_FUNC(timespec_gt, 0,0, -1,0, true); + TEST_TEST_FUNC(timespec_gt, 0,1, 0,0, true); + TEST_TEST_FUNC(timespec_gt, 0,0, 0,-1, true); + + TEST_TEST_FUNC(timespec_gt, 1,0, 1,0, false); + TEST_TEST_FUNC(timespec_gt, 1,1, 1,1, false); + TEST_TEST_FUNC(timespec_gt, -1,0, 0,0, false); + TEST_TEST_FUNC(timespec_gt, 0,-1, 0,0, false); + + TEST_TEST_FUNC(timespec_gt, 0,0, -0,-0, false); + TEST_TEST_FUNC(timespec_gt, -10,-500000000, -11,500000000, false); + TEST_TEST_FUNC(timespec_gt, -11,500000000, -10,-500000000, false); + TEST_TEST_FUNC(timespec_gt, -10,-500000001, -11,499999999, false); + TEST_TEST_FUNC(timespec_gt, -11,499999999, -11,499999999, false); + TEST_TEST_FUNC(timespec_gt, -10,-500000001, -11,500000001, false); + TEST_TEST_FUNC(timespec_gt, -11,500000001, -10,-500000001, true); + + // timespec_ge + + TEST_TEST_FUNC(timespec_ge, 1,0, 0,0, true); + TEST_TEST_FUNC(timespec_ge, 0,0, -1,0, true); + TEST_TEST_FUNC(timespec_ge, 0,1, 0,0, true); + TEST_TEST_FUNC(timespec_ge, 0,0, 0,-1, true); + TEST_TEST_FUNC(timespec_ge, 1,0, 1,0, true); + TEST_TEST_FUNC(timespec_ge, 1,1, 1,1, true); + + TEST_TEST_FUNC(timespec_ge, -1,0, 0,0, false); + TEST_TEST_FUNC(timespec_ge, 0,-1, 0,0, false); + + TEST_TEST_FUNC(timespec_ge, 0,0, -0,-0, true); + TEST_TEST_FUNC(timespec_ge, -10,-500000000, -11,500000000, true); + TEST_TEST_FUNC(timespec_ge, -11,500000000, -10,-500000000, true); + TEST_TEST_FUNC(timespec_ge, -10,-500000001, -11,499999999, true); + TEST_TEST_FUNC(timespec_ge, -11,499999999, -11,499999999, true); + TEST_TEST_FUNC(timespec_ge, -10,-500000001, -11,500000001, false); + 
TEST_TEST_FUNC(timespec_ge, -11,500000001, -10,-500000001, true); + + // timespec_lt + + TEST_TEST_FUNC(timespec_lt, 0,0, 1,0, true); + TEST_TEST_FUNC(timespec_lt, -1,0, 0,0, true); + TEST_TEST_FUNC(timespec_lt, 0,0, 0,1, true); + TEST_TEST_FUNC(timespec_lt, 0,-1, 0,0, true); + + TEST_TEST_FUNC(timespec_lt, 1,0, 1,0, false); + TEST_TEST_FUNC(timespec_lt, 1,1, 1,1, false); + TEST_TEST_FUNC(timespec_lt, 0,0, -1,0, false); + TEST_TEST_FUNC(timespec_lt, 0,0, 0,-1, false); + + TEST_TEST_FUNC(timespec_lt, 0,0, -0,-0, false); + TEST_TEST_FUNC(timespec_lt, -10,-500000000, -11,500000000, false); + TEST_TEST_FUNC(timespec_lt, -11,500000000, -10,-500000000, false); + TEST_TEST_FUNC(timespec_lt, -10,-500000001, -11,499999999, false); + TEST_TEST_FUNC(timespec_lt, -11,499999999, -11,499999999, false); + TEST_TEST_FUNC(timespec_lt, -10,-500000001, -11,500000001, true); + TEST_TEST_FUNC(timespec_lt, -11,500000001, -10,-500000001, false); + + // timespec_le + + TEST_TEST_FUNC(timespec_le, 0,0, 1,0, true); + TEST_TEST_FUNC(timespec_le, -1,0, 0,0, true); + TEST_TEST_FUNC(timespec_le, 0,0, 0,1, true); + TEST_TEST_FUNC(timespec_le, 0,-1, 0,0, true); + TEST_TEST_FUNC(timespec_le, 1,0, 1,0, true); + TEST_TEST_FUNC(timespec_le, 1,1, 1,1, true); + + TEST_TEST_FUNC(timespec_le, 0,0, -1,0, false); + TEST_TEST_FUNC(timespec_le, 0,0, 0,-1, false); + + TEST_TEST_FUNC(timespec_le, 0,0, -0,-0, true); + TEST_TEST_FUNC(timespec_le, -10,-500000000, -11,500000000, true); + TEST_TEST_FUNC(timespec_le, -11,500000000, -10,-500000000, true); + TEST_TEST_FUNC(timespec_le, -10,-500000001, -11,499999999, true); + TEST_TEST_FUNC(timespec_le, -11,499999999, -11,499999999, true); + TEST_TEST_FUNC(timespec_le, -10,-500000001, -11,500000001, true); + TEST_TEST_FUNC(timespec_le, -11,500000001, -10,-500000001, false); + + // timespec_from_double + + TEST_FROM_DOUBLE(0.0, 0,0); + TEST_FROM_DOUBLE(10.0, 10,0); + TEST_FROM_DOUBLE(-10.0, -10,0); + TEST_FROM_DOUBLE(0.5, 0,500000000); + TEST_FROM_DOUBLE(-0.5, 
-1,500000000); + TEST_FROM_DOUBLE(10.5, 10,500000000); + TEST_FROM_DOUBLE(-10.5, -11,500000000); + + // timespec_to_double + + TEST_TO_DOUBLE(0,0, 0.0); + TEST_TO_DOUBLE(10,0, 10.0); + TEST_TO_DOUBLE(-10,0, -10.0); + TEST_TO_DOUBLE(0,500000000, 0.5); + TEST_TO_DOUBLE(0,-500000000, -0.5); + TEST_TO_DOUBLE(10,500000000, 10.5); + TEST_TO_DOUBLE(10,-500000000, 9.5); + TEST_TO_DOUBLE(-10,500000000, -9.5); + TEST_TO_DOUBLE(-10,-500000000, -10.5); + + // timespec_from_timeval + + TEST_FROM_TIMEVAL(0,0, 0,0); + TEST_FROM_TIMEVAL(1,0, 1,0); + TEST_FROM_TIMEVAL(1000,0, 1000,0); + TEST_FROM_TIMEVAL(0,0, 0,0); + TEST_FROM_TIMEVAL(-1,0, -1,0); + TEST_FROM_TIMEVAL(-1000,0, -1000,0); + + TEST_FROM_TIMEVAL(1,1, 1,1000); + TEST_FROM_TIMEVAL(1,1000, 1,1000000); + TEST_FROM_TIMEVAL(1,-1, 0,999999000); + TEST_FROM_TIMEVAL(1,-1000, 0,999000000); + TEST_FROM_TIMEVAL(-1,-1, -2,999999000); + TEST_FROM_TIMEVAL(-1,-1000, -2,999000000); + + // timespec_to_timeval + + TEST_TO_TIMEVAL(0,0, 0,0); + TEST_TO_TIMEVAL(1,0, 1,0); + TEST_TO_TIMEVAL(10,0, 10,0); + TEST_TO_TIMEVAL(-1,0, -1,0); + TEST_TO_TIMEVAL(-10,0, -10,0); + + TEST_TO_TIMEVAL(1,1, 1,0); + TEST_TO_TIMEVAL(1,999, 1,0); + TEST_TO_TIMEVAL(1,1000, 1,1); + TEST_TO_TIMEVAL(1,1001, 1,1); + TEST_TO_TIMEVAL(1,2000, 1,2); + TEST_TO_TIMEVAL(1,2000000, 1,2000); + + TEST_TO_TIMEVAL(1,-1, 0,999999); + TEST_TO_TIMEVAL(1,-999, 0,999999); + TEST_TO_TIMEVAL(1,-1000, 0,999999); + TEST_TO_TIMEVAL(1,-1001, 0,999998); + TEST_TO_TIMEVAL(1,-2000, 0,999998); + TEST_TO_TIMEVAL(1,-2000000, 0,998000); + + TEST_TO_TIMEVAL(-1,-1, -2,999999); + TEST_TO_TIMEVAL(-1,-999, -2,999999); + TEST_TO_TIMEVAL(-1,-1000, -2,999999); + TEST_TO_TIMEVAL(-1,-1001, -2,999998); + TEST_TO_TIMEVAL(-1,-2000, -2,999998); + TEST_TO_TIMEVAL(-1,-2000000, -2,998000); + + TEST_TO_TIMEVAL(1,1500000000, 2,500000); + TEST_TO_TIMEVAL(1,-1500000000, -1,500000); + TEST_TO_TIMEVAL(-1,-1500000000, -3,500000); + + // timespec_from_ms + + TEST_FROM_MS(0, 0,0); + TEST_FROM_MS(1, 0,1000000); + 
TEST_FROM_MS(-1, -1,999000000); + TEST_FROM_MS(1500, 1,500000000); + TEST_FROM_MS(-1000, -1,0); + TEST_FROM_MS(-1500, -2,500000000); + + // timespec_to_ms + + TEST_TO_MS(0,0, 0); + TEST_TO_MS(10,0, 10000); + TEST_TO_MS(-10,0, -10000); + TEST_TO_MS(0,500000000, 500); + TEST_TO_MS(0,-500000000, -500); + TEST_TO_MS(10,500000000, 10500); + TEST_TO_MS(10,-500000000, 9500); + TEST_TO_MS(-10,500000000, -9500); + TEST_TO_MS(-10,-500000000, -10500); + + // timespec_normalise + + TEST_NORMALISE(0,0, 0,0); + + TEST_NORMALISE(0,1000000000, 1,0); + TEST_NORMALISE(0,1500000000, 1,500000000); + TEST_NORMALISE(0,-1000000000, -1,0); + TEST_NORMALISE(0,-1500000000, -2,500000000); + + TEST_NORMALISE(5,1000000000, 6,0); + TEST_NORMALISE(5,1500000000, 6,500000000); + TEST_NORMALISE(-5,-1000000000, -6,0); + TEST_NORMALISE(-5,-1500000000, -7,500000000); + + TEST_NORMALISE(0,2000000000, 2,0); + TEST_NORMALISE(0,2100000000, 2,100000000); + TEST_NORMALISE(0,-2000000000, -2,0); + TEST_NORMALISE(0,-2100000000, -3,900000000); + + TEST_NORMALISE(1,-500000001, 0,499999999); + TEST_NORMALISE(1,-500000000, 0,500000000); + TEST_NORMALISE(1,-499999999, 0,500000001); + TEST_NORMALISE(0,-499999999, -1,500000001); + + TEST_NORMALISE(-1,500000000, -1,500000000); + TEST_NORMALISE(-1,499999999, -1,499999999); + + if(result > 0) + { + printf("%d tests failed\n", result); + } + else{ + printf("All tests passed\n"); + } + + return !!result; /* Don't overflow the exit status */ +} +#endif diff --git a/src/lib/core/utils.c b/src/lib/core/utils.c index a248f58..a8b1c73 100644 --- a/src/lib/core/utils.c +++ b/src/lib/core/utils.c @@ -892,3 +892,230 @@ int gen_file_extension_str(char *filename, const char *extension) { return replace_text(ext_orig, ext_orig, extension, 0); } +#define DEBUG_HEXDUMP_FMT_BYTES 6 +#define DEBUG_HEXDUMP_ADDR_MAXLEN 20 +#define DEBUG_HEXDUMP_BYTES_MAXLEN (16 * 3 + 2) +#define DEBUG_HEXDUMP_ASCII_MAXLEN (16 + 1) +#define DEBUG_HEXDUMP_OUTPUT_MAXLEN (DEBUG_HEXDUMP_FMT_BYTES + 
/**
 * Test whether a character lies within an inclusive ASCII range.
 *
 * @param c character to test
 * @param lower inclusive lower bound
 * @param upper inclusive upper bound
 * @return 1 when `c` is within [lower, upper], 0 otherwise
 */
int in_ascii_range(const char c, char lower, char upper) {
    return (c >= lower && c <= upper) ? 1 : 0;
}
/**
 * strlist_append_file() callback: decide whether a requirements record
 * is a VCS-style package spec (e.g. "git+https://...").
 *
 * @param line line number in the file (unused)
 * @param data pointer to the record text (not modified; a local copy is stripped)
 * @return 0 when the record matches "<vcs>+<scheme>://" (record is kept),
 *         1 when it does not match or is a comment (record is skipped),
 *         -1 on allocation failure
 */
static int read_vcs_records(const size_t line, char **data) {
    (void) line; // unused
    static const char *vcs_name[] = {
        "git",
        "svn",
        "hg",
        "bzr",
    };

    // Copy and strip ONCE, outside the VCS-name loop. The original code
    // re-duplicated and re-stripped the record for every VCS name, and
    // leaked data_local when it hit the comment early-return path.
    char *data_local = strdup(*data);
    if (!data_local) {
        fprintf(stderr, "Out of memory\n");
        return -1;
    }

    // Remove leading/trailing blanks
    lstrip(data_local);
    strip(data_local);

    // Ignore file comment(s) -- BUG FIX: free the copy before returning
    if (startswith(data_local, "#") || startswith(data_local, ";")) {
        guard_free(data_local);
        return 1;
    }

    int result = 1; // default: no match, skip record
    for (size_t i = 0; i < sizeof(vcs_name) / sizeof(vcs_name[0]); i++) {
        // Begin matching VCS package syntax: "<vcs>" then "+" then "://"
        const char *match_vcs = strstr(data_local, vcs_name[i]);
        if (match_vcs) {
            const char *match_protocol_sep = strstr(match_vcs, "+");
            if (match_protocol_sep && strstr(match_protocol_sep, "://")) {
                result = 0; // match found
                break;
            }
        }
    }

    guard_free(data_local);
    return result;
}
%s:\n", configfile); + for (size_t j = 0; j < count; j++) { + char *record = strlist_item(data, j); + lstrip(record); + strip(record); + printf("[%zu] %s\n", j, record); + } + guard_strlist_free(&data); + return 1; + } + guard_strlist_free(&data); + } + return 0; +} + +int str_to_timeout(char *s) { + if (!s) { + return 0; // no timeout + } + + char *scale = NULL; + int value = (int) strtol(s, &scale, 10); + if (scale) { + if (*scale == 's') { + value *= 1; // seconds, no-op + } else if (*scale == 'm') { + value *= 60; // minutes + } else if (*scale == 'h') { + value *= 3200; // hours + } else { + return STR_TO_TIMEOUT_INVALID_TIME_SCALE; // invalid time scale + } + } + + if (value < 0) { + return STR_TO_TIMEOUT_NEGATIVE; // cannot be negative + } + return value; +} + +void seconds_to_human_readable(const int v, char *result, const size_t maxlen) { + const int hours = v / 3600; + const int minutes = (v % 3600) / 60; + const int seconds = v % 60; + + memset(result, '\0', maxlen); + if (hours) { + snprintf(result + strlen(result), maxlen, "%dh ", hours); + } + if (hours || minutes) { + snprintf(result + strlen(result), maxlen, "%dm ", minutes); + } + snprintf(result + strlen(result), maxlen, "%ds", seconds); +} + diff --git a/src/lib/delivery/CMakeLists.txt b/src/lib/delivery/CMakeLists.txt index 78ed20f..559b2dc 100644 --- a/src/lib/delivery/CMakeLists.txt +++ b/src/lib/delivery/CMakeLists.txt @@ -1,4 +1,5 @@ add_library(stasis_delivery STATIC + delivery_export.c delivery_postprocess.c delivery_conda.c delivery_docker.c diff --git a/src/lib/delivery/delivery.c b/src/lib/delivery/delivery.c index d480ab4..600ddf9 100644 --- a/src/lib/delivery/delivery.c +++ b/src/lib/delivery/delivery.c @@ -1,5 +1,178 @@ #include "delivery.h" +static char *strdup_maybe(const char * restrict s) { + if (s != NULL) { + return strdup(s); + } + return NULL; +} +struct Delivery *delivery_duplicate(const struct Delivery *ctx) { + struct Delivery *result = calloc(1, sizeof(*result)); + if 
(!result) { + return NULL; + } + // Conda + result->conda.conda_packages = strlist_copy(ctx->conda.conda_packages); + result->conda.conda_packages_defer = strlist_copy(ctx->conda.conda_packages_defer); + result->conda.conda_packages_purge = strlist_copy(ctx->conda.conda_packages_purge); + result->conda.pip_packages = strlist_copy(ctx->conda.pip_packages); + result->conda.pip_packages_defer = strlist_copy(ctx->conda.pip_packages_defer); + result->conda.pip_packages_purge = strlist_copy(ctx->conda.pip_packages_purge); + result->conda.wheels_packages = strlist_copy(ctx->conda.wheels_packages); + result->conda.installer_arch = strdup_maybe(ctx->conda.installer_arch); + result->conda.installer_baseurl = strdup_maybe(ctx->conda.installer_baseurl); + result->conda.installer_name = strdup_maybe(ctx->conda.installer_name); + result->conda.installer_path = strdup_maybe(ctx->conda.installer_path); + result->conda.installer_platform = strdup_maybe(ctx->conda.installer_platform); + result->conda.installer_version = strdup_maybe(ctx->conda.installer_version); + result->conda.tool_build_version = strdup_maybe(ctx->conda.tool_build_version); + result->conda.tool_version = strdup_maybe(ctx->conda.tool_version); + + // Info + result->info.build_name = strdup_maybe(ctx->info.build_name); + result->info.build_number = strdup_maybe(ctx->info.build_number); + result->info.release_name = strdup_maybe(ctx->info.release_name); + result->info.time_info = ctx->info.time_info; + result->info.time_now = ctx->info.time_now; + result->info.time_str_epoch = strdup_maybe(ctx->info.time_str_epoch); + + // Meta + result->meta.name = strdup_maybe(ctx->meta.name); + result->meta.based_on = strdup_maybe(ctx->meta.based_on); + result->meta.codename = strdup_maybe(ctx->meta.codename); + result->meta.mission = strdup_maybe(ctx->meta.mission); + result->meta.final = ctx->meta.final; + result->meta.python = strdup_maybe(ctx->meta.python); + result->meta.python_compact = 
strdup_maybe(ctx->meta.python_compact); + result->meta.rc = ctx->meta.rc; + result->meta.version = strdup_maybe(ctx->meta.version); + + // Rules + result->rules.build_name_fmt = strdup_maybe(ctx->rules.build_name_fmt); + result->rules.build_number_fmt = strdup_maybe(ctx->rules.build_number_fmt); + // Unused member? + result->rules.enable_final = ctx->rules.enable_final; + result->rules.release_fmt = ctx->rules.release_fmt; + // TODO: need content duplication function + memcpy(&result->rules.content, &ctx->rules.content, sizeof(ctx->rules.content)); + + if (ctx->rules._handle) { + result->rules._handle = malloc(sizeof(*result->rules._handle)); + result->rules._handle->section = malloc(result->rules._handle->section_count * sizeof(*result->rules._handle->section)); + memcpy(result->rules._handle, &ctx->rules._handle, sizeof(*ctx->rules._handle)); + } + + // Runtime + if (ctx->runtime.environ) { + result->runtime.environ = runtime_copy(ctx->runtime.environ->data); + } + + // Storage + result->storage.tools_dir = strdup_maybe(ctx->storage.tools_dir); + result->storage.package_dir = strdup_maybe(ctx->storage.package_dir); + result->storage.results_dir = strdup_maybe(ctx->storage.results_dir); + result->storage.output_dir = strdup_maybe(ctx->storage.output_dir); + result->storage.cfgdump_dir = strdup_maybe(ctx->storage.cfgdump_dir); + result->storage.delivery_dir = strdup_maybe(ctx->storage.delivery_dir); + result->storage.meta_dir = strdup_maybe(ctx->storage.meta_dir); + result->storage.mission_dir = strdup_maybe(ctx->storage.mission_dir); + result->storage.root = strdup_maybe(ctx->storage.root); + result->storage.tmpdir = strdup_maybe(ctx->storage.tmpdir); + result->storage.build_dir = strdup_maybe(ctx->storage.build_dir); + result->storage.build_docker_dir = strdup_maybe(ctx->storage.build_docker_dir); + result->storage.build_recipes_dir = strdup_maybe(ctx->storage.build_recipes_dir); + result->storage.build_sources_dir = strdup_maybe(ctx->storage.build_sources_dir); 
+ result->storage.build_testing_dir = strdup_maybe(ctx->storage.build_testing_dir); + result->storage.conda_artifact_dir = strdup_maybe(ctx->storage.conda_artifact_dir); + result->storage.conda_install_prefix = strdup_maybe(ctx->storage.conda_install_prefix); + result->storage.conda_staging_dir = strdup_maybe(ctx->storage.conda_staging_dir); + result->storage.conda_staging_url = strdup_maybe(ctx->storage.conda_staging_url); + result->storage.docker_artifact_dir = strdup_maybe(ctx->storage.docker_artifact_dir); + result->storage.wheel_artifact_dir = strdup_maybe(ctx->storage.wheel_artifact_dir); + result->storage.wheel_staging_url = strdup_maybe(ctx->storage.wheel_staging_url); + + result->system.arch = strdup_maybe(ctx->system.arch); + if (ctx->system.platform) { + result->system.platform = malloc(DELIVERY_PLATFORM_MAX * sizeof(*result->system.platform)); + for (size_t i = 0; i < DELIVERY_PLATFORM_MAX; i++) { + result->system.platform[i] = strdup_maybe(ctx->system.platform[i]); + } + } + + // Docker + result->deploy.docker.build_args = strlist_copy(ctx->deploy.docker.build_args); + result->deploy.docker.tags = strlist_copy(ctx->deploy.docker.tags); + result->deploy.docker.capabilities = ctx->deploy.docker.capabilities; + result->deploy.docker.dockerfile = strdup_maybe(ctx->deploy.docker.dockerfile); + result->deploy.docker.image_compression = strdup_maybe(ctx->deploy.docker.image_compression); + result->deploy.docker.registry = strdup_maybe(ctx->deploy.docker.registry); + result->deploy.docker.test_script = strdup_maybe(ctx->deploy.docker.test_script); + + // Jfrog + // TODO: break out into a separate a function + for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(ctx->deploy.jfrog[0]); i++) { + result->deploy.jfrog[i].dest = strdup_maybe(ctx->deploy.jfrog[i].dest); + result->deploy.jfrog[i].files = strlist_copy(ctx->deploy.jfrog[i].files); + result->deploy.jfrog[i].repo = strdup_maybe(ctx->deploy.jfrog[i].repo); + result->deploy.jfrog[i].upload_ctx.ant = 
ctx->deploy.jfrog[i].upload_ctx.ant; + result->deploy.jfrog[i].upload_ctx.archive = ctx->deploy.jfrog[i].upload_ctx.archive; + result->deploy.jfrog[i].upload_ctx.build_name = ctx->deploy.jfrog[i].upload_ctx.build_name; + result->deploy.jfrog[i].upload_ctx.build_number = ctx->deploy.jfrog[i].upload_ctx.build_number; + result->deploy.jfrog[i].upload_ctx.deb = ctx->deploy.jfrog[i].upload_ctx.deb; + result->deploy.jfrog[i].upload_ctx.detailed_summary = ctx->deploy.jfrog[i].upload_ctx.detailed_summary; + result->deploy.jfrog[i].upload_ctx.dry_run = ctx->deploy.jfrog[i].upload_ctx.dry_run; + result->deploy.jfrog[i].upload_ctx.exclusions = strdup_maybe(ctx->deploy.jfrog[i].upload_ctx.exclusions); + result->deploy.jfrog[i].upload_ctx.explode = ctx->deploy.jfrog[i].upload_ctx.explode; + result->deploy.jfrog[i].upload_ctx.fail_no_op = ctx->deploy.jfrog[i].upload_ctx.fail_no_op; + result->deploy.jfrog[i].upload_ctx.flat = ctx->deploy.jfrog[i].upload_ctx.flat; + result->deploy.jfrog[i].upload_ctx.include_dirs = ctx->deploy.jfrog[i].upload_ctx.include_dirs; + result->deploy.jfrog[i].upload_ctx.module = strdup_maybe(ctx->deploy.jfrog[i].upload_ctx.module); + result->deploy.jfrog[i].upload_ctx.project = strdup_maybe(ctx->deploy.jfrog[i].upload_ctx.project); + result->deploy.jfrog[i].upload_ctx.quiet = ctx->deploy.jfrog[i].upload_ctx.quiet; + result->deploy.jfrog[i].upload_ctx.recursive = ctx->deploy.jfrog[i].upload_ctx.recursive; + result->deploy.jfrog[i].upload_ctx.regexp = ctx->deploy.jfrog[i].upload_ctx.regexp; + result->deploy.jfrog[i].upload_ctx.retries = ctx->deploy.jfrog[i].upload_ctx.retries; + result->deploy.jfrog[i].upload_ctx.retry_wait_time = ctx->deploy.jfrog[i].upload_ctx.retry_wait_time; + result->deploy.jfrog[i].upload_ctx.spec = strdup_maybe(ctx->deploy.jfrog[i].upload_ctx.spec); + result->deploy.jfrog[i].upload_ctx.spec_vars = strdup_maybe(ctx->deploy.jfrog[i].upload_ctx.spec_vars); + result->deploy.jfrog[i].upload_ctx.symlinks = 
ctx->deploy.jfrog[i].upload_ctx.symlinks; + result->deploy.jfrog[i].upload_ctx.sync_deletes = ctx->deploy.jfrog[i].upload_ctx.sync_deletes; + result->deploy.jfrog[i].upload_ctx.target_props = strdup_maybe(ctx->deploy.jfrog[i].upload_ctx.target_props); + result->deploy.jfrog[i].upload_ctx.threads = ctx->deploy.jfrog[i].upload_ctx.threads; + result->deploy.jfrog[i].upload_ctx.workaround_parent_only = ctx->deploy.jfrog[i].upload_ctx.workaround_parent_only; + } + + result->deploy.jfrog_auth.access_token = strdup_maybe(ctx->deploy.jfrog_auth.access_token); + result->deploy.jfrog_auth.client_cert_key_path = strdup_maybe(ctx->deploy.jfrog_auth.client_cert_key_path); + result->deploy.jfrog_auth.client_cert_path = strdup_maybe(ctx->deploy.jfrog_auth.client_cert_path); + result->deploy.jfrog_auth.insecure_tls = ctx->deploy.jfrog_auth.insecure_tls; + result->deploy.jfrog_auth.password = strdup_maybe(ctx->deploy.jfrog_auth.password); + result->deploy.jfrog_auth.server_id = strdup_maybe(ctx->deploy.jfrog_auth.server_id); + result->deploy.jfrog_auth.ssh_key_path = strdup_maybe(ctx->deploy.jfrog_auth.ssh_key_path); + result->deploy.jfrog_auth.ssh_passphrase = strdup_maybe(ctx->deploy.jfrog_auth.ssh_passphrase); + result->deploy.jfrog_auth.url = strdup_maybe(ctx->deploy.jfrog_auth.url); + result->deploy.jfrog_auth.user = strdup_maybe(ctx->deploy.jfrog_auth.user); + + for (size_t i = 0; i < sizeof(result->tests) / sizeof(result->tests[0]); i++) { + result->tests[i].disable = ctx->tests[i].disable; + result->tests[i].parallel = ctx->tests[i].parallel; + result->tests[i].build_recipe = strdup_maybe(ctx->tests[i].build_recipe); + result->tests[i].name = strdup_maybe(ctx->tests[i].name); + result->tests[i].version = strdup_maybe(ctx->tests[i].version); + result->tests[i].repository = strdup_maybe(ctx->tests[i].repository); + result->tests[i].repository_info_ref = strdup_maybe(ctx->tests[i].repository_info_ref); + result->tests[i].repository_info_tag = 
strdup_maybe(ctx->tests[i].repository_info_tag); + result->tests[i].repository_remove_tags = strlist_copy(ctx->tests[i].repository_remove_tags); + if (ctx->tests[i].runtime.environ) { + result->tests[i].runtime.environ = runtime_copy(ctx->tests[i].runtime.environ->data); + } + result->tests[i].script = strdup_maybe(ctx->tests[i].script); + result->tests[i].script_setup = strdup_maybe(ctx->tests[i].script_setup); + } + + return result; +} + void delivery_free(struct Delivery *ctx) { guard_free(ctx->system.arch); guard_array_free(ctx->system.platform); @@ -225,7 +398,15 @@ void delivery_defer_packages(struct Delivery *ctx, int type) { // Override test->version when a version is provided by the (pip|conda)_package list item guard_free(test->version); if (spec_begin && spec_end) { - test->version = strdup(spec_end); + char *version_at = strrchr(spec_end, '@'); + if (version_at) { + if (strlen(version_at)) { + version_at++; + } + test->version = strdup(version_at); + } else { + test->version = strdup(spec_end); + } } else { // There are too many possible default branches nowadays: master, main, develop, xyz, etc. // HEAD is a safe bet. @@ -233,6 +414,9 @@ void delivery_defer_packages(struct Delivery *ctx, int type) { } // Is the list item a git+schema:// URL? + // TODO: nametmp is just the name so this will never work. but do we want it to? this looks like + // TODO: an unsafe feature. We shouldn't be able to change what's in the config. we should + // TODO: be getting what we asked for, or exit the program with an error. 
if (strstr(nametmp, "git+") && strstr(nametmp, "://")) { char *xrepo = strstr(nametmp, "+"); if (xrepo) { diff --git a/src/lib/delivery/delivery_build.c b/src/lib/delivery/delivery_build.c index 2d891d2..c5093d4 100644 --- a/src/lib/delivery/delivery_build.c +++ b/src/lib/delivery/delivery_build.c @@ -173,6 +173,18 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) { memset(outdir, 0, sizeof(outdir)); memset(cmd, 0, sizeof(outdir)); + const int dep_status = check_python_package_dependencies("."); + if (dep_status) { + fprintf(stderr, "\nPlease replace all occurrences above with standard package specs:\n" + "\n" + " package==x.y.z\n" + " package>=x.y.z\n" + " package<=x.y.z\n" + " ...\n" + "\n"); + COE_CHECK_ABORT(dep_status, "Unreproducible delivery"); + } + strcpy(dname, ctx->tests[i].name); tolower_s(dname); sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname); diff --git a/src/lib/delivery/delivery_export.c b/src/lib/delivery/delivery_export.c new file mode 100644 index 0000000..d982ad5 --- /dev/null +++ b/src/lib/delivery/delivery_export.c @@ -0,0 +1,58 @@ +#include "delivery.h" + +static void delivery_export_configuration(const struct Delivery *ctx) { + msg(STASIS_MSG_L2, "Exporting delivery configuration\n"); + if (!pushd(ctx->storage.cfgdump_dir)) { + char filename[PATH_MAX] = {0}; + sprintf(filename, "%s.ini", ctx->info.release_name); + FILE *spec = fopen(filename, "w+"); + if (!spec) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename); + exit(1); + } + ini_write(ctx->_stasis_ini_fp.delivery, &spec, INI_WRITE_RAW); + fclose(spec); + + memset(filename, 0, sizeof(filename)); + sprintf(filename, "%s-rendered.ini", ctx->info.release_name); + spec = fopen(filename, "w+"); + if (!spec) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", filename); + exit(1); + } + ini_write(ctx->_stasis_ini_fp.delivery, &spec, INI_WRITE_PRESERVE); + fclose(spec); + popd(); + } else { + SYSERROR("Failed to enter directory: %s", 
ctx->storage.delivery_dir); + exit(1); + } +} + +void delivery_export(const struct Delivery *ctx, char *envs[]) { + delivery_export_configuration(ctx); + + for (size_t i = 0; envs[i] != NULL; i++) { + char *name = envs[i]; + msg(STASIS_MSG_L2, "Exporting %s\n", name); + if (conda_env_export(name, ctx->storage.delivery_dir, name)) { + msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed %s\n", name); + exit(1); + } + } +} + +void delivery_rewrite_stage1(struct Delivery *ctx, char *specfile) { + // Rewrite release environment output (i.e. set package origin(s) to point to the deployment server, etc.) + msg(STASIS_MSG_L3, "Rewriting release spec file (stage 1): %s\n", path_basename(specfile)); + delivery_rewrite_spec(ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_1); + + msg(STASIS_MSG_L1, "Rendering mission templates\n"); + delivery_mission_render_files(ctx); +} + +void delivery_rewrite_stage2(struct Delivery *ctx, char *specfile) { + msg(STASIS_MSG_L3, "Rewriting release spec file (stage 2): %s\n", path_basename(specfile)); + delivery_rewrite_spec(ctx, specfile, DELIVERY_REWRITE_SPEC_STAGE_2); +} + diff --git a/src/lib/delivery/delivery_init.c b/src/lib/delivery/delivery_init.c index 56c591a..a60d6af 100644 --- a/src/lib/delivery/delivery_init.c +++ b/src/lib/delivery/delivery_init.c @@ -287,18 +287,25 @@ int delivery_init(struct Delivery *ctx, int render_mode) { int bootstrap_build_info(struct Delivery *ctx) { struct Delivery local = {0}; + SYSDEBUG("ini_open(%s)", ctx->_stasis_ini_fp.cfg_path); local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path); + SYSDEBUG("ini_open(%s)", ctx->_stasis_ini_fp.delivery_path); local._stasis_ini_fp.delivery = ini_open(ctx->_stasis_ini_fp.delivery_path); + if (delivery_init_platform(&local)) { + SYSDEBUG("%s", "delivery_init_platform failed"); return -1; } if (populate_delivery_cfg(&local, INI_READ_RENDER)) { + SYSDEBUG("%s", "populate_delivery_cfg failed"); return -1; } if (populate_delivery_ini(&local, INI_READ_RENDER)) { + 
SYSDEBUG("%s", "populate_delivery_ini failed"); return -1; } if (populate_info(&local)) { + SYSDEBUG("%s", "populate_info failed"); return -1; } ctx->info.build_name = strdup(local.info.build_name); @@ -314,6 +321,7 @@ int bootstrap_build_info(struct Delivery *ctx) { memcpy(ctx->info.time_info, local.info.time_info, sizeof(*local.info.time_info)); ctx->info.time_now = local.info.time_now; ctx->info.time_str_epoch = strdup(local.info.time_str_epoch); + SYSDEBUG("%s", "delivery_free local resources"); delivery_free(&local); return 0; } diff --git a/src/lib/delivery/delivery_install.c b/src/lib/delivery/delivery_install.c index 246c604..f1637a3 100644 --- a/src/lib/delivery/delivery_install.c +++ b/src/lib/delivery/delivery_install.c @@ -36,7 +36,7 @@ static char *have_spec_in_config(const struct Delivery *ctx, const char *name) { strncpy(package, config_spec, sizeof(package) - 1); } remove_extras(package); - if (strncmp(package, name, strlen(package)) == 0) { + if (strncmp(package, name, strlen(name)) == 0) { return config_spec; } } @@ -189,8 +189,7 @@ int delivery_purge_packages(struct Delivery *ctx, const char *env_name, int use_ } int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, char *env_name, int type, struct StrList **manifest) { - char cmd[PATH_MAX]; - char pkgs[STASIS_BUFSIZ]; + char command_base[PATH_MAX]; const char *env_current = getenv("CONDA_DEFAULT_ENV"); if (env_current) { @@ -203,9 +202,8 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha } } - memset(cmd, 0, sizeof(cmd)); - memset(pkgs, 0, sizeof(pkgs)); - strcat(cmd, "install"); + memset(command_base, 0, sizeof(command_base)); + strcat(command_base, "install"); typedef int (*Runner)(const char *); Runner runner = NULL; @@ -216,17 +214,23 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha } if (INSTALL_PKG_CONDA_DEFERRED & type) { - strcat(cmd, " --use-local"); + strcat(command_base, " --use-local"); } else 
if (INSTALL_PKG_PIP_DEFERRED & type) { // Don't change the baseline package set unless we're working with a // new build. Release candidates will need to keep packages as stable // as possible between releases. if (!ctx->meta.based_on) { - strcat(cmd, " --upgrade"); + strcat(command_base, " --upgrade"); } + sprintf(command_base + strlen(command_base), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir); } - sprintf(cmd + strlen(cmd), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir); + size_t args_alloc_len = STASIS_BUFSIZ; + char *args = calloc(args_alloc_len + 1, sizeof(*args)); + if (!args) { + SYSERROR("%s", "Unable to allocate bytes for command arguments"); + return -1; + } for (size_t x = 0; manifest[x] != NULL; x++) { char *name = NULL; @@ -239,10 +243,11 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha if (INSTALL_PKG_PIP_DEFERRED & type) { struct Test *info = requirement_from_test(ctx, name); if (info) { - if (!strcmp(info->version, "HEAD")) { + if (!strcmp(info->version, "HEAD") || is_git_sha(info->version)) { struct StrList *tag_data = strlist_init(); if (!tag_data) { SYSERROR("%s", "Unable to allocate memory for tag data\n"); + guard_free(args); return -1; } strlist_append_tokenize(tag_data, info->repository_info_tag, "-"); @@ -273,9 +278,9 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha fprintf(stderr, "No wheel packages found that match the description of '%s'", info->name); } else { // found - guard_strlist_free(&tag_data); info->version = strdup(whl->version); } + guard_strlist_free(&tag_data); wheel_free(&whl); } @@ -290,26 +295,66 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha } } - snprintf(cmd + strlen(cmd), - sizeof(cmd) - strlen(cmd) - strlen(info->name) - strlen(info->version) + 5, - " '%s==%s'", req, info->version); + const char *fmt_append = "%s '%s==%s'"; + const char *fmt = " '%s==%s'"; + const int 
required_len = snprintf(NULL, 0, fmt_append, args, req, info->version); + if (required_len > (int) args_alloc_len) { + if (grow(required_len, &args_alloc_len, &args)) { + SYSERROR("Unable to allocate %d bytes for command arguments", required_len); + guard_free(args); + return -1; + } + } + snprintf(args + strlen(args), required_len + 1, fmt, req, info->version); } else { fprintf(stderr, "Deferred package '%s' is not present in the tested package list!\n", name); + guard_free(args); return -1; } } else { if (startswith(name, "--") || startswith(name, "-")) { - sprintf(cmd + strlen(cmd), " %s", name); + const char *fmt_append = "%s %s"; + const char *fmt = " %s"; + const int required_len = snprintf(NULL, 0, fmt_append, args, name); + if (required_len > (int) args_alloc_len) { + if (grow(required_len, &args_alloc_len, &args)) { + SYSERROR("Unable to allocate %d bytes for command arguments", required_len); + guard_free(args); + return -1; + } + } + snprintf(args + strlen(args), required_len + 1, fmt, name); } else { - sprintf(cmd + strlen(cmd), " '%s'", name); + const char *fmt_append = "%s '%s'"; + const char *fmt = " '%s'"; + const int required_len = snprintf(NULL, 0, fmt_append, args, name); + if (required_len > (int) args_alloc_len) { + if (grow(required_len, &args_alloc_len, &args)) { + SYSERROR("Unable to allocate %d bytes for command arguments", required_len); + guard_free(args); + return -1; + } + } + snprintf(args + strlen(args), required_len + 1, fmt, name); } } } - int status = runner(cmd); + char *command = NULL; + if (asprintf(&command, "%s %s", command_base, args) < 0) { + SYSERROR("%s", "Unable to allocate bytes for command\n"); + guard_free(args); + return -1; + } + + int status = runner(command); + guard_free(args); + guard_free(command); if (status) { + // fail quickly return status; } } + guard_free(args); return 0; } diff --git a/src/lib/delivery/delivery_populate.c b/src/lib/delivery/delivery_populate.c index 84676f1..28b2480 100644 --- 
a/src/lib/delivery/delivery_populate.c +++ b/src/lib/delivery/delivery_populate.c @@ -55,6 +55,7 @@ int populate_info(struct Delivery *ctx) { int populate_delivery_cfg(struct Delivery *ctx, int render_mode) { struct INIFILE *cfg = ctx->_stasis_ini_fp.cfg; if (!cfg) { + SYSDEBUG("%s", "cfg is NULL"); return -1; } int err = 0; @@ -162,8 +163,6 @@ int populate_delivery_ini(struct Delivery *ctx, int render_mode) { // keys in the configuration RuntimeEnv *rt = runtime_copy(__environ); while ((rtdata = ini_getall(ini, "runtime")) != NULL) { - char rec[STASIS_BUFSIZ]; - sprintf(rec, "%s=%s", lstrip(strip(rtdata->key)), lstrip(strip(rtdata->value))); runtime_set(rt, rtdata->key, rtdata->value); } runtime_apply(rt); diff --git a/src/lib/delivery/delivery_test.c b/src/lib/delivery/delivery_test.c index e80e0ec..6e0a226 100644 --- a/src/lib/delivery/delivery_test.c +++ b/src/lib/delivery/delivery_test.c @@ -97,6 +97,18 @@ void delivery_tests_run(struct Delivery *ctx) { if (pushd(destdir)) { COE_CHECK_ABORT(1, "Unable to enter repository directory\n"); } else { + int dep_status = check_python_package_dependencies("."); + if (dep_status) { + fprintf(stderr, "\nPlease replace all occurrences above with standard package specs:\n" + "\n" + " package==x.y.z\n" + " package>=x.y.z\n" + " package<=x.y.z\n" + " ...\n" + "\n"); + COE_CHECK_ABORT(dep_status, "Unreproducible delivery"); + } + char *cmd = calloc(strlen(test->script) + STASIS_BUFSIZ, sizeof(*cmd)); if (!cmd) { SYSERROR("Unable to allocate test script buffer: %s", strerror(errno)); diff --git a/src/lib/delivery/include/delivery.h b/src/lib/delivery/include/delivery.h index 26a5499..f8229ed 100644 --- a/src/lib/delivery/include/delivery.h +++ b/src/lib/delivery/include/delivery.h @@ -64,10 +64,8 @@ struct Delivery { * \brief System information */ struct System { - char *arch; - ///< System CPU architecture ident - char **platform; - ///< System platform name + char *arch; ///< System CPU architecture ident + char **platform; 
///< System platform name } system; /*! \struct Storage * \brief Storage paths @@ -459,4 +457,35 @@ int delivery_series_sync(struct Delivery *ctx); */ int delivery_purge_packages(struct Delivery *ctx, const char *env_name, int use_pkg_manager); +/** + * Export delivery environments + * + * @param ctx Delivery context + * @param envs array of conda environment names + */ +void delivery_export(const struct Delivery *ctx, char *envs[]); + +/** + * STAGE 1: Rewrite delivery-related strings in specfile + * + * @param ctx Delivery context + * @param specfile path to YAML spec file + */ +void delivery_rewrite_stage1(struct Delivery *ctx, char *specfile); + +/** + * STAGE 2: Rewrite delivery-related strings in specfile + * + * @param ctx Delivery context + * @param specfile path to YAML spec file + */ +void delivery_rewrite_stage2(struct Delivery *ctx, char *specfile); + +/** + * Return a copy of a delivery context + * @param ctx Delivery context + * @return a copy + */ +struct Delivery *delivery_duplicate(const struct Delivery *ctx); + #endif //STASIS_DELIVERY_H |
