aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJoseph Hunkeler <jhunkeler@users.noreply.github.com>2026-04-06 12:31:05 -0400
committerGitHub <noreply@github.com>2026-04-06 12:31:05 -0400
commitd01b465eee667e8efa4aa7c3088dc7af18ea2ab2 (patch)
treebf63c1378044c446f62dd20ce956a5a6d5e1973a
parent83831af6502e68fe6199c29614e2df68ffbca170 (diff)
parentdfe3420de345a7d4c6a529e1c240138ca9852c86 (diff)
downloadstasis-d01b465eee667e8efa4aa7c3088dc7af18ea2ab2.tar.gz
Merge pull request #128 from jhunkeler/manylinuxHEADnextmaster
Manylinux
-rw-r--r--README.md50
-rw-r--r--src/cli/stasis/args.c4
-rw-r--r--src/cli/stasis/include/args.h2
-rw-r--r--src/cli/stasis/stasis_main.c47
-rw-r--r--src/cli/stasis/system_requirements.c30
-rw-r--r--src/lib/core/docker.c21
-rw-r--r--src/lib/core/envctl.c17
-rw-r--r--src/lib/core/globals.c2
-rw-r--r--src/lib/core/include/core.h4
-rw-r--r--src/lib/core/include/docker.h4
-rw-r--r--src/lib/core/include/strlist.h1
-rw-r--r--src/lib/core/include/utils.h12
-rw-r--r--src/lib/core/strlist.c23
-rw-r--r--src/lib/core/template_func_proto.c2
-rw-r--r--src/lib/core/utils.c54
-rw-r--r--src/lib/core/wheel.c3
-rw-r--r--src/lib/delivery/delivery_build.c296
-rw-r--r--src/lib/delivery/delivery_docker.c2
-rw-r--r--src/lib/delivery/delivery_init.c6
-rw-r--r--src/lib/delivery/delivery_install.c3
-rw-r--r--src/lib/delivery/delivery_populate.c48
-rw-r--r--stasis.ini12
-rw-r--r--tests/test_docker.c2
-rw-r--r--tests/test_strlist.c15
-rw-r--r--tests/test_utils.c10
25 files changed, 571 insertions, 99 deletions
diff --git a/README.md b/README.md
index a8c72d6..6acfc8a 100644
--- a/README.md
+++ b/README.md
@@ -147,30 +147,32 @@ stasis mydelivery.ini
## Command Line Options
-| Long Option | Short Option | Purpose |
-|:----------------------------|:------------:|:---------------------------------------------------------------|
-| --help | -h | Display usage statement |
-| --version | -V | Display program version |
-| --continue-on-error | -C | Allow tests to fail |
-| --config ARG | -c ARG | Read STASIS configuration file |
-| --cpu-limit ARG | -l ARG | Number of processes to spawn concurrently (default: cpus - 1) |
-| --pool-status-interval ARG | n/a | Report task status every n seconds (default: 30) |
-| --python ARG | -p ARG | Override version of Python in configuration |
-| --verbose | -v | Increase output verbosity |
-| --unbuffered | -U | Disable line buffering |
-| --update-base | n/a | Update conda installation prior to STATIS environment creation |
-| --fail-fast | n/a | On test error, terminate all tasks |
-| --task-timeout ARG | n/a | Terminate task after timeout is reached (#s, #m, #h) |
-| --overwrite | n/a | Overwrite an existing release |
-| --no-docker | n/a | Do not build docker images |
-| --no-artifactory | n/a | Do not upload artifacts to Artifactory |
-| --no-artifactory-build-info | n/a | Do not upload build info objects to Artifactory |
-| --no-artifactory-upload | n/a | Do not upload artifacts to Artifactory (dry-run) |
-| --no-testing | n/a | Do not execute test scripts |
-| --no-parallel | n/a | Do not execute tests in parallel |
-| --no-task-logging | n/a | Do not log task output (write to stdout) |
-| --no-rewrite | n/a | Do not rewrite paths and URLs in output files |
-| DELIVERY_FILE | n/a | STASIS delivery file |
+| Long Option | Short Option | Purpose |
+|:------------------------------------|:------------:|:---------------------------------------------------------------|
+| --help | -h | Display usage statement |
+| --version | -V | Display program version |
+| --continue-on-error | -C | Allow tests to fail |
+| --config ARG | -c ARG | Read STASIS configuration file |
+| --cpu-limit ARG | -l ARG | Number of processes to spawn concurrently (default: cpus - 1) |
+| --pool-status-interval ARG | n/a | Report task status every n seconds (default: 30) |
+| --python ARG | -p ARG | Override version of Python in configuration |
+| --verbose | -v | Increase output verbosity |
+| --unbuffered | -U | Disable line buffering |
+| --update-base                       | n/a          | Update conda installation prior to STASIS environment creation |
+| --fail-fast | n/a | On test error, terminate all tasks |
+| --task-timeout ARG | n/a | Terminate task after timeout is reached (#s, #m, #h) |
+| --overwrite | n/a | Overwrite an existing release |
+| --wheel-builder ARG | n/a | Wheel building backend (build, cibuildwheel, manylinux) |
+| --wheel-builder-manylinux-image ARG | n/a | Manylinux image name |
+| --no-docker | n/a | Do not build docker images |
+| --no-artifactory | n/a | Do not upload artifacts to Artifactory |
+| --no-artifactory-build-info | n/a | Do not upload build info objects to Artifactory |
+| --no-artifactory-upload | n/a | Do not upload artifacts to Artifactory (dry-run) |
+| --no-testing | n/a | Do not execute test scripts |
+| --no-parallel | n/a | Do not execute tests in parallel |
+| --no-task-logging | n/a | Do not log task output (write to stdout) |
+| --no-rewrite | n/a | Do not rewrite paths and URLs in output files |
+| DELIVERY_FILE | n/a | STASIS delivery file |
## Indexer Command Line Options
diff --git a/src/cli/stasis/args.c b/src/cli/stasis/args.c
index 172981a..dbc9c2f 100644
--- a/src/cli/stasis/args.c
+++ b/src/cli/stasis/args.c
@@ -15,6 +15,8 @@ struct option long_options[] = {
{"fail-fast", no_argument, 0, OPT_FAIL_FAST},
{"task-timeout", required_argument, 0, OPT_TASK_TIMEOUT},
{"overwrite", no_argument, 0, OPT_OVERWRITE},
+ {"wheel-builder", required_argument, 0, OPT_WHEEL_BUILDER},
+ {"wheel-builder-manylinux-image", required_argument, 0, OPT_WHEEL_BUILDER_MANYLINUX_IMAGE},
{"no-docker", no_argument, 0, OPT_NO_DOCKER},
{"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY},
{"no-artifactory-build-info", no_argument, 0, OPT_NO_ARTIFACTORY_BUILD_INFO},
@@ -40,6 +42,8 @@ const char *long_options_help[] = {
"On error, immediately terminate all tasks",
"Terminate task after timeout is reached (#s, #m, #h)",
"Overwrite an existing release",
+ "Wheel building backend (build, cibuildwheel, manylinux)",
+ "Manylinux image name",
"Do not build docker images",
"Do not upload artifacts to Artifactory",
"Do not upload build info objects to Artifactory",
diff --git a/src/cli/stasis/include/args.h b/src/cli/stasis/include/args.h
index 5536735..e789261 100644
--- a/src/cli/stasis/include/args.h
+++ b/src/cli/stasis/include/args.h
@@ -19,6 +19,8 @@
#define OPT_POOL_STATUS_INTERVAL 1011
#define OPT_NO_TASK_LOGGING 1012
#define OPT_TASK_TIMEOUT 1013
+#define OPT_WHEEL_BUILDER 1014
+#define OPT_WHEEL_BUILDER_MANYLINUX_IMAGE 1015
extern struct option long_options[];
void usage(char *progname);
diff --git a/src/cli/stasis/stasis_main.c b/src/cli/stasis/stasis_main.c
index 633d014..44efc4a 100644
--- a/src/cli/stasis/stasis_main.c
+++ b/src/cli/stasis/stasis_main.c
@@ -54,15 +54,16 @@ static void configure_stasis_ini(struct Delivery *ctx, char **config_input) {
}
}
- msg(STASIS_MSG_L2, "Reading STASIS global configuration: %s\n", *config_input);
+ SYSDEBUG("Reading STASIS global configuration: %s\n", *config_input);
ctx->_stasis_ini_fp.cfg = ini_open(*config_input);
if (!ctx->_stasis_ini_fp.cfg) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read config file: %s, %s\n", *config_input, strerror(errno));
+ msg(STASIS_MSG_ERROR, "Failed to read global config file: %s, %s\n", *config_input, strerror(errno));
+ SYSERROR("Failed to read global config file: %s\n", *config_input);
exit(1);
}
ctx->_stasis_ini_fp.cfg_path = strdup(*config_input);
if (!ctx->_stasis_ini_fp.cfg_path) {
- SYSERROR("%s", "Failed to allocate memory for config file name");
+        SYSERROR("%s", "Failed to allocate memory for delivery context global config file name");
exit(1);
}
guard_free(*config_input);
@@ -102,9 +103,9 @@ static void configure_jfrog_cli(struct Delivery *ctx) {
static void check_release_history(struct Delivery *ctx) {
// Safety gate: Avoid clobbering a delivered release unless the user wants that behavior
- msg(STASIS_MSG_L1, "Checking release history\n");
+ msg(STASIS_MSG_L2, "Checking release history\n");
if (!globals.enable_overwrite && delivery_exists(ctx) == DELIVERY_FOUND) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Refusing to overwrite release: %s\nUse --overwrite to enable release clobbering.\n", ctx->info.release_name);
+ msg(STASIS_MSG_ERROR, "Refusing to overwrite release: %s\nUse --overwrite to enable release clobbering.\n", ctx->info.release_name);
exit(1);
}
@@ -147,14 +148,14 @@ static void check_conda_prefix_length(const struct Delivery *ctx) {
// 5 = /bin\n
const size_t prefix_len = strlen(ctx->storage.conda_install_prefix) + 2 + 5;
const size_t prefix_len_max = 127;
- msg(STASIS_MSG_L1, "Checking length of conda installation prefix\n");
+ msg(STASIS_MSG_L2, "Checking length of conda installation prefix\n");
if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux") && prefix_len > prefix_len_max) {
- msg(STASIS_MSG_L2 | STASIS_MSG_ERROR,
+ msg(STASIS_MSG_L3 | STASIS_MSG_ERROR,
"The shebang, '#!%s/bin/python\\n' is too long (%zu > %zu).\n",
ctx->storage.conda_install_prefix, prefix_len, prefix_len_max);
- msg(STASIS_MSG_L2 | STASIS_MSG_ERROR,
+ msg(STASIS_MSG_L3 | STASIS_MSG_ERROR,
"Conda's workaround to handle long path names does not work consistently within STASIS.\n");
- msg(STASIS_MSG_L2 | STASIS_MSG_ERROR,
+ msg(STASIS_MSG_L3 | STASIS_MSG_ERROR,
"Please try again from a different, \"shorter\", directory.\n");
exit(1);
}
@@ -304,7 +305,8 @@ static void configure_tool_versions(struct Delivery *ctx) {
}
}
-static void install_build_package() {
+static void install_packaging_tools() {
+ msg(STASIS_MSG_L1, "Installing packaging tool(s)\n");
if (pip_exec("install build")) {
msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "'build' tool installation failed\n");
exit(1);
@@ -331,8 +333,7 @@ static void configure_deferred_packages(struct Delivery *ctx) {
delivery_defer_packages(ctx, DEFER_PIP);
}
-static void show_overiew(struct Delivery *ctx) {
-
+static void show_overview(struct Delivery *ctx) {
msg(STASIS_MSG_L1, "Overview\n");
delivery_meta_show(ctx);
delivery_conda_show(ctx);
@@ -512,9 +513,11 @@ int main(int argc, char *argv[]) {
memset(&proc, 0, sizeof(proc));
memset(&ctx, 0, sizeof(ctx));
+ setup_sysconfdir();
+
int c;
int option_index = 0;
- while ((c = getopt_long(argc, argv, "hVCc:p:vU", long_options, &option_index)) != -1) {
+ while ((c = getopt_long(argc, argv, "hVCc:p:vUl:", long_options, &option_index)) != -1) {
switch (c) {
case 'h':
usage(path_basename(argv[0]));
@@ -605,6 +608,12 @@ int main(int argc, char *argv[]) {
case OPT_NO_TASK_LOGGING:
globals.enable_task_logging = false;
break;
+ case OPT_WHEEL_BUILDER:
+ globals.wheel_builder = strdup(optarg);
+ break;
+ case OPT_WHEEL_BUILDER_MANYLINUX_IMAGE:
+ globals.wheel_builder_manylinux_image = strdup(optarg);
+ break;
case '?':
default:
exit(1);
@@ -627,21 +636,19 @@ int main(int argc, char *argv[]) {
printf(BANNER, VERSION, AUTHOR);
+ setup_python_version_override(&ctx, python_override_version);
+ configure_stasis_ini(&ctx, &config_input);
check_system_path();
+ check_requirements(&ctx);
msg(STASIS_MSG_L1, "Setup\n");
tpl_setup_vars(&ctx);
tpl_setup_funcs(&ctx);
- setup_sysconfdir();
- setup_python_version_override(&ctx, python_override_version);
-
- configure_stasis_ini(&ctx, &config_input);
configure_delivery_ini(&ctx, &delivery_input);
configure_delivery_context(&ctx);
- check_requirements(&ctx);
configure_jfrog_cli(&ctx);
runtime_apply(ctx.runtime.environ);
@@ -665,11 +672,11 @@ int main(int argc, char *argv[]) {
setup_activate_test_env(&ctx, env_name_testing);
configure_tool_versions(&ctx);
- install_build_package();
+ install_packaging_tools();
configure_package_overlay(&ctx, env_name);
configure_deferred_packages(&ctx);
- show_overiew(&ctx);
+ show_overview(&ctx);
run_tests(&ctx);
build_conda_recipes(&ctx);
build_wheel_packages(&ctx);
diff --git a/src/cli/stasis/system_requirements.c b/src/cli/stasis/system_requirements.c
index cb0ebd5..0f0aae8 100644
--- a/src/cli/stasis/system_requirements.c
+++ b/src/cli/stasis/system_requirements.c
@@ -27,36 +27,46 @@ void check_system_requirements(struct Delivery *ctx) {
};
msg(STASIS_MSG_L1, "Checking system requirements\n");
+
+ msg(STASIS_MSG_L2, "Tools\n");
for (size_t i = 0; tools_required[i] != NULL; i++) {
+ msg(STASIS_MSG_L3, "%s: ", tools_required[i]);
if (!find_program(tools_required[i])) {
- msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "'%s' must be installed.\n", tools_required[i]);
+ msg(STASIS_MSG_ERROR, "'%s' must be installed.\n", tools_required[i]);
exit(1);
}
+ msg(STASIS_MSG_RESTRICT, "found\n");
}
if (!globals.tmpdir && !ctx->storage.tmpdir) {
delivery_init_tmpdir(ctx);
}
- if (!docker_capable(&ctx->deploy.docker.capabilities)) {
+ msg(STASIS_MSG_L2, "Docker\n");
+ if (docker_capable(&ctx->deploy.docker.capabilities)) {
struct DockerCapabilities *dcap = &ctx->deploy.docker.capabilities;
- msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Docker is broken\n");
- msg(STASIS_MSG_L3, "Available: %s\n", dcap->available ? "Yes" : "No");
- msg(STASIS_MSG_L3, "Usable: %s\n", dcap->usable ? "Yes" : "No");
+ msg(STASIS_MSG_L3, "Available: %s%s%s\n", dcap->available ? STASIS_COLOR_GREEN : STASIS_COLOR_RED, dcap->available ? "Yes" : "No", STASIS_COLOR_RESET);
+ msg(STASIS_MSG_L3, "Usable: %s%s%s\n", dcap->usable ? STASIS_COLOR_GREEN : STASIS_COLOR_RED, dcap->usable ? "Yes" : "No", STASIS_COLOR_RESET);
msg(STASIS_MSG_L3, "Podman [Docker Emulation]: %s\n", dcap->podman ? "Yes" : "No");
msg(STASIS_MSG_L3, "Build plugin(s): ");
- if (dcap->usable) {
+ if (dcap->build) {
if (dcap->build & STASIS_DOCKER_BUILD) {
- printf("build ");
+ msg(STASIS_MSG_RESTRICT, "build ");
}
if (dcap->build & STASIS_DOCKER_BUILD_X) {
- printf("buildx ");
+ msg(STASIS_MSG_RESTRICT, "buildx ");
}
- puts("");
+ msg(STASIS_MSG_RESTRICT,"\n");
} else {
- printf("N/A\n");
+ msg(STASIS_MSG_RESTRICT, "%sN/A%s\n", STASIS_COLOR_YELLOW, STASIS_COLOR_RESET);
}
+ if (!dcap->usable) {
+ // disable docker builds
+ globals.enable_docker = false;
+ }
+ } else {
+ msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Docker is broken\n");
// disable docker builds
globals.enable_docker = false;
}
diff --git a/src/lib/core/docker.c b/src/lib/core/docker.c
index 4723446..39357ad 100644
--- a/src/lib/core/docker.c
+++ b/src/lib/core/docker.c
@@ -1,17 +1,30 @@
#include "docker.h"
-int docker_exec(const char *args, unsigned flags) {
+int docker_exec(const char *args, const unsigned flags) {
struct Process proc;
char cmd[PATH_MAX];
memset(&proc, 0, sizeof(proc));
memset(cmd, 0, sizeof(cmd));
snprintf(cmd, sizeof(cmd) - 1, "docker %s", args);
+
+ unsigned final_flags = 0;
if (flags & STASIS_DOCKER_QUIET) {
+ final_flags |= STASIS_DOCKER_QUIET_STDOUT;
+ final_flags |= STASIS_DOCKER_QUIET_STDERR;
+ } else {
+ final_flags = flags;
+ }
+
+ if (final_flags & STASIS_DOCKER_QUIET_STDOUT) {
strcpy(proc.f_stdout, "/dev/null");
+ }
+ if (final_flags & STASIS_DOCKER_QUIET_STDERR) {
strcpy(proc.f_stderr, "/dev/null");
- } else {
+ }
+
+ if (!final_flags) {
msg(STASIS_MSG_L2, "Executing: %s\n", cmd);
}
@@ -19,11 +32,11 @@ int docker_exec(const char *args, unsigned flags) {
return proc.returncode;
}
-int docker_script(const char *image, char *data, unsigned flags) {
+int docker_script(const char *image, char *args, char *data, const unsigned flags) {
(void)flags; // TODO: placeholder
char cmd[PATH_MAX] = {0};
- snprintf(cmd, sizeof(cmd) - 1, "docker run --rm -i %s /bin/sh -", image);
+ snprintf(cmd, sizeof(cmd) - 1, "docker run -i %s \"%s\" /bin/sh -", args ? args : "", image);
FILE *outfile = popen(cmd, "w");
if (!outfile) {
diff --git a/src/lib/core/envctl.c b/src/lib/core/envctl.c
index b036611..d8d1b3d 100644
--- a/src/lib/core/envctl.c
+++ b/src/lib/core/envctl.c
@@ -92,23 +92,28 @@ unsigned envctl_get_flags(const struct EnvCtl *envctl, const char *name) {
}
void envctl_do_required(const struct EnvCtl *envctl, int verbose) {
+ int failed = 0;
for (size_t i = 0; i < envctl->num_used; i++) {
- struct EnvCtl_Item *item = envctl->item[i];
+ const struct EnvCtl_Item *item = envctl->item[i];
const char *name = item->name;
envctl_except_fn *callback = item->callback;
if (verbose) {
- msg(STASIS_MSG_L2, "Verifying %s\n", name);
+ msg(STASIS_MSG_L2, "Verifying %s [%s]\n", name, item->flags & STASIS_ENVCTL_REQUIRED ? "required" : "optional");
}
- int code = callback((const void *) item, (const void *) name);
+ const int code = callback((const void *) item, (const void *) name);
if (code == STASIS_ENVCTL_RET_IGNORE || code == STASIS_ENVCTL_RET_SUCCESS) {
continue;
}
if (code == STASIS_ENVCTL_RET_FAIL) {
- fprintf(stderr, "\n%s must be set. Exiting.\n", name);
- exit(1);
+ msg(STASIS_MSG_ERROR, "\n%s%s must be defined.\n", name, STASIS_COLOR_RESET);
+ failed++;
}
- fprintf(stderr, "\nan unknown envctl callback code occurred: %d\n", code);
+ msg(STASIS_MSG_ERROR, "\nan unknown envctl callback code occurred: %d\n", code);
+ }
+
+ if (failed) {
+ msg(STASIS_MSG_ERROR, "Environment check failed with %d error(s)\n", failed);
exit(1);
}
}
diff --git a/src/lib/core/globals.c b/src/lib/core/globals.c
index 834213b..63555a2 100644
--- a/src/lib/core/globals.c
+++ b/src/lib/core/globals.c
@@ -53,6 +53,8 @@ void globals_free() {
guard_free(globals.conda_install_prefix);
guard_strlist_free(&globals.conda_packages);
guard_strlist_free(&globals.pip_packages);
+ guard_free(globals.wheel_builder);
+ guard_free(globals.wheel_builder_manylinux_image);
guard_free(globals.jfrog.arch);
guard_free(globals.jfrog.os);
guard_free(globals.jfrog.url);
diff --git a/src/lib/core/include/core.h b/src/lib/core/include/core.h
index 5a3fa85..c895267 100644
--- a/src/lib/core/include/core.h
+++ b/src/lib/core/include/core.h
@@ -51,7 +51,9 @@ struct STASIS_GLOBAL {
char *tmpdir; //!< Path to temporary storage directory
char *conda_install_prefix; //!< Path to install conda
char *sysconfdir; //!< Path where STASIS reads its configuration files (mission directory, etc)
- int task_timeout; ///< Time in seconds before task is terminated
+    int task_timeout; //!< Time in seconds before task is terminated
+    char *wheel_builder; //!< Backend to build wheels (build, cibuildwheel, manylinux)
+    char *wheel_builder_manylinux_image; //!< Image to use for a Manylinux build
struct {
char *tox_posargs;
char *conda_reactivate;
diff --git a/src/lib/core/include/docker.h b/src/lib/core/include/docker.h
index 7585d86..dd67f21 100644
--- a/src/lib/core/include/docker.h
+++ b/src/lib/core/include/docker.h
@@ -6,6 +6,8 @@
//! Flag to squelch output from docker_exec()
#define STASIS_DOCKER_QUIET 1 << 1
+#define STASIS_DOCKER_QUIET_STDOUT 1 << 2
+#define STASIS_DOCKER_QUIET_STDERR 1 << 3
//! Flag for older style docker build
#define STASIS_DOCKER_BUILD 1 << 1
@@ -83,7 +85,7 @@ int docker_exec(const char *args, unsigned flags);
* @return
*/
int docker_build(const char *dirpath, const char *args, int engine);
-int docker_script(const char *image, char *data, unsigned flags);
+int docker_script(const char *image, char *args, char *data, unsigned flags);
int docker_save(const char *image, const char *destdir, const char *compression_program);
void docker_sanitize_tag(char *str);
int docker_validate_compression_program(char *prog);
diff --git a/src/lib/core/include/strlist.h b/src/lib/core/include/strlist.h
index 18c60eb..1aaae3e 100644
--- a/src/lib/core/include/strlist.h
+++ b/src/lib/core/include/strlist.h
@@ -46,6 +46,7 @@ void strlist_append_strlist(struct StrList *pStrList1, struct StrList *pStrList2
void strlist_append(struct StrList **pStrList, char *str);
void strlist_append_array(struct StrList *pStrList, char **arr);
void strlist_append_tokenize(struct StrList *pStrList, char *str, char *delim);
+int strlist_appendf(struct StrList **pStrList, const char *fmt, ...);
struct StrList *strlist_copy(struct StrList *pStrList);
int strlist_cmp(struct StrList *a, struct StrList *b);
void strlist_free(struct StrList **pStrList);
diff --git a/src/lib/core/include/utils.h b/src/lib/core/include/utils.h
index ea98faf..335a7e4 100644
--- a/src/lib/core/include/utils.h
+++ b/src/lib/core/include/utils.h
@@ -27,6 +27,15 @@
#define LINE_SEP "\n"
#endif
+#if defined(STASIS_OS_LINUX)
+#define STASIS_RANDOM_GENERATOR_FILE "/dev/urandom"
+#elif defined(STASIS_OS_DARWIN)
+#define STASIS_RANDOM_GENERATOR_FILE "/dev/random"
+#else
+#define STASIS_RANDOM_GENERATOR_FILE NULL
+#define NEED_SRAND 1
+#endif
+
#define STASIS_XML_PRETTY_PRINT_PROG "xmllint"
#define STASIS_XML_PRETTY_PRINT_ARGS "--format"
@@ -470,4 +479,7 @@ void seconds_to_human_readable(int v, char *result, size_t maxlen);
#define STR_TO_TIMEOUT_INVALID_TIME_SCALE (-2)
int str_to_timeout(char *s);
+const char *get_random_generator_file();
+int get_random_bytes(char *result, size_t maxlen);
+
#endif //STASIS_UTILS_H
diff --git a/src/lib/core/strlist.c b/src/lib/core/strlist.c
index a0db5f3..3479c44 100644
--- a/src/lib/core/strlist.c
+++ b/src/lib/core/strlist.c
@@ -47,7 +47,6 @@ void strlist_append(struct StrList **pStrList, char *str) {
(*pStrList)->data = tmp;
(*pStrList)->data[(*pStrList)->num_inuse] = strdup(str);
(*pStrList)->data[(*pStrList)->num_alloc] = NULL;
- strcpy((*pStrList)->data[(*pStrList)->num_inuse], str);
(*pStrList)->num_inuse++;
(*pStrList)->num_alloc++;
}
@@ -231,6 +230,28 @@ void strlist_append_strlist(struct StrList *pStrList1, struct StrList *pStrList2
}
/**
+ * Append a formatted string
+ * Behavior is identical to asprintf-family of functions
+ * @param pStrList `StrList`
+ * @param fmt printf format string
+ * @param ... format arguments
+ * @return same as vasnprintf
+ */
+int strlist_appendf(struct StrList **pStrList, const char *fmt, ...) {
+ char *s = NULL;
+ va_list ap;
+ va_start(ap, fmt);
+ const int len = vasprintf(&s, fmt, ap);
+ va_end(ap);
+
+ if (pStrList && *pStrList && len >= 0) {
+ strlist_append(pStrList, s);
+ }
+ guard_free(s);
+ return len;
+}
+
+/**
* Produce a new copy of a `StrList`
* @param pStrList `StrList`
* @return `StrList` copy
diff --git a/src/lib/core/template_func_proto.c b/src/lib/core/template_func_proto.c
index 8324389..3e1cd99 100644
--- a/src/lib/core/template_func_proto.c
+++ b/src/lib/core/template_func_proto.c
@@ -55,8 +55,8 @@ int get_github_release_notes_auto_tplfunc_entrypoint(void *frame, void *data_out
strlist_append(&notes_list, note);
guard_free(note);
}
- guard_free(repository);
}
+ guard_free(repository);
}
}
// Return all notes as a single string
diff --git a/src/lib/core/utils.c b/src/lib/core/utils.c
index 00d747f..e106193 100644
--- a/src/lib/core/utils.c
+++ b/src/lib/core/utils.c
@@ -376,7 +376,8 @@ char *git_describe(const char *path) {
return NULL;
}
- FILE *pp = popen("git describe --first-parent --always --tags", "r");
+ // TODO: Use `-C [path]` if the version of git installed supports it
+ FILE *pp = popen("git describe --first-parent --long --always --tags", "r");
if (!pp) {
return NULL;
}
@@ -401,6 +402,7 @@ char *git_rev_parse(const char *path, char *args) {
return NULL;
}
+ // TODO: Use `-C [path]` if the version of git installed supports it
sprintf(cmd, "git rev-parse %s", args);
FILE *pp = popen(cmd, "r");
if (!pp) {
@@ -1120,3 +1122,53 @@ void seconds_to_human_readable(const int v, char *result, const size_t maxlen) {
snprintf(result + strlen(result), maxlen, "%ds", seconds);
}
+const char *get_random_generator_file() {
+ return STASIS_RANDOM_GENERATOR_FILE;
+}
+
+#ifdef NEED_SRAND
+static char stasis_srand_initialized = 0;
+#endif
+
+int get_random_bytes(char *result, size_t maxlen) {
+#ifdef NEED_SRAND
+    if (!stasis_srand_initialized) {
+        srand(time(NULL));
+        stasis_srand_initialized = 1;
+ }
+#endif
+ size_t bytes = 0;
+ const char *filename = get_random_generator_file();
+ FILE *fp = NULL;
+ if (filename != NULL) {
+ fp = fopen(filename, "rb");
+ if (!fp) {
+ SYSERROR("%s", "unable to open random generator");
+ return -1;
+ }
+ }
+
+ do {
+ int ch = 0;
+ if (fp) {
+ ch = fgetc(fp);
+ } else {
+ ch = rand() % 255;
+ }
+ if (fp && ferror(fp)) {
+ SYSERROR("%s", "unable to read from random generator");
+            fclose(fp); return -1; // do not leak the generator descriptor on read error
+ }
+ if (isalnum(ch)) {
+ result[bytes] = (char) ch;
+ bytes++;
+ }
+ } while (bytes < maxlen);
+
+ if (fp) {
+ fclose(fp);
+ }
+ result[bytes ? bytes - 1 : 0] = '\0';
+ return 0;
+}
+
diff --git a/src/lib/core/wheel.c b/src/lib/core/wheel.c
index c7e485a..79b5a21 100644
--- a/src/lib/core/wheel.c
+++ b/src/lib/core/wheel.c
@@ -113,6 +113,9 @@ struct Wheel *get_wheel_info(const char *basepath, const char *name, char *to_ma
void wheel_free(struct Wheel **wheel) {
struct Wheel *w = (*wheel);
+ if (!w) {
+ return;
+ }
guard_free(w->path_name);
guard_free(w->file_name);
guard_free(w->distribution);
diff --git a/src/lib/delivery/delivery_build.c b/src/lib/delivery/delivery_build.c
index 8370e6d..86555bd 100644
--- a/src/lib/delivery/delivery_build.c
+++ b/src/lib/delivery/delivery_build.c
@@ -25,11 +25,26 @@ int delivery_build_recipes(struct Delivery *ctx) {
char recipe_git_url[PATH_MAX];
char recipe_git_rev[PATH_MAX];
+ char tag[100];
+ if (ctx->tests[i].repository_info_tag) {
+ const int is_long_tag = num_chars(ctx->tests[i].repository_info_tag, '-') > 1;
+ if (is_long_tag) {
+ const size_t len = strcspn(ctx->tests[i].repository_info_tag, "-");
+ strncpy(tag, ctx->tests[i].repository_info_tag, len);
+ tag[len] = '\0';
+ } else {
+ strcpy(tag, ctx->tests[i].repository_info_tag);
+ tag[strlen(ctx->tests[i].repository_info_tag)] = '\0';
+ }
+ } else {
+ strcpy(tag, ctx->tests[i].version);
+ }
+
//sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}");
//sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository);
//sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version);
// TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^
- sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? ctx->tests[i].repository_info_tag : ctx->tests[i].version);
+ sprintf(recipe_version, "{%% set version = \"%s\" %%}", tag);
sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository);
strcpy(recipe_git_rev, "");
sprintf(recipe_buildno, " number: 0");
@@ -127,7 +142,232 @@ int filter_repo_tags(char *repo, struct StrList *patterns) {
return result;
}
+static int read_without_line_endings(const size_t line, char ** arg) {
+ (void) line;
+ if (*arg) {
+ strip(*arg);
+ if (isempty(*arg)) {
+ return 1; // skip
+ }
+ }
+ return 0;
+}
+
+int manylinux_exec(const char *image, const char *script, const char *copy_to_container_dir, const char *copy_from_container_dir, const char *copy_to_host_dir) {
+ int result = -1; // fail by default
+ char *container_name = NULL;
+ char *source_copy_command = NULL;
+ char *copy_command = NULL;
+ char *rm_command = NULL;
+ char *nop_create_command = NULL;
+ char *nop_rm_command = NULL;
+ char *volume_rm_command = NULL;
+ char *find_command = NULL;
+ char *wheel_paths_filename = NULL;
+ char *args = NULL;
+
+ const uid_t uid = geteuid();
+ char suffix[7] = {0};
+
+ // setup
+
+ if (get_random_bytes(suffix, sizeof(suffix))) {
+ SYSERROR("%s", "unable to acquire value from random generator");
+ goto manylinux_fail;
+ }
+
+    if (asprintf(&container_name, "manylinux_build_%d_%ld_%s", (int) uid, (long) time(NULL), suffix) < 0) {
+ SYSERROR("%s", "unable to allocate memory for container name");
+ goto manylinux_fail;
+ }
+
+ if (asprintf(&args, "--name %s -w /build -v %s:/build", container_name, container_name) < 0) {
+ SYSERROR("%s", "unable to allocate memory for docker arguments");
+ goto manylinux_fail;
+ }
+
+ if (!strstr(image, "manylinux")) {
+ SYSERROR("expected a manylinux image, but got %s", image);
+ goto manylinux_fail;
+ }
+
+ if (asprintf(&nop_create_command, "run --name nop_%s -v %s:/build busybox", container_name, container_name) < 0) {
+ SYSERROR("%s", "unable to allocate memory for nop container command");
+ goto manylinux_fail;
+ }
+
+ if (asprintf(&source_copy_command, "cp %s nop_%s:/build", copy_to_container_dir, container_name) < 0) {
+ SYSERROR("%s", "unable to allocate memory for source copy command");
+ goto manylinux_fail;
+ }
+
+ if (asprintf(&nop_rm_command, "rm nop_%s", container_name) < 0) {
+ SYSERROR("%s", "unable to allocate memory for nop container command");
+ goto manylinux_fail;
+ }
+
+ if (asprintf(&wheel_paths_filename, "wheel_paths_%s.txt", container_name) < 0) {
+ SYSERROR("%s", "unable to allocate memory for wheel paths file name");
+ goto manylinux_fail;
+ }
+
+ if (asprintf(&find_command, "run --rm -t -v %s:/build busybox sh -c 'find %s -name \"*.whl\"' > %s", container_name, copy_from_container_dir, wheel_paths_filename) < 0) {
+ SYSERROR("%s", "unable to allocate memory for find command");
+ goto manylinux_fail;
+ }
+
+ // execute
+
+ if (docker_exec(nop_create_command, 0)) {
+ SYSERROR("%s", "docker nop container creation failed");
+ goto manylinux_fail;
+ }
+
+ if (docker_exec(source_copy_command, 0)) {
+ SYSERROR("%s", "docker source copy operation failed");
+ goto manylinux_fail;
+ }
+
+ if (docker_exec(nop_rm_command, STASIS_DOCKER_QUIET)) {
+ SYSERROR("%s", "docker nop container removal failed");
+ goto manylinux_fail;
+ }
+
+ if (docker_script(image, args, (char *) script, 0)) {
+ SYSERROR("%s", "manylinux execution failed");
+ goto manylinux_fail;
+ }
+
+ if (docker_exec(find_command, 0)) {
+ SYSERROR("%s", "docker find command failed");
+ goto manylinux_fail;
+ }
+
+ struct StrList *wheel_paths = strlist_init();
+ if (!wheel_paths) {
+ SYSERROR("%s", "wheel_paths not initialized");
+ goto manylinux_fail;
+ }
+
+ if (strlist_append_file(wheel_paths, wheel_paths_filename, read_without_line_endings)) {
+ SYSERROR("%s", "wheel_paths append failed");
+ goto manylinux_fail;
+ }
+
+ for (size_t i = 0; i < strlist_count(wheel_paths); i++) {
+ const char *item = strlist_item(wheel_paths, i);
+ if (asprintf(&copy_command, "cp %s:%s %s", container_name, item, copy_to_host_dir) < 0) {
+ SYSERROR("%s", "unable to allocate memory for docker copy command");
+ goto manylinux_fail;
+ }
+
+ if (docker_exec(copy_command, 0)) {
+ SYSERROR("%s", "docker copy operation failed");
+ goto manylinux_fail;
+ }
+ guard_free(copy_command);
+ }
+
+ // Success
+ result = 0;
+
+ manylinux_fail:
+ if (wheel_paths_filename) {
+ remove(wheel_paths_filename);
+ }
+
+ if (container_name) {
+ // Keep going on failure unless memory related.
+ // We don't want build debris everywhere.
+ if (asprintf(&rm_command, "rm %s", container_name) < 0) {
+ SYSERROR("%s", "unable to allocate memory for rm command");
+ goto late_fail;
+ }
+
+ if (docker_exec(rm_command, STASIS_DOCKER_QUIET)) {
+ SYSERROR("%s", "docker container removal operation failed");
+ }
+
+ if (asprintf(&volume_rm_command, "volume rm -f %s", container_name) < 0) {
+ SYSERROR("%s", "unable to allocate memory for docker volume removal command");
+ goto late_fail;
+ }
+
+ if (docker_exec(volume_rm_command, STASIS_DOCKER_QUIET)) {
+ SYSERROR("%s", "docker volume removal operation failed");
+ }
+ }
+
+ late_fail:
+ guard_free(container_name);
+ guard_free(args);
+ guard_free(copy_command);
+ guard_free(rm_command);
+ guard_free(volume_rm_command);
+ guard_free(source_copy_command);
+ guard_free(nop_create_command);
+ guard_free(nop_rm_command);
+ guard_free(find_command);
+ guard_free(wheel_paths_filename);
+ guard_strlist_free(&wheel_paths);
+ return result;
+}
+
+int delivery_build_wheels_manylinux(struct Delivery *ctx, const char *outdir) {
+ msg(STASIS_MSG_L1, "Building wheels\n");
+
+ const char *manylinux_image = globals.wheel_builder_manylinux_image;
+ if (!manylinux_image) {
+ SYSERROR("%s", "manylinux_image not initialized");
+ return -1;
+ }
+
+ int manylinux_build_status = 0;
+
+ msg(STASIS_MSG_L2, "Using: %s\n", manylinux_image);
+ const struct Meta *meta = &ctx->meta;
+ const char *script_fmt =
+ "set -e -x\n"
+ "git config --global --add safe.directory /build\n"
+ "python%s -m pip install auditwheel build\n"
+ "python%s -m build -w .\n"
+ "auditwheel show dist/*.whl\n"
+ "auditwheel repair --allow-pure-python-wheel dist/*.whl\n";
+ char *script = NULL;
+ if (asprintf(&script, script_fmt,
+ meta->python, meta->python) < 0) {
+ SYSERROR("%s", "unable to allocate memory for build script");
+ return -1;
+ }
+ manylinux_build_status = manylinux_exec(
+ manylinux_image,
+ script,
+ "./",
+ "/build/wheelhouse",
+ outdir);
+
+ if (manylinux_build_status) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "manylinux build failed (%d)", manylinux_build_status);
+ guard_free(script);
+ return -1;
+ }
+ guard_free(script);
+ return 0;
+}
+
struct StrList *delivery_build_wheels(struct Delivery *ctx) {
+ const int on_linux = strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux") == 0;
+ const int docker_usable = ctx->deploy.docker.capabilities.usable;
+ int use_builder_build = strcmp(globals.wheel_builder, "native") == 0;
+ const int use_builder_cibuildwheel = strcmp(globals.wheel_builder, "cibuildwheel") == 0 && on_linux && docker_usable;
+ const int use_builder_manylinux = strcmp(globals.wheel_builder, "manylinux") == 0 && on_linux && docker_usable;
+
+ if (!use_builder_build && !use_builder_cibuildwheel && !use_builder_manylinux) {
+ msg(STASIS_MSG_WARN, "Cannot build wheel for platform using: %s\n", globals.wheel_builder);
+ msg(STASIS_MSG_WARN, "Falling back to native toolchain.\n");
+ use_builder_build = 1;
+ }
+
struct StrList *result = NULL;
struct Process proc = {0};
@@ -161,6 +401,12 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) {
return NULL;
}
+ if (!ctx->tests[i].repository_info_tag) {
+ ctx->tests[i].repository_info_tag = strdup(git_describe(srcdir));
+ }
+ if (!ctx->tests[i].repository_info_ref) {
+ ctx->tests[i].repository_info_ref = strdup(git_rev_parse(srcdir, ctx->tests[i].version));
+ }
if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) {
filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags);
}
@@ -192,28 +438,41 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) {
guard_strlist_free(&result);
return NULL;
}
-
- if (asprintf(&cmd, "-m build -w -o %s", outdir) < 0) {
- SYSERROR("%s", "Unable to allocate memory for build command");
- return NULL;
- }
- if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux")
- && ctx->deploy.docker.capabilities.usable) {
- guard_free(cmd);
- if (asprintf(&cmd, "-m cibuildwheel --output-dir %s --only cp%s-manylinux_%s",
- outdir, ctx->meta.python_compact, ctx->system.arch) < 0) {
- SYSERROR("%s", "Unable to allocate memory for cibuildwheel command");
+ if (use_builder_manylinux) {
+ if (delivery_build_wheels_manylinux(ctx, outdir)) {
+ fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name,
+ ctx->tests[i].version);
+ guard_strlist_free(&result);
+ guard_free(cmd);
return NULL;
}
- }
+ } else if (use_builder_build || use_builder_cibuildwheel) {
+ if (use_builder_build) {
+ if (asprintf(&cmd, "-m build -w -o %s", outdir) < 0) {
+ SYSERROR("%s", "Unable to allocate memory for build command");
+ return NULL;
+ }
+ } else if (use_builder_cibuildwheel) {
+ if (asprintf(&cmd, "-m cibuildwheel --output-dir %s --only cp%s-manylinux_%s",
+ outdir, ctx->meta.python_compact, ctx->system.arch) < 0) {
+ SYSERROR("%s", "Unable to allocate memory for cibuildwheel command");
+ return NULL;
+ }
+ }
- if (python_exec(cmd)) {
- fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name,
- ctx->tests[i].version);
- guard_strlist_free(&result);
- guard_free(cmd);
+ if (python_exec(cmd)) {
+ fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name,
+ ctx->tests[i].version);
+ guard_strlist_free(&result);
+ guard_free(cmd);
+ return NULL;
+ }
+ } else {
+ SYSERROR("unknown wheel builder backend: %s", globals.wheel_builder);
return NULL;
}
+
+ guard_free(cmd);
popd();
} else {
fprintf(stderr, "Unable to enter source directory %s: %s\n", srcdir, strerror(errno));
@@ -225,4 +484,3 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) {
}
return result;
}
-
diff --git a/src/lib/delivery/delivery_docker.c b/src/lib/delivery/delivery_docker.c
index 57015ad..2c43caf 100644
--- a/src/lib/delivery/delivery_docker.c
+++ b/src/lib/delivery/delivery_docker.c
@@ -111,7 +111,7 @@ int delivery_docker(struct Delivery *ctx) {
msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Image test script has no content\n");
} else {
int state;
- if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) {
+ if ((state = docker_script(tag, "--rm", ctx->deploy.docker.test_script, 0))) {
msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Non-zero exit (%d) from test script. %s image archive will not be generated.\n", state >> 8, tag);
// test failed -- don't save the image
return -1;
diff --git a/src/lib/delivery/delivery_init.c b/src/lib/delivery/delivery_init.c
index a60d6af..eb0b527 100644
--- a/src/lib/delivery/delivery_init.c
+++ b/src/lib/delivery/delivery_init.c
@@ -150,7 +150,7 @@ void delivery_init_dirs_stage1(struct Delivery *ctx) {
}
int delivery_init_platform(struct Delivery *ctx) {
- msg(STASIS_MSG_L2, "Setting architecture\n");
+ SYSDEBUG("%s", "Setting architecture\n");
char archsuffix[20];
struct utsname uts;
if (uname(&uts)) {
@@ -179,7 +179,7 @@ int delivery_init_platform(struct Delivery *ctx) {
strcpy(archsuffix, ctx->system.arch);
}
- msg(STASIS_MSG_L2, "Setting platform\n");
+ SYSDEBUG("%s", "Setting platform\n");
strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname);
if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) {
sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix);
@@ -287,6 +287,8 @@ int delivery_init(struct Delivery *ctx, int render_mode) {
int bootstrap_build_info(struct Delivery *ctx) {
struct Delivery local = {0};
+ memcpy(&local.deploy.docker.capabilities, &ctx->deploy.docker.capabilities, sizeof(local.deploy.docker.capabilities));
+
SYSDEBUG("ini_open(%s)", ctx->_stasis_ini_fp.cfg_path);
local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path);
SYSDEBUG("ini_open(%s)", ctx->_stasis_ini_fp.delivery_path);
diff --git a/src/lib/delivery/delivery_install.c b/src/lib/delivery/delivery_install.c
index f1637a3..f40a509 100644
--- a/src/lib/delivery/delivery_install.c
+++ b/src/lib/delivery/delivery_install.c
@@ -277,7 +277,8 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha
// not found
fprintf(stderr, "No wheel packages found that match the description of '%s'", info->name);
} else {
- // found
+ // found, replace the original version with newly detected version
+ guard_free(info->version);
info->version = strdup(whl->version);
}
guard_strlist_free(&tag_data);
diff --git a/src/lib/delivery/delivery_populate.c b/src/lib/delivery/delivery_populate.c
index 15ab6bd..4ea93c1 100644
--- a/src/lib/delivery/delivery_populate.c
+++ b/src/lib/delivery/delivery_populate.c
@@ -85,6 +85,45 @@ int populate_delivery_cfg(struct Delivery *ctx, int render_mode) {
}
globals.pip_packages = ini_getval_strlist(cfg, "default", "pip_packages", LINE_SEP, render_mode, &err);
+ err = 0;
+ if (!globals.wheel_builder) {
+ globals.wheel_builder = ini_getval_str(cfg, "default", "wheel_builder", render_mode, &err);
+ if (err) {
+ msg(STASIS_MSG_WARN, "wheel_builder is undefined. Falling back to system toolchain: 'build'.\n");
+ globals.wheel_builder = strdup("build");
+ if (!globals.wheel_builder) {
+ SYSERROR("%s", "unable to allocate memory for default wheel_builder value");
+ return -1;
+ }
+ }
+ }
+
+ err = 0;
+ if (!globals.wheel_builder_manylinux_image) {
+ globals.wheel_builder_manylinux_image = ini_getval_str(cfg, "default", "wheel_builder_manylinux_image", render_mode, &err);
+ }
+
+ if (err && globals.wheel_builder && strcmp(globals.wheel_builder, "manylinux") == 0) {
+ SYSERROR("%s", "default:wheel_builder is set to 'manylinux', however default:wheel_builder_manylinux_image is not configured");
+ return -1;
+ }
+
+ if (strcmp(globals.wheel_builder, "manylinux") == 0) {
+ char *manifest_inspect_cmd = NULL;
+ if (asprintf(&manifest_inspect_cmd, "manifest inspect '%s'", globals.wheel_builder_manylinux_image) < 0) {
+ SYSERROR("%s", "unable to allocate memory for docker command");
+ guard_free(manifest_inspect_cmd);
+ return -1;
+ }
+ if (ctx->deploy.docker.capabilities.usable && docker_exec(manifest_inspect_cmd, STASIS_DOCKER_QUIET_STDOUT)) {
+ SYSERROR("Image provided by default:wheel_builder_manylinux_image does not exist: %s", globals.wheel_builder_manylinux_image);
+ guard_free(manifest_inspect_cmd);
+ return -1;
+ }
+ guard_free(manifest_inspect_cmd);
+ }
+
+
if (globals.jfrog.jfrog_artifactory_base_url) {
guard_free(globals.jfrog.jfrog_artifactory_base_url);
}
@@ -200,7 +239,9 @@ int populate_delivery_ini(struct Delivery *ctx, int render_mode) {
normalize_ini_list(&ini, &ctx->conda.pip_packages_purge, "conda", "pip_packages_purge", render_mode);
// Delivery metadata consumed
- populate_mission_ini(&ctx, render_mode);
+ if (populate_mission_ini(&ctx, render_mode)) {
+ return -1;
+ }
if (ctx->info.release_name) {
guard_free(ctx->info.release_name);
@@ -320,6 +361,7 @@ int populate_mission_ini(struct Delivery **ctx, int render_mode) {
int err = 0;
if ((*ctx)->_stasis_ini_fp.mission) {
+ // mission configurations are optional
return 0;
}
@@ -333,12 +375,12 @@ int populate_mission_ini(struct Delivery **ctx, int render_mode) {
globals.sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission);
}
- msg(STASIS_MSG_L2, "Reading mission configuration: %s\n", missionfile);
+ SYSDEBUG("Reading mission configuration: %s\n", missionfile);
(*ctx)->_stasis_ini_fp.mission = ini_open(missionfile);
struct INIFILE *ini = (*ctx)->_stasis_ini_fp.mission;
if (!ini) {
msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read mission configuration: %s, %s\n", missionfile, strerror(errno));
- exit(1);
+ return -1;
}
(*ctx)->_stasis_ini_fp.mission_path = strdup(missionfile);
diff --git a/stasis.ini b/stasis.ini
index 043fcec..4b0d1db 100644
--- a/stasis.ini
+++ b/stasis.ini
@@ -23,6 +23,18 @@ conda_packages =
; (list) Python packages to be installed/overridden in the base environment
;pip_packages =
+; (string) Python wheel builder [Linux only]
+; DEFAULT: build (local system toolchain)
+; OPTIONS:
+; native = Build using local system toolchain
+; cibuildwheel = Build using cibuildwheel and docker
+; manylinux = Build using manylinux and docker
+wheel_builder = manylinux
+
+; (string) Manylinux image [Linux only]
+; When wheel_builder is set to "manylinux", use the following image
+wheel_builder_manylinux_image = quay.io/pypa/manylinux2014
+
[jfrog_cli_download]
url = https://releases.jfrog.io/artifactory
product = jfrog-cli
diff --git a/tests/test_docker.c b/tests/test_docker.c
index d60522f..b0cf381 100644
--- a/tests/test_docker.c
+++ b/tests/test_docker.c
@@ -41,7 +41,7 @@ void test_docker_build_and_script_and_save() {
if (!pushd("test_docker_build")) {
stasis_testing_write_ascii("Dockerfile", dockerfile_contents);
STASIS_ASSERT(docker_build(".", "-t test_docker_build", cap_suite.build) == 0, "docker build test failed");
- STASIS_ASSERT(docker_script("test_docker_build", "uname -a", 0) == 0, "simple docker container script execution failed");
+ STASIS_ASSERT(docker_script("test_docker_build", "--rm", "uname -a", 0) == 0, "simple docker container script execution failed");
STASIS_ASSERT(docker_save("test_docker_build", ".", STASIS_DOCKER_IMAGE_COMPRESSION) == 0, "saving a simple image failed");
STASIS_ASSERT(docker_exec("load < test_docker_build.tar.*", 0) == 0, "loading a simple image failed");
docker_exec("image rm -f test_docker_build", 0);
diff --git a/tests/test_strlist.c b/tests/test_strlist.c
index 47722c0..38343f4 100644
--- a/tests/test_strlist.c
+++ b/tests/test_strlist.c
@@ -200,6 +200,20 @@ void test_strlist_append_tokenize() {
guard_strlist_free(&list);
}
+void test_strlist_appendf() {
+ const char *fmt = "%c %s %d";
+ struct StrList *list;
+ list = strlist_init();
+ const int len = strlist_appendf(NULL, fmt, 'a', "abc", strlen(fmt));
+ STASIS_ASSERT(strlist_appendf(&list, fmt, 'a', "abc", strlen(fmt)) == len, "length of formatted string should be 7");
+ const char *item = strlist_item(list, 0);
+ STASIS_ASSERT(item != NULL, "valid pointer expected, item should not be NULL");
+ STASIS_ASSERT(strncmp(item, "a", 1) == 0, "first character should be 'a'");
+ STASIS_ASSERT(strncmp(item + 2, "abc", 3) == 0, "string should be 'abc'");
+ STASIS_ASSERT(strncmp(item + 6, "8", 1) == 0, "length of the raw format should be 8");
+ guard_strlist_free(&list);
+}
+
void test_strlist_copy() {
struct StrList *list = strlist_init();
struct StrList *list_copy;
@@ -628,6 +642,7 @@ void test_strlist_item_as_long_double() {
int main(int argc, char *argv[]) {
STASIS_TEST_BEGIN_MAIN();
STASIS_TEST_FUNC *tests[] = {
+ test_strlist_appendf,
test_strlist_init,
test_strlist_free,
test_strlist_append,
diff --git a/tests/test_utils.c b/tests/test_utils.c
index 0e2eb7b..cfe79e0 100644
--- a/tests/test_utils.c
+++ b/tests/test_utils.c
@@ -213,17 +213,21 @@ void test_git_clone_and_describe() {
// test git_describe is functional
char *taginfo_none = git_describe(".");
STASIS_ASSERT(taginfo_none != NULL, "should be a git hash, not NULL");
+ puts(taginfo_none);
+ STASIS_ASSERT(is_git_sha(taginfo_none) == true, "not a git hash");
system("git tag -a 1.0.0 -m Mock");
system("git push --tags origin");
- char *taginfo = git_describe(".");
+ const char *taginfo = git_describe(".");
+ puts(taginfo);
STASIS_ASSERT(taginfo != NULL, "should be 1.0.0, not NULL");
- STASIS_ASSERT(strcmp(taginfo, "1.0.0") == 0, "just-created tag was not described correctly");
+ STASIS_ASSERT(startswith(taginfo, "1.0.0") == true, "just-created tag was not described correctly");
chdir("..");
char *taginfo_outer = git_describe(repo);
+ puts(taginfo_outer);
STASIS_ASSERT(taginfo_outer != NULL, "should be 1.0.0, not NULL");
- STASIS_ASSERT(strcmp(taginfo_outer, "1.0.0") == 0, "just-created tag was not described correctly (out-of-dir invocation)");
+ STASIS_ASSERT(startswith(taginfo_outer, "1.0.0") == true, "just-created tag was not described correctly (out-of-dir invocation)");
char *taginfo_bad = git_describe("abc1234_not_here_or_there");
STASIS_ASSERT(taginfo_bad == NULL, "a repository that shouldn't exist... exists and has a tag.");