diff options
Diffstat (limited to 'src/lib/delivery')
| -rw-r--r-- | src/lib/delivery/delivery.c | 62 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_build.c | 338 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_docker.c | 2 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_init.c | 13 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_install.c | 15 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_populate.c | 64 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_postprocess.c | 2 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_show.c | 10 | ||||
| -rw-r--r-- | src/lib/delivery/delivery_test.c | 64 | ||||
| -rw-r--r-- | src/lib/delivery/include/delivery.h | 75 |
10 files changed, 532 insertions, 113 deletions
diff --git a/src/lib/delivery/delivery.c b/src/lib/delivery/delivery.c index 600ddf9..11dd7b0 100644 --- a/src/lib/delivery/delivery.c +++ b/src/lib/delivery/delivery.c @@ -153,21 +153,21 @@ struct Delivery *delivery_duplicate(const struct Delivery *ctx) { result->deploy.jfrog_auth.url = strdup_maybe(ctx->deploy.jfrog_auth.url); result->deploy.jfrog_auth.user = strdup_maybe(ctx->deploy.jfrog_auth.user); - for (size_t i = 0; i < sizeof(result->tests) / sizeof(result->tests[0]); i++) { - result->tests[i].disable = ctx->tests[i].disable; - result->tests[i].parallel = ctx->tests[i].parallel; - result->tests[i].build_recipe = strdup_maybe(ctx->tests[i].build_recipe); - result->tests[i].name = strdup_maybe(ctx->tests[i].name); - result->tests[i].version = strdup_maybe(ctx->tests[i].version); - result->tests[i].repository = strdup_maybe(ctx->tests[i].repository); - result->tests[i].repository_info_ref = strdup_maybe(ctx->tests[i].repository_info_ref); - result->tests[i].repository_info_tag = strdup_maybe(ctx->tests[i].repository_info_tag); - result->tests[i].repository_remove_tags = strlist_copy(ctx->tests[i].repository_remove_tags); - if (ctx->tests[i].runtime.environ) { - result->tests[i].runtime.environ = runtime_copy(ctx->tests[i].runtime.environ->data); + for (size_t i = 0; result->tests && i < result->tests->num_used; i++) { + result->tests->test[i]->disable = ctx->tests->test[i]->disable; + result->tests->test[i]->parallel = ctx->tests->test[i]->parallel; + result->tests->test[i]->build_recipe = strdup_maybe(ctx->tests->test[i]->build_recipe); + result->tests->test[i]->name = strdup_maybe(ctx->tests->test[i]->name); + result->tests->test[i]->version = strdup_maybe(ctx->tests->test[i]->version); + result->tests->test[i]->repository = strdup_maybe(ctx->tests->test[i]->repository); + result->tests->test[i]->repository_info_ref = strdup_maybe(ctx->tests->test[i]->repository_info_ref); + result->tests->test[i]->repository_info_tag = 
strdup_maybe(ctx->tests->test[i]->repository_info_tag); + result->tests->test[i]->repository_remove_tags = strlist_copy(ctx->tests->test[i]->repository_remove_tags); + if (ctx->tests->test[i]->runtime->environ) { + result->tests->test[i]->runtime->environ = runtime_copy(ctx->tests->test[i]->runtime->environ->data); } - result->tests[i].script = strdup_maybe(ctx->tests[i].script); - result->tests[i].script_setup = strdup_maybe(ctx->tests[i].script_setup); + result->tests->test[i]->script = strdup_maybe(ctx->tests->test[i]->script); + result->tests->test[i]->script_setup = strdup_maybe(ctx->tests->test[i]->script_setup); } return result; @@ -175,7 +175,7 @@ struct Delivery *delivery_duplicate(const struct Delivery *ctx) { void delivery_free(struct Delivery *ctx) { guard_free(ctx->system.arch); - guard_array_free(ctx->system.platform); + guard_array_n_free(ctx->system.platform, DELIVERY_PLATFORM_MAX); guard_free(ctx->meta.name); guard_free(ctx->meta.version); guard_free(ctx->meta.codename); @@ -230,18 +230,24 @@ void delivery_free(struct Delivery *ctx) { guard_strlist_free(&ctx->conda.pip_packages_purge); guard_strlist_free(&ctx->conda.wheels_packages); - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - guard_free(ctx->tests[i].name); - guard_free(ctx->tests[i].version); - guard_free(ctx->tests[i].repository); - guard_free(ctx->tests[i].repository_info_ref); - guard_free(ctx->tests[i].repository_info_tag); - guard_strlist_free(&ctx->tests[i].repository_remove_tags); - guard_free(ctx->tests[i].script); - guard_free(ctx->tests[i].script_setup); - guard_free(ctx->tests[i].build_recipe); + for (size_t i = 0; ctx->tests && i < ctx->tests->num_used; i++) { + guard_free(ctx->tests->test[i]->name); + guard_free(ctx->tests->test[i]->version); + guard_free(ctx->tests->test[i]->repository); + guard_free(ctx->tests->test[i]->repository_info_ref); + guard_free(ctx->tests->test[i]->repository_info_tag); + 
guard_strlist_free(&ctx->tests->test[i]->repository_remove_tags); + guard_free(ctx->tests->test[i]->script); + guard_free(ctx->tests->test[i]->script_setup); + guard_free(ctx->tests->test[i]->build_recipe); // test-specific runtime variables - guard_runtime_free(ctx->tests[i].runtime.environ); + guard_runtime_free(ctx->tests->test[i]->runtime->environ); + guard_free(ctx->tests->test[i]->runtime); + guard_free(ctx->tests->test[i]); + } + if (ctx->tests) { + guard_free(ctx->tests->test); + guard_free(ctx->tests); } guard_free(ctx->rules.release_fmt); @@ -388,8 +394,8 @@ void delivery_defer_packages(struct Delivery *ctx, int type) { msg(STASIS_MSG_L3, "package '%s': ", package_name); // When spec is present in name, set tests->version to the version detected in the name - for (size_t x = 0; x < sizeof(ctx->tests) / sizeof(ctx->tests[0]) && ctx->tests[x].name != NULL; x++) { - struct Test *test = &ctx->tests[x]; + for (size_t x = 0; x < ctx->tests->num_used; x++) { + struct Test *test = ctx->tests->test[x]; char nametmp[1024] = {0}; strncpy(nametmp, package_name, sizeof(nametmp) - 1); diff --git a/src/lib/delivery/delivery_build.c b/src/lib/delivery/delivery_build.c index 8370e6d..0013e96 100644 --- a/src/lib/delivery/delivery_build.c +++ b/src/lib/delivery/delivery_build.c @@ -1,11 +1,11 @@ #include "delivery.h" int delivery_build_recipes(struct Delivery *ctx) { - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + for (size_t i = 0; i < ctx->tests->num_used; i++) { char *recipe_dir = NULL; - if (ctx->tests[i].build_recipe) { // build a conda recipe - if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests[i].build_recipe, NULL, &recipe_dir)) { - fprintf(stderr, "Encountered an issue while cloning recipe for: %s\n", ctx->tests[i].name); + if (ctx->tests->test[i]->build_recipe) { // build a conda recipe + if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests->test[i]->build_recipe, NULL, &recipe_dir)) { + fprintf(stderr, "Encountered 
an issue while cloning recipe for: %s\n", ctx->tests->test[i]->name); return -1; } if (!recipe_dir) { @@ -15,29 +15,48 @@ int delivery_build_recipes(struct Delivery *ctx) { int recipe_type = recipe_get_type(recipe_dir); if(!pushd(recipe_dir)) { if (RECIPE_TYPE_ASTROCONDA == recipe_type) { - pushd(path_basename(ctx->tests[i].repository)); + pushd(path_basename(ctx->tests->test[i]->repository)); } else if (RECIPE_TYPE_CONDA_FORGE == recipe_type) { pushd("recipe"); } - char recipe_version[100]; - char recipe_buildno[100]; + char recipe_version[200]; + char recipe_buildno[200]; char recipe_git_url[PATH_MAX]; char recipe_git_rev[PATH_MAX]; + char tag[100] = {0}; + if (ctx->tests->test[i]->repository_info_tag) { + const int is_long_tag = num_chars(ctx->tests->test[i]->repository_info_tag, '-') > 1; + if (is_long_tag) { + const size_t len = strcspn(ctx->tests->test[i]->repository_info_tag, "-"); + strncpy(tag, ctx->tests->test[i]->repository_info_tag, len); + tag[len] = '\0'; + } else { + strncpy(tag, ctx->tests->test[i]->repository_info_tag, sizeof(tag) - 1); + tag[strlen(ctx->tests->test[i]->repository_info_tag)] = '\0'; + } + } else { + strcpy(tag, ctx->tests->test[i]->version); + } + //sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}"); - //sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository); - //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version); + //sprintf(recipe_git_url, " git_url: %s", ctx->tests->test[i]->repository); + //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests->test[i]->version); // TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^ - sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? 
ctx->tests[i].repository_info_tag : ctx->tests[i].version); - sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository); + // 03/2026 - How can we know if the repository URL supports archive downloads? + // Perhaps we can key it to the recipe type, because the archive is a requirement imposed + // by conda-forge. Hmm. + + sprintf(recipe_version, "{%% set version = \"%s\" %%}", tag); + sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests->test[i]->repository); strcpy(recipe_git_rev, ""); sprintf(recipe_buildno, " number: 0"); unsigned flags = REPLACE_TRUNCATE_AFTER_MATCH; //file_replace_text("meta.yaml", "{% set version = ", recipe_version); if (ctx->meta.final) { // remove this. i.e. statis cannot deploy a release to conda-forge - sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].version); + sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests->test[i]->version); // TODO: replace sha256 of tagged archive // TODO: leave the recipe unchanged otherwise. in theory this should produce the same conda package hash as conda forge. 
// For now, remove the sha256 requirement @@ -127,7 +146,232 @@ int filter_repo_tags(char *repo, struct StrList *patterns) { return result; } +static int read_without_line_endings(const size_t line, char ** arg) { + (void) line; + if (*arg) { + strip(*arg); + if (isempty(*arg)) { + return 1; // skip + } + } + return 0; +} + +int manylinux_exec(const char *image, const char *script, const char *copy_to_container_dir, const char *copy_from_container_dir, const char *copy_to_host_dir) { + int result = -1; // fail by default + char *container_name = NULL; + char *source_copy_command = NULL; + char *copy_command = NULL; + char *rm_command = NULL; + char *nop_create_command = NULL; + char *nop_rm_command = NULL; + char *volume_rm_command = NULL; + char *find_command = NULL; + char *wheel_paths_filename = NULL; + char *args = NULL; + + const uid_t uid = geteuid(); + char suffix[7] = {0}; + + // setup + + if (get_random_bytes(suffix, sizeof(suffix))) { + SYSERROR("%s", "unable to acquire value from random generator"); + goto manylinux_fail; + } + + if (asprintf(&container_name, "manylinux_build_%d_%zd_%s", uid, time(NULL), suffix) < 0) { + SYSERROR("%s", "unable to allocate memory for container name"); + goto manylinux_fail; + } + + if (asprintf(&args, "--name %s -w /build -v %s:/build", container_name, container_name) < 0) { + SYSERROR("%s", "unable to allocate memory for docker arguments"); + goto manylinux_fail; + } + + if (!strstr(image, "manylinux")) { + SYSERROR("expected a manylinux image, but got %s", image); + goto manylinux_fail; + } + + if (asprintf(&nop_create_command, "run --name nop_%s -v %s:/build busybox", container_name, container_name) < 0) { + SYSERROR("%s", "unable to allocate memory for nop container command"); + goto manylinux_fail; + } + + if (asprintf(&source_copy_command, "cp %s nop_%s:/build", copy_to_container_dir, container_name) < 0) { + SYSERROR("%s", "unable to allocate memory for source copy command"); + goto manylinux_fail; + } + + if 
(asprintf(&nop_rm_command, "rm nop_%s", container_name) < 0) { + SYSERROR("%s", "unable to allocate memory for nop container command"); + goto manylinux_fail; + } + + if (asprintf(&wheel_paths_filename, "%s/wheel_paths_%s.txt", globals.tmpdir, container_name) < 0) { + SYSERROR("%s", "unable to allocate memory for wheel paths file name"); + goto manylinux_fail; + } + + if (asprintf(&find_command, "run --rm -t -v %s:/build busybox sh -c 'find %s -name \"*.whl\"' > %s", container_name, copy_from_container_dir, wheel_paths_filename) < 0) { + SYSERROR("%s", "unable to allocate memory for find command"); + goto manylinux_fail; + } + + // execute + + if (docker_exec(nop_create_command, 0)) { + SYSERROR("%s", "docker nop container creation failed"); + goto manylinux_fail; + } + + if (docker_exec(source_copy_command, 0)) { + SYSERROR("%s", "docker source copy operation failed"); + goto manylinux_fail; + } + + if (docker_exec(nop_rm_command, STASIS_DOCKER_QUIET)) { + SYSERROR("%s", "docker nop container removal failed"); + goto manylinux_fail; + } + + if (docker_script(image, args, (char *) script, 0)) { + SYSERROR("%s", "manylinux execution failed"); + goto manylinux_fail; + } + + if (docker_exec(find_command, 0)) { + SYSERROR("%s", "docker find command failed"); + goto manylinux_fail; + } + + struct StrList *wheel_paths = strlist_init(); + if (!wheel_paths) { + SYSERROR("%s", "wheel_paths not initialized"); + goto manylinux_fail; + } + + if (strlist_append_file(wheel_paths, wheel_paths_filename, read_without_line_endings)) { + SYSERROR("%s", "wheel_paths append failed"); + goto manylinux_fail; + } + + for (size_t i = 0; i < strlist_count(wheel_paths); i++) { + const char *item = strlist_item(wheel_paths, i); + if (asprintf(©_command, "cp %s:%s %s", container_name, item, copy_to_host_dir) < 0) { + SYSERROR("%s", "unable to allocate memory for docker copy command"); + goto manylinux_fail; + } + + if (docker_exec(copy_command, 0)) { + SYSERROR("%s", "docker copy operation 
failed"); + goto manylinux_fail; + } + guard_free(copy_command); + } + + // Success + result = 0; + + manylinux_fail: + if (wheel_paths_filename) { + remove(wheel_paths_filename); + } + + if (container_name) { + // Keep going on failure unless memory related. + // We don't want build debris everywhere. + if (asprintf(&rm_command, "rm %s", container_name) < 0) { + SYSERROR("%s", "unable to allocate memory for rm command"); + goto late_fail; + } + + if (docker_exec(rm_command, STASIS_DOCKER_QUIET)) { + SYSERROR("%s", "docker container removal operation failed"); + } + + if (asprintf(&volume_rm_command, "volume rm -f %s", container_name) < 0) { + SYSERROR("%s", "unable to allocate memory for docker volume removal command"); + goto late_fail; + } + + if (docker_exec(volume_rm_command, STASIS_DOCKER_QUIET)) { + SYSERROR("%s", "docker volume removal operation failed"); + } + } + + late_fail: + guard_free(container_name); + guard_free(args); + guard_free(copy_command); + guard_free(rm_command); + guard_free(volume_rm_command); + guard_free(source_copy_command); + guard_free(nop_create_command); + guard_free(nop_rm_command); + guard_free(find_command); + guard_free(wheel_paths_filename); + guard_strlist_free(&wheel_paths); + return result; +} + +int delivery_build_wheels_manylinux(struct Delivery *ctx, const char *outdir) { + msg(STASIS_MSG_L1, "Building wheels\n"); + + const char *manylinux_image = globals.wheel_builder_manylinux_image; + if (!manylinux_image) { + SYSERROR("%s", "manylinux_image not initialized"); + return -1; + } + + int manylinux_build_status = 0; + + msg(STASIS_MSG_L2, "Using: %s\n", manylinux_image); + const struct Meta *meta = &ctx->meta; + const char *script_fmt = + "set -e -x\n" + "git config --global --add safe.directory /build\n" + "python%s -m pip install auditwheel build\n" + "python%s -m build -w .\n" + "auditwheel show dist/*.whl\n" + "auditwheel repair --allow-pure-python-wheel dist/*.whl\n"; + char *script = NULL; + if (asprintf(&script, 
script_fmt, + meta->python, meta->python) < 0) { + SYSERROR("%s", "unable to allocate memory for build script"); + return -1; + } + manylinux_build_status = manylinux_exec( + manylinux_image, + script, + "./", + "/build/wheelhouse", + outdir); + + if (manylinux_build_status) { + msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "manylinux build failed (%d)", manylinux_build_status); + guard_free(script); + return -1; + } + guard_free(script); + return 0; +} + struct StrList *delivery_build_wheels(struct Delivery *ctx) { + const int on_linux = strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux") == 0; + const int docker_usable = ctx->deploy.docker.capabilities.usable; + int use_builder_build = strcmp(globals.wheel_builder, "native") == 0; + const int use_builder_cibuildwheel = strcmp(globals.wheel_builder, "cibuildwheel") == 0 && on_linux && docker_usable; + const int use_builder_manylinux = strcmp(globals.wheel_builder, "manylinux") == 0 && on_linux && docker_usable; + + if (!use_builder_build && !use_builder_cibuildwheel && !use_builder_manylinux) { + msg(STASIS_MSG_WARN, "Cannot build wheel for platform using: %\n", globals.wheel_builder); + msg(STASIS_MSG_WARN, "Falling back to native toolchain.\n", globals.wheel_builder); + use_builder_build = 1; + } + struct StrList *result = NULL; struct Process proc = {0}; @@ -147,22 +391,28 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) { *spec = '\0'; } - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if ((ctx->tests[i].name && !strcmp(name, ctx->tests[i].name)) && (!ctx->tests[i].build_recipe && ctx->tests[i].repository)) { // build from source + for (size_t i = 0; i < ctx->tests->num_used; i++) { + if ((ctx->tests->test[i]->name && !strcmp(name, ctx->tests->test[i]->name)) && (!ctx->tests->test[i]->build_recipe && ctx->tests->test[i]->repository)) { // build from source char srcdir[PATH_MAX]; char wheeldir[PATH_MAX]; memset(srcdir, 0, sizeof(srcdir)); memset(wheeldir, 0, 
sizeof(wheeldir)); - sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests[i].name); - if (git_clone(&proc, ctx->tests[i].repository, srcdir, ctx->tests[i].version)) { + sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests->test[i]->name); + if (git_clone(&proc, ctx->tests->test[i]->repository, srcdir, ctx->tests->test[i]->version)) { SYSERROR("Unable to checkout tag '%s' for package '%s' from repository '%s'\n", - ctx->tests[i].version, ctx->tests[i].name, ctx->tests[i].repository); + ctx->tests->test[i]->version, ctx->tests->test[i]->name, ctx->tests->test[i]->repository); return NULL; } - if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) { - filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags); + if (!ctx->tests->test[i]->repository_info_tag) { + ctx->tests->test[i]->repository_info_tag = strdup(git_describe(srcdir)); + } + if (!ctx->tests->test[i]->repository_info_ref) { + ctx->tests->test[i]->repository_info_ref = strdup(git_rev_parse(srcdir, ctx->tests->test[i]->version)); + } + if (ctx->tests->test[i]->repository_remove_tags && strlist_count(ctx->tests->test[i]->repository_remove_tags)) { + filter_repo_tags(srcdir, ctx->tests->test[i]->repository_remove_tags); } if (!pushd(srcdir)) { @@ -184,7 +434,7 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) { COE_CHECK_ABORT(dep_status, "Unreproducible delivery"); } - strcpy(dname, ctx->tests[i].name); + strcpy(dname, ctx->tests->test[i]->name); tolower_s(dname); sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname); if (mkdirs(outdir, 0755)) { @@ -192,28 +442,41 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) { guard_strlist_free(&result); return NULL; } - - if (asprintf(&cmd, "-m build -w -o %s", outdir) < 0) { - SYSERROR("%s", "Unable to allocate memory for build command"); - return NULL; - } - if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux") - && 
ctx->deploy.docker.capabilities.usable) { - guard_free(cmd); - if (asprintf(&cmd, "-m cibuildwheel --output-dir %s --only cp%s-manylinux_%s", - outdir, ctx->meta.python_compact, ctx->system.arch) < 0) { - SYSERROR("%s", "Unable to allocate memory for cibuildwheel command"); + if (use_builder_manylinux) { + if (delivery_build_wheels_manylinux(ctx, outdir)) { + fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests->test[i]->name, + ctx->tests->test[i]->version); + guard_strlist_free(&result); + guard_free(cmd); return NULL; } - } + } else if (use_builder_build || use_builder_cibuildwheel) { + if (use_builder_build) { + if (asprintf(&cmd, "-m build -w -o %s", outdir) < 0) { + SYSERROR("%s", "Unable to allocate memory for build command"); + return NULL; + } + } else if (use_builder_cibuildwheel) { + if (asprintf(&cmd, "-m cibuildwheel --output-dir %s --only cp%s-manylinux_%s", + outdir, ctx->meta.python_compact, ctx->system.arch) < 0) { + SYSERROR("%s", "Unable to allocate memory for cibuildwheel command"); + return NULL; + } + } - if (python_exec(cmd)) { - fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name, - ctx->tests[i].version); - guard_strlist_free(&result); - guard_free(cmd); + if (python_exec(cmd)) { + fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests->test[i]->name, + ctx->tests->test[i]->version); + guard_strlist_free(&result); + guard_free(cmd); + return NULL; + } + } else { + SYSERROR("unknown wheel builder backend: %s", globals.wheel_builder); return NULL; } + + guard_free(cmd); popd(); } else { fprintf(stderr, "Unable to enter source directory %s: %s\n", srcdir, strerror(errno)); @@ -225,4 +488,3 @@ struct StrList *delivery_build_wheels(struct Delivery *ctx) { } return result; } - diff --git a/src/lib/delivery/delivery_docker.c b/src/lib/delivery/delivery_docker.c index 57015ad..2c43caf 100644 --- a/src/lib/delivery/delivery_docker.c +++ b/src/lib/delivery/delivery_docker.c @@ 
-111,7 +111,7 @@ int delivery_docker(struct Delivery *ctx) { msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Image test script has no content\n"); } else { int state; - if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) { + if ((state = docker_script(tag, "--rm", ctx->deploy.docker.test_script, 0))) { msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Non-zero exit (%d) from test script. %s image archive will not be generated.\n", state >> 8, tag); // test failed -- don't save the image return -1; diff --git a/src/lib/delivery/delivery_init.c b/src/lib/delivery/delivery_init.c index a60d6af..1666f0a 100644 --- a/src/lib/delivery/delivery_init.c +++ b/src/lib/delivery/delivery_init.c @@ -119,7 +119,7 @@ void delivery_init_dirs_stage1(struct Delivery *ctx) { } if (access(ctx->storage.mission_dir, F_OK)) { - msg(STASIS_MSG_L1, "%s: %s\n", ctx->storage.mission_dir, strerror(errno)); + msg(STASIS_MSG_L1, "%s: %s: mission directory does not exist\n", ctx->storage.mission_dir, strerror(errno)); exit(1); } @@ -150,7 +150,7 @@ void delivery_init_dirs_stage1(struct Delivery *ctx) { } int delivery_init_platform(struct Delivery *ctx) { - msg(STASIS_MSG_L2, "Setting architecture\n"); + SYSDEBUG("%s", "Setting architecture"); char archsuffix[20]; struct utsname uts; if (uname(&uts)) { @@ -179,7 +179,7 @@ int delivery_init_platform(struct Delivery *ctx) { strcpy(archsuffix, ctx->system.arch); } - msg(STASIS_MSG_L2, "Setting platform\n"); + SYSDEBUG("%s", "Setting platform"); strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname); if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) { sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix); @@ -287,6 +287,8 @@ int delivery_init(struct Delivery *ctx, int render_mode) { int bootstrap_build_info(struct Delivery *ctx) { struct Delivery local = {0}; + memcpy(&local.deploy.docker.capabilities, &ctx->deploy.docker.capabilities, sizeof(local.deploy.docker.capabilities)); + SYSDEBUG("ini_open(%s)", 
ctx->_stasis_ini_fp.cfg_path); local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path); SYSDEBUG("ini_open(%s)", ctx->_stasis_ini_fp.delivery_path); @@ -294,18 +296,22 @@ int bootstrap_build_info(struct Delivery *ctx) { if (delivery_init_platform(&local)) { SYSDEBUG("%s", "delivery_init_platform failed"); + delivery_free(&local); return -1; } if (populate_delivery_cfg(&local, INI_READ_RENDER)) { SYSDEBUG("%s", "populate_delivery_cfg failed"); + delivery_free(&local); return -1; } if (populate_delivery_ini(&local, INI_READ_RENDER)) { SYSDEBUG("%s", "populate_delivery_ini failed"); + delivery_free(&local); return -1; } if (populate_info(&local)) { SYSDEBUG("%s", "populate_info failed"); + delivery_free(&local); return -1; } ctx->info.build_name = strdup(local.info.build_name); @@ -315,6 +321,7 @@ int bootstrap_build_info(struct Delivery *ctx) { ctx->info.time_info = malloc(sizeof(*ctx->info.time_info)); if (!ctx->info.time_info) { SYSERROR("Unable to allocate %zu bytes for tm struct: %s", sizeof(*local.info.time_info), strerror(errno)); + delivery_free(&local); return -1; } } diff --git a/src/lib/delivery/delivery_install.c b/src/lib/delivery/delivery_install.c index f1637a3..2de80cf 100644 --- a/src/lib/delivery/delivery_install.c +++ b/src/lib/delivery/delivery_install.c @@ -2,7 +2,7 @@ static struct Test *requirement_from_test(struct Delivery *ctx, const char *name) { struct Test *result = NULL; - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { + for (size_t i = 0; i < ctx->tests->num_used; i++) { char *package_name = strdup(name); if (package_name) { char *spec = find_version_spec(package_name); @@ -11,8 +11,8 @@ static struct Test *requirement_from_test(struct Delivery *ctx, const char *name } remove_extras(package_name); - if (ctx->tests[i].name && !strcmp(package_name, ctx->tests[i].name)) { - result = &ctx->tests[i]; + if (ctx->tests->test[i]->name && !strcmp(package_name, ctx->tests->test[i]->name)) { + result = 
ctx->tests->test[i]; guard_free(package_name); break; } @@ -252,7 +252,7 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha } strlist_append_tokenize(tag_data, info->repository_info_tag, "-"); - struct Wheel *whl = NULL; + struct WheelInfo *whl = NULL; char *post_commit = NULL; char *hash = NULL; if (strlist_count(tag_data) > 1) { @@ -264,7 +264,7 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha // equal to the tag; setuptools_scm auto-increments the value, the user can change it manually, // etc. errno = 0; - whl = get_wheel_info(ctx->storage.wheel_artifact_dir, info->name, + whl = wheelinfo_get(ctx->storage.wheel_artifact_dir, info->name, (char *[]) {ctx->meta.python_compact, ctx->system.arch, "none", "any", post_commit, hash, @@ -277,11 +277,12 @@ int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, cha // not found fprintf(stderr, "No wheel packages found that match the description of '%s'", info->name); } else { - // found + // found, replace the original version with newly detected version + guard_free(info->version); info->version = strdup(whl->version); } guard_strlist_free(&tag_data); - wheel_free(&whl); + wheelinfo_free(&whl); } char req[255] = {0}; diff --git a/src/lib/delivery/delivery_populate.c b/src/lib/delivery/delivery_populate.c index 15ab6bd..d41e3a4 100644 --- a/src/lib/delivery/delivery_populate.c +++ b/src/lib/delivery/delivery_populate.c @@ -85,6 +85,45 @@ int populate_delivery_cfg(struct Delivery *ctx, int render_mode) { } globals.pip_packages = ini_getval_strlist(cfg, "default", "pip_packages", LINE_SEP, render_mode, &err); + err = 0; + if (!globals.wheel_builder) { + globals.wheel_builder = ini_getval_str(cfg, "default", "wheel_builder", render_mode, &err); + if (err) { + msg(STASIS_MSG_WARN, "wheel_builder is undefined. 
Falling back to system toolchain: 'build'.\n"); + globals.wheel_builder = strdup("build"); + if (!globals.wheel_builder) { + SYSERROR("%s", "unable to allocate memory for default wheel_builder value"); + return -1; + } + } + } + + err = 0; + if (!globals.wheel_builder_manylinux_image) { + globals.wheel_builder_manylinux_image = ini_getval_str(cfg, "default", "wheel_builder_manylinux_image", render_mode, &err); + } + + if (err && globals.wheel_builder && strcmp(globals.wheel_builder, "manylinux") == 0) { + SYSERROR("%s", "default:wheel_builder is set to 'manylinux', however default:wheel_builder_manylinux_image is not configured"); + return -1; + } + + if (strcmp(globals.wheel_builder, "manylinux") == 0) { + char *manifest_inspect_cmd = NULL; + if (asprintf(&manifest_inspect_cmd, "manifest inspect '%s'", globals.wheel_builder_manylinux_image) < 0) { + SYSERROR("%s", "unable to allocate memory for docker command"); + guard_free(manifest_inspect_cmd); + return -1; + } + if (ctx->deploy.docker.capabilities.usable && docker_exec(manifest_inspect_cmd, STASIS_DOCKER_QUIET_STDOUT)) { + SYSERROR("Image provided by default:wheel_builder_manylinux_image does not exist: %s", globals.wheel_builder_manylinux_image); + guard_free(manifest_inspect_cmd); + return -1; + } + guard_free(manifest_inspect_cmd); + } + + if (globals.jfrog.jfrog_artifactory_base_url) { guard_free(globals.jfrog.jfrog_artifactory_base_url); } @@ -154,6 +193,7 @@ static void normalize_ini_list(struct INIFILE **inip, struct StrList **listp, ch (*inip) = ini; (*listp) = list; } + int populate_delivery_ini(struct Delivery *ctx, int render_mode) { struct INIFILE *ini = ctx->_stasis_ini_fp.delivery; struct INIData *rtdata; @@ -200,7 +240,9 @@ int populate_delivery_ini(struct Delivery *ctx, int render_mode) { normalize_ini_list(&ini, &ctx->conda.pip_packages_purge, "conda", "pip_packages_purge", render_mode); // Delivery metadata consumed - populate_mission_ini(&ctx, render_mode); + if (populate_mission_ini(&ctx, 
render_mode)) { + return -1; + } if (ctx->info.release_name) { guard_free(ctx->info.release_name); @@ -236,11 +278,17 @@ int populate_delivery_ini(struct Delivery *ctx, int render_mode) { ctx->conda.pip_packages_defer = strlist_init(); } - for (size_t z = 0, i = 0; i < ini->section_count; i++) { + ctx->tests = tests_init(TEST_NUM_ALLOC_INITIAL); + for (size_t i = 0; i < ini->section_count; i++) { char *section_name = ini->section[i]->key; if (startswith(section_name, "test:")) { union INIVal val; - struct Test *test = &ctx->tests[z]; + struct Test *test = test_init(); + if (!test) { + SYSERROR("%s", "unable to allocate memory for test structure"); + return -1; + } + val.as_char_p = strchr(ini->section[i]->key, ':') + 1; if (val.as_char_p && isempty(val.as_char_p)) { return 1; @@ -258,7 +306,8 @@ int populate_delivery_ini(struct Delivery *ctx, int render_mode) { } test->repository_remove_tags = ini_getval_strlist(ini, section_name, "repository_remove_tags", LINE_SEP, render_mode, &err); test->build_recipe = ini_getval_str(ini, section_name, "build_recipe", render_mode, &err); - test->runtime.environ = ini_getval_strlist(ini, section_name, "runtime", LINE_SEP, render_mode, &err); + + test->runtime->environ = ini_getval_strlist(ini, section_name, "runtime", LINE_SEP, render_mode, &err); const char *timeout_str = ini_getval_str(ini, section_name, "timeout", render_mode, &err); if (timeout_str) { test->timeout = str_to_timeout((char *) timeout_str); @@ -271,7 +320,7 @@ int populate_delivery_ini(struct Delivery *ctx, int render_mode) { return 1; } } - z++; + tests_add(ctx->tests, test); } } @@ -320,6 +369,7 @@ int populate_mission_ini(struct Delivery **ctx, int render_mode) { int err = 0; if ((*ctx)->_stasis_ini_fp.mission) { + // mission configurations are optional return 0; } @@ -333,12 +383,12 @@ int populate_mission_ini(struct Delivery **ctx, int render_mode) { globals.sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission); } - msg(STASIS_MSG_L2, "Reading 
mission configuration: %s\n", missionfile); + SYSDEBUG("Reading mission configuration: %s\n", missionfile); (*ctx)->_stasis_ini_fp.mission = ini_open(missionfile); struct INIFILE *ini = (*ctx)->_stasis_ini_fp.mission; if (!ini) { msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read mission configuration: %s, %s\n", missionfile, strerror(errno)); - exit(1); + return -1; } (*ctx)->_stasis_ini_fp.mission_path = strdup(missionfile); diff --git a/src/lib/delivery/delivery_postprocess.c b/src/lib/delivery/delivery_postprocess.c index 5029e02..a7bb2b4 100644 --- a/src/lib/delivery/delivery_postprocess.c +++ b/src/lib/delivery/delivery_postprocess.c @@ -28,7 +28,7 @@ int delivery_dump_metadata(struct Delivery *ctx) { return -1; } if (globals.verbose) { - printf("%s\n", filename); + msg(STASIS_MSG_L2, "%s\n", filename); } fprintf(fp, "name %s\n", ctx->meta.name); fprintf(fp, "version %s\n", ctx->meta.version); diff --git a/src/lib/delivery/delivery_show.c b/src/lib/delivery/delivery_show.c index adfa1be..f4ac825 100644 --- a/src/lib/delivery/delivery_show.c +++ b/src/lib/delivery/delivery_show.c @@ -84,13 +84,13 @@ void delivery_conda_show(struct Delivery *ctx) { void delivery_tests_show(struct Delivery *ctx) { printf("\n====TESTS====\n"); - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - if (!ctx->tests[i].name) { + for (size_t i = 0; i < ctx->tests->num_used; i++) { + if (!ctx->tests->test[i]->name) { continue; } - printf("%-20s %-20s %s\n", ctx->tests[i].name, - ctx->tests[i].version, - ctx->tests[i].repository); + printf("%-20s %-20s %s\n", ctx->tests->test[i]->name, + ctx->tests->test[i]->version, + ctx->tests->test[i]->repository); } } diff --git a/src/lib/delivery/delivery_test.c b/src/lib/delivery/delivery_test.c index 500ade9..3ba9d56 100644 --- a/src/lib/delivery/delivery_test.c +++ b/src/lib/delivery/delivery_test.c @@ -1,5 +1,59 @@ #include "delivery.h" +struct Tests *tests_init(const size_t num_tests) { + struct Tests *tests = 
calloc(1, sizeof(*tests)); + if (!tests) { + return NULL; + } + + tests->test = calloc(num_tests, sizeof(*tests->test)); + if (!tests->test) { + return NULL; + } + tests->num_used = 0; + tests->num_alloc = num_tests; + + return tests; +} + +int tests_add(struct Tests *tests, struct Test *x) { + if (tests->num_used >= tests->num_alloc) { +#ifdef DEBUG + const size_t old_alloc = tests->num_alloc; +#endif + struct Test **tmp = realloc(tests->test, tests->num_alloc++ * sizeof(*tests->test)); + SYSDEBUG("Increasing size of test array: %zu -> %zu", old_alloc, tests->num_alloc); + if (!tmp) { + SYSDEBUG("Failed to allocate %zu bytes for test array", tests->num_alloc * sizeof(*tests->test)); + return -1; + } + tests->test = tmp; + } + + SYSDEBUG("Adding test: '%s'", x->name); + tests->test[tests->num_used++] = x; + return 0; +} + +struct Test *test_init() { + struct Test *result = calloc(1, sizeof(*result)); + result->runtime = calloc(1, sizeof(*result->runtime)); + + return result; +} + +void test_free(struct Test **x) { + struct Test *test = *x; + guard_free(test); +} + +void tests_free(struct Tests **x) { + for (size_t i = 0; i < (*x)->num_alloc; i++) { + test_free(&(*x)->test[i]); + } + guard_free((*x)->test); +} + void delivery_tests_run(struct Delivery *ctx) { static const int SETUP = 0; static const int PARALLEL = 1; @@ -16,7 +70,7 @@ void delivery_tests_run(struct Delivery *ctx) { // amount of debug information. 
snprintf(globals.workaround.conda_reactivate, PATH_MAX - 1, "\nset +x; mamba activate ${CONDA_DEFAULT_ENV}; set -x\n"); - if (!ctx->tests[0].name) { + if (!ctx->tests || !ctx->tests->num_used) { msg(STASIS_MSG_WARN | STASIS_MSG_L2, "no tests are defined!\n"); } else { pool[PARALLEL] = mp_pool_init("parallel", ctx->storage.tmpdir); @@ -60,8 +114,8 @@ void delivery_tests_run(struct Delivery *ctx) { // Iterate over our test records, retrieving the source code for each package, and assigning its scripted tasks // to the appropriate processing pool - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - struct Test *test = &ctx->tests[i]; + for (size_t i = 0; i < ctx->tests->num_used; i++) { + struct Test *test = ctx->tests->test[i]; if (!test->name && !test->repository && !test->script) { // skip unused test records continue; @@ -181,8 +235,8 @@ void delivery_tests_run(struct Delivery *ctx) { // Configure "script_setup" tasks // Directories should exist now, so no need to go through initializing everything all over again. - for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) { - struct Test *test = &ctx->tests[i]; + for (size_t i = 0; i < ctx->tests->num_used; i++) { + const struct Test *test = ctx->tests->test[i]; if (test->script_setup) { char destdir[PATH_MAX]; sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(test->repository)); diff --git a/src/lib/delivery/include/delivery.h b/src/lib/delivery/include/delivery.h index cae4b02..b5799ac 100644 --- a/src/lib/delivery/include/delivery.h +++ b/src/lib/delivery/include/delivery.h @@ -19,6 +19,8 @@ #include "multiprocessing.h" #include "recipe.h" #include "wheel.h" +#include "wheelinfo.h" +#include "environment.h" #define DELIVERY_PLATFORM_MAX 4 #define DELIVERY_PLATFORM_MAXLEN 65 @@ -44,6 +46,28 @@ struct Content { char *data; }; +//! Number of test records to allocate (grows dynamically) +#define TEST_NUM_ALLOC_INITIAL 10 + +/*! 
\struct Test + * \brief Test information + */ +struct Test { + char *name; ///< Name of package + char *version; ///< Version of package + char *repository; ///< Git repository of package + char *script_setup; ///< Commands to execute before the main script + char *script; ///< Commands to execute + bool disable; ///< Toggle a test block + bool parallel; ///< Toggle parallel or serial execution + char *build_recipe; ///< Conda recipe to build (optional) + char *repository_info_ref; ///< Git commit hash + char *repository_info_tag; ///< Git tag (first parent) + struct StrList *repository_remove_tags; ///< Git tags to remove (to fix duplicate commit tags) + struct Runtime *runtime; ///< Environment variables specific to the test context + int timeout; ///< Timeout in seconds +}; ///< A single test record + /*! \struct Delivery * \brief A structure describing a full delivery object */ @@ -153,24 +177,11 @@ struct Delivery { RuntimeEnv *environ; ///< Environment variables } runtime; - /*! \struct Test - * \brief Test information - */ - struct Test { - char *name; ///< Name of package - char *version; ///< Version of package - char *repository; ///< Git repository of package - char *script_setup; ///< Commands to execute before the main script - char *script; ///< Commands to execute - bool disable; ///< Toggle a test block - bool parallel; ///< Toggle parallel or serial execution - char *build_recipe; ///< Conda recipe to build (optional) - char *repository_info_ref; ///< Git commit hash - char *repository_info_tag; ///< Git tag (first parent) - struct StrList *repository_remove_tags; ///< Git tags to remove (to fix duplicate commit tags) - struct Runtime runtime; ///< Environment variables specific to the test context - int timeout; ///< Timeout in seconds - } tests[1000]; ///< An array of tests + struct Tests { + struct Test **test; + size_t num_used; + size_t num_alloc; + } *tests; struct Deploy { struct JFRT_Auth jfrog_auth; @@ -489,4 +500,32 @@ void 
delivery_rewrite_stage2(struct Delivery *ctx, char *specfile); */ struct Delivery *delivery_duplicate(const struct Delivery *ctx); +/** + * Initialize a `Tests` structure + * @param num_tests number of test records + * @return an initialized `Tests` structure + */ +struct Tests *tests_init(size_t num_tests); + +/** + * Add a `Test` structure to `Tests` + * @param tests list to add to + * @param x test to add to list + * @return 0=success, -1=error + */ +int tests_add(struct Tests *tests, struct Test *x); + +/** + * Free a `Test` structure + * @param x pointer to `Test` + */ +void test_free(struct Test **x); + +/** + * Initialize a `Test` structure + * @return an initialized `Test` structure + */ +struct Test *test_init(); + + #endif //STASIS_DELIVERY_H |
