aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CMakeLists.txt9
-rw-r--r--README.md68
-rw-r--r--examples/template/example.ini13
-rw-r--r--include/artifactory.h1
-rw-r--r--include/conda.h16
-rw-r--r--include/core.h38
-rw-r--r--include/core_mem.h18
-rw-r--r--include/delivery.h39
-rw-r--r--include/docker.h2
-rw-r--r--include/download.h2
-rw-r--r--include/envctl.h2
-rw-r--r--include/github.h1
-rw-r--r--include/ini.h1
-rw-r--r--include/multiprocessing.h131
-rw-r--r--include/package.h30
-rw-r--r--include/str.h1
-rw-r--r--include/strlist.h5
-rw-r--r--include/template_func_proto.h2
-rw-r--r--include/utils.h31
-rw-r--r--include/wheel.h36
-rw-r--r--mission/generic/Dockerfile.in2
-rw-r--r--mission/generic/base.yml6
-rw-r--r--mission/hst/Dockerfile.in2
-rw-r--r--mission/hst/base.yml32
-rw-r--r--mission/jwst/Dockerfile.in2
-rw-r--r--mission/jwst/base.yml6
-rw-r--r--mission/roman/Dockerfile.in2
-rw-r--r--mission/roman/base.yml9
-rw-r--r--src/CMakeLists.txt36
-rw-r--r--src/cli/CMakeLists.txt2
-rw-r--r--src/cli/stasis/CMakeLists.txt12
-rw-r--r--src/cli/stasis/args.c102
-rw-r--r--src/cli/stasis/args.h23
-rw-r--r--src/cli/stasis/callbacks.c31
-rw-r--r--src/cli/stasis/callbacks.h10
-rw-r--r--src/cli/stasis/stasis_main.c (renamed from src/stasis_main.c)350
-rw-r--r--src/cli/stasis/system_requirements.c82
-rw-r--r--src/cli/stasis/system_requirements.h13
-rw-r--r--src/cli/stasis/tpl.c46
-rw-r--r--src/cli/stasis/tpl.h10
-rw-r--r--src/cli/stasis_indexer/CMakeLists.txt6
-rw-r--r--src/cli/stasis_indexer/stasis_indexer.c (renamed from src/stasis_indexer.c)76
-rw-r--r--src/delivery.c2219
-rw-r--r--src/lib/CMakeLists.txt1
-rw-r--r--src/lib/core/CMakeLists.txt38
-rw-r--r--src/lib/core/artifactory.c (renamed from src/artifactory.c)2
-rw-r--r--src/lib/core/conda.c (renamed from src/conda.c)69
-rw-r--r--src/lib/core/copy.c (renamed from src/copy.c)0
-rw-r--r--src/lib/core/delivery.c317
-rw-r--r--src/lib/core/delivery_artifactory.c192
-rw-r--r--src/lib/core/delivery_build.c190
-rw-r--r--src/lib/core/delivery_conda.c110
-rw-r--r--src/lib/core/delivery_docker.c132
-rw-r--r--src/lib/core/delivery_init.c345
-rw-r--r--src/lib/core/delivery_install.c224
-rw-r--r--src/lib/core/delivery_populate.c348
-rw-r--r--src/lib/core/delivery_postprocess.c266
-rw-r--r--src/lib/core/delivery_show.c117
-rw-r--r--src/lib/core/delivery_test.c295
-rw-r--r--src/lib/core/docker.c (renamed from src/docker.c)6
-rw-r--r--src/lib/core/download.c (renamed from src/download.c)2
-rw-r--r--src/lib/core/envctl.c (renamed from src/envctl.c)1
-rw-r--r--src/lib/core/environment.c (renamed from src/environment.c)4
-rw-r--r--src/lib/core/github.c (renamed from src/github.c)1
-rw-r--r--src/lib/core/globals.c (renamed from src/globals.c)31
-rw-r--r--src/lib/core/ini.c (renamed from src/ini.c)16
-rw-r--r--src/lib/core/junitxml.c (renamed from src/junitxml.c)4
-rw-r--r--src/lib/core/multiprocessing.c449
-rw-r--r--src/lib/core/package.c41
-rw-r--r--src/lib/core/recipe.c (renamed from src/recipe.c)2
-rw-r--r--src/lib/core/relocation.c (renamed from src/relocation.c)0
-rw-r--r--src/lib/core/rules.c (renamed from src/rules.c)0
-rw-r--r--src/lib/core/str.c (renamed from src/str.c)31
-rw-r--r--src/lib/core/strlist.c (renamed from src/strlist.c)3
-rw-r--r--src/lib/core/system.c (renamed from src/system.c)13
-rw-r--r--src/lib/core/template.c (renamed from src/template.c)0
-rw-r--r--src/lib/core/template_func_proto.c (renamed from src/template_func_proto.c)52
-rw-r--r--src/lib/core/utils.c (renamed from src/utils.c)30
-rw-r--r--src/lib/core/wheel.c (renamed from src/wheel.c)54
-rw-r--r--stasis.ini2
-rw-r--r--tests/CMakeLists.txt4
-rw-r--r--tests/data/generic.ini18
-rw-r--r--tests/rt_generic.sh19
-rw-r--r--tests/test_artifactory.c2
-rw-r--r--tests/test_conda.c6
-rw-r--r--tests/test_docker.c2
-rw-r--r--tests/test_download.c1
-rw-r--r--tests/test_ini.c2
-rw-r--r--tests/test_junitxml.c1
-rw-r--r--tests/test_multiprocessing.c127
-rw-r--r--tests/test_recipe.c2
-rw-r--r--tests/test_str.c4
-rw-r--r--tests/test_wheel.c6
93 files changed, 4405 insertions, 2702 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index abb700c..caed929 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.15)
project(STASIS C)
include(GNUInstallDirs)
-set(nix_cflags -Wall -Wextra -fPIC)
+set(nix_cflags -Wall -Wextra -fPIC -D_GNU_SOURCE)
set(win_cflags /Wall)
set(CMAKE_C_STANDARD 99)
find_package(LibXml2)
@@ -11,13 +11,16 @@ link_libraries(CURL::libcurl)
link_libraries(LibXml2::LibXml2)
include_directories(${LIBXML2_INCLUDE_DIR})
+option(FORTIFY_SOURCE OFF)
+if (FORTIFY_SOURCE)
+ set(nix_cflags ${nix_cflags} -O -D_FORTIFY_SOURCE=1)
+endif ()
+
if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
- message("gnu options")
add_compile_options(${nix_cflags})
elseif (CMAKE_C_COMPILER_ID MATCHES "Clang")
add_compile_options(${nix_cflags})
elseif (CMAKE_C_COMPILER_ID STREQUAL "MSVC")
- message("microsoft visual c options")
add_compile_options(${win_cflags})
endif()
diff --git a/README.md b/README.md
index 586237a..f1d198e 100644
--- a/README.md
+++ b/README.md
@@ -117,6 +117,8 @@ Create some test cases for packages.
[test:our_cool_program]
version = 1.2.3
repository = https://github.com/org/our_cool_program
+script_setup =
+ pip install -e '.[test]'
script =
pytest -fEsx \
--basetemp="{{ func:basetemp_dir() }}" \
@@ -126,6 +128,8 @@ script =
[test:our_other_cool_program]
version = 4.5.6
repository = https://github.com/org/our_other_cool_program
+script_setup =
+ pip install -e '.[test]'
script =
pytest -fEsx \
--basetemp="{{ func:basetemp_dir() }}" \
@@ -143,22 +147,26 @@ stasis mydelivery.ini
## Command Line Options
-| Long Option | Short Option | Purpose |
-|:--------------------|:------------:|:---------------------------------------------------------------|
-| --help | -h | Display usage statement |
-| --version | -V | Display program version |
-| --continue-on-error | -C | Allow tests to fail |
-| --config ARG | -c ARG | Read STASIS configuration file |
-| --python ARG | -p ARG | Override version of Python in configuration |
-| --verbose | -v | Increase output verbosity |
-| --unbuffered | -U | Disable line buffering |
-| --update-base | n/a | Update conda installation prior to STATIS environment creation |
-| --overwrite | n/a | Overwrite an existing release |
-| --no-docker | n/a | Do not build docker images |
-| --no-artifactory | n/a | Do not upload artifacts to Artifactory |
-| --no-testing | n/a | Do not execute test scripts |
-| --no-rewrite | n/a | Do not rewrite paths and URLs in output files |
-| DELIVERY_FILE | n/a | STASIS delivery file |
+| Long Option | Short Option | Purpose |
+|:---------------------------|:------------:|:---------------------------------------------------------------|
+| --help | -h | Display usage statement |
+| --version | -V | Display program version |
+| --continue-on-error | -C | Allow tests to fail |
+| --config ARG | -c ARG | Read STASIS configuration file |
+| --cpu-limit ARG | -l ARG | Number of processes to spawn concurrently (default: cpus - 1) |
+| --pool-status-interval ARG | n/a | Report task status every n seconds (default: 30) |
+| --python ARG | -p ARG | Override version of Python in configuration |
+| --verbose | -v | Increase output verbosity |
+| --unbuffered | -U | Disable line buffering |
+| --update-base              | n/a          | Update conda installation prior to STASIS environment creation |
+| --fail-fast | n/a | On test error, terminate all tasks |
+| --overwrite | n/a | Overwrite an existing release |
+| --no-docker | n/a | Do not build docker images |
+| --no-artifactory | n/a | Do not upload artifacts to Artifactory |
+| --no-testing | n/a | Do not execute test scripts |
+| --no-parallel | n/a | Do not execute tests in parallel |
+| --no-rewrite | n/a | Do not rewrite paths and URLs in output files |
+| DELIVERY_FILE | n/a | STASIS delivery file |
## Environment variables
@@ -259,13 +267,16 @@ Environment variables exported are _global_ to all programs executed by stasis.
Sections starting with `test:` will be used during the testing phase of the stasis pipeline. Where the value of `name` following the colon is an arbitrary value, and only used for reporting which test-run is executing. Section names must be unique.
-| Key | Type | Purpose | Required |
-|--------------|--------|-------------------------------------------------------|----------|
-| build_recipe | String | Git repository path to package's conda recipe | N |
-| repository | String | Git repository path or URL to clone | Y |
-| version | String | Git commit or tag to check out | Y |
-| runtime | List | Export environment variables specific to test context | Y |
-| script | List | Body of a shell script that will execute the tests | Y |
+| Key | Type | Purpose | Required |
+|--------------|---------|-------------------------------------------------------------|----------|
+| disable | Boolean | Disable `script` execution (`script_setup` always executes) | N |
+| parallel | Boolean | Execute test block in parallel (default) or sequentially | N |
+| build_recipe | String | Git repository path to package's conda recipe | N |
+| repository | String | Git repository path or URL to clone | Y |
+| version | String | Git commit or tag to check out | Y |
+| runtime | List | Export environment variables specific to test context | Y |
+| script_setup | List | Body of a shell script that will install dependencies | N |
+| script | List | Body of a shell script that will execute the tests | Y |
### deploy:artifactory:_name_
@@ -320,7 +331,6 @@ Template strings can be accessed using the `{{ subject.key }}` notation in any S
| system.platform | System Platform (OS) |
| deploy.docker.registry | Docker registry |
| deploy.jfrog.repo | Artifactory destination repository |
-| workaround.tox_posargs | Return populated `-c` and `--root` tox arguments.<br/>Force-enables positional arguments in tox's command line parser. |
| workaround.conda_reactivate | Reinitialize the conda runtime environment.<br/>Use this after calling `conda install` from within a `[test:*].script`. |
The template engine also provides an interface to environment variables using the `{{ env:VARIABLE_NAME }}` notation.
@@ -336,11 +346,11 @@ python = {{ env:MY_DYNAMIC_PYTHON_VERSION }}
Template functions can be accessed using the `{{ func:NAME(ARG,...) }}` notation.
-| Name | Purpose |
-|-------------------------------|------------------------------------------------------------------|
-| get_github_release_notes_auto | Generate release notes for all test contexts |
-| basetemp_dir | Generate directory path to test block's temporary data directory |
-| junitxml_file | Generate directory path and file name for test result file |
+| Name | Arguments | Purpose |
+|-------------------------------|-----------|------------------------------------------------------------------|
+| get_github_release_notes_auto | n/a | Generate release notes for all test contexts |
+| basetemp_dir | n/a | Generate directory path to test block's temporary data directory |
+| junitxml_file | n/a | Generate directory path and file name for test result file |
# Mission files
diff --git a/examples/template/example.ini b/examples/template/example.ini
index 4a4c579..cca2089 100644
--- a/examples/template/example.ini
+++ b/examples/template/example.ini
@@ -43,11 +43,24 @@ pip_packages =
; key=value
[test:name] ; where test:"name" denotes the package name
+; (boolean) Do not execute "script"
+disable =
+
+; (boolean) Add to parallel task pool?
+; true = yes (default)
+; false = no (send to serial task pool)
+parallel =
+
; (string) Version of tested package
version =
; (string) Git repository of tested package
repository =
+; (list) Commands to execute before "script"
+; e.g. pip install -e '.[test]'
+script_setup =
+
; (list) Commands to execute against tested package
+; e.g. pytest
script =
diff --git a/include/artifactory.h b/include/artifactory.h
index c6e5c2b..e580886 100644
--- a/include/artifactory.h
+++ b/include/artifactory.h
@@ -5,6 +5,7 @@
#include <stdio.h>
#include <stdlib.h>
#include "core.h"
+#include "download.h"
//! JFrog Artifactory Authentication struct
struct JFRT_Auth {
diff --git a/include/conda.h b/include/conda.h
index c546672..1eb42f4 100644
--- a/include/conda.h
+++ b/include/conda.h
@@ -4,7 +4,9 @@
#include <stdio.h>
#include <string.h>
+#include <sys/utsname.h>
#include "core.h"
+#include "download.h"
#define CONDA_INSTALL_PREFIX "conda"
#define PYPI_INDEX_DEFAULT "https://pypi.org/simple"
@@ -186,10 +188,18 @@ int conda_index(const char *path);
/**
* Determine whether a simple index contains a package
* @param index_url a file system path or url pointing to a simple index
- * @param name package name (required)
- * @param version package version (may be NULL)
+ * @param spec a pip package specification (e.g. `name==1.2.3`)
* @return not found = 0, found = 1, error = -1
*/
-int pip_index_provides(const char *index_url, const char *name, const char *version);
+int pip_index_provides(const char *index_url, const char *spec);
+
+/**
+ * Determine whether conda can find a package in its channel list
+ * @param spec a conda package specification (e.g. `name=1.2.3`)
+ * @return not found = 0, found = 1, error = -1
+ */
+int conda_provides(const char *spec);
+
+char *conda_get_active_environment();
#endif //STASIS_CONDA_H
diff --git a/include/core.h b/include/core.h
index ef90e96..b0a1a11 100644
--- a/include/core.h
+++ b/include/core.h
@@ -1,9 +1,10 @@
-//! @file stasis.h
+//! @file core.h
#ifndef STASIS_CORE_H
#define STASIS_CORE_H
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
@@ -21,36 +22,7 @@
#define HTTP_ERROR(X) X >= 400
#include "config.h"
-#include "envctl.h"
-#include "template.h"
-#include "utils.h"
-#include "copy.h"
-#include "ini.h"
-#include "conda.h"
-#include "environment.h"
-#include "artifactory.h"
-#include "docker.h"
-#include "delivery.h"
-#include "str.h"
-#include "strlist.h"
-#include "system.h"
-#include "download.h"
-#include "recipe.h"
-#include "relocation.h"
-#include "wheel.h"
-#include "junitxml.h"
-#include "github.h"
-#include "template_func_proto.h"
-
-#define guard_runtime_free(X) do { if (X) { runtime_free(X); X = NULL; } } while (0)
-#define guard_strlist_free(X) do { if ((*X)) { strlist_free(X); (*X) = NULL; } } while (0)
-#define guard_free(X) do { if (X) { free(X); X = NULL; } } while (0)
-#define GENERIC_ARRAY_FREE(ARR) do { \
- for (size_t ARR_I = 0; ARR && ARR[ARR_I] != NULL; ARR_I++) { \
- guard_free(ARR[ARR_I]); \
- } \
- guard_free(ARR); \
-} while (0)
+#include "core_mem.h"
#define COE_CHECK_ABORT(COND, MSG) \
do {\
@@ -71,6 +43,10 @@ struct STASIS_GLOBAL {
bool enable_testing; //!< Enable package testing
bool enable_overwrite; //!< Enable release file clobbering
bool enable_rewrite_spec_stage_2; //!< Enable automatic @STR@ replacement in output files
+ bool enable_parallel; //!< Enable testing in parallel
+ long cpu_limit; //!< Limit parallel processing to n cores (default: max - 1)
+ long parallel_fail_fast; //!< Fail immediately on error
+ int pool_status_interval; //!< Report "Task is running" every n seconds
struct StrList *conda_packages; //!< Conda packages to install after initial activation
struct StrList *pip_packages; //!< Pip packages to install after initial activation
char *tmpdir; //!< Path to temporary storage directory
diff --git a/include/core_mem.h b/include/core_mem.h
new file mode 100644
index 0000000..bd50e9d
--- /dev/null
+++ b/include/core_mem.h
@@ -0,0 +1,18 @@
+//! @file core_mem.h
+#ifndef STASIS_CORE_MEM_H
+#define STASIS_CORE_MEM_H
+
+#include "environment.h"
+#include "strlist.h"
+
+#define guard_runtime_free(X) do { if (X) { runtime_free(X); X = NULL; } } while (0)
+#define guard_strlist_free(X) do { if ((*X)) { strlist_free(X); (*X) = NULL; } } while (0)
+#define guard_free(X) do { if (X) { free(X); X = NULL; } } while (0)
+#define GENERIC_ARRAY_FREE(ARR) do { \
+ for (size_t ARR_I = 0; ARR && ARR[ARR_I] != NULL; ARR_I++) { \
+ guard_free(ARR[ARR_I]); \
+ } \
+ guard_free(ARR); \
+} while (0)
+
+#endif //STASIS_CORE_MEM_H
diff --git a/include/delivery.h b/include/delivery.h
index 067cd0b..bd5137c 100644
--- a/include/delivery.h
+++ b/include/delivery.h
@@ -7,7 +7,18 @@
#include <stdbool.h>
#include <unistd.h>
#include <sys/utsname.h>
+#include <fnmatch.h>
+#include <sys/statvfs.h>
#include "core.h"
+#include "copy.h"
+#include "environment.h"
+#include "conda.h"
+#include "ini.h"
+#include "artifactory.h"
+#include "docker.h"
+#include "wheel.h"
+#include "multiprocessing.h"
+#include "recipe.h"
#define DELIVERY_PLATFORM_MAX 4
#define DELIVERY_PLATFORM_MAXLEN 65
@@ -149,7 +160,10 @@ struct Delivery {
char *name; ///< Name of package
char *version; ///< Version of package
char *repository; ///< Git repository of package
+ char *script_setup; ///< Commands to execute before the main script
char *script; ///< Commands to execute
+ bool disable; ///< Toggle a test block
+ bool parallel; ///< Toggle parallel or serial execution
char *build_recipe; ///< Conda recipe to build (optional)
char *repository_info_ref; ///< Git commit hash
char *repository_info_tag; ///< Git tag (first parent)
@@ -286,7 +300,7 @@ int delivery_copy_conda_artifacts(struct Delivery *ctx);
* Retrieve Conda installer
* @param installer_url URL to installation script
*/
-int delivery_get_installer(struct Delivery *ctx, char *installer_url);
+int delivery_get_conda_installer(struct Delivery *ctx, char *installer_url);
/**
* Generate URL based on Delivery context
@@ -294,7 +308,7 @@ int delivery_get_installer(struct Delivery *ctx, char *installer_url);
* @param result pointer to char
* @return in result
*/
-void delivery_get_installer_url(struct Delivery *ctx, char *result);
+void delivery_get_conda_installer_url(struct Delivery *ctx, char *result);
/**
* Install packages based on Delivery context
@@ -376,6 +390,12 @@ void delivery_gather_tool_versions(struct Delivery *ctx);
// helper function
int delivery_init_tmpdir(struct Delivery *ctx);
+void delivery_init_dirs_stage1(struct Delivery *ctx);
+
+void delivery_init_dirs_stage2(struct Delivery *ctx);
+
+int delivery_init_platform(struct Delivery *ctx);
+
int delivery_init_artifactory(struct Delivery *ctx);
int delivery_artifact_upload(struct Delivery *ctx);
@@ -386,10 +406,21 @@ int delivery_docker(struct Delivery *ctx);
int delivery_fixup_test_results(struct Delivery *ctx);
-int *bootstrap_build_info(struct Delivery *ctx);
+int bootstrap_build_info(struct Delivery *ctx);
int delivery_dump_metadata(struct Delivery *ctx);
+int populate_info(struct Delivery *ctx);
+
+int populate_delivery_cfg(struct Delivery *ctx, int render_mode);
+
+int populate_delivery_ini(struct Delivery *ctx, int render_mode);
+
+int populate_mission_ini(struct Delivery **ctx, int render_mode);
+
+void validate_delivery_ini(struct INIFILE *ini);
+
+int filter_repo_tags(char *repo, struct StrList *patterns);
/**
* Determine whether a release on-disk matches the release name in use
* @param ctx Delivery context
@@ -397,4 +428,6 @@ int delivery_dump_metadata(struct Delivery *ctx);
*/
int delivery_exists(struct Delivery *ctx);
+int delivery_overlay_packages_from_env(struct Delivery *ctx, const char *env_name);
+
#endif //STASIS_DELIVERY_H
diff --git a/include/docker.h b/include/docker.h
index ff8a8d5..7585d86 100644
--- a/include/docker.h
+++ b/include/docker.h
@@ -2,6 +2,8 @@
#ifndef STASIS_DOCKER_H
#define STASIS_DOCKER_H
+#include "core.h"
+
//! Flag to squelch output from docker_exec()
#define STASIS_DOCKER_QUIET 1 << 1
diff --git a/include/download.h b/include/download.h
index 058812e..0b6311e 100644
--- a/include/download.h
+++ b/include/download.h
@@ -2,6 +2,8 @@
#ifndef STASIS_DOWNLOAD_H
#define STASIS_DOWNLOAD_H
+#include <stdlib.h>
+#include <string.h>
#include <curl/curl.h>
size_t download_writer(void *fp, size_t size, size_t nmemb, void *stream);
diff --git a/include/envctl.h b/include/envctl.h
index c8ef357..659cae3 100644
--- a/include/envctl.h
+++ b/include/envctl.h
@@ -1,7 +1,9 @@
+//! @file envctl.h
#ifndef STASIS_ENVCTL_H
#define STASIS_ENVCTL_H
#include <stdlib.h>
+#include "core.h"
#define STASIS_ENVCTL_PASSTHRU 0
#define STASIS_ENVCTL_REQUIRED 1 << 1
diff --git a/include/github.h b/include/github.h
index cebeabf..f9b47a3 100644
--- a/include/github.h
+++ b/include/github.h
@@ -1,3 +1,4 @@
+//! @file github.h
#ifndef STASIS_GITHUB_H
#define STASIS_GITHUB_H
diff --git a/include/ini.h b/include/ini.h
index 3d0565b..557f157 100644
--- a/include/ini.h
+++ b/include/ini.h
@@ -5,6 +5,7 @@
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>
+#include "template.h"
#define INI_WRITE_RAW 0 ///< Dump INI data. Contents are not modified.
#define INI_WRITE_PRESERVE 1 ///< Dump INI data. Template strings are
diff --git a/include/multiprocessing.h b/include/multiprocessing.h
new file mode 100644
index 0000000..5919462
--- /dev/null
+++ b/include/multiprocessing.h
@@ -0,0 +1,131 @@
+/// @file multiprocessing.h
+#ifndef STASIS_MULTIPROCESSING_H
+#define STASIS_MULTIPROCESSING_H
+
+#include "core.h"
+#include <signal.h>
+#include <sys/wait.h>
+#include <semaphore.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+struct MultiProcessingTask {
+ pid_t pid; ///< Program PID
+ pid_t parent_pid; ///< Program PID (parent process)
+ int status; ///< Child process exit status
+ int signaled_by; ///< Last signal received, if any
+ time_t _now; ///< Current time
+ time_t _seconds; ///< Time elapsed (used by MultiProcessingPool.status_interval)
+ char ident[255]; ///< Identity of the pool task
+ char *cmd; ///< Shell command(s) to be executed
+ size_t cmd_len; ///< Length of command string (for mmap/munmap)
+ char working_dir[PATH_MAX]; ///< Path to directory `cmd` should be executed in
+ char log_file[PATH_MAX]; ///< Full path to stdout/stderr log file
+ char parent_script[PATH_MAX]; ///< Path to temporary script executing the task
+ struct {
+ struct timespec t_start;
+ struct timespec t_stop;
+ } time_data; ///< Wall-time counters
+};
+
+struct MultiProcessingPool {
+ struct MultiProcessingTask *task; ///< Array of tasks to execute
+ size_t num_used; ///< Number of tasks populated in the task array
+ size_t num_alloc; ///< Number of tasks allocated by the task array
+ char ident[255]; ///< Identity of task pool
+ char log_root[PATH_MAX]; ///< Base directory to store stderr/stdout log files
+ int status_interval; ///< Report a pooled task is "running" every n seconds
+};
+
+/// Maximum number of multiprocessing tasks STASIS can execute
+#define MP_POOL_TASK_MAX 1000
+
+/// Value signifies a process is unused or finished executing
+#define MP_POOL_PID_UNUSED 0
+
+/// Option flags for mp_pool_join()
+#define MP_POOL_FAIL_FAST 1 << 1
+
+/**
+ * Create a multiprocessing pool
+ *
+ * ```c
+ * #include "multiprocessing.h"
+ * #include "utils.h" // for get_cpu_count()
+ *
+ * int main(int argc, char *argv[]) {
+ * struct MultiProcessingPool *mp;
+ * mp = mp_pool_init("mypool", "/tmp/mypool_logs");
+ * if (mp) {
+ * char *commands[] = {
+ * "/bin/echo hello world",
+ * "/bin/echo world hello",
+ * NULL
+ * };
+ * for (size_t i = 0; commands[i] != NULL; i++) {
+ * struct MultiProcessingTask *task;
+ * char task_name[100];
+ *
+ * sprintf(task_name, "mytask%zu", i);
+ * task = mp_task(mp, task_name, commands[i]);
+ * if (!task) {
+ * // handle task creation error
+ * }
+ * }
+ * if (mp_pool_join(mp, get_cpu_count(), MP_POOL_FAIL_FAST)) {
+ * // handle pool execution error
+ * }
+ * mp_pool_free(&mp);
+ * } else {
+ * // handle pool initialization error
+ * }
+ * }
+ * ```
+ *
+ * @param ident a name to identify the pool
+ * @param log_root the path to store program output
+ * @return pointer to initialized MultiProcessingPool
+ * @return NULL on error
+ */
+struct MultiProcessingPool *mp_pool_init(const char *ident, const char *log_root);
+
+/**
+ * Create a multiprocessing pool task
+ *
+ * @param pool a pointer to MultiProcessingPool
+ * @param ident a name to identify the task
+ * @param cmd a command to execute
+ * @return pointer to MultiProcessingTask structure
+ * @return NULL on error
+ */
+struct MultiProcessingTask *mp_pool_task(struct MultiProcessingPool *pool, const char *ident, char *working_dir, char *cmd);
+
+/**
+ * Execute all tasks in a pool
+ *
+ * @param pool a pointer to MultiProcessingPool
+ * @param jobs the number of processes to spawn at once (for serial execution use `1`)
+ * @param flags option to be OR'd (MP_POOL_FAIL_FAST)
+ * @return 0 on success
+ * @return >0 on failure
+ * @return <0 on error
+ */
+int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags);
+
+/**
+ * Show summary of pool tasks
+ *
+ * @param pool a pointer to MultiProcessingPool
+ */
+void mp_pool_show_summary(struct MultiProcessingPool *pool);
+
+/**
+ * Release resources allocated by mp_pool_init()
+ *
+ * @param pool a pointer to MultiProcessingPool
+ */
+void mp_pool_free(struct MultiProcessingPool **pool);
+
+
+#endif //STASIS_MULTIPROCESSING_H
diff --git a/include/package.h b/include/package.h
new file mode 100644
index 0000000..eff1874
--- /dev/null
+++ b/include/package.h
@@ -0,0 +1,30 @@
+#ifndef STASIS_PACKAGE_H
+#define STASIS_PACKAGE_H
+
+struct Package {
+ struct {
+ const char *name;
+ const char *version_spec;
+ const char *version;
+ } meta;
+ struct {
+ const char *uri;
+ unsigned handler;
+ } source;
+ struct {
+ struct Test *test;
+ size_t pass;
+ size_t fail;
+ size_t skip;
+ };
+ unsigned state;
+};
+
+struct Package *stasis_package_init(void);
+void stasis_package_set_name(struct Package *pkg, const char *name);
+void stasis_package_set_version(struct Package *pkg, const char *version);
+void stasis_package_set_version_spec(struct Package *pkg, const char *version_spec);
+void stasis_package_set_uri(struct Package *pkg, const char *uri);
+void stasis_package_set_handler(struct Package *pkg, unsigned handler);
+
+#endif //STASIS_PACKAGE_H
diff --git a/include/str.h b/include/str.h
index 4cf221d..7254225 100644
--- a/include/str.h
+++ b/include/str.h
@@ -9,6 +9,7 @@
#include <string.h>
#include <stdarg.h>
#include <ctype.h>
+#include "relocation.h"
#include "core.h"
#define STASIS_SORT_ALPHA 1 << 0
diff --git a/include/strlist.h b/include/strlist.h
index dd22a0a..cdbfc01 100644
--- a/include/strlist.h
+++ b/include/strlist.h
@@ -4,10 +4,15 @@
*/
#ifndef STASIS_STRLIST_H
#define STASIS_STRLIST_H
+
+typedef int (ReaderFn)(size_t line, char **);
+
#include <stdlib.h>
+#include "core.h"
#include "utils.h"
#include "str.h"
+
struct StrList {
size_t num_alloc;
size_t num_inuse;
diff --git a/include/template_func_proto.h b/include/template_func_proto.h
index 7778a11..286ccfb 100644
--- a/include/template_func_proto.h
+++ b/include/template_func_proto.h
@@ -1,3 +1,4 @@
+//! @file template_func_proto.h
#ifndef TEMPLATE_FUNC_PROTO_H
#define TEMPLATE_FUNC_PROTO_H
@@ -7,5 +8,6 @@ int get_github_release_notes_tplfunc_entrypoint(void *frame, void *data_out);
int get_github_release_notes_auto_tplfunc_entrypoint(void *frame, void *data_out);
int get_junitxml_file_entrypoint(void *frame, void *data_out);
int get_basetemp_dir_entrypoint(void *frame, void *data_out);
+int tox_run_entrypoint(void *frame, void *data_out);
#endif //TEMPLATE_FUNC_PROTO_H \ No newline at end of file
diff --git a/include/utils.h b/include/utils.h
index a3d244a..4ade817 100644
--- a/include/utils.h
+++ b/include/utils.h
@@ -8,7 +8,12 @@
#include <unistd.h>
#include <limits.h>
#include <errno.h>
+#include "core.h"
+#include "copy.h"
#include "system.h"
+#include "strlist.h"
+#include "utils.h"
+#include "ini.h"
#if defined(STASIS_OS_WINDOWS)
#define PATH_ENV_VAR "path"
@@ -25,8 +30,6 @@
#define STASIS_XML_PRETTY_PRINT_PROG "xmllint"
#define STASIS_XML_PRETTY_PRINT_ARGS "--format"
-typedef int (ReaderFn)(size_t line, char **);
-
/**
* Change directory. Push path on directory stack.
*
@@ -365,4 +368,28 @@ long get_cpu_count();
*/
int mkdirs(const char *_path, mode_t mode);
+/**
+ * Return pointer to a (possible) version specifier
+ *
+ * ```c
+ * char s[] = "abc==1.2.3";
+ * char *spec_begin = find_version_spec(s);
+ * // spec_begin is "==1.2.3"
+ *
+ * char package_name[255];
+ * char s[] = "abc";
+ * char *spec_pos = find_version_spec(s);
+ * if (spec_pos) {
+ * strncpy(package_name, s, spec_pos - s);
+ * // use spec
+ * } else {
+ * // spec not found
+ * }
+ *
+ * @param package_name a pointer to a buffer containing a package spec (i.e. abc==1.2.3, abc>=1.2.3, abc)
+ * @return a pointer to the first occurrence of a version spec character
+ * @return NULL if not found
+ */
+char *find_version_spec(char *package_name);
+
#endif //STASIS_UTILS_H
diff --git a/include/wheel.h b/include/wheel.h
index 619e0f7..1a689e9 100644
--- a/include/wheel.h
+++ b/include/wheel.h
@@ -1,3 +1,4 @@
+//! @file wheel.h
#ifndef STASIS_WHEEL_H
#define STASIS_WHEEL_H
@@ -5,20 +6,31 @@
#include <string.h>
#include <stdio.h>
#include "str.h"
-
-#define WHEEL_MATCH_EXACT 0
-#define WHEEL_MATCH_ANY 1
+#define WHEEL_MATCH_EXACT 0 ///< Match when all patterns are present
+#define WHEEL_MATCH_ANY 1 ///< Match when any patterns are present
struct Wheel {
- char *distribution;
- char *version;
- char *build_tag;
- char *python_tag;
- char *abi_tag;
- char *platform_tag;
- char *path_name;
- char *file_name;
+ char *distribution; ///< Package name
+ char *version; ///< Package version
+ char *build_tag; ///< Package build tag (optional)
+ char *python_tag; ///< Package Python tag (pyXY)
+ char *abi_tag; ///< Package ABI tag (cpXY, abiX, none)
+ char *platform_tag; ///< Package platform tag (linux_x86_64, any)
+ char *path_name; ///< Path to package on-disk
+ char *file_name; ///< Name of package on-disk
};
-struct Wheel *get_wheel_file(const char *basepath, const char *name, char *to_match[], unsigned match_mode);
+/**
+ * Extract metadata from a Python Wheel file name
+ *
+ * @param basepath directory containing a wheel file
+ * @param name of wheel file
+ * @param to_match a NULL terminated array of patterns (i.e. platform, arch, version, etc)
+ * @param match_mode WHEEL_MATCH_EXACT
+ * @param match_mode WHEEL_MATCH_ANY
+ * @return pointer to populated Wheel on success
+ * @return NULL on error
+ */
+struct Wheel *get_wheel_info(const char *basepath, const char *name, char *to_match[], unsigned match_mode);
+void wheel_free(struct Wheel **wheel);
#endif //STASIS_WHEEL_H
diff --git a/mission/generic/Dockerfile.in b/mission/generic/Dockerfile.in
index 705ed81..23fed68 100644
--- a/mission/generic/Dockerfile.in
+++ b/mission/generic/Dockerfile.in
@@ -68,7 +68,7 @@ RUN conda config --set auto_update_conda false \
&& conda config --set always_yes true \
&& conda config --set quiet true \
&& conda config --set rollback_enabled false
-RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url ${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
+RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url file://${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
RUN mamba install \
git \
${CONDA_PACKAGES} \
diff --git a/mission/generic/base.yml b/mission/generic/base.yml
new file mode 100644
index 0000000..e72633c
--- /dev/null
+++ b/mission/generic/base.yml
@@ -0,0 +1,6 @@
+channels:
+ - conda-forge
+dependencies:
+ - pip
+ - python
+ - setuptools \ No newline at end of file
diff --git a/mission/hst/Dockerfile.in b/mission/hst/Dockerfile.in
index 705ed81..23fed68 100644
--- a/mission/hst/Dockerfile.in
+++ b/mission/hst/Dockerfile.in
@@ -68,7 +68,7 @@ RUN conda config --set auto_update_conda false \
&& conda config --set always_yes true \
&& conda config --set quiet true \
&& conda config --set rollback_enabled false
-RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url ${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
+RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url file://${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
RUN mamba install \
git \
${CONDA_PACKAGES} \
diff --git a/mission/hst/base.yml b/mission/hst/base.yml
new file mode 100644
index 0000000..af115cf
--- /dev/null
+++ b/mission/hst/base.yml
@@ -0,0 +1,32 @@
+channels:
+ - conda-forge
+dependencies:
+ - fitsverify
+ - hstcal
+ - pip
+ - python
+ - setuptools
+ - pip:
+ - acstools
+ - calcos
+ - costools
+ - crds
+ - drizzlepac
+ - fitsblender
+ - gwcs
+ - hasp
+ - nictools
+ - spherical_geometry
+ - stistools
+ - stregion
+ - stsci.image
+ - stsci.imagestats
+ - stsci.skypac
+ - stsci.stimage
+ - stsci.tools
+ - stwcs
+ - tweakwcs
+ - ullyses
+ - ullyses-utils
+ - wfc3tools
+ - wfpc2tools \ No newline at end of file
diff --git a/mission/jwst/Dockerfile.in b/mission/jwst/Dockerfile.in
index 705ed81..23fed68 100644
--- a/mission/jwst/Dockerfile.in
+++ b/mission/jwst/Dockerfile.in
@@ -68,7 +68,7 @@ RUN conda config --set auto_update_conda false \
&& conda config --set always_yes true \
&& conda config --set quiet true \
&& conda config --set rollback_enabled false
-RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url ${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
+RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url file://${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
RUN mamba install \
git \
${CONDA_PACKAGES} \
diff --git a/mission/jwst/base.yml b/mission/jwst/base.yml
new file mode 100644
index 0000000..e72633c
--- /dev/null
+++ b/mission/jwst/base.yml
@@ -0,0 +1,6 @@
+channels:
+ - conda-forge
+dependencies:
+ - pip
+ - python
+ - setuptools \ No newline at end of file
diff --git a/mission/roman/Dockerfile.in b/mission/roman/Dockerfile.in
index 705ed81..23fed68 100644
--- a/mission/roman/Dockerfile.in
+++ b/mission/roman/Dockerfile.in
@@ -68,7 +68,7 @@ RUN conda config --set auto_update_conda false \
&& conda config --set always_yes true \
&& conda config --set quiet true \
&& conda config --set rollback_enabled false
-RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url ${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
+RUN sed -i -e "s|@CONDA_CHANNEL@|${HOME}/packages/conda|;s|@PIP_ARGUMENTS@|--extra-index-url file://${HOME}/packages/wheels|;" ${HOME}/SNAPSHOT.yml
RUN mamba install \
git \
${CONDA_PACKAGES} \
diff --git a/mission/roman/base.yml b/mission/roman/base.yml
new file mode 100644
index 0000000..a1d49a0
--- /dev/null
+++ b/mission/roman/base.yml
@@ -0,0 +1,9 @@
+channels:
+ - conda-forge
+dependencies:
+ - pip
+ - python
+ - setuptools
+ - pip:
+ - romancal
+ - stcal \ No newline at end of file
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 2399dc5..bfee276 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -2,38 +2,6 @@ include_directories(${CMAKE_BINARY_DIR}/include)
include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${PROJECT_BINARY_DIR})
-add_library(stasis_core STATIC
- globals.c
- str.c
- strlist.c
- ini.c
- conda.c
- environment.c
- utils.c
- system.c
- download.c
- delivery.c
- recipe.c
- relocation.c
- wheel.c
- copy.c
- artifactory.c
- template.c
- rules.c
- docker.c
- junitxml.c
- github.c
- template_func_proto.c
- envctl.c
-)
+add_subdirectory(lib)
+add_subdirectory(cli)
-add_executable(stasis
- stasis_main.c
-)
-target_link_libraries(stasis PRIVATE stasis_core)
-target_link_libraries(stasis PUBLIC LibXml2::LibXml2)
-add_executable(stasis_indexer
- stasis_indexer.c
-)
-target_link_libraries(stasis_indexer PRIVATE stasis_core)
-install(TARGETS stasis stasis_indexer RUNTIME)
diff --git a/src/cli/CMakeLists.txt b/src/cli/CMakeLists.txt
new file mode 100644
index 0000000..92a21b7
--- /dev/null
+++ b/src/cli/CMakeLists.txt
@@ -0,0 +1,2 @@
+add_subdirectory(stasis)
+add_subdirectory(stasis_indexer) \ No newline at end of file
diff --git a/src/cli/stasis/CMakeLists.txt b/src/cli/stasis/CMakeLists.txt
new file mode 100644
index 0000000..ff7fd88
--- /dev/null
+++ b/src/cli/stasis/CMakeLists.txt
@@ -0,0 +1,12 @@
+include_directories(${CMAKE_SOURCE_DIR})
+add_executable(stasis
+ stasis_main.c
+ args.c
+ callbacks.c
+ system_requirements.c
+ tpl.c
+)
+target_link_libraries(stasis PRIVATE stasis_core)
+target_link_libraries(stasis PUBLIC LibXml2::LibXml2)
+
+install(TARGETS stasis RUNTIME)
diff --git a/src/cli/stasis/args.c b/src/cli/stasis/args.c
new file mode 100644
index 0000000..ed11ab9
--- /dev/null
+++ b/src/cli/stasis/args.c
@@ -0,0 +1,102 @@
+#include "core.h"
+#include "args.h"
+
+struct option long_options[] = {
+ {"help", no_argument, 0, 'h'},
+ {"version", no_argument, 0, 'V'},
+ {"continue-on-error", no_argument, 0, 'C'},
+ {"config", required_argument, 0, 'c'},
+ {"cpu-limit", required_argument, 0, 'l'},
+ {"pool-status-interval", required_argument, 0, OPT_POOL_STATUS_INTERVAL},
+ {"python", required_argument, 0, 'p'},
+ {"verbose", no_argument, 0, 'v'},
+ {"unbuffered", no_argument, 0, 'U'},
+ {"update-base", no_argument, 0, OPT_ALWAYS_UPDATE_BASE},
+ {"fail-fast", no_argument, 0, OPT_FAIL_FAST},
+ {"overwrite", no_argument, 0, OPT_OVERWRITE},
+ {"no-docker", no_argument, 0, OPT_NO_DOCKER},
+ {"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY},
+ {"no-artifactory-build-info", no_argument, 0, OPT_NO_ARTIFACTORY_BUILD_INFO},
+ {"no-testing", no_argument, 0, OPT_NO_TESTING},
+ {"no-parallel", no_argument, 0, OPT_NO_PARALLEL},
+ {"no-rewrite", no_argument, 0, OPT_NO_REWRITE_SPEC_STAGE_2},
+ {0, 0, 0, 0},
+};
+
+const char *long_options_help[] = {
+ "Display this usage statement",
+ "Display program version",
+ "Allow tests to fail",
+ "Read configuration file",
+ "Number of processes to spawn concurrently (default: cpus - 1)",
+ "Report task status every n seconds (default: 30)",
+ "Override version of Python in configuration",
+ "Increase output verbosity",
+ "Disable line buffering",
+ "Update conda installation prior to STASIS environment creation",
+ "On error, immediately terminate all tasks",
+ "Overwrite an existing release",
+ "Do not build docker images",
+ "Do not upload artifacts to Artifactory",
+ "Do not upload build info objects to Artifactory",
+ "Do not execute test scripts",
+ "Do not execute tests in parallel",
+ "Do not rewrite paths and URLs in output files",
+ NULL,
+};
+
+static int get_option_max_width(struct option option[]) {
+ int i = 0;
+ int max = 0;
+ const int indent = 4;
+ while (option[i].name != 0) {
+ int len = (int) strlen(option[i].name);
+ if (option[i].has_arg) {
+ len += indent;
+ }
+ if (len > max) {
+ max = len;
+ }
+ i++;
+ }
+ return max;
+}
+
+void usage(char *progname) {
+ printf("usage: %s ", progname);
+ printf("[-");
+ for (int x = 0; long_options[x].val != 0; x++) {
+ if (long_options[x].has_arg == no_argument && long_options[x].val <= 'z') {
+ putchar(long_options[x].val);
+ }
+ }
+ printf("] {DELIVERY_FILE}\n");
+
+ int width = get_option_max_width(long_options);
+ for (int x = 0; long_options[x].name != 0; x++) {
+ char tmp[STASIS_NAME_MAX] = {0};
+ char output[sizeof(tmp)] = {0};
+ char opt_long[50] = {0}; // --? [ARG]?
+ char opt_short[50] = {0}; // -? [ARG]?
+
+ strcat(opt_long, "--");
+ strcat(opt_long, long_options[x].name);
+ if (long_options[x].has_arg) {
+ strcat(opt_long, " ARG");
+ }
+
+ if (long_options[x].val <= 'z') {
+ strcat(opt_short, "-");
+ opt_short[1] = (char) long_options[x].val;
+ if (long_options[x].has_arg) {
+ strcat(opt_short, " ARG");
+ }
+ } else {
+ strcat(opt_short, " ");
+ }
+
+ sprintf(tmp, " %%-%ds\t%%s\t\t%%s", width + 4);
+ sprintf(output, tmp, opt_long, opt_short, long_options_help[x]);
+ puts(output);
+ }
+}
diff --git a/src/cli/stasis/args.h b/src/cli/stasis/args.h
new file mode 100644
index 0000000..932eac7
--- /dev/null
+++ b/src/cli/stasis/args.h
@@ -0,0 +1,23 @@
+#ifndef STASIS_ARGS_H
+#define STASIS_ARGS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+
+#define OPT_ALWAYS_UPDATE_BASE 1000
+#define OPT_NO_DOCKER 1001
+#define OPT_NO_ARTIFACTORY 1002
+#define OPT_NO_ARTIFACTORY_BUILD_INFO 1003
+#define OPT_NO_TESTING 1004
+#define OPT_OVERWRITE 1005
+#define OPT_NO_REWRITE_SPEC_STAGE_2 1006
+#define OPT_FAIL_FAST 1007
+#define OPT_NO_PARALLEL 1008
+#define OPT_POOL_STATUS_INTERVAL 1009
+
+extern struct option long_options[];
+void usage(char *progname);
+
+#endif //STASIS_ARGS_H
diff --git a/src/cli/stasis/callbacks.c b/src/cli/stasis/callbacks.c
new file mode 100644
index 0000000..aeaa25d
--- /dev/null
+++ b/src/cli/stasis/callbacks.c
@@ -0,0 +1,31 @@
+#include "callbacks.h"
+
+int callback_except_jf(const void *a, const void *b) {
+ const struct EnvCtl_Item *item = a;
+ const char *name = b;
+
+ if (!globals.enable_artifactory) {
+ return STASIS_ENVCTL_RET_IGNORE;
+ }
+
+ if (envctl_check_required(item->flags)) {
+ const char *content = getenv(name);
+ if (!content || isempty((char *) content)) {
+ return STASIS_ENVCTL_RET_FAIL;
+ }
+ }
+
+ return STASIS_ENVCTL_RET_SUCCESS;
+}
+
+int callback_except_gh(const void *a, const void *b) {
+ const struct EnvCtl_Item *item = a;
+ const char *name = b;
+ //printf("GH exception check: %s\n", name);
+ if (envctl_check_required(item->flags) && envctl_check_present(item, name)) {
+ return STASIS_ENVCTL_RET_SUCCESS;
+ }
+
+ return STASIS_ENVCTL_RET_FAIL;
+}
+
diff --git a/src/cli/stasis/callbacks.h b/src/cli/stasis/callbacks.h
new file mode 100644
index 0000000..369ce56
--- /dev/null
+++ b/src/cli/stasis/callbacks.h
@@ -0,0 +1,10 @@
+#ifndef STASIS_CALLBACKS_H
+#define STASIS_CALLBACKS_H
+
+#include "core.h"
+#include "envctl.h"
+
+int callback_except_jf(const void *a, const void *b);
+int callback_except_gh(const void *a, const void *b);
+
+#endif //STASIS_CALLBACKS_H
diff --git a/src/stasis_main.c b/src/cli/stasis/stasis_main.c
index 7ea465c..5325892 100644
--- a/src/stasis_main.c
+++ b/src/cli/stasis/stasis_main.c
@@ -2,203 +2,14 @@
#include <stdlib.h>
#include <string.h>
#include <limits.h>
-#include <getopt.h>
#include "core.h"
+#include "delivery.h"
-#define OPT_ALWAYS_UPDATE_BASE 1000
-#define OPT_NO_DOCKER 1001
-#define OPT_NO_ARTIFACTORY 1002
-#define OPT_NO_ARTIFACTORY_BUILD_INFO 1003
-#define OPT_NO_TESTING 1004
-#define OPT_OVERWRITE 1005
-#define OPT_NO_REWRITE_SPEC_STAGE_2 1006
-static struct option long_options[] = {
- {"help", no_argument, 0, 'h'},
- {"version", no_argument, 0, 'V'},
- {"continue-on-error", no_argument, 0, 'C'},
- {"config", required_argument, 0, 'c'},
- {"python", required_argument, 0, 'p'},
- {"verbose", no_argument, 0, 'v'},
- {"unbuffered", no_argument, 0, 'U'},
- {"update-base", no_argument, 0, OPT_ALWAYS_UPDATE_BASE},
- {"overwrite", no_argument, 0, OPT_OVERWRITE},
- {"no-docker", no_argument, 0, OPT_NO_DOCKER},
- {"no-artifactory", no_argument, 0, OPT_NO_ARTIFACTORY},
- {"no-artifactory-build-info", no_argument, 0, OPT_NO_ARTIFACTORY_BUILD_INFO},
- {"no-testing", no_argument, 0, OPT_NO_TESTING},
- {"no-rewrite", no_argument, 0, OPT_NO_REWRITE_SPEC_STAGE_2},
- {0, 0, 0, 0},
-};
-
-const char *long_options_help[] = {
- "Display this usage statement",
- "Display program version",
- "Allow tests to fail",
- "Read configuration file",
- "Override version of Python in configuration",
- "Increase output verbosity",
- "Disable line buffering",
- "Update conda installation prior to STASIS environment creation",
- "Overwrite an existing release",
- "Do not build docker images",
- "Do not upload artifacts to Artifactory",
- "Do not upload build info objects to Artifactory",
- "Do not execute test scripts",
- "Do not rewrite paths and URLs in output files",
- NULL,
-};
-
-static int get_option_max_width(struct option option[]) {
- int i = 0;
- int max = 0;
- const int indent = 4;
- while (option[i].name != 0) {
- int len = (int) strlen(option[i].name);
- if (option[i].has_arg) {
- len += indent;
- }
- if (len > max) {
- max = len;
- }
- i++;
- }
- return max;
-}
-
-static void usage(char *progname) {
- printf("usage: %s ", progname);
- printf("[-");
- for (int x = 0; long_options[x].val != 0; x++) {
- if (long_options[x].has_arg == no_argument && long_options[x].val <= 'z') {
- putchar(long_options[x].val);
- }
- }
- printf("] {DELIVERY_FILE}\n");
-
- int width = get_option_max_width(long_options);
- for (int x = 0; long_options[x].name != 0; x++) {
- char tmp[STASIS_NAME_MAX] = {0};
- char output[sizeof(tmp)] = {0};
- char opt_long[50] = {0}; // --? [ARG]?
- char opt_short[50] = {0}; // -? [ARG]?
-
- strcat(opt_long, "--");
- strcat(opt_long, long_options[x].name);
- if (long_options[x].has_arg) {
- strcat(opt_long, " ARG");
- }
+// local includes
+#include "args.h"
+#include "system_requirements.h"
+#include "tpl.h"
- if (long_options[x].val <= 'z') {
- strcat(opt_short, "-");
- opt_short[1] = (char) long_options[x].val;
- if (long_options[x].has_arg) {
- strcat(opt_short, " ARG");
- }
- } else {
- strcat(opt_short, " ");
- }
-
- sprintf(tmp, " %%-%ds\t%%s\t\t%%s", width + 4);
- sprintf(output, tmp, opt_long, opt_short, long_options_help[x]);
- puts(output);
- }
-}
-
-static int callback_except_jf(const void *a, const void *b) {
- const struct EnvCtl_Item *item = a;
- const char *name = b;
-
- if (!globals.enable_artifactory) {
- return STASIS_ENVCTL_RET_IGNORE;
- }
-
- if (envctl_check_required(item->flags)) {
- const char *content = getenv(name);
- if (!content || isempty((char *) content)) {
- return STASIS_ENVCTL_RET_FAIL;
- }
- }
-
- return STASIS_ENVCTL_RET_SUCCESS;
-}
-
-static int callback_except_gh(const void *a, const void *b) {
- const struct EnvCtl_Item *item = a;
- const char *name = b;
- //printf("GH exception check: %s\n", name);
- if (envctl_check_required(item->flags) && envctl_check_present(item, name)) {
- return STASIS_ENVCTL_RET_SUCCESS;
- }
-
- return STASIS_ENVCTL_RET_FAIL;
-}
-
-static void check_system_env_requirements() {
- msg(STASIS_MSG_L1, "Checking environment\n");
- globals.envctl = envctl_init();
- envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "TMPDIR");
- envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_ROOT");
- envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_SYSCONFDIR");
- envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_CPU_COUNT");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED | STASIS_ENVCTL_REDACT, callback_except_gh, "STASIS_GH_TOKEN");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED, callback_except_jf, "STASIS_JF_ARTIFACTORY_URL");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_ACCESS_TOKEN");
- envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_JF_USER");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_PASSWORD");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_SSH_KEY_PATH");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_SSH_PASSPHRASE");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_CLIENT_CERT_CERT_PATH");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_CLIENT_CERT_KEY_PATH");
- envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED, callback_except_jf, "STASIS_JF_REPO");
- envctl_do_required(globals.envctl, globals.verbose);
-}
-
-static void check_system_requirements(struct Delivery *ctx) {
- const char *tools_required[] = {
- "rsync",
- NULL,
- };
-
- msg(STASIS_MSG_L1, "Checking system requirements\n");
- for (size_t i = 0; tools_required[i] != NULL; i++) {
- if (!find_program(tools_required[i])) {
- msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "'%s' must be installed.\n", tools_required[i]);
- exit(1);
- }
- }
-
- if (!globals.tmpdir && !ctx->storage.tmpdir) {
- delivery_init_tmpdir(ctx);
- }
-
- struct DockerCapabilities dcap;
- if (!docker_capable(&dcap)) {
- msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Docker is broken\n");
- msg(STASIS_MSG_L3, "Available: %s\n", dcap.available ? "Yes" : "No");
- msg(STASIS_MSG_L3, "Usable: %s\n", dcap.usable ? "Yes" : "No");
- msg(STASIS_MSG_L3, "Podman [Docker Emulation]: %s\n", dcap.podman ? "Yes" : "No");
- msg(STASIS_MSG_L3, "Build plugin(s): ");
- if (dcap.usable) {
- if (dcap.build & STASIS_DOCKER_BUILD) {
- printf("build ");
- }
- if (dcap.build & STASIS_DOCKER_BUILD_X) {
- printf("buildx ");
- }
- puts("");
- } else {
- printf("N/A\n");
- }
-
- // disable docker builds
- globals.enable_docker = false;
- }
-}
-
-static void check_requirements(struct Delivery *ctx) {
- check_system_requirements(ctx);
- check_system_env_requirements();
-}
int main(int argc, char *argv[]) {
struct Delivery ctx;
@@ -214,6 +25,10 @@ int main(int argc, char *argv[]) {
char installer_url[PATH_MAX];
char python_override_version[STASIS_NAME_MAX];
int user_disabled_docker = false;
+ globals.cpu_limit = get_cpu_count();
+ if (globals.cpu_limit > 1) {
+ globals.cpu_limit--; // max - 1
+ }
memset(env_name, 0, sizeof(env_name));
memset(env_name_testing, 0, sizeof(env_name_testing));
@@ -241,9 +56,29 @@ int main(int argc, char *argv[]) {
case 'p':
strcpy(python_override_version, optarg);
break;
+ case 'l':
+ globals.cpu_limit = strtol(optarg, NULL, 10);
+ if (globals.cpu_limit <= 1) {
+ globals.cpu_limit = 1;
+ globals.enable_parallel = false; // No point
+ }
+ break;
case OPT_ALWAYS_UPDATE_BASE:
globals.always_update_base_environment = true;
break;
+ case OPT_FAIL_FAST:
+ globals.parallel_fail_fast = true;
+ break;
+ case OPT_POOL_STATUS_INTERVAL:
+ globals.pool_status_interval = (int) strtol(optarg, NULL, 10);
+ if (globals.pool_status_interval < 1) {
+ globals.pool_status_interval = 1;
+ } else if (globals.pool_status_interval > 60 * 10) {
+ // Possible poor choice alert
+ fprintf(stderr, "Caution: Excessive pausing between status updates may cause third-party CI/CD"
+ " jobs to fail if the stdout/stderr streams are idle for too long!\n");
+ }
+ break;
case 'U':
setenv("PYTHONUNBUFFERED", "1", 1);
fflush(stdout);
@@ -273,6 +108,9 @@ int main(int argc, char *argv[]) {
case OPT_NO_REWRITE_SPEC_STAGE_2:
globals.enable_rewrite_spec_stage_2 = false;
break;
+ case OPT_NO_PARALLEL:
+ globals.enable_parallel = false;
+ break;
case '?':
default:
exit(1);
@@ -297,45 +135,8 @@ int main(int argc, char *argv[]) {
msg(STASIS_MSG_L1, "Setup\n");
- // Expose variables for use with the template engine
- // NOTE: These pointers are populated by delivery_init() so please avoid using
- // tpl_render() until then.
- tpl_register("meta.name", &ctx.meta.name);
- tpl_register("meta.version", &ctx.meta.version);
- tpl_register("meta.codename", &ctx.meta.codename);
- tpl_register("meta.mission", &ctx.meta.mission);
- tpl_register("meta.python", &ctx.meta.python);
- tpl_register("meta.python_compact", &ctx.meta.python_compact);
- tpl_register("info.time_str_epoch", &ctx.info.time_str_epoch);
- tpl_register("info.release_name", &ctx.info.release_name);
- tpl_register("info.build_name", &ctx.info.build_name);
- tpl_register("info.build_number", &ctx.info.build_number);
- tpl_register("storage.tmpdir", &ctx.storage.tmpdir);
- tpl_register("storage.output_dir", &ctx.storage.output_dir);
- tpl_register("storage.delivery_dir", &ctx.storage.delivery_dir);
- tpl_register("storage.conda_artifact_dir", &ctx.storage.conda_artifact_dir);
- tpl_register("storage.wheel_artifact_dir", &ctx.storage.wheel_artifact_dir);
- tpl_register("storage.build_sources_dir", &ctx.storage.build_sources_dir);
- tpl_register("storage.build_docker_dir", &ctx.storage.build_docker_dir);
- tpl_register("storage.results_dir", &ctx.storage.results_dir);
- tpl_register("storage.tools_dir", &ctx.storage.tools_dir);
- tpl_register("conda.installer_baseurl", &ctx.conda.installer_baseurl);
- tpl_register("conda.installer_name", &ctx.conda.installer_name);
- tpl_register("conda.installer_version", &ctx.conda.installer_version);
- tpl_register("conda.installer_arch", &ctx.conda.installer_arch);
- tpl_register("conda.installer_platform", &ctx.conda.installer_platform);
- tpl_register("deploy.jfrog.repo", &globals.jfrog.repo);
- tpl_register("deploy.jfrog.url", &globals.jfrog.url);
- tpl_register("deploy.docker.registry", &ctx.deploy.docker.registry);
- tpl_register("workaround.tox_posargs", &globals.workaround.tox_posargs);
- tpl_register("workaround.conda_reactivate", &globals.workaround.conda_reactivate);
-
- // Expose function(s) to the template engine
- // Prototypes can be found in template_func_proto.h
- tpl_register_func("get_github_release_notes", &get_github_release_notes_tplfunc_entrypoint, 3, NULL);
- tpl_register_func("get_github_release_notes_auto", &get_github_release_notes_auto_tplfunc_entrypoint, 1, &ctx);
- tpl_register_func("junitxml_file", &get_junitxml_file_entrypoint, 1, &ctx);
- tpl_register_func("basetemp_dir", &get_basetemp_dir_entrypoint, 1, &ctx);
+ tpl_setup_vars(&ctx);
+ tpl_setup_funcs(&ctx);
// Set up PREFIX/etc directory information
// The user may manipulate the base directory path with STASIS_SYSCONFDIR
@@ -423,9 +224,9 @@ int main(int argc, char *argv[]) {
}
msg(STASIS_MSG_L1, "Conda setup\n");
- delivery_get_installer_url(&ctx, installer_url);
+ delivery_get_conda_installer_url(&ctx, installer_url);
msg(STASIS_MSG_L2, "Downloading: %s\n", installer_url);
- if (delivery_get_installer(&ctx, installer_url)) {
+ if (delivery_get_conda_installer(&ctx, installer_url)) {
msg(STASIS_MSG_ERROR, "download failed: %s\n", installer_url);
exit(1);
}
@@ -443,33 +244,68 @@ int main(int argc, char *argv[]) {
msg(STASIS_MSG_L2, "Configuring: %s\n", ctx.storage.conda_install_prefix);
delivery_conda_enable(&ctx, ctx.storage.conda_install_prefix);
+ check_pathvar(&ctx);
+
+ //
+ // Implied environment creation modes/actions
+ //
+ // 1. No base environment config
+ // 1a. Caller is warned
+ // 1b. Caller has full control over all packages
+ // 2. Default base environment (etc/stasis/mission/[name]/base.yml)
+ // 2a. Depends on packages defined by base.yml
+ // 2b. Caller may issue a reduced package set in the INI config
+ // 2c. Caller must be vigilant to avoid incompatible packages (base.yml
+ // *should* have no version constraints)
+ // 3. External base environment (based_on=schema://[release_name].yml)
+ // 3a. Depends on a previous release or arbitrary yaml configuration
+ // 3b. Bugs, conflicts, and dependency resolution issues are inherited and
+ // must be handled in the INI config
+ msg(STASIS_MSG_L1, "Creating release environment(s)\n");
- char *pathvar = NULL;
- pathvar = getenv("PATH");
- if (!pathvar) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "PATH variable is not set. Cannot continue.\n");
- exit(1);
- } else {
- char pathvar_tmp[STASIS_BUFSIZ];
- sprintf(pathvar_tmp, "%s/bin:%s", ctx.storage.conda_install_prefix, pathvar);
- setenv("PATH", pathvar_tmp, 1);
- pathvar = NULL;
+ char *mission_base = NULL;
+ if (isempty(ctx.meta.based_on)) {
+ guard_free(ctx.meta.based_on);
+ char *mission_base_orig = NULL;
+
+ if (asprintf(&mission_base_orig, "%s/%s/base.yml", ctx.storage.mission_dir, ctx.meta.mission) < 0) {
+ SYSERROR("Unable to allocate bytes for %s/%s/base.yml path\n", ctx.storage.mission_dir, ctx.meta.mission);
+ exit(1);
+ }
+
+ if (access(mission_base_orig, F_OK) < 0) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Mission does not provide a base.yml configuration: %s (%s)\n",
+ ctx.meta.mission, ctx.storage.mission_dir);
+ } else {
+ msg(STASIS_MSG_L2, "Using base environment configuration: %s\n", mission_base_orig);
+ if (asprintf(&mission_base, "%s/%s-base.yml", ctx.storage.tmpdir, ctx.info.release_name) < 0) {
+ SYSERROR("%s", "Unable to allocate bytes for temporary base.yml configuration");
+ remove(mission_base);
+ exit(1);
+ }
+ copy2(mission_base_orig, mission_base, CT_OWNER | CT_PERM);
+ char spec[255] = {0};
+ snprintf(spec, sizeof(spec) - 1, "- python=%s\n", ctx.meta.python);
+ file_replace_text(mission_base, "- python\n", spec, 0);
+ ctx.meta.based_on = mission_base;
+ }
+ guard_free(mission_base_orig);
}
- msg(STASIS_MSG_L1, "Creating release environment(s)\n");
- if (ctx.meta.based_on && strlen(ctx.meta.based_on)) {
+ if (!isempty(ctx.meta.based_on)) {
if (conda_env_remove(env_name)) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove release environment: %s\n", env_name_testing);
- exit(1);
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove release environment: %s\n", env_name);
+ exit(1);
}
- msg(STASIS_MSG_L2, "Based on release: %s\n", ctx.meta.based_on);
+
+ msg(STASIS_MSG_L2, "Based on: %s\n", ctx.meta.based_on);
if (conda_env_create_from_uri(env_name, ctx.meta.based_on)) {
msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "unable to install release environment using configuration file\n");
exit(1);
}
if (conda_env_remove(env_name_testing)) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove testing environment\n");
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "failed to remove testing environment %s\n", env_name_testing);
exit(1);
}
if (conda_env_create_from_uri(env_name_testing, ctx.meta.based_on)) {
@@ -486,6 +322,8 @@ int main(int argc, char *argv[]) {
exit(1);
}
}
+ // The base environment configuration is not used past this point
+ remove(mission_base);
// Activate test environment
msg(STASIS_MSG_L1, "Activating test environment\n");
@@ -505,10 +343,18 @@ int main(int argc, char *argv[]) {
}
if (pip_exec("install build")) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "'build' tool installation failed");
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "'build' tool installation failed\n");
exit(1);
}
+ if (!isempty(ctx.meta.based_on)) {
+ msg(STASIS_MSG_L1, "Generating package overlay from environment: %s\n", env_name);
+ if (delivery_overlay_packages_from_env(&ctx, env_name)) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "%s", "Failed to generate package overlay. Resulting environment integrity cannot be guaranteed.\n");
+ exit(1);
+ }
+ }
+
msg(STASIS_MSG_L1, "Filter deliverable packages\n");
delivery_defer_packages(&ctx, DEFER_CONDA);
delivery_defer_packages(&ctx, DEFER_PIP);
diff --git a/src/cli/stasis/system_requirements.c b/src/cli/stasis/system_requirements.c
new file mode 100644
index 0000000..4554b93
--- /dev/null
+++ b/src/cli/stasis/system_requirements.c
@@ -0,0 +1,82 @@
+#include "system_requirements.h"
+
+void check_system_env_requirements() {
+ msg(STASIS_MSG_L1, "Checking environment\n");
+ globals.envctl = envctl_init();
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "TMPDIR");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_ROOT");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_SYSCONFDIR");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_CPU_COUNT");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED | STASIS_ENVCTL_REDACT, callback_except_gh, "STASIS_GH_TOKEN");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED, callback_except_jf, "STASIS_JF_ARTIFACTORY_URL");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_ACCESS_TOKEN");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_PASSTHRU, NULL, "STASIS_JF_USER");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_PASSWORD");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_SSH_KEY_PATH");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_SSH_PASSPHRASE");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_CLIENT_CERT_CERT_PATH");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REDACT, NULL, "STASIS_JF_CLIENT_CERT_KEY_PATH");
+ envctl_register(&globals.envctl, STASIS_ENVCTL_REQUIRED, callback_except_jf, "STASIS_JF_REPO");
+ envctl_do_required(globals.envctl, globals.verbose);
+}
+
+void check_system_requirements(struct Delivery *ctx) {
+ const char *tools_required[] = {
+ "rsync",
+ NULL,
+ };
+
+ msg(STASIS_MSG_L1, "Checking system requirements\n");
+ for (size_t i = 0; tools_required[i] != NULL; i++) {
+ if (!find_program(tools_required[i])) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "'%s' must be installed.\n", tools_required[i]);
+ exit(1);
+ }
+ }
+
+ if (!globals.tmpdir && !ctx->storage.tmpdir) {
+ delivery_init_tmpdir(ctx);
+ }
+
+ struct DockerCapabilities dcap;
+ if (!docker_capable(&dcap)) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Docker is broken\n");
+ msg(STASIS_MSG_L3, "Available: %s\n", dcap.available ? "Yes" : "No");
+ msg(STASIS_MSG_L3, "Usable: %s\n", dcap.usable ? "Yes" : "No");
+ msg(STASIS_MSG_L3, "Podman [Docker Emulation]: %s\n", dcap.podman ? "Yes" : "No");
+ msg(STASIS_MSG_L3, "Build plugin(s): ");
+ if (dcap.usable) {
+ if (dcap.build & STASIS_DOCKER_BUILD) {
+ printf("build ");
+ }
+ if (dcap.build & STASIS_DOCKER_BUILD_X) {
+ printf("buildx ");
+ }
+ puts("");
+ } else {
+ printf("N/A\n");
+ }
+
+ // disable docker builds
+ globals.enable_docker = false;
+ }
+}
+
+void check_requirements(struct Delivery *ctx) {
+ check_system_requirements(ctx);
+ check_system_env_requirements();
+}
+
+char *check_pathvar(struct Delivery *ctx) {
+ char *pathvar = NULL;
+ pathvar = getenv("PATH");
+ if (!pathvar) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "PATH variable is not set. Cannot continue.\n");
+ exit(1);
+ } else {
+ char pathvar_tmp[STASIS_BUFSIZ];
+ sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.conda_install_prefix, pathvar);
+ setenv("PATH", pathvar_tmp, 1);
+ pathvar = NULL;
+ }
+} \ No newline at end of file
diff --git a/src/cli/stasis/system_requirements.h b/src/cli/stasis/system_requirements.h
new file mode 100644
index 0000000..4c2231a
--- /dev/null
+++ b/src/cli/stasis/system_requirements.h
@@ -0,0 +1,13 @@
+#ifndef STASIS_SYSTEM_REQUIREMENTS_H
+#define STASIS_SYSTEM_REQUIREMENTS_H
+
+#include "delivery.h"
+#include "callbacks.h"
+#include "envctl.h"
+
+void check_system_env_requirements();
+void check_system_requirements(struct Delivery *ctx);
+void check_requirements(struct Delivery *ctx);
+char *check_pathvar(struct Delivery *ctx);
+
+#endif //STASIS_SYSTEM_REQUIREMENTS_H
diff --git a/src/cli/stasis/tpl.c b/src/cli/stasis/tpl.c
new file mode 100644
index 0000000..08eb1f3
--- /dev/null
+++ b/src/cli/stasis/tpl.c
@@ -0,0 +1,46 @@
+#include "delivery.h"
+#include "tpl.h"
+
+void tpl_setup_vars(struct Delivery *ctx) {
+ // Expose variables for use with the template engine
+ // NOTE: These pointers are populated by delivery_init() so please avoid using
+ // tpl_render() until then.
+ tpl_register("meta.name", &ctx->meta.name);
+ tpl_register("meta.version", &ctx->meta.version);
+ tpl_register("meta.codename", &ctx->meta.codename);
+ tpl_register("meta.mission", &ctx->meta.mission);
+ tpl_register("meta.python", &ctx->meta.python);
+ tpl_register("meta.python_compact", &ctx->meta.python_compact);
+ tpl_register("info.time_str_epoch", &ctx->info.time_str_epoch);
+ tpl_register("info.release_name", &ctx->info.release_name);
+ tpl_register("info.build_name", &ctx->info.build_name);
+ tpl_register("info.build_number", &ctx->info.build_number);
+ tpl_register("storage.tmpdir", &ctx->storage.tmpdir);
+ tpl_register("storage.output_dir", &ctx->storage.output_dir);
+ tpl_register("storage.delivery_dir", &ctx->storage.delivery_dir);
+ tpl_register("storage.conda_artifact_dir", &ctx->storage.conda_artifact_dir);
+ tpl_register("storage.wheel_artifact_dir", &ctx->storage.wheel_artifact_dir);
+ tpl_register("storage.build_sources_dir", &ctx->storage.build_sources_dir);
+ tpl_register("storage.build_docker_dir", &ctx->storage.build_docker_dir);
+ tpl_register("storage.results_dir", &ctx->storage.results_dir);
+ tpl_register("storage.tools_dir", &ctx->storage.tools_dir);
+ tpl_register("conda.installer_baseurl", &ctx->conda.installer_baseurl);
+ tpl_register("conda.installer_name", &ctx->conda.installer_name);
+ tpl_register("conda.installer_version", &ctx->conda.installer_version);
+ tpl_register("conda.installer_arch", &ctx->conda.installer_arch);
+ tpl_register("conda.installer_platform", &ctx->conda.installer_platform);
+ tpl_register("deploy.jfrog.repo", &globals.jfrog.repo);
+ tpl_register("deploy.jfrog.url", &globals.jfrog.url);
+ tpl_register("deploy.docker.registry", &ctx->deploy.docker.registry);
+ tpl_register("workaround.conda_reactivate", &globals.workaround.conda_reactivate);
+}
+
+void tpl_setup_funcs(struct Delivery *ctx) {
+ // Expose function(s) to the template engine
+ // Prototypes can be found in template_func_proto.h
+ tpl_register_func("get_github_release_notes", &get_github_release_notes_tplfunc_entrypoint, 3, NULL);
+ tpl_register_func("get_github_release_notes_auto", &get_github_release_notes_auto_tplfunc_entrypoint, 1, ctx);
+ tpl_register_func("junitxml_file", &get_junitxml_file_entrypoint, 1, ctx);
+ tpl_register_func("basetemp_dir", &get_basetemp_dir_entrypoint, 1, ctx);
+ tpl_register_func("tox_run", &tox_run_entrypoint, 2, ctx);
+} \ No newline at end of file
diff --git a/src/cli/stasis/tpl.h b/src/cli/stasis/tpl.h
new file mode 100644
index 0000000..398f0fe
--- /dev/null
+++ b/src/cli/stasis/tpl.h
@@ -0,0 +1,10 @@
+#ifndef STASIS_TPL_H
+#define STASIS_TPL_H
+
+#include "template.h"
+#include "template_func_proto.h"
+
+void tpl_setup_vars(struct Delivery *ctx);
+void tpl_setup_funcs(struct Delivery *ctx);
+
+#endif //STASIS_TPL_H
diff --git a/src/cli/stasis_indexer/CMakeLists.txt b/src/cli/stasis_indexer/CMakeLists.txt
new file mode 100644
index 0000000..eae1394
--- /dev/null
+++ b/src/cli/stasis_indexer/CMakeLists.txt
@@ -0,0 +1,6 @@
+add_executable(stasis_indexer
+ stasis_indexer.c
+)
+target_link_libraries(stasis_indexer PRIVATE stasis_core)
+
+install(TARGETS stasis_indexer RUNTIME)
diff --git a/src/stasis_indexer.c b/src/cli/stasis_indexer/stasis_indexer.c
index ef6375b..bd59920 100644
--- a/src/stasis_indexer.c
+++ b/src/cli/stasis_indexer/stasis_indexer.c
@@ -1,6 +1,7 @@
#include <getopt.h>
#include <fnmatch.h>
-#include "core.h"
+#include "delivery.h"
+#include "junitxml.h"
static struct option long_options[] = {
{"help", no_argument, 0, 'h'},
@@ -72,9 +73,9 @@ int indexer_combine_rootdirs(const char *dest, char **rootdirs, const size_t roo
if (!access(srcdir_with_output, F_OK)) {
srcdir = srcdir_with_output;
}
- sprintf(cmd + strlen(cmd), "'%s'/ ", srcdir);
+ snprintf(cmd + strlen(cmd), sizeof(srcdir) - strlen(srcdir) + 4, "'%s'/ ", srcdir);
}
- sprintf(cmd + strlen(cmd), "%s/", destdir);
+ snprintf(cmd + strlen(cmd), sizeof(cmd) - strlen(destdir) + 1, " %s/", destdir);
if (globals.verbose) {
puts(cmd);
@@ -311,12 +312,12 @@ int indexer_make_website(struct Delivery *ctx) {
// >= 1.10.0.1
if (pandoc_version >= 0x010a0001) {
- strcat(pandoc_versioned_args, "-f markdown+autolink_bare_uris ");
+ strcat(pandoc_versioned_args, "-f gfm+autolink_bare_uris ");
}
- // >= 3.1.10
- if (pandoc_version >= 0x03010a00) {
- strcat(pandoc_versioned_args, "-f markdown+alerts ");
+ // > 3.1.9
+ if (pandoc_version > 0x03010900) {
+ strcat(pandoc_versioned_args, "-f gfm+alerts ");
}
}
@@ -368,6 +369,8 @@ int indexer_make_website(struct Delivery *ctx) {
// This might be negative when killed by a signal.
// Otherwise, the return code is not critical to us.
if (system(cmd) < 0) {
+ guard_free(css_filename);
+ guard_strlist_free(&dirs);
return 1;
}
if (file_replace_text(fullpath_dest, ".md", ".html", 0)) {
@@ -381,28 +384,55 @@ int indexer_make_website(struct Delivery *ctx) {
char link_dest[PATH_MAX] = {0};
strcpy(link_from, "README.html");
sprintf(link_dest, "%s/%s", root, "index.html");
- symlink(link_from, link_dest);
+ if (symlink(link_from, link_dest)) {
+ SYSERROR("Warning: symlink(%s, %s) failed: %s", link_from, link_dest, strerror(errno));
+ }
}
}
guard_strlist_free(&inputs);
}
+ guard_free(css_filename);
guard_strlist_free(&dirs);
return 0;
}
-int indexer_conda(struct Delivery *ctx) {
+static int micromamba_configure(const struct Delivery *ctx, struct MicromambaInfo *m) {
int status = 0;
- char micromamba_prefix[PATH_MAX] = {0};
- sprintf(micromamba_prefix, "%s/bin", ctx->storage.tools_dir);
- struct MicromambaInfo m = {.conda_prefix = globals.conda_install_prefix, .micromamba_prefix = micromamba_prefix};
+ char *micromamba_prefix = NULL;
+ if (asprintf(&micromamba_prefix, "%s/bin", ctx->storage.tools_dir) < 0) {
+ return -1;
+ }
+ m->conda_prefix = globals.conda_install_prefix;
+ m->micromamba_prefix = micromamba_prefix;
+
+ size_t pathvar_len = (strlen(getenv("PATH")) + strlen(m->micromamba_prefix) + strlen(m->conda_prefix)) + 3 + 4 + 1;
+ // ^^^^^^^^^^^^^^^^^^
+ // 3 = separators
+ // 4 = chars (/bin)
+ // 1 = nul terminator
+ char *pathvar = calloc(pathvar_len, sizeof(*pathvar));
+ if (!pathvar) {
+ SYSERROR("%s", "Unable to allocate bytes for temporary path string");
+ exit(1);
+ }
+ snprintf(pathvar, pathvar_len, "%s/bin:%s:%s", m->conda_prefix, m->micromamba_prefix, getenv("PATH"));
+ setenv("PATH", pathvar, 1);
+ guard_free(pathvar);
- status += micromamba(&m, "config prepend --env channels conda-forge");
+ status += micromamba(m, "config prepend --env channels conda-forge");
if (!globals.verbose) {
- status += micromamba(&m, "config set --env quiet true");
+ status += micromamba(m, "config set --env quiet true");
}
- status += micromamba(&m, "config set --env always_yes true");
- status += micromamba(&m, "install conda-build");
+ status += micromamba(m, "config set --env always_yes true");
+ status += micromamba(m, "install conda-build pandoc");
+
+ return status;
+}
+
+int indexer_conda(struct Delivery *ctx, struct MicromambaInfo m) {
+ int status = 0;
+
status += micromamba(&m, "run conda index %s", ctx->storage.conda_artifact_dir);
return status;
}
@@ -733,6 +763,12 @@ int main(int argc, char *argv[]) {
int i = 0;
while (optind < argc) {
+ if (argv[optind]) {
+ if (access(argv[optind], F_OK) < 0) {
+ fprintf(stderr, "%s: %s\n", argv[optind], strerror(errno));
+ exit(1);
+ }
+ }
// use first positional argument
rootdirs[i] = realpath(argv[optind], NULL);
optind++;
@@ -820,8 +856,14 @@ int main(int argc, char *argv[]) {
mkdirs(ctx.storage.wheel_artifact_dir, 0755);
}
+ struct MicromambaInfo m;
+ if (micromamba_configure(&ctx, &m)) {
+ SYSERROR("%s", "Unable to configure micromamba");
+ exit(1);
+ }
+
msg(STASIS_MSG_L1, "Indexing conda packages\n");
- if (indexer_conda(&ctx)) {
+ if (indexer_conda(&ctx, m)) {
SYSERROR("%s", "Conda package indexing operation failed");
exit(1);
}
diff --git a/src/delivery.c b/src/delivery.c
deleted file mode 100644
index 3e99aad..0000000
--- a/src/delivery.c
+++ /dev/null
@@ -1,2219 +0,0 @@
-#define _GNU_SOURCE
-
-#include <fnmatch.h>
-#include "core.h"
-
-extern struct STASIS_GLOBAL globals;
-
-static void ini_has_key_required(struct INIFILE *ini, const char *section_name, char *key) {
- int status = ini_has_key(ini, section_name, key);
- if (!status) {
- SYSERROR("%s:%s key is required but not defined", section_name, key);
- exit(1);
- }
-}
-
-static void conv_str(char **x, union INIVal val) {
- if (*x) {
- guard_free(*x);
- }
- if (val.as_char_p) {
- char *tplop = tpl_render(val.as_char_p);
- if (tplop) {
- *x = tplop;
- } else {
- *x = NULL;
- }
- } else {
- *x = NULL;
- }
-}
-
-int delivery_init_tmpdir(struct Delivery *ctx) {
- char *tmpdir = NULL;
- char *x = NULL;
- int unusable = 0;
- errno = 0;
-
- x = getenv("TMPDIR");
- if (x) {
- guard_free(ctx->storage.tmpdir);
- tmpdir = strdup(x);
- } else {
- tmpdir = ctx->storage.tmpdir;
- }
-
- if (!tmpdir) {
- // memory error
- return -1;
- }
-
- // If the directory doesn't exist, create it
- if (access(tmpdir, F_OK) < 0) {
- if (mkdirs(tmpdir, 0755) < 0) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create temporary storage directory: %s (%s)\n", tmpdir, strerror(errno));
- goto l_delivery_init_tmpdir_fatal;
- }
- }
-
- // If we can't read, write, or execute, then die
- if (access(tmpdir, R_OK | W_OK | X_OK) < 0) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s requires at least 0755 permissions.\n");
- goto l_delivery_init_tmpdir_fatal;
- }
-
- struct statvfs st;
- if (statvfs(tmpdir, &st) < 0) {
- goto l_delivery_init_tmpdir_fatal;
- }
-
-#if defined(STASIS_OS_LINUX)
- // If we can't execute programs, or write data to the file system at all, then die
- if ((st.f_flag & ST_NOEXEC) != 0) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted with noexec\n", tmpdir);
- goto l_delivery_init_tmpdir_fatal;
- }
-#endif
- if ((st.f_flag & ST_RDONLY) != 0) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted read-only\n", tmpdir);
- goto l_delivery_init_tmpdir_fatal;
- }
-
- if (!globals.tmpdir) {
- globals.tmpdir = strdup(tmpdir);
- }
-
- if (!ctx->storage.tmpdir) {
- ctx->storage.tmpdir = strdup(globals.tmpdir);
- }
- return unusable;
-
- l_delivery_init_tmpdir_fatal:
- unusable = 1;
- return unusable;
-}
-
-void delivery_free(struct Delivery *ctx) {
- guard_free(ctx->system.arch);
- GENERIC_ARRAY_FREE(ctx->system.platform);
- guard_free(ctx->meta.name);
- guard_free(ctx->meta.version);
- guard_free(ctx->meta.codename);
- guard_free(ctx->meta.mission);
- guard_free(ctx->meta.python);
- guard_free(ctx->meta.mission);
- guard_free(ctx->meta.python_compact);
- guard_free(ctx->meta.based_on);
- guard_runtime_free(ctx->runtime.environ);
- guard_free(ctx->storage.root);
- guard_free(ctx->storage.tmpdir);
- guard_free(ctx->storage.delivery_dir);
- guard_free(ctx->storage.tools_dir);
- guard_free(ctx->storage.package_dir);
- guard_free(ctx->storage.results_dir);
- guard_free(ctx->storage.output_dir);
- guard_free(ctx->storage.conda_install_prefix);
- guard_free(ctx->storage.conda_artifact_dir);
- guard_free(ctx->storage.conda_staging_dir);
- guard_free(ctx->storage.conda_staging_url);
- guard_free(ctx->storage.wheel_artifact_dir);
- guard_free(ctx->storage.wheel_staging_dir);
- guard_free(ctx->storage.wheel_staging_url);
- guard_free(ctx->storage.build_dir);
- guard_free(ctx->storage.build_recipes_dir);
- guard_free(ctx->storage.build_sources_dir);
- guard_free(ctx->storage.build_testing_dir);
- guard_free(ctx->storage.build_docker_dir);
- guard_free(ctx->storage.mission_dir);
- guard_free(ctx->storage.docker_artifact_dir);
- guard_free(ctx->storage.meta_dir);
- guard_free(ctx->storage.package_dir);
- guard_free(ctx->storage.cfgdump_dir);
- guard_free(ctx->info.time_str_epoch);
- guard_free(ctx->info.build_name);
- guard_free(ctx->info.build_number);
- guard_free(ctx->info.release_name);
- guard_free(ctx->conda.installer_baseurl);
- guard_free(ctx->conda.installer_name);
- guard_free(ctx->conda.installer_version);
- guard_free(ctx->conda.installer_platform);
- guard_free(ctx->conda.installer_arch);
- guard_free(ctx->conda.installer_path);
- guard_free(ctx->conda.tool_version);
- guard_free(ctx->conda.tool_build_version);
- guard_strlist_free(&ctx->conda.conda_packages);
- guard_strlist_free(&ctx->conda.conda_packages_defer);
- guard_strlist_free(&ctx->conda.pip_packages);
- guard_strlist_free(&ctx->conda.pip_packages_defer);
- guard_strlist_free(&ctx->conda.wheels_packages);
-
- for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
- guard_free(ctx->tests[i].name);
- guard_free(ctx->tests[i].version);
- guard_free(ctx->tests[i].repository);
- guard_free(ctx->tests[i].repository_info_ref);
- guard_free(ctx->tests[i].repository_info_tag);
- guard_strlist_free(&ctx->tests[i].repository_remove_tags);
- guard_free(ctx->tests[i].script);
- guard_free(ctx->tests[i].build_recipe);
- // test-specific runtime variables
- guard_runtime_free(ctx->tests[i].runtime.environ);
- }
-
- guard_free(ctx->rules.release_fmt);
- guard_free(ctx->rules.build_name_fmt);
- guard_free(ctx->rules.build_number_fmt);
-
- guard_free(ctx->deploy.docker.test_script);
- guard_free(ctx->deploy.docker.registry);
- guard_free(ctx->deploy.docker.image_compression);
- guard_strlist_free(&ctx->deploy.docker.tags);
- guard_strlist_free(&ctx->deploy.docker.build_args);
-
- for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(ctx->deploy.jfrog[0]); i++) {
- guard_free(ctx->deploy.jfrog[i].repo);
- guard_free(ctx->deploy.jfrog[i].dest);
- guard_strlist_free(&ctx->deploy.jfrog[i].files);
- }
-
- if (ctx->_stasis_ini_fp.delivery) {
- ini_free(&ctx->_stasis_ini_fp.delivery);
- }
- guard_free(ctx->_stasis_ini_fp.delivery_path);
-
- if (ctx->_stasis_ini_fp.cfg) {
- // optional extras
- ini_free(&ctx->_stasis_ini_fp.cfg);
- }
- guard_free(ctx->_stasis_ini_fp.cfg_path);
-
- if (ctx->_stasis_ini_fp.mission) {
- ini_free(&ctx->_stasis_ini_fp.mission);
- }
- guard_free(ctx->_stasis_ini_fp.mission_path);
-}
-
-void delivery_init_dirs_stage2(struct Delivery *ctx) {
- path_store(&ctx->storage.build_recipes_dir, PATH_MAX, ctx->storage.build_dir, "recipes");
- path_store(&ctx->storage.build_sources_dir, PATH_MAX, ctx->storage.build_dir, "sources");
- path_store(&ctx->storage.build_testing_dir, PATH_MAX, ctx->storage.build_dir, "testing");
- path_store(&ctx->storage.build_docker_dir, PATH_MAX, ctx->storage.build_dir, "docker");
-
- path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery");
- path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results");
- path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages");
- path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config");
- path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta");
-
- path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda");
- path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels");
- path_store(&ctx->storage.docker_artifact_dir, PATH_MAX, ctx->storage.package_dir, "docker");
-}
-
-void delivery_init_dirs_stage1(struct Delivery *ctx) {
- char *rootdir = getenv("STASIS_ROOT");
- if (rootdir) {
- if (isempty(rootdir)) {
- fprintf(stderr, "STASIS_ROOT is set, but empty. Please assign a file system path to this environment variable.\n");
- exit(1);
- }
- path_store(&ctx->storage.root, PATH_MAX, rootdir, ctx->info.build_name);
- } else {
- // use "stasis" in current working directory
- path_store(&ctx->storage.root, PATH_MAX, "stasis", ctx->info.build_name);
- }
- path_store(&ctx->storage.tools_dir, PATH_MAX, ctx->storage.root, "tools");
- path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp");
- if (delivery_init_tmpdir(ctx)) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Set $TMPDIR to a location other than %s\n", globals.tmpdir);
- if (globals.tmpdir)
- guard_free(globals.tmpdir);
- exit(1);
- }
-
- path_store(&ctx->storage.build_dir, PATH_MAX, ctx->storage.root, "build");
- path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output");
-
- if (!ctx->storage.mission_dir) {
- path_store(&ctx->storage.mission_dir, PATH_MAX, globals.sysconfdir, "mission");
- }
-
- if (access(ctx->storage.mission_dir, F_OK)) {
- msg(STASIS_MSG_L1, "%s: %s\n", ctx->storage.mission_dir, strerror(errno));
- exit(1);
- }
-
- // Override installation prefix using global configuration key
- if (globals.conda_install_prefix && strlen(globals.conda_install_prefix)) {
- // user wants a specific path
- globals.conda_fresh_start = false;
- /*
- if (mkdirs(globals.conda_install_prefix, 0755)) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create directory: %s: %s\n",
- strerror(errno), globals.conda_install_prefix);
- exit(1);
- }
- */
- /*
- ctx->storage.conda_install_prefix = realpath(globals.conda_install_prefix, NULL);
- if (!ctx->storage.conda_install_prefix) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "realpath(): Conda installation prefix reassignment failed\n");
- exit(1);
- }
- ctx->storage.conda_install_prefix = strdup(globals.conda_install_prefix);
- */
- path_store(&ctx->storage.conda_install_prefix, PATH_MAX, globals.conda_install_prefix, "conda");
- } else {
- // install conda under the STASIS tree
- path_store(&ctx->storage.conda_install_prefix, PATH_MAX, ctx->storage.tools_dir, "conda");
- }
-}
-
-int delivery_init_platform(struct Delivery *ctx) {
- msg(STASIS_MSG_L2, "Setting architecture\n");
- char archsuffix[20];
- struct utsname uts;
- if (uname(&uts)) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "uname() failed: %s\n", strerror(errno));
- return -1;
- }
-
- ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX + 1, sizeof(*ctx->system.platform));
- if (!ctx->system.platform) {
- SYSERROR("Unable to allocate %d records for platform array\n", DELIVERY_PLATFORM_MAX);
- return -1;
- }
- for (size_t i = 0; i < DELIVERY_PLATFORM_MAX; i++) {
- ctx->system.platform[i] = calloc(DELIVERY_PLATFORM_MAXLEN, sizeof(*ctx->system.platform[0]));
- }
-
- ctx->system.arch = strdup(uts.machine);
- if (!ctx->system.arch) {
- // memory error
- return -1;
- }
-
- if (!strcmp(ctx->system.arch, "x86_64")) {
- strcpy(archsuffix, "64");
- } else {
- strcpy(archsuffix, ctx->system.arch);
- }
-
- msg(STASIS_MSG_L2, "Setting platform\n");
- strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname);
- if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) {
- sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix);
- strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "MacOSX");
- strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "macos");
- } else if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux")) {
- sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "linux-%s", archsuffix);
- strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "Linux");
- strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "linux");
- } else {
- // Not explicitly supported systems
- strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], ctx->system.platform[DELIVERY_PLATFORM]);
- strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], ctx->system.platform[DELIVERY_PLATFORM]);
- strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], ctx->system.platform[DELIVERY_PLATFORM]);
- tolower_s(ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
- }
-
- long cpu_count = get_cpu_count();
- if (!cpu_count) {
- fprintf(stderr, "Unable to determine CPU count. Falling back to 1.\n");
- cpu_count = 1;
- }
- char ncpus[100] = {0};
- sprintf(ncpus, "%ld", cpu_count);
-
- // Declare some important bits as environment variables
- setenv("CPU_COUNT", ncpus, 1);
- setenv("STASIS_CPU_COUNT", ncpus, 1);
- setenv("STASIS_ARCH", ctx->system.arch, 1);
- setenv("STASIS_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM], 1);
- setenv("STASIS_CONDA_ARCH", ctx->system.arch, 1);
- setenv("STASIS_CONDA_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], 1);
- setenv("STASIS_CONDA_PLATFORM_SUBDIR", ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], 1);
-
- // Register template variables
- // These were moved out of main() because we can't take the address of system.platform[x]
- // _before_ the array has been initialized.
- tpl_register("system.arch", &ctx->system.arch);
- tpl_register("system.platform", &ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
-
- return 0;
-}
-
-static int populate_mission_ini(struct Delivery **ctx, int render_mode) {
- int err = 0;
- struct INIFILE *ini;
-
- if ((*ctx)->_stasis_ini_fp.mission) {
- return 0;
- }
-
- // Now populate the rules
- char missionfile[PATH_MAX] = {0};
- if (getenv("STASIS_SYSCONFDIR")) {
- sprintf(missionfile, "%s/%s/%s/%s.ini",
- getenv("STASIS_SYSCONFDIR"), "mission", (*ctx)->meta.mission, (*ctx)->meta.mission);
- } else {
- sprintf(missionfile, "%s/%s/%s/%s.ini",
- globals.sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission);
- }
-
- msg(STASIS_MSG_L2, "Reading mission configuration: %s\n", missionfile);
- (*ctx)->_stasis_ini_fp.mission = ini_open(missionfile);
- ini = (*ctx)->_stasis_ini_fp.mission;
- if (!ini) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read misson configuration: %s, %s\n", missionfile, strerror(errno));
- exit(1);
- }
- (*ctx)->_stasis_ini_fp.mission_path = strdup(missionfile);
-
- (*ctx)->rules.release_fmt = ini_getval_str(ini, "meta", "release_fmt", render_mode, &err);
-
- // Used for setting artifactory build info
- (*ctx)->rules.build_name_fmt = ini_getval_str(ini, "meta", "build_name_fmt", render_mode, &err);
-
- // Used for setting artifactory build info
- (*ctx)->rules.build_number_fmt = ini_getval_str(ini, "meta", "build_number_fmt", render_mode, &err);
- return 0;
-}
-
-void validate_delivery_ini(struct INIFILE *ini) {
- if (!ini) {
- SYSERROR("%s", "INIFILE is NULL!");
- exit(1);
- }
- if (ini_section_search(&ini, INI_SEARCH_EXACT, "meta")) {
- ini_has_key_required(ini, "meta", "name");
- ini_has_key_required(ini, "meta", "version");
- ini_has_key_required(ini, "meta", "rc");
- ini_has_key_required(ini, "meta", "mission");
- ini_has_key_required(ini, "meta", "python");
- } else {
- SYSERROR("%s", "[meta] configuration section is required");
- exit(1);
- }
-
- if (ini_section_search(&ini, INI_SEARCH_EXACT, "conda")) {
- ini_has_key_required(ini, "conda", "installer_name");
- ini_has_key_required(ini, "conda", "installer_version");
- ini_has_key_required(ini, "conda", "installer_platform");
- ini_has_key_required(ini, "conda", "installer_arch");
- } else {
- SYSERROR("%s", "[conda] configuration section is required");
- exit(1);
- }
-
- for (size_t i = 0; i < ini->section_count; i++) {
- struct INISection *section = ini->section[i];
- if (section && startswith(section->key, "test:")) {
- char *name = strstr(section->key, ":");
- if (name && strlen(name) > 1) {
- name = &name[1];
- }
- //ini_has_key_required(ini, section->key, "version");
- //ini_has_key_required(ini, section->key, "repository");
- if (globals.enable_testing) {
- ini_has_key_required(ini, section->key, "script");
- }
- }
- }
-
- if (ini_section_search(&ini, INI_SEARCH_EXACT, "deploy:docker")) {
- // yeah?
- }
-
- for (size_t i = 0; i < ini->section_count; i++) {
- struct INISection *section = ini->section[i];
- if (section && startswith(section->key, "deploy:artifactory")) {
- ini_has_key_required(ini, section->key, "files");
- ini_has_key_required(ini, section->key, "dest");
- }
- }
-}
-
-static int populate_delivery_ini(struct Delivery *ctx, int render_mode) {
- union INIVal val;
- struct INIFILE *ini = ctx->_stasis_ini_fp.delivery;
- struct INIData *rtdata;
- RuntimeEnv *rt;
-
- validate_delivery_ini(ini);
- // Populate runtime variables first they may be interpreted by other
- // keys in the configuration
- rt = runtime_copy(__environ);
- while ((rtdata = ini_getall(ini, "runtime")) != NULL) {
- char rec[STASIS_BUFSIZ];
- sprintf(rec, "%s=%s", lstrip(strip(rtdata->key)), lstrip(strip(rtdata->value)));
- runtime_set(rt, rtdata->key, rtdata->value);
- }
- runtime_apply(rt);
- ctx->runtime.environ = rt;
-
- int err = 0;
- ctx->meta.mission = ini_getval_str(ini, "meta", "mission", render_mode, &err);
-
- if (!strcasecmp(ctx->meta.mission, "hst")) {
- ctx->meta.codename = ini_getval_str(ini, "meta", "codename", render_mode, &err);
- } else {
- ctx->meta.codename = NULL;
- }
-
- ctx->meta.version = ini_getval_str(ini, "meta", "version", render_mode, &err);
- ctx->meta.name = ini_getval_str(ini, "meta", "name", render_mode, &err);
- ctx->meta.rc = ini_getval_int(ini, "meta", "rc", render_mode, &err);
- ctx->meta.final = ini_getval_bool(ini, "meta", "final", render_mode, &err);
- ctx->meta.based_on = ini_getval_str(ini, "meta", "based_on", render_mode, &err);
-
- if (!ctx->meta.python) {
- ctx->meta.python = ini_getval_str(ini, "meta", "python", render_mode, &err);
- guard_free(ctx->meta.python_compact);
- ctx->meta.python_compact = to_short_version(ctx->meta.python);
- } else {
- ini_setval(&ini, INI_SETVAL_REPLACE, "meta", "python", ctx->meta.python);
- }
-
- ctx->conda.installer_name = ini_getval_str(ini, "conda", "installer_name", render_mode, &err);
- ctx->conda.installer_version = ini_getval_str(ini, "conda", "installer_version", render_mode, &err);
- ctx->conda.installer_platform = ini_getval_str(ini, "conda", "installer_platform", render_mode, &err);
- ctx->conda.installer_arch = ini_getval_str(ini, "conda", "installer_arch", render_mode, &err);
- ctx->conda.installer_baseurl = ini_getval_str(ini, "conda", "installer_baseurl", render_mode, &err);
- ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", " "LINE_SEP, render_mode, &err);
-
- if (ctx->conda.conda_packages->data && ctx->conda.conda_packages->data[0] && strpbrk(ctx->conda.conda_packages->data[0], " \t")) {
- normalize_space(ctx->conda.conda_packages->data[0]);
- replace_text(ctx->conda.conda_packages->data[0], " ", LINE_SEP, 0);
- char *pip_packages_replacement = join(ctx->conda.conda_packages->data, LINE_SEP);
- ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "conda_packages", pip_packages_replacement);
- guard_free(pip_packages_replacement);
- guard_strlist_free(&ctx->conda.conda_packages);
- ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", LINE_SEP, render_mode, &err);
- }
-
- for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) {
- char *pkg = strlist_item(ctx->conda.conda_packages, i);
- if (strpbrk(pkg, ";#") || isempty(pkg)) {
- strlist_remove(ctx->conda.conda_packages, i);
- }
- }
-
- ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err);
- if (ctx->conda.pip_packages->data && ctx->conda.pip_packages->data[0] && strpbrk(ctx->conda.pip_packages->data[0], " \t")) {
- normalize_space(ctx->conda.pip_packages->data[0]);
- replace_text(ctx->conda.pip_packages->data[0], " ", LINE_SEP, 0);
- char *pip_packages_replacement = join(ctx->conda.pip_packages->data, LINE_SEP);
- ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "pip_packages", pip_packages_replacement);
- guard_free(pip_packages_replacement);
- guard_strlist_free(&ctx->conda.pip_packages);
- ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err);
- }
-
- for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) {
- char *pkg = strlist_item(ctx->conda.pip_packages, i);
- if (strpbrk(pkg, ";#") || isempty(pkg)) {
- strlist_remove(ctx->conda.pip_packages, i);
- }
- }
-
- // Delivery metadata consumed
- populate_mission_ini(&ctx, render_mode);
-
- if (ctx->info.release_name) {
- guard_free(ctx->info.release_name);
- guard_free(ctx->info.build_name);
- guard_free(ctx->info.build_number);
- }
-
- if (delivery_format_str(ctx, &ctx->info.release_name, ctx->rules.release_fmt)) {
- fprintf(stderr, "Failed to generate release name. Format used: %s\n", ctx->rules.release_fmt);
- return -1;
- }
-
- if (!ctx->info.build_name) {
- delivery_format_str(ctx, &ctx->info.build_name, ctx->rules.build_name_fmt);
- }
- if (!ctx->info.build_number) {
- delivery_format_str(ctx, &ctx->info.build_number, ctx->rules.build_number_fmt);
- }
-
- // Best I can do to make output directories unique. Annoying.
- delivery_init_dirs_stage2(ctx);
-
- if (!ctx->conda.conda_packages_defer) {
- ctx->conda.conda_packages_defer = strlist_init();
- }
- if (!ctx->conda.pip_packages_defer) {
- ctx->conda.pip_packages_defer = strlist_init();
- }
-
- for (size_t z = 0, i = 0; i < ini->section_count; i++) {
- char *section_name = ini->section[i]->key;
- if (startswith(section_name, "test:")) {
- struct Test *test = &ctx->tests[z];
- val.as_char_p = strchr(ini->section[i]->key, ':') + 1;
- if (val.as_char_p && isempty(val.as_char_p)) {
- return 1;
- }
- conv_str(&test->name, val);
-
- test->version = ini_getval_str(ini, section_name, "version", render_mode, &err);
- test->repository = ini_getval_str(ini, section_name, "repository", render_mode, &err);
- test->script = ini_getval_str(ini, section_name, "script", INI_READ_RAW, &err);
- test->repository_remove_tags = ini_getval_strlist(ini, section_name, "repository_remove_tags", LINE_SEP, render_mode, &err);
- test->build_recipe = ini_getval_str(ini, section_name, "build_recipe", render_mode, &err);
- test->runtime.environ = ini_getval_strlist(ini, section_name, "runtime", LINE_SEP, render_mode, &err);
- z++;
- }
- }
-
- for (size_t z = 0, i = 0; i < ini->section_count; i++) {
- char *section_name = ini->section[i]->key;
- struct Deploy *deploy = &ctx->deploy;
- if (startswith(section_name, "deploy:artifactory")) {
- struct JFrog *jfrog = &deploy->jfrog[z];
- // Artifactory base configuration
-
- jfrog->upload_ctx.workaround_parent_only = ini_getval_bool(ini, section_name, "workaround_parent_only", render_mode, &err);
- jfrog->upload_ctx.exclusions = ini_getval_str(ini, section_name, "exclusions", render_mode, &err);
- jfrog->upload_ctx.explode = ini_getval_bool(ini, section_name, "explode", render_mode, &err);
- jfrog->upload_ctx.recursive = ini_getval_bool(ini, section_name, "recursive", render_mode, &err);
- jfrog->upload_ctx.retries = ini_getval_int(ini, section_name, "retries", render_mode, &err);
- jfrog->upload_ctx.retry_wait_time = ini_getval_int(ini, section_name, "retry_wait_time", render_mode, &err);
- jfrog->upload_ctx.detailed_summary = ini_getval_bool(ini, section_name, "detailed_summary", render_mode, &err);
- jfrog->upload_ctx.quiet = ini_getval_bool(ini, section_name, "quiet", render_mode, &err);
- jfrog->upload_ctx.regexp = ini_getval_bool(ini, section_name, "regexp", render_mode, &err);
- jfrog->upload_ctx.spec = ini_getval_str(ini, section_name, "spec", render_mode, &err);
- jfrog->upload_ctx.flat = ini_getval_bool(ini, section_name, "flat", render_mode, &err);
- jfrog->repo = ini_getval_str(ini, section_name, "repo", render_mode, &err);
- jfrog->dest = ini_getval_str(ini, section_name, "dest", render_mode, &err);
- jfrog->files = ini_getval_strlist(ini, section_name, "files", LINE_SEP, render_mode, &err);
- z++;
- }
- }
-
- for (size_t i = 0; i < ini->section_count; i++) {
- char *section_name = ini->section[i]->key;
- struct Deploy *deploy = &ctx->deploy;
- if (startswith(ini->section[i]->key, "deploy:docker")) {
- struct Docker *docker = &deploy->docker;
-
- docker->registry = ini_getval_str(ini, section_name, "registry", render_mode, &err);
- docker->image_compression = ini_getval_str(ini, section_name, "image_compression", render_mode, &err);
- docker->test_script = ini_getval_str(ini, section_name, "test_script", render_mode, &err);
- docker->build_args = ini_getval_strlist(ini, section_name, "build_args", LINE_SEP, render_mode, &err);
- docker->tags = ini_getval_strlist(ini, section_name, "tags", LINE_SEP, render_mode, &err);
- }
- }
- return 0;
-}
-
-int populate_delivery_cfg(struct Delivery *ctx, int render_mode) {
- struct INIFILE *cfg = ctx->_stasis_ini_fp.cfg;
- if (!cfg) {
- return -1;
- }
- int err = 0;
- ctx->storage.conda_staging_dir = ini_getval_str(cfg, "default", "conda_staging_dir", render_mode, &err);
- ctx->storage.conda_staging_url = ini_getval_str(cfg, "default", "conda_staging_url", render_mode, &err);
- ctx->storage.wheel_staging_dir = ini_getval_str(cfg, "default", "wheel_staging_dir", render_mode, &err);
- ctx->storage.wheel_staging_url = ini_getval_str(cfg, "default", "wheel_staging_url", render_mode, &err);
- globals.conda_fresh_start = ini_getval_bool(cfg, "default", "conda_fresh_start", render_mode, &err);
- if (!globals.continue_on_error) {
- globals.continue_on_error = ini_getval_bool(cfg, "default", "continue_on_error", render_mode, &err);
- }
- if (!globals.always_update_base_environment) {
- globals.always_update_base_environment = ini_getval_bool(cfg, "default", "always_update_base_environment", render_mode, &err);
- }
- globals.conda_install_prefix = ini_getval_str(cfg, "default", "conda_install_prefix", render_mode, &err);
- globals.conda_packages = ini_getval_strlist(cfg, "default", "conda_packages", LINE_SEP, render_mode, &err);
- globals.pip_packages = ini_getval_strlist(cfg, "default", "pip_packages", LINE_SEP, render_mode, &err);
-
- globals.jfrog.jfrog_artifactory_base_url = ini_getval_str(cfg, "jfrog_cli_download", "url", render_mode, &err);
- globals.jfrog.jfrog_artifactory_product = ini_getval_str(cfg, "jfrog_cli_download", "product", render_mode, &err);
- globals.jfrog.cli_major_ver = ini_getval_str(cfg, "jfrog_cli_download", "version_series", render_mode, &err);
- globals.jfrog.version = ini_getval_str(cfg, "jfrog_cli_download", "version", render_mode, &err);
- globals.jfrog.remote_filename = ini_getval_str(cfg, "jfrog_cli_download", "filename", render_mode, &err);
- globals.jfrog.url = ini_getval_str(cfg, "deploy:artifactory", "url", render_mode, &err);
- globals.jfrog.repo = ini_getval_str(cfg, "deploy:artifactory", "repo", render_mode, &err);
-
- return 0;
-}
-
-static int populate_info(struct Delivery *ctx) {
- if (!ctx->info.time_str_epoch) {
- // Record timestamp used for release
- time(&ctx->info.time_now);
- ctx->info.time_info = localtime(&ctx->info.time_now);
-
- ctx->info.time_str_epoch = calloc(STASIS_TIME_STR_MAX, sizeof(*ctx->info.time_str_epoch));
- if (!ctx->info.time_str_epoch) {
- msg(STASIS_MSG_ERROR, "Unable to allocate memory for Unix epoch string\n");
- return -1;
- }
- snprintf(ctx->info.time_str_epoch, STASIS_TIME_STR_MAX - 1, "%li", ctx->info.time_now);
- }
- return 0;
-}
-
-int *bootstrap_build_info(struct Delivery *ctx) {
- struct Delivery local;
- memset(&local, 0, sizeof(local));
- local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path);
- local._stasis_ini_fp.delivery = ini_open(ctx->_stasis_ini_fp.delivery_path);
- delivery_init_platform(&local);
- populate_delivery_cfg(&local, INI_READ_RENDER);
- populate_delivery_ini(&local, INI_READ_RENDER);
- populate_info(&local);
- ctx->info.build_name = strdup(local.info.build_name);
- ctx->info.build_number = strdup(local.info.build_number);
- ctx->info.release_name = strdup(local.info.release_name);
- memcpy(&ctx->info.time_info, &local.info.time_info, sizeof(ctx->info.time_info));
- ctx->info.time_now = local.info.time_now;
- ctx->info.time_str_epoch = strdup(local.info.time_str_epoch);
- delivery_free(&local);
- return 0;
-}
-
-int delivery_init(struct Delivery *ctx, int render_mode) {
- populate_info(ctx);
- populate_delivery_cfg(ctx, INI_READ_RENDER);
-
- // Set artifactory URL via environment variable if possible
- char *jfurl = getenv("STASIS_JF_ARTIFACTORY_URL");
- if (jfurl) {
- if (globals.jfrog.url) {
- guard_free(globals.jfrog.url);
- }
- globals.jfrog.url = strdup(jfurl);
- }
-
- // Set artifactory repository via environment if possible
- char *jfrepo = getenv("STASIS_JF_REPO");
- if (jfrepo) {
- if (globals.jfrog.repo) {
- guard_free(globals.jfrog.repo);
- }
- globals.jfrog.repo = strdup(jfrepo);
- }
-
- // Configure architecture and platform information
- delivery_init_platform(ctx);
-
- // Create STASIS directory structure
- delivery_init_dirs_stage1(ctx);
-
- char config_local[PATH_MAX];
- sprintf(config_local, "%s/%s", ctx->storage.tmpdir, "config");
- setenv("XDG_CONFIG_HOME", config_local, 1);
-
- char cache_local[PATH_MAX];
- sprintf(cache_local, "%s/%s", ctx->storage.tmpdir, "cache");
- setenv("XDG_CACHE_HOME", ctx->storage.tmpdir, 1);
-
- // add tools to PATH
- char pathvar_tmp[STASIS_BUFSIZ];
- sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.tools_dir, getenv("PATH"));
- setenv("PATH", pathvar_tmp, 1);
-
- // Prevent git from paginating output
- setenv("GIT_PAGER", "", 1);
-
- populate_delivery_ini(ctx, render_mode);
-
- if (ctx->deploy.docker.tags) {
- for (size_t i = 0; i < strlist_count(ctx->deploy.docker.tags); i++) {
- char *item = strlist_item(ctx->deploy.docker.tags, i);
- tolower_s(item);
- }
- }
-
- if (ctx->deploy.docker.image_compression) {
- if (docker_validate_compression_program(ctx->deploy.docker.image_compression)) {
- SYSERROR("[deploy:docker].image_compression - invalid command / program is not installed: %s", ctx->deploy.docker.image_compression);
- return -1;
- }
- }
- return 0;
-}
-
-int delivery_format_str(struct Delivery *ctx, char **dest, const char *fmt) {
- size_t fmt_len = strlen(fmt);
-
- if (!*dest) {
- *dest = calloc(STASIS_NAME_MAX, sizeof(**dest));
- if (!*dest) {
- return -1;
- }
- }
-
- for (size_t i = 0; i < fmt_len; i++) {
- if (fmt[i] == '%' && strlen(&fmt[i])) {
- i++;
- switch (fmt[i]) {
- case 'n': // name
- strcat(*dest, ctx->meta.name);
- break;
- case 'c': // codename
- strcat(*dest, ctx->meta.codename);
- break;
- case 'm': // mission
- strcat(*dest, ctx->meta.mission);
- break;
- case 'r': // revision
- sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc);
- break;
- case 'R': // "final"-aware revision
- if (ctx->meta.final)
- strcat(*dest, "final");
- else
- sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc);
- break;
- case 'v': // version
- strcat(*dest, ctx->meta.version);
- break;
- case 'P': // python version
- strcat(*dest, ctx->meta.python);
- break;
- case 'p': // python version major/minor
- strcat(*dest, ctx->meta.python_compact);
- break;
- case 'a': // system architecture name
- strcat(*dest, ctx->system.arch);
- break;
- case 'o': // system platform (OS) name
- strcat(*dest, ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
- break;
- case 't': // unix epoch
- sprintf(*dest + strlen(*dest), "%ld", ctx->info.time_now);
- break;
- default: // unknown formatter, write as-is
- sprintf(*dest + strlen(*dest), "%c%c", fmt[i - 1], fmt[i]);
- break;
- }
- } else { // write non-format text
- sprintf(*dest + strlen(*dest), "%c", fmt[i]);
- }
- }
- return 0;
-}
-
-void delivery_debug_show(struct Delivery *ctx) {
- printf("\n====DEBUG====\n");
- printf("%-20s %-10s\n", "System configuration directory:", globals.sysconfdir);
- printf("%-20s %-10s\n", "Mission directory:", ctx->storage.mission_dir);
- printf("%-20s %-10s\n", "Testing enabled:", globals.enable_testing ? "Yes" : "No");
- printf("%-20s %-10s\n", "Docker image builds enabled:", globals.enable_docker ? "Yes" : "No");
- printf("%-20s %-10s\n", "Artifact uploading enabled:", globals.enable_artifactory ? "Yes" : "No");
-}
-
-void delivery_meta_show(struct Delivery *ctx) {
- if (globals.verbose) {
- delivery_debug_show(ctx);
- }
-
- printf("\n====DELIVERY====\n");
- printf("%-20s %-10s\n", "Target Python:", ctx->meta.python);
- printf("%-20s %-10s\n", "Name:", ctx->meta.name);
- printf("%-20s %-10s\n", "Mission:", ctx->meta.mission);
- if (ctx->meta.codename) {
- printf("%-20s %-10s\n", "Codename:", ctx->meta.codename);
- }
- if (ctx->meta.version) {
- printf("%-20s %-10s\n", "Version", ctx->meta.version);
- }
- if (!ctx->meta.final) {
- printf("%-20s %-10d\n", "RC Level:", ctx->meta.rc);
- }
- printf("%-20s %-10s\n", "Final Release:", ctx->meta.final ? "Yes" : "No");
- printf("%-20s %-10s\n", "Based On:", ctx->meta.based_on ? ctx->meta.based_on : "New");
-}
-
-void delivery_conda_show(struct Delivery *ctx) {
- printf("\n====CONDA====\n");
- printf("%-20s %-10s\n", "Prefix:", ctx->storage.conda_install_prefix);
-
- puts("Native Packages:");
- if (strlist_count(ctx->conda.conda_packages) || strlist_count(ctx->conda.conda_packages_defer)) {
- struct StrList *list_conda = strlist_init();
- if (strlist_count(ctx->conda.conda_packages)) {
- strlist_append_strlist(list_conda, ctx->conda.conda_packages);
- }
- if (strlist_count(ctx->conda.conda_packages_defer)) {
- strlist_append_strlist(list_conda, ctx->conda.conda_packages_defer);
- }
- strlist_sort(list_conda, STASIS_SORT_ALPHA);
-
- for (size_t i = 0; i < strlist_count(list_conda); i++) {
- char *token = strlist_item(list_conda, i);
- if (isempty(token) || isblank(*token) || startswith(token, "-")) {
- continue;
- }
- printf("%21s%s\n", "", token);
- }
- guard_strlist_free(&list_conda);
- } else {
- printf("%21s%s\n", "", "N/A");
- }
-
- puts("Python Packages:");
- if (strlist_count(ctx->conda.pip_packages) || strlist_count(ctx->conda.pip_packages_defer)) {
- struct StrList *list_python = strlist_init();
- if (strlist_count(ctx->conda.pip_packages)) {
- strlist_append_strlist(list_python, ctx->conda.pip_packages);
- }
- if (strlist_count(ctx->conda.pip_packages_defer)) {
- strlist_append_strlist(list_python, ctx->conda.pip_packages_defer);
- }
- strlist_sort(list_python, STASIS_SORT_ALPHA);
-
- for (size_t i = 0; i < strlist_count(list_python); i++) {
- char *token = strlist_item(list_python, i);
- if (isempty(token) || isblank(*token) || startswith(token, "-")) {
- continue;
- }
- printf("%21s%s\n", "", token);
- }
- guard_strlist_free(&list_python);
- } else {
- printf("%21s%s\n", "", "N/A");
- }
-}
-
-void delivery_tests_show(struct Delivery *ctx) {
- printf("\n====TESTS====\n");
- for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
- if (!ctx->tests[i].name) {
- continue;
- }
- printf("%-20s %-20s %s\n", ctx->tests[i].name,
- ctx->tests[i].version,
- ctx->tests[i].repository);
- }
-}
-
-void delivery_runtime_show(struct Delivery *ctx) {
- printf("\n====RUNTIME====\n");
- struct StrList *rt = NULL;
- rt = strlist_copy(ctx->runtime.environ);
- if (!rt) {
- // no data
- return;
- }
- strlist_sort(rt, STASIS_SORT_ALPHA);
- size_t total = strlist_count(rt);
- for (size_t i = 0; i < total; i++) {
- char *item = strlist_item(rt, i);
- if (!item) {
- // not supposed to occur
- msg(STASIS_MSG_WARN | STASIS_MSG_L1, "Encountered unexpected NULL at record %zu of %zu of runtime array.\n", i);
- return;
- }
- printf("%s\n", item);
- }
-}
-
// Build a conda package for every test record that declares a `build_recipe`.
// Each recipe is cloned into the build_recipes storage area, its meta.yaml is
// patched in place (version, source url, sha256, build number), and the
// package is built with "conda mambabuild".
// Returns 0 on success, -1 when a clone or build fails.
int delivery_build_recipes(struct Delivery *ctx) {
    for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
        char *recipe_dir = NULL;
        if (ctx->tests[i].build_recipe) { // build a conda recipe
            int recipe_type;
            int status;
            if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests[i].build_recipe, NULL, &recipe_dir)) {
                fprintf(stderr, "Encountered an issue while cloning recipe for: %s\n", ctx->tests[i].name);
                // NOTE(review): recipe_dir is not released on this path
                return -1;
            }
            recipe_type = recipe_get_type(recipe_dir);
            pushd(recipe_dir);
            {
                // Recipe layout differs by origin: astroconda keeps one
                // directory per package; conda-forge uses a "recipe/" subdir
                if (RECIPE_TYPE_ASTROCONDA == recipe_type) {
                    pushd(path_basename(ctx->tests[i].repository));
                } else if (RECIPE_TYPE_CONDA_FORGE == recipe_type) {
                    pushd("recipe");
                }

                char recipe_version[100];
                char recipe_buildno[100];
                char recipe_git_url[PATH_MAX];
                char recipe_git_rev[PATH_MAX];

                //sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}");
                //sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository);
                //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version);
                // TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^
                // Prefer the tag discovered from the repository; fall back to
                // the declared version from the test record
                sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? ctx->tests[i].repository_info_tag : ctx->tests[i].version);
                sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository);
                strcpy(recipe_git_rev, "");
                sprintf(recipe_buildno, " number: 0");

                unsigned flags = REPLACE_TRUNCATE_AFTER_MATCH;
                //file_replace_text("meta.yaml", "{% set version = ", recipe_version);
                if (ctx->meta.final) {
                    sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].version);
                    // TODO: replace sha256 of tagged archive
                    // TODO: leave the recipe unchanged otherwise. in theory this should produce the same conda package hash as conda forge.
                    // For now, remove the sha256 requirement
                    file_replace_text("meta.yaml", "sha256:", "\n", flags);
                } else {
                    file_replace_text("meta.yaml", "{% set version = ", recipe_version, flags);
                    file_replace_text("meta.yaml", " url:", recipe_git_url, flags);
                    //file_replace_text("meta.yaml", "sha256:", recipe_git_rev);
                    file_replace_text("meta.yaml", " sha256:", "\n", flags);
                    file_replace_text("meta.yaml", " number:", recipe_buildno, flags);
                }

                char command[PATH_MAX];
                if (RECIPE_TYPE_CONDA_FORGE == recipe_type) {
                    char arch[STASIS_NAME_MAX] = {0};
                    char platform[STASIS_NAME_MAX] = {0};

                    // conda-forge CI configs name Darwin "osx"
                    strcpy(platform, ctx->system.platform[DELIVERY_PLATFORM]);
                    if (strstr(platform, "Darwin")) {
                        memset(platform, 0, sizeof(platform));
                        strcpy(platform, "osx");
                    }
                    tolower_s(platform);
                    if (strstr(ctx->system.arch, "arm64")) {
                        strcpy(arch, "arm64");
                    } else if (strstr(ctx->system.arch, "64")) {
                        strcpy(arch, "64");
                    } else {
                        strcat(arch, "32"); // blind guess
                    }
                    tolower_s(arch);

                    // conda-forge recipes carry per-platform variant configs
                    sprintf(command, "mambabuild --python=%s -m ../.ci_support/%s_%s_.yaml .",
                            ctx->meta.python, platform, arch);
                } else {
                    sprintf(command, "mambabuild --python=%s .", ctx->meta.python);
                }
                status = conda_exec(command);
                if (status) {
                    // NOTE(review): returns with the pushd()'d directories
                    // still on the stack and recipe_dir unfreed
                    return -1;
                }

                if (RECIPE_TYPE_GENERIC != recipe_type) {
                    popd();
                }
                popd();
            }
        }
        if (recipe_dir) {
            guard_free(recipe_dir);
        }
    }
    return 0;
}
-
-static int filter_repo_tags(char *repo, struct StrList *patterns) {
- int result = 0;
-
- if (!pushd(repo)) {
- int list_status = 0;
- char *tags_raw = shell_output("git tag -l", &list_status);
- struct StrList *tags = strlist_init();
- strlist_append_tokenize(tags, tags_raw, LINE_SEP);
-
- for (size_t i = 0; tags && i < strlist_count(tags); i++) {
- char *tag = strlist_item(tags, i);
- for (size_t p = 0; p < strlist_count(patterns); p++) {
- char *pattern = strlist_item(patterns, p);
- int match = fnmatch(pattern, tag, 0);
- if (!match) {
- char cmd[PATH_MAX] = {0};
- sprintf(cmd, "git tag -d %s", tag);
- result += system(cmd);
- break;
- }
- }
- }
- guard_strlist_free(&tags);
- guard_free(tags_raw);
- popd();
- } else {
- result = -1;
- }
- return result;
-}
-
-struct StrList *delivery_build_wheels(struct Delivery *ctx) {
- struct StrList *result = NULL;
- struct Process proc;
- memset(&proc, 0, sizeof(proc));
-
- result = strlist_init();
- if (!result) {
- perror("unable to allocate memory for string list");
- return NULL;
- }
-
- for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
- if (!ctx->tests[i].build_recipe && ctx->tests[i].repository) { // build from source
- char srcdir[PATH_MAX];
- char wheeldir[PATH_MAX];
- memset(srcdir, 0, sizeof(srcdir));
- memset(wheeldir, 0, sizeof(wheeldir));
-
- sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests[i].name);
- git_clone(&proc, ctx->tests[i].repository, srcdir, ctx->tests[i].version);
-
- if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) {
- filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags);
- }
-
- pushd(srcdir);
- {
- char dname[NAME_MAX];
- char outdir[PATH_MAX];
- char cmd[PATH_MAX * 2];
- memset(dname, 0, sizeof(dname));
- memset(outdir, 0, sizeof(outdir));
- memset(cmd, 0, sizeof(outdir));
-
- strcpy(dname, ctx->tests[i].name);
- tolower_s(dname);
- sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname);
- if (mkdirs(outdir, 0755)) {
- fprintf(stderr, "failed to create output directory: %s\n", outdir);
- }
-
- sprintf(cmd, "-m build -w -o %s", outdir);
- if (python_exec(cmd)) {
- fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name, ctx->tests[i].version);
- strlist_free(&result);
- return NULL;
- }
- popd();
- }
- }
- }
- return result;
-}
-
-static const struct Test *requirement_from_test(struct Delivery *ctx, const char *name) {
- struct Test *result;
-
- result = NULL;
- for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
- if (ctx->tests[i].name && strstr(name, ctx->tests[i].name)) {
- result = &ctx->tests[i];
- break;
- }
- }
- return result;
-}
-
-int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, char *env_name, int type, struct StrList **manifest) {
- char cmd[PATH_MAX];
- char pkgs[STASIS_BUFSIZ];
- char *env_current = getenv("CONDA_DEFAULT_ENV");
-
- if (env_current) {
- // The requested environment is not the current environment
- if (strcmp(env_current, env_name) != 0) {
- // Activate the requested environment
- printf("Activating: %s\n", env_name);
- conda_activate(conda_install_dir, env_name);
- runtime_replace(&ctx->runtime.environ, __environ);
- }
- }
-
- memset(cmd, 0, sizeof(cmd));
- memset(pkgs, 0, sizeof(pkgs));
- strcat(cmd, "install");
-
- typedef int (*Runner)(const char *);
- Runner runner = NULL;
- if (INSTALL_PKG_CONDA & type) {
- runner = conda_exec;
- } else if (INSTALL_PKG_PIP & type) {
- runner = pip_exec;
- }
-
- if (INSTALL_PKG_CONDA_DEFERRED & type) {
- strcat(cmd, " --use-local");
- } else if (INSTALL_PKG_PIP_DEFERRED & type) {
- // Don't change the baseline package set unless we're working with a
- // new build. Release candidates will need to keep packages as stable
- // as possible between releases.
- if (!ctx->meta.based_on) {
- strcat(cmd, " --upgrade");
- }
- sprintf(cmd + strlen(cmd), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir);
- }
-
- for (size_t x = 0; manifest[x] != NULL; x++) {
- char *name = NULL;
- for (size_t p = 0; p < strlist_count(manifest[x]); p++) {
- name = strlist_item(manifest[x], p);
- strip(name);
- if (!strlen(name)) {
- continue;
- }
- if (INSTALL_PKG_PIP_DEFERRED & type) {
- struct Test *info = (struct Test *) requirement_from_test(ctx, name);
- if (info) {
- if (!strcmp(info->version, "HEAD")) {
- struct StrList *tag_data = strlist_init();
- if (!tag_data) {
- SYSERROR("%s", "Unable to allocate memory for tag data\n");
- return -1;
- }
- strlist_append_tokenize(tag_data, info->repository_info_tag, "-");
-
- struct Wheel *whl = NULL;
- char *post_commit = NULL;
- char *hash = NULL;
- if (strlist_count(tag_data) > 1) {
- post_commit = strlist_item(tag_data, 1);
- hash = strlist_item(tag_data, 2);
- }
-
- // We can't match on version here (index 0). The wheel's version is not guaranteed to be
- // equal to the tag; setuptools_scm auto-increments the value, the user can change it manually,
- // etc.
- whl = get_wheel_file(ctx->storage.wheel_artifact_dir, info->name,
- (char *[]) {ctx->meta.python_compact, ctx->system.arch,
- "none", "any",
- post_commit, hash,
- NULL}, WHEEL_MATCH_ANY);
-
- guard_strlist_free(&tag_data);
- info->version = whl->version;
- sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, whl->version);
- } else {
- sprintf(cmd + strlen(cmd), " '%s==%s'", info->name, info->version);
- }
- } else {
- fprintf(stderr, "Deferred package '%s' is not present in the tested package list!\n", name);
- return -1;
- }
- } else {
- if (startswith(name, "--") || startswith(name, "-")) {
- sprintf(cmd + strlen(cmd), " %s", name);
- } else {
- sprintf(cmd + strlen(cmd), " '%s'", name);
- }
- }
- }
- int status = runner(cmd);
- if (status) {
- return status;
- }
- }
- return 0;
-}
-
-void delivery_get_installer_url(struct Delivery *ctx, char *result) {
- if (ctx->conda.installer_version) {
- // Use version specified by configuration file
- sprintf(result, "%s/%s-%s-%s-%s.sh", ctx->conda.installer_baseurl,
- ctx->conda.installer_name,
- ctx->conda.installer_version,
- ctx->conda.installer_platform,
- ctx->conda.installer_arch);
- } else {
- // Use latest installer
- sprintf(result, "%s/%s-%s-%s.sh", ctx->conda.installer_baseurl,
- ctx->conda.installer_name,
- ctx->conda.installer_platform,
- ctx->conda.installer_arch);
- }
-
-}
-
-int delivery_get_installer(struct Delivery *ctx, char *installer_url) {
- char script_path[PATH_MAX];
- char *installer = path_basename(installer_url);
-
- memset(script_path, 0, sizeof(script_path));
- sprintf(script_path, "%s/%s", ctx->storage.tmpdir, installer);
- if (access(script_path, F_OK)) {
- // Script doesn't exist
- long fetch_status = download(installer_url, script_path, NULL);
- if (HTTP_ERROR(fetch_status) || fetch_status < 0) {
- // download failed
- return -1;
- }
- } else {
- msg(STASIS_MSG_RESTRICT | STASIS_MSG_L3, "Skipped, installer already exists\n", script_path);
- }
-
- ctx->conda.installer_path = strdup(script_path);
- if (!ctx->conda.installer_path) {
- SYSERROR("Unable to duplicate script_path: '%s'", script_path);
- return -1;
- }
-
- return 0;
-}
-
-int delivery_copy_conda_artifacts(struct Delivery *ctx) {
- char cmd[STASIS_BUFSIZ];
- char conda_build_dir[PATH_MAX];
- char subdir[PATH_MAX];
- memset(cmd, 0, sizeof(cmd));
- memset(conda_build_dir, 0, sizeof(conda_build_dir));
- memset(subdir, 0, sizeof(subdir));
-
- sprintf(conda_build_dir, "%s/%s", ctx->storage.conda_install_prefix, "conda-bld");
- // One must run conda build at least once to create the "conda-bld" directory.
- // When this directory is missing there can be no build artifacts.
- if (access(conda_build_dir, F_OK) < 0) {
- msg(STASIS_MSG_RESTRICT | STASIS_MSG_WARN | STASIS_MSG_L3,
- "Skipped: 'conda build' has never been executed.\n");
- return 0;
- }
-
- snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/%s %s",
- conda_build_dir,
- ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR],
- ctx->storage.conda_artifact_dir);
-
- return system(cmd);
-}
-
-int delivery_copy_wheel_artifacts(struct Delivery *ctx) {
- char cmd[PATH_MAX];
- memset(cmd, 0, sizeof(cmd));
- snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/*/dist/*.whl %s",
- ctx->storage.build_sources_dir,
- ctx->storage.wheel_artifact_dir);
- return system(cmd);
-}
-
-int delivery_index_wheel_artifacts(struct Delivery *ctx) {
- struct dirent *rec;
- DIR *dp;
- FILE *top_fp;
-
- dp = opendir(ctx->storage.wheel_artifact_dir);
- if (!dp) {
- return -1;
- }
-
- // Generate a "dumb" local pypi index that is compatible with:
- // pip install --extra-index-url
- char top_index[PATH_MAX];
- memset(top_index, 0, sizeof(top_index));
- sprintf(top_index, "%s/index.html", ctx->storage.wheel_artifact_dir);
- top_fp = fopen(top_index, "w+");
- if (!top_fp) {
- return -2;
- }
-
- while ((rec = readdir(dp)) != NULL) {
- // skip directories
- if (DT_REG == rec->d_type || !strcmp(rec->d_name, "..") || !strcmp(rec->d_name, ".")) {
- continue;
- }
-
- FILE *bottom_fp;
- char bottom_index[PATH_MAX * 2];
- memset(bottom_index, 0, sizeof(bottom_index));
- sprintf(bottom_index, "%s/%s/index.html", ctx->storage.wheel_artifact_dir, rec->d_name);
- bottom_fp = fopen(bottom_index, "w+");
- if (!bottom_fp) {
- return -3;
- }
-
- if (globals.verbose) {
- printf("+ %s\n", rec->d_name);
- }
- // Add record to top level index
- fprintf(top_fp, "<a href=\"%s/\">%s</a><br/>\n", rec->d_name, rec->d_name);
-
- char dpath[PATH_MAX * 2];
- memset(dpath, 0, sizeof(dpath));
- sprintf(dpath, "%s/%s", ctx->storage.wheel_artifact_dir, rec->d_name);
- struct StrList *packages = listdir(dpath);
- if (!packages) {
- fclose(top_fp);
- fclose(bottom_fp);
- return -4;
- }
-
- for (size_t i = 0; i < strlist_count(packages); i++) {
- char *package = strlist_item(packages, i);
- if (!endswith(package, ".whl")) {
- continue;
- }
- if (globals.verbose) {
- printf("`- %s\n", package);
- }
- // Write record to bottom level index
- fprintf(bottom_fp, "<a href=\"%s\">%s</a><br/>\n", package, package);
- }
- fclose(bottom_fp);
-
- guard_strlist_free(&packages);
- }
- closedir(dp);
- fclose(top_fp);
- return 0;
-}
-
-void delivery_install_conda(char *install_script, char *conda_install_dir) {
- struct Process proc;
- memset(&proc, 0, sizeof(proc));
-
- if (globals.conda_fresh_start) {
- if (!access(conda_install_dir, F_OK)) {
- // directory exists so remove it
- if (rmtree(conda_install_dir)) {
- perror("unable to remove previous installation");
- exit(1);
- }
-
- // Proceed with the installation
- // -b = batch mode (non-interactive)
- char cmd[PATH_MAX] = {0};
- snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s",
- find_program("bash"),
- install_script,
- conda_install_dir);
- if (shell_safe(&proc, cmd)) {
- fprintf(stderr, "conda installation failed\n");
- exit(1);
- }
- } else {
- // Proceed with the installation
- // -b = batch mode (non-interactive)
- char cmd[PATH_MAX] = {0};
- snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s",
- find_program("bash"),
- install_script,
- conda_install_dir);
- if (shell_safe(&proc, cmd)) {
- fprintf(stderr, "conda installation failed\n");
- exit(1);
- }
- }
- } else {
- msg(STASIS_MSG_L3, "Conda removal disabled by configuration\n");
- }
-}
-
-void delivery_conda_enable(struct Delivery *ctx, char *conda_install_dir) {
- if (conda_activate(conda_install_dir, "base")) {
- fprintf(stderr, "conda activation failed\n");
- exit(1);
- }
-
- // Setting the CONDARC environment variable appears to be the only consistent
- // way to make sure the file is used. Not setting this variable leads to strange
- // behavior, especially if a conda environment is already active when STASIS is loaded.
- char rcpath[PATH_MAX];
- sprintf(rcpath, "%s/%s", conda_install_dir, ".condarc");
- setenv("CONDARC", rcpath, 1);
- if (runtime_replace(&ctx->runtime.environ, __environ)) {
- perror("unable to replace runtime environment after activating conda");
- exit(1);
- }
-
- if (conda_setup_headless()) {
- // no COE check. this call must succeed.
- exit(1);
- }
-}
-
// Split the configured (conda|pip) package list into two sets: packages that
// are also under test (deferred — built locally from source) and packages
// that can be installed as-is (filtered — taken from the public
// index/channel). On completion the context's package list is replaced by
// the filtered set and the deferred set is appended to the matching
// *_packages_defer list. `type` selects DEFER_CONDA or DEFER_PIP.
void delivery_defer_packages(struct Delivery *ctx, int type) {
    struct StrList *dataptr = NULL;
    struct StrList *deferred = NULL;
    char *name = NULL;
    char cmd[PATH_MAX];

    memset(cmd, 0, sizeof(cmd));

    // Select the package list / deferred list pair for the requested mode
    char mode[10];
    if (DEFER_CONDA == type) {
        dataptr = ctx->conda.conda_packages;
        deferred = ctx->conda.conda_packages_defer;
        strcpy(mode, "conda");
    } else if (DEFER_PIP == type) {
        dataptr = ctx->conda.pip_packages;
        deferred = ctx->conda.pip_packages_defer;
        strcpy(mode, "pip");
    }
    msg(STASIS_MSG_L2, "Filtering %s packages by test definition...\n", mode);

    struct StrList *filtered = NULL;
    filtered = strlist_init();
    for (size_t i = 0; i < strlist_count(dataptr); i++) {
        int ignore_pkg = 0;

        name = strlist_item(dataptr, i);
        if (!strlen(name) || isblank(*name) || isspace(*name)) {
            // no data
            continue;
        }
        msg(STASIS_MSG_L3, "package '%s': ", name);

        // Compile a list of packages that are *also* to be tested.
        char *version;
        // Locate a version-spec operator (e.g. "pkg==1.0", "pkg>=2")
        char *spec_begin = strpbrk(name, "@~=<>!");
        char *spec_end = spec_begin;
        if (spec_end) {
            // A version is present in the package name. Jump past operator(s).
            while (*spec_end != '\0' && !isalnum(*spec_end)) {
                spec_end++;
            }
        }

        // When spec is present in name, set tests->version to the version detected in the name
        for (size_t x = 0; x < sizeof(ctx->tests) / sizeof(ctx->tests[0]) && ctx->tests[x].name != NULL; x++) {
            struct Test *test = &ctx->tests[x];
            version = NULL;

            // Strip any version spec to compare bare package names
            char nametmp[1024] = {0};
            if (spec_end != NULL && spec_begin != NULL) {
                strncpy(nametmp, name, spec_begin - name);
            } else {
                strcpy(nametmp, name);
            }
            // Is the [test:NAME] in the package name?
            if (!strcmp(nametmp, test->name)) {
                // Override test->version when a version is provided by the (pip|conda)_package list item
                guard_free(test->version);
                if (spec_begin && spec_end) {
                    // Truncates `name` in place at the spec operator
                    *spec_begin = '\0';
                    test->version = strdup(spec_end);
                } else {
                    // There are too many possible default branches nowadays: master, main, develop, xyz, etc.
                    // HEAD is a safe bet.
                    test->version = strdup("HEAD");
                }
                version = test->version;

                // Is the list item a git+schema:// URL?
                if (strstr(name, "git+") && strstr(name, "://")) {
                    char *xrepo = strstr(name, "+");
                    if (xrepo) {
                        xrepo++;
                        guard_free(test->repository);
                        test->repository = strdup(xrepo);
                        xrepo = NULL;
                    }
                    // Extract the name of the package
                    char *xbasename = path_basename(name);
                    if (xbasename) {
                        // Replace the git+schema:// URL with the package name
                        strlist_set(&dataptr, i, xbasename);
                        name = strlist_item(dataptr, i);
                    }
                }

                // pip packages already published on the index are used
                // as-is; everything else must be built from source
                if (DEFER_PIP == type && pip_index_provides(PYPI_INDEX_DEFAULT, name, version)) {
                    fprintf(stderr, "(%s present on index %s): ", version, PYPI_INDEX_DEFAULT);
                    ignore_pkg = 0;
                } else {
                    ignore_pkg = 1;
                }
                break;
            }
        }

        if (ignore_pkg) {
            char build_at[PATH_MAX];
            if (DEFER_CONDA == type) {
                // NOTE(review): `version` is only assigned when a test record
                // matched above; ignore_pkg implies a match, so it is set here
                sprintf(build_at, "%s=%s", name, version);
                name = build_at;
            }

            printf("BUILD FOR HOST\n");
            strlist_append(&deferred, name);
        } else {
            printf("USE EXISTING\n");
            strlist_append(&filtered, name);
        }
    }

    if (!strlist_count(deferred)) {
        msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No %s packages were filtered by test definitions\n", mode);
    } else {
        // Replace the context's package list with the filtered set
        if (DEFER_CONDA == type) {
            strlist_free(&ctx->conda.conda_packages);
            ctx->conda.conda_packages = strlist_copy(filtered);
        } else if (DEFER_PIP == type) {
            strlist_free(&ctx->conda.pip_packages);
            ctx->conda.pip_packages = strlist_copy(filtered);
        }
    }
    if (filtered) {
        strlist_free(&filtered);
    }
}
-
// Template for the header block prepended to generated release (spec) files.
// Filled by delivery_get_release_header() with, in order: release name,
// release format string, creation timestamp, conda tool version, and
// conda-build tool version.
const char *release_header = "# delivery_name: %s\n"
                     "# delivery_fmt: %s\n"
                     "# creation_time: %s\n"
                     "# conda_ident: %s\n"
                     "# conda_build_ident: %s\n";
-
-char *delivery_get_release_header(struct Delivery *ctx) {
- char output[STASIS_BUFSIZ];
- char stamp[100];
- strftime(stamp, sizeof(stamp) - 1, "%c", ctx->info.time_info);
- sprintf(output, release_header,
- ctx->info.release_name,
- ctx->rules.release_fmt,
- stamp,
- ctx->conda.tool_version,
- ctx->conda.tool_build_version);
- return strdup(output);
-}
-
-int delivery_dump_metadata(struct Delivery *ctx) {
- FILE *fp;
- char filename[PATH_MAX];
- sprintf(filename, "%s/meta-%s.stasis", ctx->storage.meta_dir, ctx->info.release_name);
- fp = fopen(filename, "w+");
- if (!fp) {
- return -1;
- }
- if (globals.verbose) {
- printf("%s\n", filename);
- }
- fprintf(fp, "name %s\n", ctx->meta.name);
- fprintf(fp, "version %s\n", ctx->meta.version);
- fprintf(fp, "rc %d\n", ctx->meta.rc);
- fprintf(fp, "python %s\n", ctx->meta.python);
- fprintf(fp, "python_compact %s\n", ctx->meta.python_compact);
- fprintf(fp, "mission %s\n", ctx->meta.mission);
- fprintf(fp, "codename %s\n", ctx->meta.codename ? ctx->meta.codename : "");
- fprintf(fp, "platform %s %s %s %s\n",
- ctx->system.platform[DELIVERY_PLATFORM],
- ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR],
- ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER],
- ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
- fprintf(fp, "arch %s\n", ctx->system.arch);
- fprintf(fp, "time %s\n", ctx->info.time_str_epoch);
- fprintf(fp, "release_fmt %s\n", ctx->rules.release_fmt);
- fprintf(fp, "release_name %s\n", ctx->info.release_name);
- fprintf(fp, "build_name_fmt %s\n", ctx->rules.build_name_fmt);
- fprintf(fp, "build_name %s\n", ctx->info.build_name);
- fprintf(fp, "build_number_fmt %s\n", ctx->rules.build_number_fmt);
- fprintf(fp, "build_number %s\n", ctx->info.build_number);
- fprintf(fp, "conda_installer_baseurl %s\n", ctx->conda.installer_baseurl);
- fprintf(fp, "conda_installer_name %s\n", ctx->conda.installer_name);
- fprintf(fp, "conda_installer_version %s\n", ctx->conda.installer_version);
- fprintf(fp, "conda_installer_platform %s\n", ctx->conda.installer_platform);
- fprintf(fp, "conda_installer_arch %s\n", ctx->conda.installer_arch);
-
- fclose(fp);
- return 0;
-}
-
-void delivery_rewrite_spec(struct Delivery *ctx, char *filename, unsigned stage) {
- char output[PATH_MAX];
- char *header = NULL;
- char *tempfile = NULL;
- FILE *tp = NULL;
-
- if (stage == DELIVERY_REWRITE_SPEC_STAGE_1) {
- header = delivery_get_release_header(ctx);
- if (!header) {
- msg(STASIS_MSG_ERROR, "failed to generate release header string\n", filename);
- exit(1);
- }
- tempfile = xmkstemp(&tp, "w+");
- if (!tempfile || !tp) {
- msg(STASIS_MSG_ERROR, "%s: unable to create temporary file\n", strerror(errno));
- exit(1);
- }
- fprintf(tp, "%s", header);
-
- // Read the original file
- char **contents = file_readlines(filename, 0, 0, NULL);
- if (!contents) {
- msg(STASIS_MSG_ERROR, "%s: unable to read %s", filename);
- exit(1);
- }
-
- // Write temporary data
- for (size_t i = 0; contents[i] != NULL; i++) {
- if (startswith(contents[i], "channels:")) {
- // Allow for additional conda channel injection
- if (ctx->conda.conda_packages_defer && strlist_count(ctx->conda.conda_packages_defer)) {
- fprintf(tp, "%s - @CONDA_CHANNEL@\n", contents[i]);
- continue;
- }
- } else if (strstr(contents[i], "- pip:")) {
- if (ctx->conda.pip_packages_defer && strlist_count(ctx->conda.pip_packages_defer)) {
- // Allow for additional pip argument injection
- fprintf(tp, "%s - @PIP_ARGUMENTS@\n", contents[i]);
- continue;
- }
- } else if (startswith(contents[i], "prefix:")) {
- // Remove the prefix key
- if (strstr(contents[i], "/") || strstr(contents[i], "\\")) {
- // path is on the same line as the key
- continue;
- } else {
- // path is on the next line?
- if (contents[i + 1] && (strstr(contents[i + 1], "/") || strstr(contents[i + 1], "\\"))) {
- i++;
- }
- continue;
- }
- }
- fprintf(tp, "%s", contents[i]);
- }
- GENERIC_ARRAY_FREE(contents);
- guard_free(header);
- fflush(tp);
- fclose(tp);
-
- // Replace the original file with our temporary data
- if (copy2(tempfile, filename, CT_PERM) < 0) {
- fprintf(stderr, "%s: could not rename '%s' to '%s'\n", strerror(errno), tempfile, filename);
- exit(1);
- }
- remove(tempfile);
- guard_free(tempfile);
- } else if (globals.enable_rewrite_spec_stage_2 && stage == DELIVERY_REWRITE_SPEC_STAGE_2) {
- // Replace "local" channel with the staging URL
- if (ctx->storage.conda_staging_url) {
- file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_staging_url, 0);
- } else if (globals.jfrog.repo) {
- sprintf(output, "%s/%s/%s/%s/packages/conda", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name);
- file_replace_text(filename, "@CONDA_CHANNEL@", output, 0);
- } else {
- msg(STASIS_MSG_WARN, "conda_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.conda_artifact_dir);
- file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_artifact_dir, 0);
- }
-
- if (ctx->storage.wheel_staging_url) {
- file_replace_text(filename, "@PIP_ARGUMENTS@", ctx->storage.wheel_staging_url, 0);
- } else if (globals.enable_artifactory && globals.jfrog.url && globals.jfrog.repo) {
- sprintf(output, "--extra-index-url %s/%s/%s/%s/packages/wheels", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name);
- file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0);
- } else {
- msg(STASIS_MSG_WARN, "wheel_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.wheel_artifact_dir);
- sprintf(output, "--extra-index-url file://%s", ctx->storage.wheel_artifact_dir);
- file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0);
- }
- }
-}
-
-int delivery_index_conda_artifacts(struct Delivery *ctx) {
- return conda_index(ctx->storage.conda_artifact_dir);
-}
-
-void delivery_tests_run(struct Delivery *ctx) {
- struct Process proc;
- memset(&proc, 0, sizeof(proc));
-
- if (!globals.workaround.conda_reactivate) {
- globals.workaround.conda_reactivate = calloc(PATH_MAX, sizeof(*globals.workaround.conda_reactivate));
- } else {
- memset(globals.workaround.conda_reactivate, 0, PATH_MAX);
- }
- snprintf(globals.workaround.conda_reactivate, PATH_MAX - 1, "\nmamba activate ${CONDA_DEFAULT_ENV}\n");
-
- if (!ctx->tests[0].name) {
- msg(STASIS_MSG_WARN | STASIS_MSG_L2, "no tests are defined!\n");
- } else {
- for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
- if (!ctx->tests[i].name && !ctx->tests[i].repository && !ctx->tests[i].script) {
- // skip unused test records
- continue;
- }
- msg(STASIS_MSG_L2, "Executing tests for %s %s\n", ctx->tests[i].name, ctx->tests[i].version);
- if (!ctx->tests[i].script || !strlen(ctx->tests[i].script)) {
- msg(STASIS_MSG_WARN | STASIS_MSG_L3, "Nothing to do. To fix, declare a 'script' in section: [test:%s]\n",
- ctx->tests[i].name);
- continue;
- }
-
- char destdir[PATH_MAX];
- sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(ctx->tests[i].repository));
-
- if (!access(destdir, F_OK)) {
- msg(STASIS_MSG_L3, "Purging repository %s\n", destdir);
- if (rmtree(destdir)) {
- COE_CHECK_ABORT(1, "Unable to remove repository\n");
- }
- }
- msg(STASIS_MSG_L3, "Cloning repository %s\n", ctx->tests[i].repository);
- if (!git_clone(&proc, ctx->tests[i].repository, destdir, ctx->tests[i].version)) {
- ctx->tests[i].repository_info_tag = strdup(git_describe(destdir));
- ctx->tests[i].repository_info_ref = strdup(git_rev_parse(destdir, "HEAD"));
- } else {
- COE_CHECK_ABORT(1, "Unable to clone repository\n");
- }
-
- if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) {
- filter_repo_tags(destdir, ctx->tests[i].repository_remove_tags);
- }
-
- if (pushd(destdir)) {
- COE_CHECK_ABORT(1, "Unable to enter repository directory\n");
- } else {
-#if 1
- int status;
- char *cmd = calloc(strlen(ctx->tests[i].script) + STASIS_BUFSIZ, sizeof(*cmd));
-
- msg(STASIS_MSG_L3, "Testing %s\n", ctx->tests[i].name);
- memset(&proc, 0, sizeof(proc));
-
- // Apply workaround for tox positional arguments
- char *toxconf = NULL;
- if (!access("tox.ini", F_OK)) {
- if (!fix_tox_conf("tox.ini", &toxconf)) {
- msg(STASIS_MSG_L3, "Fixing tox positional arguments\n");
- if (!globals.workaround.tox_posargs) {
- globals.workaround.tox_posargs = calloc(PATH_MAX, sizeof(*globals.workaround.tox_posargs));
- } else {
- memset(globals.workaround.tox_posargs, 0, PATH_MAX);
- }
- snprintf(globals.workaround.tox_posargs, PATH_MAX - 1, "-c %s --root .", toxconf);
- }
- }
-
- // enable trace mode before executing each test script
- strcpy(cmd, ctx->tests[i].script);
- char *cmd_rendered = tpl_render(cmd);
- if (cmd_rendered) {
- if (strcmp(cmd_rendered, cmd) != 0) {
- strcpy(cmd, cmd_rendered);
- cmd[strlen(cmd_rendered) ? strlen(cmd_rendered) - 1 : 0] = 0;
- }
- guard_free(cmd_rendered);
- } else {
- SYSERROR("An error occurred while rendering the following:\n%s", cmd);
- exit(1);
- }
-
- puts(cmd);
- char runner_cmd[0xFFFF] = {0};
- sprintf(runner_cmd, "set +x\nsource %s/etc/profile.d/conda.sh\nsource %s/etc/profile.d/mamba.sh\n\nmamba activate ${CONDA_DEFAULT_ENV}\n\n%s\n",
- ctx->storage.conda_install_prefix,
- ctx->storage.conda_install_prefix,
- cmd);
- status = shell(&proc, runner_cmd);
- if (status) {
- msg(STASIS_MSG_ERROR, "Script failure: %s\n%s\n\nExit code: %d\n", ctx->tests[i].name, ctx->tests[i].script, status);
- popd();
- guard_free(cmd);
- if (!globals.continue_on_error) {
- tpl_free();
- delivery_free(ctx);
- globals_free();
- }
- COE_CHECK_ABORT(1, "Test failure");
- }
- guard_free(cmd);
-
- if (toxconf) {
- remove(toxconf);
- guard_free(toxconf);
- }
- popd();
-#else
- msg(STASIS_MSG_WARNING | STASIS_MSG_L3, "TESTING DISABLED BY CODE!\n");
-#endif
- }
- }
- }
-}
-
-void delivery_gather_tool_versions(struct Delivery *ctx) {
- int status = 0;
-
- // Extract version from tool output
- ctx->conda.tool_version = shell_output("conda --version", &status);
- if (ctx->conda.tool_version)
- strip(ctx->conda.tool_version);
-
- ctx->conda.tool_build_version = shell_output("conda build --version", &status);
- if (ctx->conda.tool_build_version)
- strip(ctx->conda.tool_version);
-}
-
-int delivery_init_artifactory(struct Delivery *ctx) {
- int status = 0;
- char dest[PATH_MAX] = {0};
- char filepath[PATH_MAX] = {0};
- snprintf(dest, sizeof(dest) - 1, "%s/bin", ctx->storage.tools_dir);
- snprintf(filepath, sizeof(dest) - 1, "%s/bin/jf", ctx->storage.tools_dir);
-
- if (!access(filepath, F_OK)) {
- // already have it
- msg(STASIS_MSG_L3, "Skipped download, %s already exists\n", filepath);
- goto delivery_init_artifactory_envsetup;
- }
-
- char *platform = ctx->system.platform[DELIVERY_PLATFORM];
- msg(STASIS_MSG_L3, "Downloading %s for %s %s\n", globals.jfrog.remote_filename, platform, ctx->system.arch);
- if ((status = artifactory_download_cli(dest,
- globals.jfrog.jfrog_artifactory_base_url,
- globals.jfrog.jfrog_artifactory_product,
- globals.jfrog.cli_major_ver,
- globals.jfrog.version,
- platform,
- ctx->system.arch,
- globals.jfrog.remote_filename))) {
- remove(filepath);
- }
-
- delivery_init_artifactory_envsetup:
- // CI (ridiculously generic, why?) disables interactive prompts and progress bar output
- setenv("CI", "1", 1);
-
- // JFROG_CLI_HOME_DIR is where .jfrog is stored
- char path[PATH_MAX] = {0};
- snprintf(path, sizeof(path) - 1, "%s/.jfrog", ctx->storage.build_dir);
- setenv("JFROG_CLI_HOME_DIR", path, 1);
-
- // JFROG_CLI_TEMP_DIR is where the obvious is stored
- setenv("JFROG_CLI_TEMP_DIR", ctx->storage.tmpdir, 1);
- return status;
-}
-
-int delivery_artifact_upload(struct Delivery *ctx) {
- int status = 0;
-
- if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) {
- fprintf(stderr, "Failed to initialize Artifactory authentication context\n");
- return -1;
- }
-
- for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(*ctx->deploy.jfrog); i++) {
- if (!ctx->deploy.jfrog[i].files || !ctx->deploy.jfrog[i].dest) {
- break;
- }
- jfrt_upload_init(&ctx->deploy.jfrog[i].upload_ctx);
-
- if (!globals.jfrog.repo) {
- msg(STASIS_MSG_WARN, "Artifactory repository path is not configured!\n");
- fprintf(stderr, "set STASIS_JF_REPO environment variable...\nOr append to configuration file:\n\n");
- fprintf(stderr, "[deploy:artifactory]\nrepo = example/generic/repo/path\n\n");
- status++;
- break;
- } else if (!ctx->deploy.jfrog[i].repo) {
- ctx->deploy.jfrog[i].repo = strdup(globals.jfrog.repo);
- }
-
- if (!ctx->deploy.jfrog[i].repo || isempty(ctx->deploy.jfrog[i].repo) || !strlen(ctx->deploy.jfrog[i].repo)) {
- // Unlikely to trigger if the config parser is working correctly
- msg(STASIS_MSG_ERROR, "Artifactory repository path is empty. Cannot continue.\n");
- status++;
- break;
- }
-
- ctx->deploy.jfrog[i].upload_ctx.workaround_parent_only = true;
- ctx->deploy.jfrog[i].upload_ctx.build_name = ctx->info.build_name;
- ctx->deploy.jfrog[i].upload_ctx.build_number = ctx->info.build_number;
-
- char files[PATH_MAX];
- char dest[PATH_MAX]; // repo + remote dir
-
- if (jfrog_cli_rt_ping(&ctx->deploy.jfrog_auth)) {
- msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to contact artifactory server: %s\n", ctx->deploy.jfrog_auth.url);
- return -1;
- }
-
- if (strlist_count(ctx->deploy.jfrog[i].files)) {
- for (size_t f = 0; f < strlist_count(ctx->deploy.jfrog[i].files); f++) {
- memset(dest, 0, sizeof(dest));
- memset(files, 0, sizeof(files));
- snprintf(dest, sizeof(dest) - 1, "%s/%s", ctx->deploy.jfrog[i].repo, ctx->deploy.jfrog[i].dest);
- snprintf(files, sizeof(files) - 1, "%s", strlist_item(ctx->deploy.jfrog[i].files, f));
- status += jfrog_cli_rt_upload(&ctx->deploy.jfrog_auth, &ctx->deploy.jfrog[i].upload_ctx, files, dest);
- }
- }
- }
-
- if (globals.enable_artifactory_build_info) {
- if (!status && ctx->deploy.jfrog[0].files && ctx->deploy.jfrog[0].dest) {
- jfrog_cli_rt_build_collect_env(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name,
- ctx->deploy.jfrog[0].upload_ctx.build_number);
- jfrog_cli_rt_build_publish(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name,
- ctx->deploy.jfrog[0].upload_ctx.build_number);
- }
- } else {
- msg(STASIS_MSG_WARN | STASIS_MSG_L2, "Artifactory build info upload is disabled by CLI argument\n");
- }
-
- return status;
-}
-
-int delivery_mission_render_files(struct Delivery *ctx) {
- if (!ctx->storage.mission_dir) {
- fprintf(stderr, "Mission directory is not configured. Context not initialized?\n");
- return -1;
- }
- struct Data {
- char *src;
- char *dest;
- } data;
- struct INIFILE *cfg = ctx->_stasis_ini_fp.mission;
- union INIVal val;
-
- memset(&data, 0, sizeof(data));
- data.src = calloc(PATH_MAX, sizeof(*data.src));
- if (!data.src) {
- perror("data.src");
- return -1;
- }
-
- for (size_t i = 0; i < cfg->section_count; i++) {
- char *section_name = cfg->section[i]->key;
- if (!startswith(section_name, "template:")) {
- continue;
- }
- val.as_char_p = strchr(section_name, ':') + 1;
- if (val.as_char_p && isempty(val.as_char_p)) {
- guard_free(data.src);
- return 1;
- }
- sprintf(data.src, "%s/%s/%s", ctx->storage.mission_dir, ctx->meta.mission, val.as_char_p);
- msg(STASIS_MSG_L2, "%s\n", data.src);
-
- int err = 0;
- data.dest = ini_getval_str(cfg, section_name, "destination", INI_READ_RENDER, &err);
-
- char *contents;
- struct stat st;
- if (lstat(data.src, &st)) {
- perror(data.src);
- guard_free(data.dest);
- continue;
- }
-
- contents = calloc(st.st_size + 1, sizeof(*contents));
- if (!contents) {
- perror("template file contents");
- guard_free(data.dest);
- continue;
- }
-
- FILE *fp;
- fp = fopen(data.src, "rb");
- if (!fp) {
- perror(data.src);
- guard_free(contents);
- guard_free(data.dest);
- continue;
- }
-
- if (fread(contents, st.st_size, sizeof(*contents), fp) < 1) {
- perror("while reading template file");
- guard_free(contents);
- guard_free(data.dest);
- fclose(fp);
- continue;
- }
- fclose(fp);
-
- msg(STASIS_MSG_L3, "Writing %s\n", data.dest);
- if (tpl_render_to_file(contents, data.dest)) {
- guard_free(contents);
- guard_free(data.dest);
- continue;
- }
- guard_free(contents);
- guard_free(data.dest);
- }
-
- guard_free(data.src);
- return 0;
-}
-
-int delivery_docker(struct Delivery *ctx) {
- if (!docker_capable(&ctx->deploy.docker.capabilities)) {
- return -1;
- }
- char tag[STASIS_NAME_MAX];
- char args[PATH_MAX];
- int has_registry = ctx->deploy.docker.registry != NULL;
- size_t total_tags = strlist_count(ctx->deploy.docker.tags);
- size_t total_build_args = strlist_count(ctx->deploy.docker.build_args);
-
- if (!has_registry) {
- msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker registry defined. You will need to manually retag the resulting image.\n");
- }
-
- if (!total_tags) {
- char default_tag[PATH_MAX];
- msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker tags defined by configuration. Generating default tag(s).\n");
- // generate local tag
- memset(default_tag, 0, sizeof(default_tag));
- sprintf(default_tag, "%s:%s-py%s", ctx->meta.name, ctx->info.build_name, ctx->meta.python_compact);
- tolower_s(default_tag);
-
- // Add tag
- ctx->deploy.docker.tags = strlist_init();
- strlist_append(&ctx->deploy.docker.tags, default_tag);
-
- if (has_registry) {
- // generate tag for target registry
- memset(default_tag, 0, sizeof(default_tag));
- sprintf(default_tag, "%s/%s:%s-py%s", ctx->deploy.docker.registry, ctx->meta.name, ctx->info.build_number, ctx->meta.python_compact);
- tolower_s(default_tag);
-
- // Add tag
- strlist_append(&ctx->deploy.docker.tags, default_tag);
- }
- // regenerate total tag available
- total_tags = strlist_count(ctx->deploy.docker.tags);
- }
-
- memset(args, 0, sizeof(args));
-
- // Append image tags to command
- for (size_t i = 0; i < total_tags; i++) {
- char *tag_orig = strlist_item(ctx->deploy.docker.tags, i);
- strcpy(tag, tag_orig);
- docker_sanitize_tag(tag);
- sprintf(args + strlen(args), " -t \"%s\" ", tag);
- }
-
- // Append build arguments to command (i.e. --build-arg "key=value"
- for (size_t i = 0; i < total_build_args; i++) {
- char *build_arg = strlist_item(ctx->deploy.docker.build_args, i);
- if (!build_arg) {
- break;
- }
- sprintf(args + strlen(args), " --build-arg \"%s\" ", build_arg);
- }
-
- // Build the image
- char delivery_file[PATH_MAX];
- char dest[PATH_MAX];
- char rsync_cmd[PATH_MAX * 2];
- memset(delivery_file, 0, sizeof(delivery_file));
- memset(dest, 0, sizeof(dest));
-
- sprintf(delivery_file, "%s/%s.yml", ctx->storage.delivery_dir, ctx->info.release_name);
- if (access(delivery_file, F_OK) < 0) {
- fprintf(stderr, "docker build cannot proceed without delivery file: %s\n", delivery_file);
- return -1;
- }
-
- sprintf(dest, "%s/%s.yml", ctx->storage.build_docker_dir, ctx->info.release_name);
- if (copy2(delivery_file, dest, CT_PERM)) {
- fprintf(stderr, "Failed to copy delivery file to %s: %s\n", dest, strerror(errno));
- return -1;
- }
-
- memset(dest, 0, sizeof(dest));
- sprintf(dest, "%s/packages", ctx->storage.build_docker_dir);
-
- msg(STASIS_MSG_L2, "Copying conda packages\n");
- memset(rsync_cmd, 0, sizeof(rsync_cmd));
- sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.conda_artifact_dir, dest);
- if (system(rsync_cmd)) {
- fprintf(stderr, "Failed to copy conda artifacts to docker build directory\n");
- return -1;
- }
-
- msg(STASIS_MSG_L2, "Copying wheel packages\n");
- memset(rsync_cmd, 0, sizeof(rsync_cmd));
- sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.wheel_artifact_dir, dest);
- if (system(rsync_cmd)) {
- fprintf(stderr, "Failed to copy wheel artifactory to docker build directory\n");
- }
-
- if (docker_build(ctx->storage.build_docker_dir, args, ctx->deploy.docker.capabilities.build)) {
- return -1;
- }
-
- // Test the image
- // All tags point back to the same image so test the first one we see
- // regardless of how many are defined
- strcpy(tag, strlist_item(ctx->deploy.docker.tags, 0));
- docker_sanitize_tag(tag);
-
- msg(STASIS_MSG_L2, "Executing image test script for %s\n", tag);
- if (ctx->deploy.docker.test_script) {
- if (isempty(ctx->deploy.docker.test_script)) {
- msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Image test script has no content\n");
- } else {
- int state;
- if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) {
- msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Non-zero exit (%d) from test script. %s image archive will not be generated.\n", state >> 8, tag);
- // test failed -- don't save the image
- return -1;
- }
- }
- } else {
- msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "No image test script defined\n");
- }
-
- // Test successful, save image
- if (docker_save(path_basename(tag), ctx->storage.docker_artifact_dir, ctx->deploy.docker.image_compression)) {
- // save failed
- return -1;
- }
-
- return 0;
-}
-
-int delivery_fixup_test_results(struct Delivery *ctx) {
- struct dirent *rec;
- DIR *dp;
-
- dp = opendir(ctx->storage.results_dir);
- if (!dp) {
- perror(ctx->storage.results_dir);
- return -1;
- }
-
- while ((rec = readdir(dp)) != NULL) {
- char path[PATH_MAX];
- memset(path, 0, sizeof(path));
-
- if (!strcmp(rec->d_name, ".") || !strcmp(rec->d_name, "..")) {
- continue;
- } else if (!endswith(rec->d_name, ".xml")) {
- continue;
- }
-
- sprintf(path, "%s/%s", ctx->storage.results_dir, rec->d_name);
- msg(STASIS_MSG_L3, "%s\n", rec->d_name);
- if (xml_pretty_print_in_place(path, STASIS_XML_PRETTY_PRINT_PROG, STASIS_XML_PRETTY_PRINT_ARGS)) {
- msg(STASIS_MSG_L3 | STASIS_MSG_WARN, "Failed to rewrite file '%s'\n", rec->d_name);
- }
- }
-
- closedir(dp);
- return 0;
-}
-
-int delivery_exists(struct Delivery *ctx) {
- int release_exists = 0;
- char release_pattern[PATH_MAX] = {0};
- sprintf(release_pattern, "*%s*", ctx->info.release_name);
-
- if (globals.enable_artifactory) {
- if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) {
- fprintf(stderr, "Failed to initialize Artifactory authentication context\n");
- return -1; // error
- }
-
- struct JFRT_Search search = {.fail_no_op = true};
- release_exists = jfrog_cli_rt_search(&ctx->deploy.jfrog_auth, &search, globals.jfrog.repo, release_pattern);
- if (release_exists != 2) {
- if (!globals.enable_overwrite && !release_exists) {
- // --fail_no_op returns 2 on failure
- // without: it returns an empty list "[]" and exit code 0
- return 1; // found
- }
- }
- } else {
- struct StrList *files = listdir(ctx->storage.delivery_dir);
- for (size_t i = 0; i < strlist_count(files); i++) {
- char *filename = strlist_item(files, i);
- release_exists = fnmatch(release_pattern, filename, FNM_PATHNAME);
- if (!globals.enable_overwrite && !release_exists) {
- guard_strlist_free(&files);
- return 1; // found
- }
- }
- guard_strlist_free(&files);
- }
- return 0; // not found
-}
diff --git a/src/lib/CMakeLists.txt b/src/lib/CMakeLists.txt
new file mode 100644
index 0000000..82bfe4a
--- /dev/null
+++ b/src/lib/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(core) \ No newline at end of file
diff --git a/src/lib/core/CMakeLists.txt b/src/lib/core/CMakeLists.txt
new file mode 100644
index 0000000..c569187
--- /dev/null
+++ b/src/lib/core/CMakeLists.txt
@@ -0,0 +1,38 @@
+include_directories(${PROJECT_BINARY_DIR})
+
+add_library(stasis_core STATIC
+ globals.c
+ str.c
+ strlist.c
+ ini.c
+ conda.c
+ environment.c
+ utils.c
+ system.c
+ download.c
+ delivery_postprocess.c
+ delivery_conda.c
+ delivery_docker.c
+ delivery_install.c
+ delivery_artifactory.c
+ delivery_test.c
+ delivery_build.c
+ delivery_show.c
+ delivery_populate.c
+ delivery_init.c
+ delivery.c
+ recipe.c
+ relocation.c
+ wheel.c
+ copy.c
+ artifactory.c
+ template.c
+ rules.c
+ docker.c
+ junitxml.c
+ github.c
+ template_func_proto.c
+ envctl.c
+ multiprocessing.c
+)
+
diff --git a/src/artifactory.c b/src/lib/core/artifactory.c
index 6c4079a..6b9635d 100644
--- a/src/artifactory.c
+++ b/src/lib/core/artifactory.c
@@ -1,4 +1,4 @@
-#include "core.h"
+#include "artifactory.h"
extern struct STASIS_GLOBAL globals;
diff --git a/src/conda.c b/src/lib/core/conda.c
index ff55f14..35caf02 100644
--- a/src/conda.c
+++ b/src/lib/core/conda.c
@@ -2,7 +2,6 @@
// Created by jhunk on 5/14/23.
//
-#include <unistd.h>
#include "conda.h"
int micromamba(struct MicromambaInfo *info, char *command, ...) {
@@ -79,37 +78,26 @@ int pip_exec(const char *args) {
return system(command);
}
-int pip_index_provides(const char *index_url, const char *name, const char *version) {
+int pip_index_provides(const char *index_url, const char *spec) {
char cmd[PATH_MAX] = {0};
- char name_local[255];
- char version_local[255] = {0};
- char spec[255] = {0};
+ char spec_local[255] = {0};
- if (isempty((char *) name) < 0) {
- // no package name means nothing to do.
+ if (isempty((char *) spec)) {
+ // NULL or zero-length; no package spec means there's nothing to do.
return -1;
}
- // Fix up the package name
- strncpy(name_local, name, sizeof(name_local) - 1);
- tolower_s(name_local);
- lstrip(name_local);
- strip(name_local);
-
- if (version) {
- // Fix up the package version
- strncpy(version_local, version, sizeof(version_local) - 1);
- tolower_s(version_local);
- lstrip(version_local);
- strip(version_local);
- sprintf(spec, "==%s", version);
- }
+ // Normalize the local spec string
+ strncpy(spec_local, spec, sizeof(spec_local) - 1);
+ tolower_s(spec_local);
+ lstrip(spec_local);
+ strip(spec_local);
char logfile[] = "/tmp/STASIS-package_exists.XXXXXX";
int logfd = mkstemp(logfile);
if (logfd < 0) {
perror(logfile);
- remove(logfile); // fail harmlessly if not present
+ remove(logfile); // fail harmlessly if not present
return -1;
}
@@ -121,7 +109,7 @@ int pip_index_provides(const char *index_url, const char *name, const char *vers
strcpy(proc.f_stdout, logfile);
// Do an installation in dry-run mode to see if the package exists in the given index.
- snprintf(cmd, sizeof(cmd) - 1, "python -m pip install --dry-run --no-deps --index-url=%s %s%s", index_url, name_local, spec);
+ snprintf(cmd, sizeof(cmd) - 1, "python -m pip install --dry-run --no-deps --index-url=%s %s", index_url, spec_local);
status = shell(&proc, cmd);
// Print errors only when shell() itself throws one
@@ -222,7 +210,7 @@ int conda_activate(const char *root, const char *env_name) {
// Fully activate conda and record its effect on the runtime environment
char command[PATH_MAX * 3];
- snprintf(command, sizeof(command) - 1, "source %s; source %s; conda activate %s &>/dev/null; env -0", path_conda, path_mamba, env_name);
+ snprintf(command, sizeof(command) - 1, "set -a; source %s; source %s; conda activate %s &>/dev/null; env -0", path_conda, path_mamba, env_name);
int retval = shell(&proc, command);
if (retval) {
// it didn't work; drop out for cleanup
@@ -437,6 +425,39 @@ int conda_env_export(char *name, char *output_dir, char *output_filename) {
return conda_exec(env_command);
}
+char *conda_get_active_environment() {
+ const char *name = getenv("CONDA_DEFAULT_ENV");
+ if (!name) {
+ return NULL;
+ }
+
+ char *result = NULL;
+ result = strdup(name);
+ if (!result) {
+ return NULL;
+ }
+
+ return result;
+}
+
+int conda_provides(const char *spec) {
+ struct Process proc;
+ memset(&proc, 0, sizeof(proc));
+ strcpy(proc.f_stdout, "/dev/null");
+ strcpy(proc.f_stderr, "/dev/null");
+
+ // It's worth noting the departure from using conda_exec() here:
+ // conda_exec() expects the program output to be visible to the user.
+ // For this operation we only need the exit value.
+ char cmd[PATH_MAX] = {0};
+ snprintf(cmd, sizeof(cmd) - 1, "mamba search --use-index-cache %s", spec);
+ if (shell(&proc, cmd) < 0) {
+ fprintf(stderr, "shell: %s", strerror(errno));
+ return -1;
+ }
+ return proc.returncode == 0;
+}
+
int conda_index(const char *path) {
char command[PATH_MAX];
sprintf(command, "index %s", path);
diff --git a/src/copy.c b/src/lib/core/copy.c
index f69a756..f69a756 100644
--- a/src/copy.c
+++ b/src/lib/core/copy.c
diff --git a/src/lib/core/delivery.c b/src/lib/core/delivery.c
new file mode 100644
index 0000000..e32ed4c
--- /dev/null
+++ b/src/lib/core/delivery.c
@@ -0,0 +1,317 @@
+#include "delivery.h"
+
+void delivery_free(struct Delivery *ctx) {
+ guard_free(ctx->system.arch);
+ GENERIC_ARRAY_FREE(ctx->system.platform);
+ guard_free(ctx->meta.name);
+ guard_free(ctx->meta.version);
+ guard_free(ctx->meta.codename);
+ guard_free(ctx->meta.mission);
+ guard_free(ctx->meta.python);
+ guard_free(ctx->meta.mission);
+ guard_free(ctx->meta.python_compact);
+ guard_free(ctx->meta.based_on);
+ guard_runtime_free(ctx->runtime.environ);
+ guard_free(ctx->storage.root);
+ guard_free(ctx->storage.tmpdir);
+ guard_free(ctx->storage.delivery_dir);
+ guard_free(ctx->storage.tools_dir);
+ guard_free(ctx->storage.package_dir);
+ guard_free(ctx->storage.results_dir);
+ guard_free(ctx->storage.output_dir);
+ guard_free(ctx->storage.conda_install_prefix);
+ guard_free(ctx->storage.conda_artifact_dir);
+ guard_free(ctx->storage.conda_staging_dir);
+ guard_free(ctx->storage.conda_staging_url);
+ guard_free(ctx->storage.wheel_artifact_dir);
+ guard_free(ctx->storage.wheel_staging_dir);
+ guard_free(ctx->storage.wheel_staging_url);
+ guard_free(ctx->storage.build_dir);
+ guard_free(ctx->storage.build_recipes_dir);
+ guard_free(ctx->storage.build_sources_dir);
+ guard_free(ctx->storage.build_testing_dir);
+ guard_free(ctx->storage.build_docker_dir);
+ guard_free(ctx->storage.mission_dir);
+ guard_free(ctx->storage.docker_artifact_dir);
+ guard_free(ctx->storage.meta_dir);
+ guard_free(ctx->storage.package_dir);
+ guard_free(ctx->storage.cfgdump_dir);
+ guard_free(ctx->info.time_str_epoch);
+ guard_free(ctx->info.build_name);
+ guard_free(ctx->info.build_number);
+ guard_free(ctx->info.release_name);
+ guard_free(ctx->conda.installer_baseurl);
+ guard_free(ctx->conda.installer_name);
+ guard_free(ctx->conda.installer_version);
+ guard_free(ctx->conda.installer_platform);
+ guard_free(ctx->conda.installer_arch);
+ guard_free(ctx->conda.installer_path);
+ guard_free(ctx->conda.tool_version);
+ guard_free(ctx->conda.tool_build_version);
+ guard_strlist_free(&ctx->conda.conda_packages);
+ guard_strlist_free(&ctx->conda.conda_packages_defer);
+ guard_strlist_free(&ctx->conda.pip_packages);
+ guard_strlist_free(&ctx->conda.pip_packages_defer);
+ guard_strlist_free(&ctx->conda.wheels_packages);
+
+ for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
+ guard_free(ctx->tests[i].name);
+ guard_free(ctx->tests[i].version);
+ guard_free(ctx->tests[i].repository);
+ guard_free(ctx->tests[i].repository_info_ref);
+ guard_free(ctx->tests[i].repository_info_tag);
+ guard_strlist_free(&ctx->tests[i].repository_remove_tags);
+ guard_free(ctx->tests[i].script);
+ guard_free(ctx->tests[i].build_recipe);
+ // test-specific runtime variables
+ guard_runtime_free(ctx->tests[i].runtime.environ);
+ }
+
+ guard_free(ctx->rules.release_fmt);
+ guard_free(ctx->rules.build_name_fmt);
+ guard_free(ctx->rules.build_number_fmt);
+
+ guard_free(ctx->deploy.docker.test_script);
+ guard_free(ctx->deploy.docker.registry);
+ guard_free(ctx->deploy.docker.image_compression);
+ guard_strlist_free(&ctx->deploy.docker.tags);
+ guard_strlist_free(&ctx->deploy.docker.build_args);
+
+ for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(ctx->deploy.jfrog[0]); i++) {
+ guard_free(ctx->deploy.jfrog[i].repo);
+ guard_free(ctx->deploy.jfrog[i].dest);
+ guard_strlist_free(&ctx->deploy.jfrog[i].files);
+ }
+
+ if (ctx->_stasis_ini_fp.delivery) {
+ ini_free(&ctx->_stasis_ini_fp.delivery);
+ }
+ guard_free(ctx->_stasis_ini_fp.delivery_path);
+
+ if (ctx->_stasis_ini_fp.cfg) {
+ // optional extras
+ ini_free(&ctx->_stasis_ini_fp.cfg);
+ }
+ guard_free(ctx->_stasis_ini_fp.cfg_path);
+
+ if (ctx->_stasis_ini_fp.mission) {
+ ini_free(&ctx->_stasis_ini_fp.mission);
+ }
+ guard_free(ctx->_stasis_ini_fp.mission_path);
+}
+
+int delivery_format_str(struct Delivery *ctx, char **dest, const char *fmt) {
+ size_t fmt_len = strlen(fmt);
+
+ if (!*dest) {
+ *dest = calloc(STASIS_NAME_MAX, sizeof(**dest));
+ if (!*dest) {
+ return -1;
+ }
+ }
+
+ for (size_t i = 0; i < fmt_len; i++) {
+ if (fmt[i] == '%' && strlen(&fmt[i])) {
+ i++;
+ switch (fmt[i]) {
+ case 'n': // name
+ strcat(*dest, ctx->meta.name);
+ break;
+ case 'c': // codename
+ strcat(*dest, ctx->meta.codename);
+ break;
+ case 'm': // mission
+ strcat(*dest, ctx->meta.mission);
+ break;
+ case 'r': // revision
+ sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc);
+ break;
+ case 'R': // "final"-aware revision
+ if (ctx->meta.final)
+ strcat(*dest, "final");
+ else
+ sprintf(*dest + strlen(*dest), "%d", ctx->meta.rc);
+ break;
+ case 'v': // version
+ strcat(*dest, ctx->meta.version);
+ break;
+ case 'P': // python version
+ strcat(*dest, ctx->meta.python);
+ break;
+ case 'p': // python version major/minor
+ strcat(*dest, ctx->meta.python_compact);
+ break;
+ case 'a': // system architecture name
+ strcat(*dest, ctx->system.arch);
+ break;
+ case 'o': // system platform (OS) name
+ strcat(*dest, ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
+ break;
+ case 't': // unix epoch
+ sprintf(*dest + strlen(*dest), "%ld", ctx->info.time_now);
+ break;
+ default: // unknown formatter, write as-is
+ sprintf(*dest + strlen(*dest), "%c%c", fmt[i - 1], fmt[i]);
+ break;
+ }
+ } else { // write non-format text
+ sprintf(*dest + strlen(*dest), "%c", fmt[i]);
+ }
+ }
+ return 0;
+}
+
+void delivery_defer_packages(struct Delivery *ctx, int type) {
+ struct StrList *dataptr = NULL;
+ struct StrList *deferred = NULL;
+ char *name = NULL;
+ char cmd[PATH_MAX];
+
+ memset(cmd, 0, sizeof(cmd));
+
+ char mode[10];
+ if (DEFER_CONDA == type) {
+ dataptr = ctx->conda.conda_packages;
+ deferred = ctx->conda.conda_packages_defer;
+ strcpy(mode, "conda");
+ } else if (DEFER_PIP == type) {
+ dataptr = ctx->conda.pip_packages;
+ deferred = ctx->conda.pip_packages_defer;
+ strcpy(mode, "pip");
+ } else {
+ SYSERROR("BUG: type %d does not map to a supported package manager!\n", type);
+ exit(1);
+ }
+ msg(STASIS_MSG_L2, "Filtering %s packages by test definition...\n", mode);
+
+ struct StrList *filtered = NULL;
+ filtered = strlist_init();
+ for (size_t i = 0; i < strlist_count(dataptr); i++) {
+ int build_for_host = 0;
+
+ name = strlist_item(dataptr, i);
+ if (!strlen(name) || isblank(*name) || isspace(*name)) {
+ // no data
+ continue;
+ }
+
+ // Compile a list of packages that are *also* to be tested.
+ char *spec_begin = strpbrk(name, "@~=<>!");
+ char *spec_end = spec_begin;
+ char package_name[255] = {0};
+
+ if (spec_end) {
+ // A version is present in the package name. Jump past operator(s).
+ while (*spec_end != '\0' && !isalnum(*spec_end)) {
+ spec_end++;
+ }
+ strncpy(package_name, name, spec_begin - name);
+ } else {
+ strncpy(package_name, name, sizeof(package_name) - 1);
+ }
+
+ msg(STASIS_MSG_L3, "package '%s': ", package_name);
+
+ // When spec is present in name, set tests->version to the version detected in the name
+ for (size_t x = 0; x < sizeof(ctx->tests) / sizeof(ctx->tests[0]) && ctx->tests[x].name != NULL; x++) {
+ struct Test *test = &ctx->tests[x];
+ char nametmp[1024] = {0};
+
+ if (spec_end != NULL && spec_begin != NULL) {
+ strncpy(nametmp, name, spec_begin - name);
+ } else {
+ strcpy(nametmp, name);
+ }
+ // Is the [test:NAME] in the package name?
+ if (!strcmp(nametmp, test->name)) {
+ // Override test->version when a version is provided by the (pip|conda)_package list item
+ guard_free(test->version);
+ if (spec_begin && spec_end) {
+ test->version = strdup(spec_end);
+ } else {
+ // There are too many possible default branches nowadays: master, main, develop, xyz, etc.
+ // HEAD is a safe bet.
+ test->version = strdup("HEAD");
+ }
+
+ // Is the list item a git+schema:// URL?
+ if (strstr(nametmp, "git+") && strstr(nametmp, "://")) {
+ char *xrepo = strstr(nametmp, "+");
+ if (xrepo) {
+ xrepo++;
+ guard_free(test->repository);
+ test->repository = strdup(xrepo);
+ xrepo = NULL;
+ }
+ // Extract the name of the package
+ char *xbasename = path_basename(nametmp);
+ if (xbasename) {
+ // Replace the git+schema:// URL with the package name
+ strlist_set(&dataptr, i, xbasename);
+ name = strlist_item(dataptr, i);
+ }
+ }
+
+ int upstream_exists = 0;
+ if (DEFER_PIP == type) {
+ upstream_exists = pip_index_provides(PYPI_INDEX_DEFAULT, name);
+ } else if (DEFER_CONDA == type) {
+ upstream_exists = conda_provides(name);
+ } else {
+ fprintf(stderr, "\nUnknown package type: %d\n", type);
+ exit(1);
+ }
+
+ if (upstream_exists < 0) {
+ fprintf(stderr, "%s's existence command failed for '%s'\n"
+ "(This may be due to a network/firewall issue!)\n", mode, name);
+ exit(1);
+ }
+ if (!upstream_exists) {
+ build_for_host = 1;
+ } else {
+ build_for_host = 0;
+ }
+
+ break;
+ }
+ }
+
+ if (build_for_host) {
+ printf("BUILD FOR HOST\n");
+ strlist_append(&deferred, name);
+ } else {
+ printf("USE EXTERNAL\n");
+ strlist_append(&filtered, name);
+ }
+ }
+
+ if (!strlist_count(deferred)) {
+ msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No %s packages were filtered by test definitions\n", mode);
+ } else {
+ if (DEFER_CONDA == type) {
+ strlist_free(&ctx->conda.conda_packages);
+ ctx->conda.conda_packages = strlist_copy(filtered);
+ } else if (DEFER_PIP == type) {
+ strlist_free(&ctx->conda.pip_packages);
+ ctx->conda.pip_packages = strlist_copy(filtered);
+ }
+ }
+ if (filtered) {
+ strlist_free(&filtered);
+ }
+}
+
+void delivery_gather_tool_versions(struct Delivery *ctx) {
+ int status = 0;
+
+ // Extract version from tool output
+ ctx->conda.tool_version = shell_output("conda --version", &status);
+ if (ctx->conda.tool_version)
+ strip(ctx->conda.tool_version);
+
+ ctx->conda.tool_build_version = shell_output("conda build --version", &status);
+ if (ctx->conda.tool_build_version)
+ strip(ctx->conda.tool_version);
+}
+
diff --git a/src/lib/core/delivery_artifactory.c b/src/lib/core/delivery_artifactory.c
new file mode 100644
index 0000000..27f4823
--- /dev/null
+++ b/src/lib/core/delivery_artifactory.c
@@ -0,0 +1,192 @@
+#include "delivery.h"
+
+// Download the jfrog CLI into <tools_dir>/bin unless it is already present,
+// then export the environment variables the CLI requires (CI,
+// JFROG_CLI_HOME_DIR, JFROG_CLI_TEMP_DIR).
+// Returns the artifactory_download_cli() status (0 on success or skip).
+int delivery_init_artifactory(struct Delivery *ctx) {
+    int status = 0;
+    char dest[PATH_MAX] = {0};
+    char filepath[PATH_MAX] = {0};
+    snprintf(dest, sizeof(dest) - 1, "%s/bin", ctx->storage.tools_dir);
+    // BUG FIX: size argument previously referenced sizeof(dest); harmless
+    // today (both are PATH_MAX) but a latent overflow if sizes diverge
+    snprintf(filepath, sizeof(filepath) - 1, "%s/bin/jf", ctx->storage.tools_dir);
+
+    if (!access(filepath, F_OK)) {
+        // already have it
+        msg(STASIS_MSG_L3, "Skipped download, %s already exists\n", filepath);
+        goto delivery_init_artifactory_envsetup;
+    }
+
+    char *platform = ctx->system.platform[DELIVERY_PLATFORM];
+    msg(STASIS_MSG_L3, "Downloading %s for %s %s\n", globals.jfrog.remote_filename, platform, ctx->system.arch);
+    if ((status = artifactory_download_cli(dest,
+                                           globals.jfrog.jfrog_artifactory_base_url,
+                                           globals.jfrog.jfrog_artifactory_product,
+                                           globals.jfrog.cli_major_ver,
+                                           globals.jfrog.version,
+                                           platform,
+                                           ctx->system.arch,
+                                           globals.jfrog.remote_filename))) {
+        // A failed download may leave a partial binary behind; remove it
+        remove(filepath);
+    }
+
+    delivery_init_artifactory_envsetup:
+    // CI (ridiculously generic, why?) disables interactive prompts and progress bar output
+    setenv("CI", "1", 1);
+
+    // JFROG_CLI_HOME_DIR is where .jfrog is stored
+    char path[PATH_MAX] = {0};
+    snprintf(path, sizeof(path) - 1, "%s/.jfrog", ctx->storage.build_dir);
+    setenv("JFROG_CLI_HOME_DIR", path, 1);
+
+    // JFROG_CLI_TEMP_DIR is where the obvious is stored
+    setenv("JFROG_CLI_TEMP_DIR", ctx->storage.tmpdir, 1);
+    return status;
+}
+
+// Upload delivery artifacts to Artifactory with the jfrog CLI.
+// Walks every populated ctx->deploy.jfrog[] block and uploads its files to
+// "<repo>/<dest>". Returns -1 on authentication or ping failure, 0 on
+// success, or a positive count of errors otherwise.
+int delivery_artifact_upload(struct Delivery *ctx) {
+ int status = 0;
+
+ if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) {
+ fprintf(stderr, "Failed to initialize Artifactory authentication context\n");
+ return -1;
+ }
+
+ // Process each configured upload block; the first slot lacking files or
+ // a destination terminates the list
+ for (size_t i = 0; i < sizeof(ctx->deploy.jfrog) / sizeof(*ctx->deploy.jfrog); i++) {
+ if (!ctx->deploy.jfrog[i].files || !ctx->deploy.jfrog[i].dest) {
+ break;
+ }
+ jfrt_upload_init(&ctx->deploy.jfrog[i].upload_ctx);
+
+ // A repository path must come from config or the STASIS_JF_REPO env var
+ if (!globals.jfrog.repo) {
+ msg(STASIS_MSG_WARN, "Artifactory repository path is not configured!\n");
+ fprintf(stderr, "set STASIS_JF_REPO environment variable...\nOr append to configuration file:\n\n");
+ fprintf(stderr, "[deploy:artifactory]\nrepo = example/generic/repo/path\n\n");
+ status++;
+ break;
+ } else if (!ctx->deploy.jfrog[i].repo) {
+ // Per-block repo falls back to the globally configured repository
+ ctx->deploy.jfrog[i].repo = strdup(globals.jfrog.repo);
+ }
+
+ if (!ctx->deploy.jfrog[i].repo || isempty(ctx->deploy.jfrog[i].repo) || !strlen(ctx->deploy.jfrog[i].repo)) {
+ // Unlikely to trigger if the config parser is working correctly
+ msg(STASIS_MSG_ERROR, "Artifactory repository path is empty. Cannot continue.\n");
+ status++;
+ break;
+ }
+
+ ctx->deploy.jfrog[i].upload_ctx.workaround_parent_only = true;
+ ctx->deploy.jfrog[i].upload_ctx.build_name = ctx->info.build_name;
+ ctx->deploy.jfrog[i].upload_ctx.build_number = ctx->info.build_number;
+
+ char files[PATH_MAX];
+ char dest[PATH_MAX]; // repo + remote dir
+
+ // NOTE(review): the server ping runs once per block; it looks like it
+ // could be hoisted above the loop -- confirm before changing
+ if (jfrog_cli_rt_ping(&ctx->deploy.jfrog_auth)) {
+ msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Unable to contact artifactory server: %s\n", ctx->deploy.jfrog_auth.url);
+ return -1;
+ }
+
+ // Upload each file in this block to "<repo>/<dest>"
+ if (strlist_count(ctx->deploy.jfrog[i].files)) {
+ for (size_t f = 0; f < strlist_count(ctx->deploy.jfrog[i].files); f++) {
+ memset(dest, 0, sizeof(dest));
+ memset(files, 0, sizeof(files));
+ snprintf(dest, sizeof(dest) - 1, "%s/%s", ctx->deploy.jfrog[i].repo, ctx->deploy.jfrog[i].dest);
+ snprintf(files, sizeof(files) - 1, "%s", strlist_item(ctx->deploy.jfrog[i].files, f));
+ status += jfrog_cli_rt_upload(&ctx->deploy.jfrog_auth, &ctx->deploy.jfrog[i].upload_ctx, files, dest);
+ }
+ }
+ }
+
+ // Publish build-info once, keyed off the first block (all blocks share
+ // the same build name/number)
+ if (globals.enable_artifactory_build_info) {
+ if (!status && ctx->deploy.jfrog[0].files && ctx->deploy.jfrog[0].dest) {
+ jfrog_cli_rt_build_collect_env(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name,
+ ctx->deploy.jfrog[0].upload_ctx.build_number);
+ jfrog_cli_rt_build_publish(&ctx->deploy.jfrog_auth, ctx->deploy.jfrog[0].upload_ctx.build_name,
+ ctx->deploy.jfrog[0].upload_ctx.build_number);
+ }
+ } else {
+ msg(STASIS_MSG_WARN | STASIS_MSG_L2, "Artifactory build info upload is disabled by CLI argument\n");
+ }
+
+ return status;
+}
+
+// Render every "template:<file>" section of the mission config.
+// For each section, reads <mission_dir>/<mission>/<file> and renders it to
+// the section's "destination" path via tpl_render_to_file().
+// Returns 0 on success, -1 on a fatal allocation/context error, 1 when a
+// template section name is empty. Per-file errors are logged and skipped.
+int delivery_mission_render_files(struct Delivery *ctx) {
+    if (!ctx->storage.mission_dir) {
+        fprintf(stderr, "Mission directory is not configured. Context not initialized?\n");
+        return -1;
+    }
+    struct Data {
+        char *src;
+        char *dest;
+    } data;
+    struct INIFILE *cfg = ctx->_stasis_ini_fp.mission;
+    union INIVal val;
+
+    memset(&data, 0, sizeof(data));
+    data.src = calloc(PATH_MAX, sizeof(*data.src));
+    if (!data.src) {
+        perror("data.src");
+        return -1;
+    }
+
+    for (size_t i = 0; i < cfg->section_count; i++) {
+        char *section_name = cfg->section[i]->key;
+        if (!startswith(section_name, "template:")) {
+            continue;
+        }
+        // The template file name is everything after the "template:" prefix
+        val.as_char_p = strchr(section_name, ':') + 1;
+        if (val.as_char_p && isempty(val.as_char_p)) {
+            guard_free(data.src);
+            return 1;
+        }
+        sprintf(data.src, "%s/%s/%s", ctx->storage.mission_dir, ctx->meta.mission, val.as_char_p);
+        msg(STASIS_MSG_L2, "%s\n", data.src);
+
+        int err = 0;
+        data.dest = ini_getval_str(cfg, section_name, "destination", INI_READ_RENDER, &err);
+        // BUG FIX: a missing/unreadable "destination" key used to fall
+        // through and hand NULL to msg()/tpl_render_to_file()
+        if (err || !data.dest) {
+            msg(STASIS_MSG_WARN | STASIS_MSG_L2, "%s has no destination defined\n", section_name);
+            guard_free(data.dest);
+            continue;
+        }
+
+        char *contents;
+        struct stat st;
+        if (lstat(data.src, &st)) {
+            perror(data.src);
+            guard_free(data.dest);
+            continue;
+        }
+
+        contents = calloc(st.st_size + 1, sizeof(*contents));
+        if (!contents) {
+            perror("template file contents");
+            guard_free(data.dest);
+            continue;
+        }
+
+        FILE *fp;
+        fp = fopen(data.src, "rb");
+        if (!fp) {
+            perror(data.src);
+            guard_free(contents);
+            guard_free(data.dest);
+            continue;
+        }
+
+        // BUG FIX: fread()'s size/nmemb arguments were transposed; read
+        // st.st_size one-byte items and verify the whole file arrived
+        if (fread(contents, sizeof(*contents), st.st_size, fp) < (size_t) st.st_size) {
+            perror("while reading template file");
+            guard_free(contents);
+            guard_free(data.dest);
+            fclose(fp);
+            continue;
+        }
+        fclose(fp);
+
+        msg(STASIS_MSG_L3, "Writing %s\n", data.dest);
+        if (tpl_render_to_file(contents, data.dest)) {
+            guard_free(contents);
+            guard_free(data.dest);
+            continue;
+        }
+        guard_free(contents);
+        guard_free(data.dest);
+    }
+
+    guard_free(data.src);
+    return 0;
+}
+
diff --git a/src/lib/core/delivery_build.c b/src/lib/core/delivery_build.c
new file mode 100644
index 0000000..b4d610a
--- /dev/null
+++ b/src/lib/core/delivery_build.c
@@ -0,0 +1,190 @@
+#include "delivery.h"
+
+// Build conda packages for every test block that defines build_recipe.
+// Clones the recipe, patches meta.yaml's version/url/sha256/build-number
+// keys, then runs "conda mambabuild". Returns 0 on success, -1 on failure.
+int delivery_build_recipes(struct Delivery *ctx) {
+ for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
+ char *recipe_dir = NULL;
+ if (ctx->tests[i].build_recipe) { // build a conda recipe
+ int recipe_type;
+ int status;
+ if (recipe_clone(ctx->storage.build_recipes_dir, ctx->tests[i].build_recipe, NULL, &recipe_dir)) {
+ fprintf(stderr, "Encountered an issue while cloning recipe for: %s\n", ctx->tests[i].name);
+ return -1;
+ }
+ if (!recipe_dir) {
+ fprintf(stderr, "BUG: recipe_clone() succeeded but recipe_dir is NULL: %s\n", strerror(errno));
+ return -1;
+ }
+ recipe_type = recipe_get_type(recipe_dir);
+ if(!pushd(recipe_dir)) {
+ // Recipe layouts differ: astroconda nests by package name,
+ // conda-forge keeps meta.yaml under a "recipe" subdirectory
+ if (RECIPE_TYPE_ASTROCONDA == recipe_type) {
+ pushd(path_basename(ctx->tests[i].repository));
+ } else if (RECIPE_TYPE_CONDA_FORGE == recipe_type) {
+ pushd("recipe");
+ }
+
+ char recipe_version[100];
+ char recipe_buildno[100];
+ char recipe_git_url[PATH_MAX];
+ char recipe_git_rev[PATH_MAX];
+
+ //sprintf(recipe_version, "{%% set version = GIT_DESCRIBE_TAG ~ \".dev\" ~ GIT_DESCRIBE_NUMBER ~ \"+\" ~ GIT_DESCRIBE_HASH %%}");
+ //sprintf(recipe_git_url, " git_url: %s", ctx->tests[i].repository);
+ //sprintf(recipe_git_rev, " git_rev: %s", ctx->tests[i].version);
+ // TODO: Conditionally download archives if github.com is the origin. Else, use raw git_* keys ^^^
+ // Pin the version to the repository tag (when known) and point the
+ // source URL at the tag's tarball
+ sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].repository_info_tag ? ctx->tests[i].repository_info_tag : ctx->tests[i].version);
+ sprintf(recipe_git_url, " url: %s/archive/refs/tags/{{ version }}.tar.gz", ctx->tests[i].repository);
+ strcpy(recipe_git_rev, "");
+ sprintf(recipe_buildno, " number: 0");
+
+ unsigned flags = REPLACE_TRUNCATE_AFTER_MATCH;
+ //file_replace_text("meta.yaml", "{% set version = ", recipe_version);
+ if (ctx->meta.final) { // remove this. i.e. statis cannot deploy a release to conda-forge
+ sprintf(recipe_version, "{%% set version = \"%s\" %%}", ctx->tests[i].version);
+ // TODO: replace sha256 of tagged archive
+ // TODO: leave the recipe unchanged otherwise. in theory this should produce the same conda package hash as conda forge.
+ // For now, remove the sha256 requirement
+ file_replace_text("meta.yaml", "sha256:", "\n", flags);
+ } else {
+ file_replace_text("meta.yaml", "{% set version = ", recipe_version, flags);
+ file_replace_text("meta.yaml", " url:", recipe_git_url, flags);
+ //file_replace_text("meta.yaml", "sha256:", recipe_git_rev);
+ file_replace_text("meta.yaml", " sha256:", "\n", flags);
+ file_replace_text("meta.yaml", " number:", recipe_buildno, flags);
+ }
+
+ // Compose the conda build command; conda-forge recipes also need the
+ // matching .ci_support variant config for this platform/arch
+ char command[PATH_MAX];
+ if (RECIPE_TYPE_CONDA_FORGE == recipe_type) {
+ char arch[STASIS_NAME_MAX] = {0};
+ char platform[STASIS_NAME_MAX] = {0};
+
+ strcpy(platform, ctx->system.platform[DELIVERY_PLATFORM]);
+ if (strstr(platform, "Darwin")) {
+ memset(platform, 0, sizeof(platform));
+ strcpy(platform, "osx");
+ }
+ tolower_s(platform);
+ if (strstr(ctx->system.arch, "arm64")) {
+ strcpy(arch, "arm64");
+ } else if (strstr(ctx->system.arch, "64")) {
+ strcpy(arch, "64");
+ } else {
+ strcat(arch, "32"); // blind guess
+ }
+ tolower_s(arch);
+
+ sprintf(command, "mambabuild --python=%s -m ../.ci_support/%s_%s_.yaml .",
+ ctx->meta.python, platform, arch);
+ } else {
+ sprintf(command, "mambabuild --python=%s .", ctx->meta.python);
+ }
+ status = conda_exec(command);
+ if (status) {
+ // NOTE(review): this return leaves the process cwd inside the
+ // recipe directory (no popd) -- confirm callers don't depend on it
+ guard_free(recipe_dir);
+ return -1;
+ }
+
+ // Generic recipes were not pushed a second level deep above
+ if (RECIPE_TYPE_GENERIC != recipe_type) {
+ popd();
+ }
+ popd();
+ } else {
+ fprintf(stderr, "Unable to enter recipe directory %s: %s\n", recipe_dir, strerror(errno));
+ guard_free(recipe_dir);
+ return -1;
+ }
+ }
+ guard_free(recipe_dir);
+ }
+ return 0;
+}
+
+// Delete local git tags in "repo" that match any fnmatch() pattern in
+// "patterns". Returns 0 on success, -1 when the repo cannot be entered or
+// "git tag -l" fails, otherwise the accumulated non-zero exit codes from
+// "git tag -d".
+int filter_repo_tags(char *repo, struct StrList *patterns) {
+    int result = 0;
+
+    if (!pushd(repo)) {
+        int list_status = 0;
+        char *tags_raw = shell_output("git tag -l", &list_status);
+        // BUG FIX: a failed tag listing previously fell through with
+        // NULL/partial output instead of reporting an error
+        if (!tags_raw || list_status) {
+            guard_free(tags_raw);
+            popd();
+            return -1;
+        }
+        struct StrList *tags = strlist_init();
+        strlist_append_tokenize(tags, tags_raw, LINE_SEP);
+
+        for (size_t i = 0; tags && i < strlist_count(tags); i++) {
+            char *tag = strlist_item(tags, i);
+            for (size_t p = 0; p < strlist_count(patterns); p++) {
+                char *pattern = strlist_item(patterns, p);
+                // fnmatch() returns 0 on a match
+                if (!fnmatch(pattern, tag, 0)) {
+                    char cmd[PATH_MAX] = {0};
+                    snprintf(cmd, sizeof(cmd) - 1, "git tag -d %s", tag);
+                    result += system(cmd);
+                    break;
+                }
+            }
+        }
+        guard_strlist_free(&tags);
+        guard_free(tags_raw);
+        popd();
+    } else {
+        result = -1;
+    }
+    return result;
+}
+
+// Build Python wheels for every test block that defines a source repository
+// but no conda recipe. Each repository is cloned into build_sources_dir,
+// optionally stripped of unwanted tags, and built with "python -m build -w"
+// into <wheel_artifact_dir>/<lowercased name>.
+// Returns an allocated StrList (caller frees), or NULL on failure.
+struct StrList *delivery_build_wheels(struct Delivery *ctx) {
+    struct StrList *result = NULL;
+    struct Process proc;
+    memset(&proc, 0, sizeof(proc));
+
+    result = strlist_init();
+    if (!result) {
+        perror("unable to allocate memory for string list");
+        return NULL;
+    }
+
+    for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
+        if (!ctx->tests[i].build_recipe && ctx->tests[i].repository) { // build from source
+            char srcdir[PATH_MAX];
+            char wheeldir[PATH_MAX];
+            memset(srcdir, 0, sizeof(srcdir));
+            memset(wheeldir, 0, sizeof(wheeldir));
+
+            sprintf(srcdir, "%s/%s", ctx->storage.build_sources_dir, ctx->tests[i].name);
+            git_clone(&proc, ctx->tests[i].repository, srcdir, ctx->tests[i].version);
+
+            if (ctx->tests[i].repository_remove_tags && strlist_count(ctx->tests[i].repository_remove_tags)) {
+                filter_repo_tags(srcdir, ctx->tests[i].repository_remove_tags);
+            }
+
+            if (!pushd(srcdir)) {
+                char dname[NAME_MAX];
+                char outdir[PATH_MAX];
+                char cmd[PATH_MAX * 2];
+                memset(dname, 0, sizeof(dname));
+                memset(outdir, 0, sizeof(outdir));
+                // BUG FIX: previously zeroed only sizeof(outdir) bytes of
+                // cmd, leaving half of the buffer uninitialized
+                memset(cmd, 0, sizeof(cmd));
+
+                // Wheel artifacts are grouped by lowercased package name
+                strcpy(dname, ctx->tests[i].name);
+                tolower_s(dname);
+                sprintf(outdir, "%s/%s", ctx->storage.wheel_artifact_dir, dname);
+                if (mkdirs(outdir, 0755)) {
+                    fprintf(stderr, "failed to create output directory: %s\n", outdir);
+                    guard_strlist_free(&result);
+                    popd();  // restore the working directory before bailing
+                    return NULL;
+                }
+
+                sprintf(cmd, "-m build -w -o %s", outdir);
+                if (python_exec(cmd)) {
+                    fprintf(stderr, "failed to generate wheel package for %s-%s\n", ctx->tests[i].name, ctx->tests[i].version);
+                    guard_strlist_free(&result);
+                    popd();  // restore the working directory before bailing
+                    return NULL;
+                }
+                popd();
+            } else {
+                fprintf(stderr, "Unable to enter source directory %s: %s\n", srcdir, strerror(errno));
+                guard_strlist_free(&result);
+                return NULL;
+            }
+        }
+    }
+    return result;
+}
+
diff --git a/src/lib/core/delivery_conda.c b/src/lib/core/delivery_conda.c
new file mode 100644
index 0000000..93a06fc
--- /dev/null
+++ b/src/lib/core/delivery_conda.c
@@ -0,0 +1,110 @@
+#include "delivery.h"
+
+// Compose the download URL of the conda installer script described by the
+// delivery context. When a version is pinned the filename is
+// "<name>-<version>-<platform>-<arch>.sh"; otherwise the unversioned
+// "latest" form "<name>-<platform>-<arch>.sh" is produced.
+// The caller supplies "result", a buffer large enough for the URL.
+void delivery_get_conda_installer_url(struct Delivery *ctx, char *result) {
+    const char *base = ctx->conda.installer_baseurl;
+    const char *name = ctx->conda.installer_name;
+    const char *plat = ctx->conda.installer_platform;
+    const char *arch = ctx->conda.installer_arch;
+
+    if (ctx->conda.installer_version) {
+        // A specific installer release was requested by the configuration
+        sprintf(result, "%s/%s-%s-%s-%s.sh", base, name,
+                ctx->conda.installer_version, plat, arch);
+    } else {
+        // No pin: fall back to the latest installer
+        sprintf(result, "%s/%s-%s-%s.sh", base, name, plat, arch);
+    }
+}
+
+// Download the conda installer named by installer_url into the delivery
+// tmpdir (skipping the download when it is already cached), then record the
+// local path in ctx->conda.installer_path.
+// Returns 0 on success, -1 on download or allocation failure.
+int delivery_get_conda_installer(struct Delivery *ctx, char *installer_url) {
+    char script_path[PATH_MAX];
+    char *installer = path_basename(installer_url);
+
+    memset(script_path, 0, sizeof(script_path));
+    snprintf(script_path, sizeof(script_path) - 1, "%s/%s", ctx->storage.tmpdir, installer);
+    if (access(script_path, F_OK)) {
+        // Script doesn't exist
+        long fetch_status = download(installer_url, script_path, NULL);
+        if (HTTP_ERROR(fetch_status) || fetch_status < 0) {
+            // download failed
+            return -1;
+        }
+    } else {
+        // BUG FIX: the format string was missing the "%s" conversion, so
+        // the script_path argument was silently dropped from the message
+        msg(STASIS_MSG_RESTRICT | STASIS_MSG_L3, "Skipped, %s already exists\n", script_path);
+    }
+
+    ctx->conda.installer_path = strdup(script_path);
+    if (!ctx->conda.installer_path) {
+        SYSERROR("Unable to duplicate script_path: '%s'", script_path);
+        return -1;
+    }
+
+    return 0;
+}
+
+// Install conda from install_script into conda_install_dir. When
+// globals.conda_fresh_start is set, any previous installation is removed
+// first and the installer runs in batch mode; otherwise the existing
+// installation is kept untouched. Exits the process on failure.
+void delivery_install_conda(char *install_script, char *conda_install_dir) {
+    struct Process proc;
+    memset(&proc, 0, sizeof(proc));
+
+    if (!globals.conda_fresh_start) {
+        msg(STASIS_MSG_L3, "Conda removal disabled by configuration\n");
+        return;
+    }
+
+    if (!access(conda_install_dir, F_OK)) {
+        // directory exists so remove it
+        if (rmtree(conda_install_dir)) {
+            perror("unable to remove previous installation");
+            exit(1);
+        }
+    }
+
+    // Proceed with the installation (both branches of the original ran
+    // this identical code; it is hoisted here once)
+    // -b = batch mode (non-interactive)
+    char cmd[PATH_MAX] = {0};
+    snprintf(cmd, sizeof(cmd) - 1, "%s %s -b -p %s",
+             find_program("bash"),
+             install_script,
+             conda_install_dir);
+    if (shell_safe(&proc, cmd)) {
+        fprintf(stderr, "conda installation failed\n");
+        exit(1);
+    }
+}
+
+// Activate the "base" environment of the conda installation rooted at
+// conda_install_dir, pin CONDARC to that installation's .condarc, refresh
+// the tracked runtime environment, and switch conda into headless mode.
+// Any failure is fatal and exits the process.
+void delivery_conda_enable(struct Delivery *ctx, char *conda_install_dir) {
+    if (conda_activate(conda_install_dir, "base")) {
+        fprintf(stderr, "conda activation failed\n");
+        exit(1);
+    }
+
+    // Exporting CONDARC is the only reliable way to force conda to use our
+    // config file; implicit lookup misbehaves when another environment was
+    // already active when STASIS started.
+    char rcpath[PATH_MAX];
+    sprintf(rcpath, "%s/%s", conda_install_dir, ".condarc");
+    setenv("CONDARC", rcpath, 1);
+
+    // Re-capture the process environment mutated by activation
+    if (runtime_replace(&ctx->runtime.environ, __environ)) {
+        perror("unable to replace runtime environment after activating conda");
+        exit(1);
+    }
+
+    // No continue-on-error here: headless setup must succeed
+    if (conda_setup_headless()) {
+        exit(1);
+    }
+}
+
diff --git a/src/lib/core/delivery_docker.c b/src/lib/core/delivery_docker.c
new file mode 100644
index 0000000..e1d7f60
--- /dev/null
+++ b/src/lib/core/delivery_docker.c
@@ -0,0 +1,132 @@
+#include "delivery.h"
+
+// Build, test, and archive the delivery's docker image.
+// Generates default tags when none are configured, stages the delivery
+// file plus conda/wheel artifacts into the docker build directory, builds
+// the image, runs the optional test script, and saves the image archive.
+// Returns 0 on success, -1 on any failure.
+int delivery_docker(struct Delivery *ctx) {
+ if (!docker_capable(&ctx->deploy.docker.capabilities)) {
+ return -1;
+ }
+ char tag[STASIS_NAME_MAX];
+ char args[PATH_MAX];
+ int has_registry = ctx->deploy.docker.registry != NULL;
+ size_t total_tags = strlist_count(ctx->deploy.docker.tags);
+ size_t total_build_args = strlist_count(ctx->deploy.docker.build_args);
+
+ if (!has_registry) {
+ msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker registry defined. You will need to manually retag the resulting image.\n");
+ }
+
+ // With no configured tags, synthesize "<name>:<build>-py<ver>" and, when
+ // a registry is known, a registry-qualified variant as well
+ if (!total_tags) {
+ char default_tag[PATH_MAX];
+ msg(STASIS_MSG_WARN | STASIS_MSG_L2, "No docker tags defined by configuration. Generating default tag(s).\n");
+ // generate local tag
+ memset(default_tag, 0, sizeof(default_tag));
+ sprintf(default_tag, "%s:%s-py%s", ctx->meta.name, ctx->info.build_name, ctx->meta.python_compact);
+ tolower_s(default_tag);
+
+ // Add tag
+ ctx->deploy.docker.tags = strlist_init();
+ strlist_append(&ctx->deploy.docker.tags, default_tag);
+
+ if (has_registry) {
+ // generate tag for target registry
+ memset(default_tag, 0, sizeof(default_tag));
+ sprintf(default_tag, "%s/%s:%s-py%s", ctx->deploy.docker.registry, ctx->meta.name, ctx->info.build_number, ctx->meta.python_compact);
+ tolower_s(default_tag);
+
+ // Add tag
+ strlist_append(&ctx->deploy.docker.tags, default_tag);
+ }
+ // regenerate total tag available
+ total_tags = strlist_count(ctx->deploy.docker.tags);
+ }
+
+ memset(args, 0, sizeof(args));
+
+ // Append image tags to command
+ // NOTE(review): args grows via unchecked sprintf appends -- many long
+ // tags/build-args could overflow PATH_MAX; confirm limits upstream
+ for (size_t i = 0; i < total_tags; i++) {
+ char *tag_orig = strlist_item(ctx->deploy.docker.tags, i);
+ strcpy(tag, tag_orig);
+ docker_sanitize_tag(tag);
+ sprintf(args + strlen(args), " -t \"%s\" ", tag);
+ }
+
+ // Append build arguments to command (i.e. --build-arg "key=value"
+ for (size_t i = 0; i < total_build_args; i++) {
+ char *build_arg = strlist_item(ctx->deploy.docker.build_args, i);
+ if (!build_arg) {
+ break;
+ }
+ sprintf(args + strlen(args), " --build-arg \"%s\" ", build_arg);
+ }
+
+ // Build the image
+ char delivery_file[PATH_MAX];
+ char dest[PATH_MAX];
+ char rsync_cmd[PATH_MAX * 2];
+ memset(delivery_file, 0, sizeof(delivery_file));
+ memset(dest, 0, sizeof(dest));
+
+ // The rendered delivery file must exist before the image can be staged
+ sprintf(delivery_file, "%s/%s.yml", ctx->storage.delivery_dir, ctx->info.release_name);
+ if (access(delivery_file, F_OK) < 0) {
+ fprintf(stderr, "docker build cannot proceed without delivery file: %s\n", delivery_file);
+ return -1;
+ }
+
+ sprintf(dest, "%s/%s.yml", ctx->storage.build_docker_dir, ctx->info.release_name);
+ if (copy2(delivery_file, dest, CT_PERM)) {
+ fprintf(stderr, "Failed to copy delivery file to %s: %s\n", dest, strerror(errno));
+ return -1;
+ }
+
+ // Stage package artifacts into <build_docker_dir>/packages
+ memset(dest, 0, sizeof(dest));
+ sprintf(dest, "%s/packages", ctx->storage.build_docker_dir);
+
+ msg(STASIS_MSG_L2, "Copying conda packages\n");
+ memset(rsync_cmd, 0, sizeof(rsync_cmd));
+ sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.conda_artifact_dir, dest);
+ if (system(rsync_cmd)) {
+ fprintf(stderr, "Failed to copy conda artifacts to docker build directory\n");
+ return -1;
+ }
+
+ msg(STASIS_MSG_L2, "Copying wheel packages\n");
+ memset(rsync_cmd, 0, sizeof(rsync_cmd));
+ sprintf(rsync_cmd, "rsync -avi --progress '%s' '%s'", ctx->storage.wheel_artifact_dir, dest);
+ // NOTE(review): unlike the conda copy above, a wheel copy failure does
+ // not abort -- confirm whether that asymmetry is intentional
+ if (system(rsync_cmd)) {
+ fprintf(stderr, "Failed to copy wheel artifactory to docker build directory\n");
+ }
+
+ if (docker_build(ctx->storage.build_docker_dir, args, ctx->deploy.docker.capabilities.build)) {
+ return -1;
+ }
+
+ // Test the image
+ // All tags point back to the same image so test the first one we see
+ // regardless of how many are defined
+ strcpy(tag, strlist_item(ctx->deploy.docker.tags, 0));
+ docker_sanitize_tag(tag);
+
+ msg(STASIS_MSG_L2, "Executing image test script for %s\n", tag);
+ if (ctx->deploy.docker.test_script) {
+ if (isempty(ctx->deploy.docker.test_script)) {
+ msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "Image test script has no content\n");
+ } else {
+ int state;
+ if ((state = docker_script(tag, ctx->deploy.docker.test_script, 0))) {
+ // state >> 8 extracts the script's exit status from the wait code
+ msg(STASIS_MSG_L2 | STASIS_MSG_ERROR, "Non-zero exit (%d) from test script. %s image archive will not be generated.\n", state >> 8, tag);
+ // test failed -- don't save the image
+ return -1;
+ }
+ }
+ } else {
+ msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "No image test script defined\n");
+ }
+
+ // Test successful, save image
+ if (docker_save(path_basename(tag), ctx->storage.docker_artifact_dir, ctx->deploy.docker.image_compression)) {
+ // save failed
+ return -1;
+ }
+
+ return 0;
+}
+
diff --git a/src/lib/core/delivery_init.c b/src/lib/core/delivery_init.c
new file mode 100644
index 0000000..e914f99
--- /dev/null
+++ b/src/lib/core/delivery_init.c
@@ -0,0 +1,345 @@
+#include "delivery.h"
+
+// Report whether the filesystem backing mount_point was mounted with any of
+// the given statvfs f_flag bits set.
+// Returns 1 when at least one flag matches, 0 when none do, and -1 when
+// statvfs() itself fails.
+int has_mount_flags(const char *mount_point, const unsigned long flags) {
+    struct statvfs info;
+    if (statvfs(mount_point, &info) != 0) {
+        SYSERROR("Unable to determine mount-point flags: %s", strerror(errno));
+        return -1;
+    }
+    return (info.f_flag & flags) ? 1 : 0;
+}
+
+// Validate (and create when necessary) the temporary storage directory.
+// A set $TMPDIR overrides ctx->storage.tmpdir. The directory must be
+// creatable, rwx-accessible, and mounted without noexec (Linux) or
+// read-only flags. On success globals.tmpdir and ctx->storage.tmpdir are
+// populated. Returns 0 when usable, 1 when unusable, -1 on memory error.
+int delivery_init_tmpdir(struct Delivery *ctx) {
+    char *tmpdir = NULL;
+    char *x = NULL;
+    int unusable = 0;
+    int allocated = 0;  // non-zero when tmpdir owns heap memory (strdup below)
+    errno = 0;
+
+    x = getenv("TMPDIR");
+    if (x) {
+        guard_free(ctx->storage.tmpdir);
+        tmpdir = strdup(x);
+        allocated = 1;
+    } else {
+        tmpdir = ctx->storage.tmpdir;
+    }
+
+    if (!tmpdir) {
+        // memory error
+        return -1;
+    }
+
+    // If the directory doesn't exist, create it
+    if (access(tmpdir, F_OK) < 0) {
+        if (mkdirs(tmpdir, 0755) < 0) {
+            msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Unable to create temporary storage directory: %s (%s)\n", tmpdir, strerror(errno));
+            goto l_delivery_init_tmpdir_fatal;
+        }
+    }
+
+    // If we can't read, write, or execute, then die
+    if (access(tmpdir, R_OK | W_OK | X_OK) < 0) {
+        // BUG FIX: the tmpdir argument for "%s" was missing from this call
+        msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s requires at least 0755 permissions.\n", tmpdir);
+        goto l_delivery_init_tmpdir_fatal;
+    }
+
+    struct statvfs st;
+    if (statvfs(tmpdir, &st) < 0) {
+        goto l_delivery_init_tmpdir_fatal;
+    }
+
+#if defined(STASIS_OS_LINUX)
+    // If we can't execute programs, or write data to the file system at all, then die
+    if ((st.f_flag & ST_NOEXEC) != 0) {
+        msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted with noexec\n", tmpdir);
+        goto l_delivery_init_tmpdir_fatal;
+    }
+#endif
+    if ((st.f_flag & ST_RDONLY) != 0) {
+        msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "%s is mounted read-only\n", tmpdir);
+        goto l_delivery_init_tmpdir_fatal;
+    }
+
+    if (!globals.tmpdir) {
+        globals.tmpdir = strdup(tmpdir);
+    }
+
+    if (!ctx->storage.tmpdir) {
+        ctx->storage.tmpdir = strdup(globals.tmpdir);
+    }
+    if (allocated) {
+        // BUG FIX: the strdup()'d $TMPDIR copy was previously leaked
+        guard_free(tmpdir);
+    }
+    return unusable;
+
+    l_delivery_init_tmpdir_fatal:
+    unusable = 1;
+    if (allocated) {
+        guard_free(tmpdir);
+    }
+    return unusable;
+}
+
+// Establish the second-stage directory layout beneath the build and output
+// roots created by delivery_init_dirs_stage1().
+// NOTE: package_dir is assigned before the artifact dirs below, which are
+// derived from it -- keep the assignment ordering intact.
+void delivery_init_dirs_stage2(struct Delivery *ctx) {
+ path_store(&ctx->storage.build_recipes_dir, PATH_MAX, ctx->storage.build_dir, "recipes");
+ path_store(&ctx->storage.build_sources_dir, PATH_MAX, ctx->storage.build_dir, "sources");
+ path_store(&ctx->storage.build_testing_dir, PATH_MAX, ctx->storage.build_dir, "testing");
+ path_store(&ctx->storage.build_docker_dir, PATH_MAX, ctx->storage.build_dir, "docker");
+
+ path_store(&ctx->storage.delivery_dir, PATH_MAX, ctx->storage.output_dir, "delivery");
+ path_store(&ctx->storage.results_dir, PATH_MAX, ctx->storage.output_dir, "results");
+ path_store(&ctx->storage.package_dir, PATH_MAX, ctx->storage.output_dir, "packages");
+ path_store(&ctx->storage.cfgdump_dir, PATH_MAX, ctx->storage.output_dir, "config");
+ path_store(&ctx->storage.meta_dir, PATH_MAX, ctx->storage.output_dir, "meta");
+
+ // Per-format artifact directories live under the package dir set above
+ path_store(&ctx->storage.conda_artifact_dir, PATH_MAX, ctx->storage.package_dir, "conda");
+ path_store(&ctx->storage.wheel_artifact_dir, PATH_MAX, ctx->storage.package_dir, "wheels");
+ path_store(&ctx->storage.docker_artifact_dir, PATH_MAX, ctx->storage.package_dir, "docker");
+}
+
+// Establish the top-level STASIS directory layout (stage 1).
+// The root is $STASIS_ROOT/<build_name> when set, otherwise
+// ./stasis/<build_name>. Also validates the tmpdir and the mission
+// directory, and resolves the conda installation prefix.
+// Exits the process on any fatal error.
+void delivery_init_dirs_stage1(struct Delivery *ctx) {
+    char *rootdir = getenv("STASIS_ROOT");
+    if (rootdir) {
+        if (isempty(rootdir)) {
+            fprintf(stderr, "STASIS_ROOT is set, but empty. Please assign a file system path to this environment variable.\n");
+            exit(1);
+        }
+        path_store(&ctx->storage.root, PATH_MAX, rootdir, ctx->info.build_name);
+    } else {
+        // use "stasis" in current working directory
+        path_store(&ctx->storage.root, PATH_MAX, "stasis", ctx->info.build_name);
+    }
+    path_store(&ctx->storage.tools_dir, PATH_MAX, ctx->storage.root, "tools");
+    path_store(&ctx->storage.tmpdir, PATH_MAX, ctx->storage.root, "tmp");
+    if (delivery_init_tmpdir(ctx)) {
+        msg(STASIS_MSG_ERROR | STASIS_MSG_L1, "Set $TMPDIR to a location other than %s\n", globals.tmpdir);
+        if (globals.tmpdir)
+            guard_free(globals.tmpdir);
+        exit(1);
+    }
+
+    path_store(&ctx->storage.build_dir, PATH_MAX, ctx->storage.root, "build");
+    path_store(&ctx->storage.output_dir, PATH_MAX, ctx->storage.root, "output");
+
+    if (!ctx->storage.mission_dir) {
+        path_store(&ctx->storage.mission_dir, PATH_MAX, globals.sysconfdir, "mission");
+    }
+
+    if (access(ctx->storage.mission_dir, F_OK)) {
+        msg(STASIS_MSG_L1, "%s: %s\n", ctx->storage.mission_dir, strerror(errno));
+        exit(1);
+    }
+
+    // Override installation prefix using global configuration key
+    // (dead commented-out mkdirs/realpath experiments removed)
+    if (globals.conda_install_prefix && strlen(globals.conda_install_prefix)) {
+        // user wants a specific path; never wipe a user-supplied prefix
+        globals.conda_fresh_start = false;
+        path_store(&ctx->storage.conda_install_prefix, PATH_MAX, globals.conda_install_prefix, "conda");
+    } else {
+        // install conda under the STASIS tree
+        path_store(&ctx->storage.conda_install_prefix, PATH_MAX, ctx->storage.tools_dir, "conda");
+    }
+}
+
+// Detect the host architecture and operating system, populate
+// ctx->system.arch / ctx->system.platform[], export the STASIS_* platform
+// environment variables, and register the platform template variables.
+// Returns 0 on success, -1 on uname() or allocation failure.
+int delivery_init_platform(struct Delivery *ctx) {
+    msg(STASIS_MSG_L2, "Setting architecture\n");
+    struct utsname uts;
+    // BUG FIX: archsuffix was a fixed 20 bytes; size it to hold any
+    // machine string uname() can produce so strcpy below cannot overflow
+    char archsuffix[sizeof(uts.machine)] = {0};
+    if (uname(&uts)) {
+        msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "uname() failed: %s\n", strerror(errno));
+        return -1;
+    }
+
+    ctx->system.platform = calloc(DELIVERY_PLATFORM_MAX + 1, sizeof(*ctx->system.platform));
+    if (!ctx->system.platform) {
+        SYSERROR("Unable to allocate %d records for platform array\n", DELIVERY_PLATFORM_MAX);
+        return -1;
+    }
+    for (size_t i = 0; i < DELIVERY_PLATFORM_MAX; i++) {
+        ctx->system.platform[i] = calloc(DELIVERY_PLATFORM_MAXLEN, sizeof(*ctx->system.platform[0]));
+        // BUG FIX: a failed element allocation was previously ignored and
+        // would crash in the strcpy/sprintf calls below
+        if (!ctx->system.platform[i]) {
+            SYSERROR("Unable to allocate platform record %zu\n", i);
+            return -1;
+        }
+    }
+
+    ctx->system.arch = strdup(uts.machine);
+    if (!ctx->system.arch) {
+        // memory error
+        return -1;
+    }
+
+    // Conda names the x86_64 platform subdir suffix "64"; other
+    // architectures keep their uname() name
+    if (!strcmp(ctx->system.arch, "x86_64")) {
+        strcpy(archsuffix, "64");
+    } else {
+        strcpy(archsuffix, ctx->system.arch);
+    }
+
+    msg(STASIS_MSG_L2, "Setting platform\n");
+    strcpy(ctx->system.platform[DELIVERY_PLATFORM], uts.sysname);
+    if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Darwin")) {
+        sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "osx-%s", archsuffix);
+        strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "MacOSX");
+        strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "macos");
+    } else if (!strcmp(ctx->system.platform[DELIVERY_PLATFORM], "Linux")) {
+        sprintf(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], "linux-%s", archsuffix);
+        strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], "Linux");
+        strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], "linux");
+    } else {
+        // Not explicitly supported systems
+        strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], ctx->system.platform[DELIVERY_PLATFORM]);
+        strcpy(ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], ctx->system.platform[DELIVERY_PLATFORM]);
+        strcpy(ctx->system.platform[DELIVERY_PLATFORM_RELEASE], ctx->system.platform[DELIVERY_PLATFORM]);
+        tolower_s(ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
+    }
+
+    long cpu_count = get_cpu_count();
+    if (!cpu_count) {
+        fprintf(stderr, "Unable to determine CPU count. Falling back to 1.\n");
+        cpu_count = 1;
+    }
+    char ncpus[100] = {0};
+    sprintf(ncpus, "%ld", cpu_count);
+
+    // Declare some important bits as environment variables
+    setenv("CPU_COUNT", ncpus, 1);
+    setenv("STASIS_CPU_COUNT", ncpus, 1);
+    setenv("STASIS_ARCH", ctx->system.arch, 1);
+    setenv("STASIS_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM], 1);
+    setenv("STASIS_CONDA_ARCH", ctx->system.arch, 1);
+    setenv("STASIS_CONDA_PLATFORM", ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER], 1);
+    setenv("STASIS_CONDA_PLATFORM_SUBDIR", ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR], 1);
+
+    // Register template variables
+    // These were moved out of main() because we can't take the address of system.platform[x]
+    // _before_ the array has been initialized.
+    tpl_register("system.arch", &ctx->system.arch);
+    tpl_register("system.platform", &ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
+
+    return 0;
+}
+
+// Initialize the delivery context: populate metadata and configuration,
+// apply STASIS_JF_* environment overrides, detect the platform, build the
+// directory tree, and prepare the process environment (XDG dirs, PATH,
+// GIT_PAGER). Returns 0 on success, -1 on failure.
+int delivery_init(struct Delivery *ctx, int render_mode) {
+    populate_info(ctx);
+    populate_delivery_cfg(ctx, INI_READ_RENDER);
+
+    // Set artifactory URL via environment variable if possible
+    char *jfurl = getenv("STASIS_JF_ARTIFACTORY_URL");
+    if (jfurl) {
+        if (globals.jfrog.url) {
+            guard_free(globals.jfrog.url);
+        }
+        globals.jfrog.url = strdup(jfurl);
+    }
+
+    // Set artifactory repository via environment if possible
+    char *jfrepo = getenv("STASIS_JF_REPO");
+    if (jfrepo) {
+        if (globals.jfrog.repo) {
+            guard_free(globals.jfrog.repo);
+        }
+        globals.jfrog.repo = strdup(jfrepo);
+    }
+
+    // Configure architecture and platform information
+    // BUG FIX: a platform-detection failure was previously ignored
+    if (delivery_init_platform(ctx)) {
+        return -1;
+    }
+
+    // Create STASIS directory structure
+    delivery_init_dirs_stage1(ctx);
+
+    char config_local[PATH_MAX];
+    sprintf(config_local, "%s/%s", ctx->storage.tmpdir, "config");
+    setenv("XDG_CONFIG_HOME", config_local, 1);
+
+    char cache_local[PATH_MAX];
+    sprintf(cache_local, "%s/%s", ctx->storage.tmpdir, "cache");
+    // BUG FIX: XDG_CACHE_HOME was set to the tmpdir itself, leaving
+    // cache_local unused
+    setenv("XDG_CACHE_HOME", cache_local, 1);
+
+    // add tools to PATH
+    char pathvar_tmp[STASIS_BUFSIZ];
+    const char *current_path = getenv("PATH");
+    // Guard against an unset PATH so sprintf never sees a NULL "%s"
+    sprintf(pathvar_tmp, "%s/bin:%s", ctx->storage.tools_dir, current_path ? current_path : "");
+    setenv("PATH", pathvar_tmp, 1);
+
+    // Prevent git from paginating output
+    setenv("GIT_PAGER", "", 1);
+
+    populate_delivery_ini(ctx, render_mode);
+
+    // Docker tags must be lowercase
+    if (ctx->deploy.docker.tags) {
+        for (size_t i = 0; i < strlist_count(ctx->deploy.docker.tags); i++) {
+            char *item = strlist_item(ctx->deploy.docker.tags, i);
+            tolower_s(item);
+        }
+    }
+
+    if (ctx->deploy.docker.image_compression) {
+        if (docker_validate_compression_program(ctx->deploy.docker.image_compression)) {
+            SYSERROR("[deploy:docker].image_compression - invalid command / program is not installed: %s", ctx->deploy.docker.image_compression);
+            return -1;
+        }
+    }
+    return 0;
+}
+
+// Bootstrap build metadata (build name/number, release name, timestamps)
+// by running the population routines on a throwaway local context and
+// copying the results into ctx. Returns 0 on success, -1 on allocation
+// failure.
+int bootstrap_build_info(struct Delivery *ctx) {
+ struct Delivery local;
+ memset(&local, 0, sizeof(local));
+ // Re-open the same config/delivery INI files on the scratch context
+ local._stasis_ini_fp.cfg = ini_open(ctx->_stasis_ini_fp.cfg_path);
+ local._stasis_ini_fp.delivery = ini_open(ctx->_stasis_ini_fp.delivery_path);
+ delivery_init_platform(&local);
+ populate_delivery_cfg(&local, INI_READ_RENDER);
+ populate_delivery_ini(&local, INI_READ_RENDER);
+ populate_info(&local);
+ // Copy the computed identifiers into the caller's context; local is
+ // freed below, so everything is duplicated
+ ctx->info.build_name = strdup(local.info.build_name);
+ ctx->info.build_number = strdup(local.info.build_number);
+ ctx->info.release_name = strdup(local.info.release_name);
+ ctx->info.time_info = malloc(sizeof(*ctx->info.time_info));
+ if (!ctx->info.time_info) {
+ SYSERROR("Unable to allocate %zu bytes for tm struct: %s", sizeof(*local.info.time_info), strerror(errno));
+ return -1;
+ }
+ memcpy(ctx->info.time_info, local.info.time_info, sizeof(*local.info.time_info));
+ ctx->info.time_now = local.info.time_now;
+ ctx->info.time_str_epoch = strdup(local.info.time_str_epoch);
+ delivery_free(&local);
+ return 0;
+}
+
+// Determine whether a delivery matching "*<release_name>*" already exists,
+// either in Artifactory (when enabled) or among local delivery files.
+// Returns 1 when found and overwriting is disabled, 0 when not found (or
+// overwrite is allowed), -1 on authentication failure.
+int delivery_exists(struct Delivery *ctx) {
+ int release_exists = 0;
+ char release_pattern[PATH_MAX] = {0};
+ sprintf(release_pattern, "*%s*", ctx->info.release_name);
+
+ if (globals.enable_artifactory) {
+ if (jfrt_auth_init(&ctx->deploy.jfrog_auth)) {
+ fprintf(stderr, "Failed to initialize Artifactory authentication context\n");
+ return -1; // error
+ }
+
+ struct JFRT_Search search = {.fail_no_op = true};
+ release_exists = jfrog_cli_rt_search(&ctx->deploy.jfrog_auth, &search, globals.jfrog.repo, release_pattern);
+ // With fail_no_op, exit code 2 means the search produced no results;
+ // exit code 0 means matches were found
+ if (release_exists != 2) {
+ if (!globals.enable_overwrite && !release_exists) {
+ // --fail_no_op returns 2 on failure
+ // without: it returns an empty list "[]" and exit code 0
+ return 1; // found
+ }
+ }
+ } else {
+ struct StrList *files = listdir(ctx->storage.delivery_dir);
+ for (size_t i = 0; i < strlist_count(files); i++) {
+ char *filename = strlist_item(files, i);
+ // fnmatch() returns 0 on a match, so release_exists == 0 here
+ // means a matching delivery file is present
+ release_exists = fnmatch(release_pattern, filename, FNM_PATHNAME);
+ if (!globals.enable_overwrite && !release_exists) {
+ guard_strlist_free(&files);
+ return 1; // found
+ }
+ }
+ guard_strlist_free(&files);
+ }
+ return 0; // not found
+}
diff --git a/src/lib/core/delivery_install.c b/src/lib/core/delivery_install.c
new file mode 100644
index 0000000..76c3f4a
--- /dev/null
+++ b/src/lib/core/delivery_install.c
@@ -0,0 +1,224 @@
+#include "delivery.h"
+
+// Look up the test block registered for a given package name.
+// Performs an exact name comparison against the fixed-size test table and
+// returns a pointer into ctx->tests, or NULL when no block matches.
+static struct Test *requirement_from_test(struct Delivery *ctx, const char *name) {
+    const size_t total = sizeof(ctx->tests) / sizeof(ctx->tests[0]);
+    for (size_t idx = 0; idx < total; idx++) {
+        struct Test *candidate = &ctx->tests[idx];
+        if (candidate->name && strcmp(candidate->name, name) == 0) {
+            return candidate;
+        }
+    }
+    // No test block is associated with this package name
+    return NULL;
+}
+
+// Search the delivery's pip package list for a spec whose package name
+// matches "name".  Returns a pointer into the list (not a copy) or NULL.
+// NOTE(review): the strncmp() below compares only strlen(package) bytes, so
+// a config entry "numpy" would also match a caller name "numpy2" — confirm
+// whether an exact comparison is intended.
+static char *have_spec_in_config(struct Delivery *ctx, const char *name) {
+    for (size_t x = 0; x < strlist_count(ctx->conda.pip_packages); x++) {
+        char *config_spec = strlist_item(ctx->conda.pip_packages, x);
+        // Split "package==1.2.3"-style specs at the version operator, if any
+        char *op = find_version_spec(config_spec);
+        char package[255] = {0};
+        if (op) {
+            strncpy(package, config_spec, op - config_spec);
+        } else {
+            strncpy(package, config_spec, sizeof(package) - 1);
+        }
+        if (strncmp(package, name, strlen(package)) == 0) {
+            return config_spec;
+        }
+    }
+    return NULL;
+}
+
+// Rebuild ctx->conda.pip_packages by overlaying the package set actually
+// installed in "env_name" (via `pip freeze`):
+//   1. config specs with no test block go first (third-party dependencies)
+//   2. tested packages follow, preferring an explicit config spec over the
+//      version frozen in the environment
+// Returns 0 on success, -1 when the freeze command fails.
+int delivery_overlay_packages_from_env(struct Delivery *ctx, const char *env_name) {
+    char *current_env = conda_get_active_environment();
+    // Only switch back later if a different environment was active on entry
+    int need_restore = current_env && strcmp(env_name, current_env) != 0;
+
+    conda_activate(ctx->storage.conda_install_prefix, env_name);
+    // Retrieve a listing of python packages installed under "env_name"
+    int freeze_status = 0;
+    char *freeze_output = shell_output("python -m pip freeze", &freeze_status);
+    if (freeze_status) {
+        guard_free(freeze_output);
+        guard_free(current_env);
+        return -1;
+    }
+
+    if (need_restore) {
+        // Restore the original conda environment
+        conda_activate(ctx->storage.conda_install_prefix, current_env);
+    }
+    guard_free(current_env);
+
+    // One frozen "name==version" spec per line
+    struct StrList *frozen_list = strlist_init();
+    strlist_append_tokenize(frozen_list, freeze_output, LINE_SEP);
+    guard_free(freeze_output);
+
+    struct StrList *new_list = strlist_init();
+
+    // - consume package specs that have no test blocks.
+    // - these will be third-party packages like numpy, scipy, etc.
+    // - and they need to be present at the head of the list so they
+    //   get installed first.
+    for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) {
+        char *spec = strlist_item(ctx->conda.pip_packages, i);
+        char spec_name[255] = {0};
+        // Isolate the bare package name from any version operator
+        char *op = find_version_spec(spec);
+        if (op) {
+            strncpy(spec_name, spec, op - spec);
+        } else {
+            strncpy(spec_name, spec, sizeof(spec_name) - 1);
+        }
+        struct Test *test_block = requirement_from_test(ctx, spec_name);
+        if (!test_block) {
+            msg(STASIS_MSG_L2 | STASIS_MSG_WARN, "from config without test: %s\n", spec);
+            strlist_append(&new_list, spec);
+        }
+    }
+
+    // now consume packages that have a test block
+    // if the ini provides a spec, override the environment's version.
+    // otherwise, use the spec derived from the environment
+    for (size_t i = 0; i < strlist_count(frozen_list); i++) {
+        char *frozen_spec = strlist_item(frozen_list, i);
+        char frozen_name[255] = {0};
+        char *op = find_version_spec(frozen_spec);
+        // we only care about packages with specs here. if something else arrives, ignore it
+        if (op) {
+            strncpy(frozen_name, frozen_spec, op - frozen_spec);
+        } else {
+            strncpy(frozen_name, frozen_spec, sizeof(frozen_name) - 1);
+        }
+        struct Test *test = requirement_from_test(ctx, frozen_name);
+        if (test && strcmp(test->name, frozen_name) == 0) {
+            char *config_spec = have_spec_in_config(ctx, frozen_name);
+            if (config_spec) {
+                msg(STASIS_MSG_L2, "from config: %s\n", config_spec);
+                strlist_append(&new_list, config_spec);
+            } else {
+                msg(STASIS_MSG_L2, "from environment: %s\n", frozen_spec);
+                strlist_append(&new_list, frozen_spec);
+            }
+        }
+    }
+
+    // Replace the package manifest as needed
+    if (strlist_count(new_list)) {
+        guard_strlist_free(&ctx->conda.pip_packages);
+        ctx->conda.pip_packages = strlist_copy(new_list);
+    }
+    guard_strlist_free(&new_list);
+    guard_strlist_free(&frozen_list);
+    return 0;
+}
+
+// Install packages into "env_name" with conda or pip depending on "type"
+// (INSTALL_PKG_* flags).  "manifest" is a NULL-terminated array of package
+// lists; each list is installed with a single runner invocation.
+// Returns 0 on success, the runner's exit status on failure, or -1 for an
+// invalid type or an unresolved deferred package.
+int delivery_install_packages(struct Delivery *ctx, char *conda_install_dir, char *env_name, int type, struct StrList **manifest) {
+    char cmd[PATH_MAX];
+    char *env_current = getenv("CONDA_DEFAULT_ENV");
+
+    if (env_current) {
+        // The requested environment is not the current environment
+        if (strcmp(env_current, env_name) != 0) {
+            // Activate the requested environment
+            printf("Activating: %s\n", env_name);
+            conda_activate(conda_install_dir, env_name);
+            runtime_replace(&ctx->runtime.environ, __environ);
+        }
+    }
+
+    memset(cmd, 0, sizeof(cmd));
+    strcat(cmd, "install");
+
+    // Select the package manager front-end based on the install type
+    typedef int (*Runner)(const char *);
+    Runner runner = NULL;
+    if (INSTALL_PKG_CONDA & type) {
+        runner = conda_exec;
+    } else if (INSTALL_PKG_PIP & type) {
+        runner = pip_exec;
+    }
+    if (!runner) {
+        // BUGFIX: an unrecognized type previously dereferenced a NULL
+        // function pointer at the runner(cmd) call below
+        fprintf(stderr, "Invalid package installation type: %d\n", type);
+        return -1;
+    }
+
+    if (INSTALL_PKG_CONDA_DEFERRED & type) {
+        strcat(cmd, " --use-local");
+    } else if (INSTALL_PKG_PIP_DEFERRED & type) {
+        // Don't change the baseline package set unless we're working with a
+        // new build. Release candidates will need to keep packages as stable
+        // as possible between releases.
+        if (!ctx->meta.based_on) {
+            strcat(cmd, " --upgrade");
+        }
+        sprintf(cmd + strlen(cmd), " --extra-index-url 'file://%s'", ctx->storage.wheel_artifact_dir);
+    }
+
+    for (size_t x = 0; manifest[x] != NULL; x++) {
+        char *name = NULL;
+        for (size_t p = 0; p < strlist_count(manifest[x]); p++) {
+            name = strlist_item(manifest[x], p);
+            strip(name);
+            if (!strlen(name)) {
+                continue;
+            }
+            if (INSTALL_PKG_PIP_DEFERRED & type) {
+                // Deferred pip packages must have a matching test block; the
+                // version recorded there pins the wheel built earlier
+                struct Test *info = requirement_from_test(ctx, name);
+                if (!info) {
+                    fprintf(stderr, "Deferred package '%s' is not present in the tested package list!\n", name);
+                    return -1;
+                }
+                if (!strcmp(info->version, "HEAD")) {
+                    // "HEAD" builds have no fixed version; recover it from the
+                    // wheel file produced by the build, using the repository
+                    // tag's post-commit count and hash as match hints
+                    struct StrList *tag_data = strlist_init();
+                    if (!tag_data) {
+                        SYSERROR("%s", "Unable to allocate memory for tag data\n");
+                        return -1;
+                    }
+                    strlist_append_tokenize(tag_data, info->repository_info_tag, "-");
+
+                    struct Wheel *whl = NULL;
+                    char *post_commit = NULL;
+                    char *hash = NULL;
+                    if (strlist_count(tag_data) > 1) {
+                        post_commit = strlist_item(tag_data, 1);
+                        hash = strlist_item(tag_data, 2);
+                    }
+
+                    // We can't match on version here (index 0). The wheel's version is not guaranteed to be
+                    // equal to the tag; setuptools_scm auto-increments the value, the user can change it manually,
+                    // etc.
+                    errno = 0;
+                    whl = get_wheel_info(ctx->storage.wheel_artifact_dir, info->name,
+                                         (char *[]) {ctx->meta.python_compact, ctx->system.arch,
+                                                     "none", "any",
+                                                     post_commit, hash,
+                                                     NULL}, WHEEL_MATCH_ANY);
+                    if (!whl && errno) {
+                        // error
+                        SYSERROR("Unable to read Python wheel info: %s\n", strerror(errno));
+                        exit(1);
+                    } else if (!whl) {
+                        // not found
+                        fprintf(stderr, "No wheel packages found that match the description of '%s'", info->name);
+                    } else {
+                        // found
+                        // NOTE(review): the previous info->version pointer is
+                        // intentionally not freed here; confirm ownership
+                        // before adding a free() (the INI layer may retain it)
+                        info->version = strdup(whl->version);
+                    }
+                    // BUGFIX: tag_data was previously leaked on the
+                    // "not found" path (it was only freed when a wheel matched)
+                    guard_strlist_free(&tag_data);
+                    wheel_free(&whl);
+                }
+                // BUGFIX: the size expression previously mixed the name and
+                // version lengths into the bound, which could exceed the
+                // space actually remaining in cmd
+                snprintf(cmd + strlen(cmd),
+                         sizeof(cmd) - strlen(cmd),
+                         " '%s==%s'", info->name, info->version);
+            } else {
+                // Pass bare options (-x / --long-opt) through unquoted
+                if (startswith(name, "--") || startswith(name, "-")) {
+                    sprintf(cmd + strlen(cmd), " %s", name);
+                } else {
+                    sprintf(cmd + strlen(cmd), " '%s'", name);
+                }
+            }
+        }
+        int status = runner(cmd);
+        if (status) {
+            return status;
+        }
+    }
+    return 0;
+}
+
diff --git a/src/lib/core/delivery_populate.c b/src/lib/core/delivery_populate.c
new file mode 100644
index 0000000..b37f677
--- /dev/null
+++ b/src/lib/core/delivery_populate.c
@@ -0,0 +1,348 @@
+#include "delivery.h"
+
+// Hard-exit when a mandatory key is absent from the given INI section.
+// Used by validate_delivery_ini() to enforce required configuration.
+static void ini_has_key_required(struct INIFILE *ini, const char *section_name, char *key) {
+    if (!ini_has_key(ini, section_name, key)) {
+        SYSERROR("%s:%s key is required but not defined", section_name, key);
+        exit(1);
+    }
+}
+
+// Replace *x with the template-rendered form of an INI string value.
+// Any previous value is released first; *x becomes NULL when the value is
+// unset or when template rendering yields nothing.
+static void conv_str(char **x, union INIVal val) {
+    if (*x) {
+        guard_free(*x);
+    }
+    *x = NULL;
+    if (val.as_char_p) {
+        // tpl_render() returns a fresh heap string, or NULL on failure
+        *x = tpl_render(val.as_char_p);
+    }
+}
+
+
+
+// Record the delivery's creation timestamp.  Idempotent: does nothing once
+// time_str_epoch has been set.  Returns 0 on success, -1 on allocation failure.
+int populate_info(struct Delivery *ctx) {
+    if (!ctx->info.time_str_epoch) {
+        // Record timestamp used for release
+        time(&ctx->info.time_now);
+        // NOTE(review): localtime() returns a pointer to static storage;
+        // confirm ctx->info.time_info is never free()d and is copied by any
+        // long-lived consumer (delivery_exists' caller memcpy()s it).
+        ctx->info.time_info = localtime(&ctx->info.time_now);
+
+        ctx->info.time_str_epoch = calloc(STASIS_TIME_STR_MAX, sizeof(*ctx->info.time_str_epoch));
+        if (!ctx->info.time_str_epoch) {
+            msg(STASIS_MSG_ERROR, "Unable to allocate memory for Unix epoch string\n");
+            return -1;
+        }
+        // Unix epoch seconds rendered as a decimal string
+        snprintf(ctx->info.time_str_epoch, STASIS_TIME_STR_MAX - 1, "%li", ctx->info.time_now);
+    }
+    return 0;
+}
+
+// Pull site-wide settings from the STASIS configuration file into the
+// delivery context and the global state.  Returns -1 when no configuration
+// file was opened, otherwise 0.
+// NOTE(review): "err" accumulates lookup failures but is never inspected;
+// missing keys silently yield defaults — confirm this is intentional.
+int populate_delivery_cfg(struct Delivery *ctx, int render_mode) {
+    struct INIFILE *cfg = ctx->_stasis_ini_fp.cfg;
+    if (!cfg) {
+        return -1;
+    }
+    int err = 0;
+    // Staging locations for conda channels and wheel indexes
+    ctx->storage.conda_staging_dir = ini_getval_str(cfg, "default", "conda_staging_dir", render_mode, &err);
+    ctx->storage.conda_staging_url = ini_getval_str(cfg, "default", "conda_staging_url", render_mode, &err);
+    ctx->storage.wheel_staging_dir = ini_getval_str(cfg, "default", "wheel_staging_dir", render_mode, &err);
+    ctx->storage.wheel_staging_url = ini_getval_str(cfg, "default", "wheel_staging_url", render_mode, &err);
+    globals.conda_fresh_start = ini_getval_bool(cfg, "default", "conda_fresh_start", render_mode, &err);
+    // Command-line flags take precedence over the configuration file
+    if (!globals.continue_on_error) {
+        globals.continue_on_error = ini_getval_bool(cfg, "default", "continue_on_error", render_mode, &err);
+    }
+    if (!globals.always_update_base_environment) {
+        globals.always_update_base_environment = ini_getval_bool(cfg, "default", "always_update_base_environment", render_mode, &err);
+    }
+    globals.conda_install_prefix = ini_getval_str(cfg, "default", "conda_install_prefix", render_mode, &err);
+    globals.conda_packages = ini_getval_strlist(cfg, "default", "conda_packages", LINE_SEP, render_mode, &err);
+    globals.pip_packages = ini_getval_strlist(cfg, "default", "pip_packages", LINE_SEP, render_mode, &err);
+
+    // jfrog CLI download source and Artifactory deployment target
+    globals.jfrog.jfrog_artifactory_base_url = ini_getval_str(cfg, "jfrog_cli_download", "url", render_mode, &err);
+    globals.jfrog.jfrog_artifactory_product = ini_getval_str(cfg, "jfrog_cli_download", "product", render_mode, &err);
+    globals.jfrog.cli_major_ver = ini_getval_str(cfg, "jfrog_cli_download", "version_series", render_mode, &err);
+    globals.jfrog.version = ini_getval_str(cfg, "jfrog_cli_download", "version", render_mode, &err);
+    globals.jfrog.remote_filename = ini_getval_str(cfg, "jfrog_cli_download", "filename", render_mode, &err);
+    globals.jfrog.url = ini_getval_str(cfg, "deploy:artifactory", "url", render_mode, &err);
+    globals.jfrog.repo = ini_getval_str(cfg, "deploy:artifactory", "repo", render_mode, &err);
+
+    return 0;
+}
+
+// Populate the delivery context from the user's delivery INI file.
+// Order matters: runtime variables are applied first so later keys can be
+// template-rendered against them; then [meta], [conda], the mission rules,
+// release/build identity strings, and finally the test:* and deploy:*
+// sections.  Returns 0 on success, 1 for a malformed test section name,
+// -1 when the release name cannot be formatted.
+int populate_delivery_ini(struct Delivery *ctx, int render_mode) {
+    union INIVal val;
+    struct INIFILE *ini = ctx->_stasis_ini_fp.delivery;
+    struct INIData *rtdata;
+    RuntimeEnv *rt;
+
+    validate_delivery_ini(ini);
+    // Populate runtime variables first they may be interpreted by other
+    // keys in the configuration
+    rt = runtime_copy(__environ);
+    while ((rtdata = ini_getall(ini, "runtime")) != NULL) {
+        // NOTE(review): "rec" is formatted but never used afterwards —
+        // looks like dead code left over from an earlier runtime_set() API
+        char rec[STASIS_BUFSIZ];
+        sprintf(rec, "%s=%s", lstrip(strip(rtdata->key)), lstrip(strip(rtdata->value)));
+        runtime_set(rt, rtdata->key, rtdata->value);
+    }
+    runtime_apply(rt);
+    ctx->runtime.environ = rt;
+
+    int err = 0;
+    ctx->meta.mission = ini_getval_str(ini, "meta", "mission", render_mode, &err);
+
+    // Only the HST mission uses release codenames
+    if (!strcasecmp(ctx->meta.mission, "hst")) {
+        ctx->meta.codename = ini_getval_str(ini, "meta", "codename", render_mode, &err);
+    } else {
+        ctx->meta.codename = NULL;
+    }
+
+    ctx->meta.version = ini_getval_str(ini, "meta", "version", render_mode, &err);
+    ctx->meta.name = ini_getval_str(ini, "meta", "name", render_mode, &err);
+    ctx->meta.rc = ini_getval_int(ini, "meta", "rc", render_mode, &err);
+    ctx->meta.final = ini_getval_bool(ini, "meta", "final", render_mode, &err);
+    ctx->meta.based_on = ini_getval_str(ini, "meta", "based_on", render_mode, &err);
+
+    // A python version supplied on the command line overrides the INI value
+    if (!ctx->meta.python) {
+        ctx->meta.python = ini_getval_str(ini, "meta", "python", render_mode, &err);
+        guard_free(ctx->meta.python_compact);
+        ctx->meta.python_compact = to_short_version(ctx->meta.python);
+    } else {
+        ini_setval(&ini, INI_SETVAL_REPLACE, "meta", "python", ctx->meta.python);
+    }
+
+    ctx->conda.installer_name = ini_getval_str(ini, "conda", "installer_name", render_mode, &err);
+    ctx->conda.installer_version = ini_getval_str(ini, "conda", "installer_version", render_mode, &err);
+    ctx->conda.installer_platform = ini_getval_str(ini, "conda", "installer_platform", render_mode, &err);
+    ctx->conda.installer_arch = ini_getval_str(ini, "conda", "installer_arch", render_mode, &err);
+    ctx->conda.installer_baseurl = ini_getval_str(ini, "conda", "installer_baseurl", render_mode, &err);
+    ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", " "LINE_SEP, render_mode, &err);
+
+    // Normalize space-separated package lists to one package per line, then
+    // write the normalized form back into the INI and re-read it
+    if (ctx->conda.conda_packages->data && ctx->conda.conda_packages->data[0] && strpbrk(ctx->conda.conda_packages->data[0], " \t")) {
+        normalize_space(ctx->conda.conda_packages->data[0]);
+        replace_text(ctx->conda.conda_packages->data[0], " ", LINE_SEP, 0);
+        char *pip_packages_replacement = join(ctx->conda.conda_packages->data, LINE_SEP);
+        ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "conda_packages", pip_packages_replacement);
+        guard_free(pip_packages_replacement);
+        guard_strlist_free(&ctx->conda.conda_packages);
+        ctx->conda.conda_packages = ini_getval_strlist(ini, "conda", "conda_packages", LINE_SEP, render_mode, &err);
+    }
+
+    // Drop comment lines and empty entries from the package list.
+    // NOTE(review): strlist_remove() during forward iteration skips the
+    // element that shifts into slot i — two consecutive comment lines would
+    // leave the second one in place; confirm against strlist_remove().
+    for (size_t i = 0; i < strlist_count(ctx->conda.conda_packages); i++) {
+        char *pkg = strlist_item(ctx->conda.conda_packages, i);
+        if (strpbrk(pkg, ";#") || isempty(pkg)) {
+            strlist_remove(ctx->conda.conda_packages, i);
+        }
+    }
+
+    ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err);
+    // Same normalization pass for the pip package list
+    if (ctx->conda.pip_packages->data && ctx->conda.pip_packages->data[0] && strpbrk(ctx->conda.pip_packages->data[0], " \t")) {
+        normalize_space(ctx->conda.pip_packages->data[0]);
+        replace_text(ctx->conda.pip_packages->data[0], " ", LINE_SEP, 0);
+        char *pip_packages_replacement = join(ctx->conda.pip_packages->data, LINE_SEP);
+        ini_setval(&ini, INI_SETVAL_REPLACE, "conda", "pip_packages", pip_packages_replacement);
+        guard_free(pip_packages_replacement);
+        guard_strlist_free(&ctx->conda.pip_packages);
+        ctx->conda.pip_packages = ini_getval_strlist(ini, "conda", "pip_packages", LINE_SEP, render_mode, &err);
+    }
+
+    for (size_t i = 0; i < strlist_count(ctx->conda.pip_packages); i++) {
+        char *pkg = strlist_item(ctx->conda.pip_packages, i);
+        if (strpbrk(pkg, ";#") || isempty(pkg)) {
+            strlist_remove(ctx->conda.pip_packages, i);
+        }
+    }
+
+    // Delivery metadata consumed
+    populate_mission_ini(&ctx, render_mode);
+
+    // Re-running: discard identity strings so they are rebuilt below
+    if (ctx->info.release_name) {
+        guard_free(ctx->info.release_name);
+        guard_free(ctx->info.build_name);
+        guard_free(ctx->info.build_number);
+    }
+
+    if (delivery_format_str(ctx, &ctx->info.release_name, ctx->rules.release_fmt)) {
+        fprintf(stderr, "Failed to generate release name. Format used: %s\n", ctx->rules.release_fmt);
+        return -1;
+    }
+
+    if (!ctx->info.build_name) {
+        delivery_format_str(ctx, &ctx->info.build_name, ctx->rules.build_name_fmt);
+    }
+    if (!ctx->info.build_number) {
+        delivery_format_str(ctx, &ctx->info.build_number, ctx->rules.build_number_fmt);
+    }
+
+    // Best I can do to make output directories unique. Annoying.
+    delivery_init_dirs_stage2(ctx);
+
+    if (!ctx->conda.conda_packages_defer) {
+        ctx->conda.conda_packages_defer = strlist_init();
+    }
+    if (!ctx->conda.pip_packages_defer) {
+        ctx->conda.pip_packages_defer = strlist_init();
+    }
+
+    // One ctx->tests slot per [test:NAME] section, in file order
+    for (size_t z = 0, i = 0; i < ini->section_count; i++) {
+        char *section_name = ini->section[i]->key;
+        if (startswith(section_name, "test:")) {
+            struct Test *test = &ctx->tests[z];
+            // The test's name is everything after "test:"
+            val.as_char_p = strchr(ini->section[i]->key, ':') + 1;
+            if (val.as_char_p && isempty(val.as_char_p)) {
+                return 1;
+            }
+            conv_str(&test->name, val);
+
+            test->version = ini_getval_str(ini, section_name, "version", render_mode, &err);
+            test->repository = ini_getval_str(ini, section_name, "repository", render_mode, &err);
+            // Scripts are read raw; they are rendered later at execution time
+            test->script_setup = ini_getval_str(ini, section_name, "script_setup", INI_READ_RAW, &err);
+            test->script = ini_getval_str(ini, section_name, "script", INI_READ_RAW, &err);
+            test->disable = ini_getval_bool(ini, section_name, "disable", render_mode, &err);
+            test->parallel = ini_getval_bool(ini, section_name, "parallel", render_mode, &err);
+            if (err) {
+                // "parallel" is optional and defaults to true
+                test->parallel = true;
+            }
+            test->repository_remove_tags = ini_getval_strlist(ini, section_name, "repository_remove_tags", LINE_SEP, render_mode, &err);
+            test->build_recipe = ini_getval_str(ini, section_name, "build_recipe", render_mode, &err);
+            test->runtime.environ = ini_getval_strlist(ini, section_name, "runtime", LINE_SEP, render_mode, &err);
+            z++;
+        }
+    }
+
+    // One ctx->deploy.jfrog slot per [deploy:artifactory*] section
+    for (size_t z = 0, i = 0; i < ini->section_count; i++) {
+        char *section_name = ini->section[i]->key;
+        struct Deploy *deploy = &ctx->deploy;
+        if (startswith(section_name, "deploy:artifactory")) {
+            struct JFrog *jfrog = &deploy->jfrog[z];
+            // Artifactory base configuration
+
+            jfrog->upload_ctx.workaround_parent_only = ini_getval_bool(ini, section_name, "workaround_parent_only", render_mode, &err);
+            jfrog->upload_ctx.exclusions = ini_getval_str(ini, section_name, "exclusions", render_mode, &err);
+            jfrog->upload_ctx.explode = ini_getval_bool(ini, section_name, "explode", render_mode, &err);
+            jfrog->upload_ctx.recursive = ini_getval_bool(ini, section_name, "recursive", render_mode, &err);
+            jfrog->upload_ctx.retries = ini_getval_int(ini, section_name, "retries", render_mode, &err);
+            jfrog->upload_ctx.retry_wait_time = ini_getval_int(ini, section_name, "retry_wait_time", render_mode, &err);
+            jfrog->upload_ctx.detailed_summary = ini_getval_bool(ini, section_name, "detailed_summary", render_mode, &err);
+            jfrog->upload_ctx.quiet = ini_getval_bool(ini, section_name, "quiet", render_mode, &err);
+            jfrog->upload_ctx.regexp = ini_getval_bool(ini, section_name, "regexp", render_mode, &err);
+            jfrog->upload_ctx.spec = ini_getval_str(ini, section_name, "spec", render_mode, &err);
+            jfrog->upload_ctx.flat = ini_getval_bool(ini, section_name, "flat", render_mode, &err);
+            jfrog->repo = ini_getval_str(ini, section_name, "repo", render_mode, &err);
+            jfrog->dest = ini_getval_str(ini, section_name, "dest", render_mode, &err);
+            jfrog->files = ini_getval_strlist(ini, section_name, "files", LINE_SEP, render_mode, &err);
+            z++;
+        }
+    }
+
+    // Single docker deployment target (last matching section wins)
+    for (size_t i = 0; i < ini->section_count; i++) {
+        char *section_name = ini->section[i]->key;
+        struct Deploy *deploy = &ctx->deploy;
+        if (startswith(ini->section[i]->key, "deploy:docker")) {
+            struct Docker *docker = &deploy->docker;
+
+            docker->registry = ini_getval_str(ini, section_name, "registry", render_mode, &err);
+            docker->image_compression = ini_getval_str(ini, section_name, "image_compression", render_mode, &err);
+            docker->test_script = ini_getval_str(ini, section_name, "test_script", render_mode, &err);
+            docker->build_args = ini_getval_strlist(ini, section_name, "build_args", LINE_SEP, render_mode, &err);
+            docker->tags = ini_getval_strlist(ini, section_name, "tags", LINE_SEP, render_mode, &err);
+        }
+    }
+    return 0;
+}
+
+// Load the mission-level rule file ($sysconfdir/mission/<mission>/<mission>.ini)
+// and populate the delivery's release/build format rules from its [meta]
+// section.  Idempotent: does nothing once the mission INI is already open.
+// A missing or unreadable mission file is fatal.  Returns 0.
+int populate_mission_ini(struct Delivery **ctx, int render_mode) {
+    int err = 0;
+    struct INIFILE *ini;
+
+    if ((*ctx)->_stasis_ini_fp.mission) {
+        return 0;
+    }
+
+    // Now populate the rules.
+    // Prefer the runtime override, then fall back to the configured sysconfdir.
+    // BUGFIX: the path was previously built with unbounded sprintf() using a
+    // duplicated format string; env-controlled input could overflow the buffer.
+    const char *sysconfdir = getenv("STASIS_SYSCONFDIR");
+    if (!sysconfdir) {
+        sysconfdir = globals.sysconfdir;
+    }
+    char missionfile[PATH_MAX] = {0};
+    snprintf(missionfile, sizeof(missionfile) - 1, "%s/%s/%s/%s.ini",
+             sysconfdir, "mission", (*ctx)->meta.mission, (*ctx)->meta.mission);
+
+    msg(STASIS_MSG_L2, "Reading mission configuration: %s\n", missionfile);
+    (*ctx)->_stasis_ini_fp.mission = ini_open(missionfile);
+    ini = (*ctx)->_stasis_ini_fp.mission;
+    if (!ini) {
+        msg(STASIS_MSG_ERROR | STASIS_MSG_L2, "Failed to read mission configuration: %s, %s\n", missionfile, strerror(errno));
+        exit(1);
+    }
+    (*ctx)->_stasis_ini_fp.mission_path = strdup(missionfile);
+
+    (*ctx)->rules.release_fmt = ini_getval_str(ini, "meta", "release_fmt", render_mode, &err);
+
+    // Used for setting artifactory build info
+    (*ctx)->rules.build_name_fmt = ini_getval_str(ini, "meta", "build_name_fmt", render_mode, &err);
+
+    // Used for setting artifactory build info
+    (*ctx)->rules.build_number_fmt = ini_getval_str(ini, "meta", "build_number_fmt", render_mode, &err);
+    return 0;
+}
+
+// Validate the required structure of a delivery INI before it is consumed.
+// Missing required sections or keys are fatal: this function exits the
+// process via ini_has_key_required() / SYSERROR().
+void validate_delivery_ini(struct INIFILE *ini) {
+    if (!ini) {
+        SYSERROR("%s", "INIFILE is NULL!");
+        exit(1);
+    }
+    // [meta] is mandatory and must fully identify the delivery
+    if (ini_section_search(&ini, INI_SEARCH_EXACT, "meta")) {
+        ini_has_key_required(ini, "meta", "name");
+        ini_has_key_required(ini, "meta", "version");
+        ini_has_key_required(ini, "meta", "rc");
+        ini_has_key_required(ini, "meta", "mission");
+        ini_has_key_required(ini, "meta", "python");
+    } else {
+        SYSERROR("%s", "[meta] configuration section is required");
+        exit(1);
+    }
+
+    // [conda] is mandatory and must fully describe the installer
+    if (ini_section_search(&ini, INI_SEARCH_EXACT, "conda")) {
+        ini_has_key_required(ini, "conda", "installer_name");
+        ini_has_key_required(ini, "conda", "installer_version");
+        ini_has_key_required(ini, "conda", "installer_platform");
+        ini_has_key_required(ini, "conda", "installer_arch");
+    } else {
+        SYSERROR("%s", "[conda] configuration section is required");
+        exit(1);
+    }
+
+    // Each [test:*] section only needs a script when testing is enabled.
+    // (A dead local that extracted the test name here was removed.)
+    for (size_t i = 0; i < ini->section_count; i++) {
+        struct INISection *section = ini->section[i];
+        if (section && startswith(section->key, "test:")) {
+            //ini_has_key_required(ini, section->key, "version");
+            //ini_has_key_required(ini, section->key, "repository");
+            if (globals.enable_testing) {
+                ini_has_key_required(ini, section->key, "script");
+            }
+        }
+    }
+
+    // [deploy:docker] currently has no required keys
+    if (ini_section_search(&ini, INI_SEARCH_EXACT, "deploy:docker")) {
+        // yeah?
+    }
+
+    // Every artifactory deployment target must define its payload and target
+    for (size_t i = 0; i < ini->section_count; i++) {
+        struct INISection *section = ini->section[i];
+        if (section && startswith(section->key, "deploy:artifactory")) {
+            ini_has_key_required(ini, section->key, "files");
+            ini_has_key_required(ini, section->key, "dest");
+        }
+    }
+}
+
diff --git a/src/lib/core/delivery_postprocess.c b/src/lib/core/delivery_postprocess.c
new file mode 100644
index 0000000..1a902e3
--- /dev/null
+++ b/src/lib/core/delivery_postprocess.c
@@ -0,0 +1,266 @@
+#include "delivery.h"
+
+
+const char *release_header = "# delivery_name: %s\n"
+ "# delivery_fmt: %s\n"
+ "# creation_time: %s\n"
+ "# conda_ident: %s\n"
+ "# conda_build_ident: %s\n";
+
+// Render the release_header template with this delivery's identity and
+// creation time.  Returns a heap-allocated string owned by the caller.
+char *delivery_get_release_header(struct Delivery *ctx) {
+    char output[STASIS_BUFSIZ];
+    char stamp[100];
+    // Human-readable creation time (locale-dependent "%c" format)
+    strftime(stamp, sizeof(stamp) - 1, "%c", ctx->info.time_info);
+    sprintf(output, release_header,
+            ctx->info.release_name,
+            ctx->rules.release_fmt,
+            stamp,
+            ctx->conda.tool_version,
+            ctx->conda.tool_build_version);
+    return strdup(output);
+}
+
+// Write a "meta-<release>.stasis" key/value manifest describing the delivery
+// into the metadata storage directory.  Returns 0 on success, -1 when the
+// file cannot be created.
+int delivery_dump_metadata(struct Delivery *ctx) {
+    FILE *fp;
+    char filename[PATH_MAX];
+    sprintf(filename, "%s/meta-%s.stasis", ctx->storage.meta_dir, ctx->info.release_name);
+    fp = fopen(filename, "w+");
+    if (!fp) {
+        return -1;
+    }
+    if (globals.verbose) {
+        printf("%s\n", filename);
+    }
+    // One "key value" record per line; consumed by the indexer
+    fprintf(fp, "name %s\n", ctx->meta.name);
+    fprintf(fp, "version %s\n", ctx->meta.version);
+    fprintf(fp, "rc %d\n", ctx->meta.rc);
+    fprintf(fp, "python %s\n", ctx->meta.python);
+    fprintf(fp, "python_compact %s\n", ctx->meta.python_compact);
+    fprintf(fp, "mission %s\n", ctx->meta.mission);
+    // codename is optional (HST-only); emit an empty value when unset
+    fprintf(fp, "codename %s\n", ctx->meta.codename ? ctx->meta.codename : "");
+    fprintf(fp, "platform %s %s %s %s\n",
+            ctx->system.platform[DELIVERY_PLATFORM],
+            ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR],
+            ctx->system.platform[DELIVERY_PLATFORM_CONDA_INSTALLER],
+            ctx->system.platform[DELIVERY_PLATFORM_RELEASE]);
+    fprintf(fp, "arch %s\n", ctx->system.arch);
+    fprintf(fp, "time %s\n", ctx->info.time_str_epoch);
+    fprintf(fp, "release_fmt %s\n", ctx->rules.release_fmt);
+    fprintf(fp, "release_name %s\n", ctx->info.release_name);
+    fprintf(fp, "build_name_fmt %s\n", ctx->rules.build_name_fmt);
+    fprintf(fp, "build_name %s\n", ctx->info.build_name);
+    fprintf(fp, "build_number_fmt %s\n", ctx->rules.build_number_fmt);
+    fprintf(fp, "build_number %s\n", ctx->info.build_number);
+    fprintf(fp, "conda_installer_baseurl %s\n", ctx->conda.installer_baseurl);
+    fprintf(fp, "conda_installer_name %s\n", ctx->conda.installer_name);
+    fprintf(fp, "conda_installer_version %s\n", ctx->conda.installer_version);
+    fprintf(fp, "conda_installer_platform %s\n", ctx->conda.installer_platform);
+    fprintf(fp, "conda_installer_arch %s\n", ctx->conda.installer_arch);
+
+    fclose(fp);
+    return 0;
+}
+
+// Rewrite a rendered conda environment spec in two stages:
+//   stage 1: prepend the release header, add @CONDA_CHANNEL@/@PIP_ARGUMENTS@
+//            placeholder markers, and strip the machine-specific "prefix:" key
+//   stage 2: replace the placeholder markers with the final staging URLs
+// Fatal errors terminate the process.
+void delivery_rewrite_spec(struct Delivery *ctx, char *filename, unsigned stage) {
+    char output[PATH_MAX];
+    char *header = NULL;
+    char *tempfile = NULL;
+    FILE *tp = NULL;
+
+    if (stage == DELIVERY_REWRITE_SPEC_STAGE_1) {
+        header = delivery_get_release_header(ctx);
+        if (!header) {
+            // BUGFIX: format string previously had no conversion for filename
+            msg(STASIS_MSG_ERROR, "%s: failed to generate release header string\n", filename);
+            exit(1);
+        }
+        tempfile = xmkstemp(&tp, "w+");
+        if (!tempfile || !tp) {
+            msg(STASIS_MSG_ERROR, "%s: unable to create temporary file\n", strerror(errno));
+            exit(1);
+        }
+        fprintf(tp, "%s", header);
+
+        // Read the original file
+        char **contents = file_readlines(filename, 0, 0, NULL);
+        if (!contents) {
+            // BUGFIX: previous format string had two "%s" conversions but
+            // only one argument (undefined behavior)
+            msg(STASIS_MSG_ERROR, "unable to read %s\n", filename);
+            exit(1);
+        }
+
+        // Write temporary data
+        for (size_t i = 0; contents[i] != NULL; i++) {
+            if (startswith(contents[i], "channels:")) {
+                // Allow for additional conda channel injection
+                if (ctx->conda.conda_packages_defer && strlist_count(ctx->conda.conda_packages_defer)) {
+                    fprintf(tp, "%s  - @CONDA_CHANNEL@\n", contents[i]);
+                    continue;
+                }
+            } else if (strstr(contents[i], "- pip:")) {
+                if (ctx->conda.pip_packages_defer && strlist_count(ctx->conda.pip_packages_defer)) {
+                    // Allow for additional pip argument injection
+                    fprintf(tp, "%s      - @PIP_ARGUMENTS@\n", contents[i]);
+                    continue;
+                }
+            } else if (startswith(contents[i], "prefix:")) {
+                // Remove the prefix key
+                if (strstr(contents[i], "/") || strstr(contents[i], "\\")) {
+                    // path is on the same line as the key
+                    continue;
+                } else {
+                    // path is on the next line?
+                    if (contents[i + 1] && (strstr(contents[i + 1], "/") || strstr(contents[i + 1], "\\"))) {
+                        i++;
+                    }
+                    continue;
+                }
+            }
+            fprintf(tp, "%s", contents[i]);
+        }
+        GENERIC_ARRAY_FREE(contents);
+        guard_free(header);
+        fflush(tp);
+        fclose(tp);
+
+        // Replace the original file with our temporary data
+        if (copy2(tempfile, filename, CT_PERM) < 0) {
+            // BUGFIX: message previously said "rename" although copy2() is used
+            fprintf(stderr, "%s: could not copy '%s' to '%s'\n", strerror(errno), tempfile, filename);
+            exit(1);
+        }
+        remove(tempfile);
+        guard_free(tempfile);
+    } else if (globals.enable_rewrite_spec_stage_2 && stage == DELIVERY_REWRITE_SPEC_STAGE_2) {
+        // Replace "local" channel with the staging URL
+        if (ctx->storage.conda_staging_url) {
+            file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_staging_url, 0);
+        } else if (globals.jfrog.repo) {
+            sprintf(output, "%s/%s/%s/%s/packages/conda", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name);
+            file_replace_text(filename, "@CONDA_CHANNEL@", output, 0);
+        } else {
+            msg(STASIS_MSG_WARN, "conda_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.conda_artifact_dir);
+            file_replace_text(filename, "@CONDA_CHANNEL@", ctx->storage.conda_artifact_dir, 0);
+        }
+
+        if (ctx->storage.wheel_staging_url) {
+            file_replace_text(filename, "@PIP_ARGUMENTS@", ctx->storage.wheel_staging_url, 0);
+        } else if (globals.enable_artifactory && globals.jfrog.url && globals.jfrog.repo) {
+            sprintf(output, "--extra-index-url %s/%s/%s/%s/packages/wheels", globals.jfrog.url, globals.jfrog.repo, ctx->meta.mission, ctx->info.build_name);
+            file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0);
+        } else {
+            msg(STASIS_MSG_WARN, "wheel_staging_dir is not configured. Using fallback: '%s'\n", ctx->storage.wheel_artifact_dir);
+            sprintf(output, "--extra-index-url file://%s", ctx->storage.wheel_artifact_dir);
+            file_replace_text(filename, "@PIP_ARGUMENTS@", output, 0);
+        }
+    }
+}
+
+// Copy conda-build output for the current platform subdir into the delivery's
+// conda artifact directory via rsync.  Returns rsync's exit status, or 0 when
+// conda build has never produced anything to copy.
+// (An unused local buffer "subdir" was removed.)
+int delivery_copy_conda_artifacts(struct Delivery *ctx) {
+    char cmd[STASIS_BUFSIZ] = {0};
+    char conda_build_dir[PATH_MAX] = {0};
+
+    snprintf(conda_build_dir, sizeof(conda_build_dir) - 1, "%s/%s", ctx->storage.conda_install_prefix, "conda-bld");
+    // One must run conda build at least once to create the "conda-bld" directory.
+    // When this directory is missing there can be no build artifacts.
+    if (access(conda_build_dir, F_OK) < 0) {
+        msg(STASIS_MSG_RESTRICT | STASIS_MSG_WARN | STASIS_MSG_L3,
+            "Skipped: 'conda build' has never been executed.\n");
+        return 0;
+    }
+
+    snprintf(cmd, sizeof(cmd) - 1, "rsync -avi --progress %s/%s %s",
+             conda_build_dir,
+             ctx->system.platform[DELIVERY_PLATFORM_CONDA_SUBDIR],
+             ctx->storage.conda_artifact_dir);
+
+    return system(cmd);
+}
+
+// Build/update the conda channel index over the local artifact directory.
+// Returns conda_index()'s status (0 on success).
+int delivery_index_conda_artifacts(struct Delivery *ctx) {
+    return conda_index(ctx->storage.conda_artifact_dir);
+}
+
+// Mirror freshly built wheel files (<source>/*/dist/*.whl) into the
+// delivery's wheel artifact directory.  Returns rsync's exit status.
+int delivery_copy_wheel_artifacts(struct Delivery *ctx) {
+    char rsync_cmd[PATH_MAX] = {0};
+    snprintf(rsync_cmd, sizeof(rsync_cmd) - 1, "rsync -avi --progress %s/*/dist/*.whl %s",
+             ctx->storage.build_sources_dir,
+             ctx->storage.wheel_artifact_dir);
+    return system(rsync_cmd);
+}
+
+// Generate a "dumb" static pypi index (top-level index.html plus one
+// index.html per package directory) over the local wheel artifact tree,
+// compatible with "pip install --extra-index-url".
+// Returns 0 on success, or a negative value identifying the failure point.
+int delivery_index_wheel_artifacts(struct Delivery *ctx) {
+    struct dirent *rec;
+    DIR *dp;
+    FILE *top_fp;
+
+    dp = opendir(ctx->storage.wheel_artifact_dir);
+    if (!dp) {
+        return -1;
+    }
+
+    // Generate a "dumb" local pypi index that is compatible with:
+    // pip install --extra-index-url
+    char top_index[PATH_MAX];
+    memset(top_index, 0, sizeof(top_index));
+    sprintf(top_index, "%s/index.html", ctx->storage.wheel_artifact_dir);
+    top_fp = fopen(top_index, "w+");
+    if (!top_fp) {
+        closedir(dp);
+        return -2;
+    }
+
+    while ((rec = readdir(dp)) != NULL) {
+        // Only descend into per-package directories; skip regular files and
+        // the "." / ".." entries
+        if (DT_REG == rec->d_type || !strcmp(rec->d_name, "..") || !strcmp(rec->d_name, ".")) {
+            continue;
+        }
+
+        FILE *bottom_fp;
+        char bottom_index[PATH_MAX * 2];
+        memset(bottom_index, 0, sizeof(bottom_index));
+        sprintf(bottom_index, "%s/%s/index.html", ctx->storage.wheel_artifact_dir, rec->d_name);
+        bottom_fp = fopen(bottom_index, "w+");
+        if (!bottom_fp) {
+            // BUGFIX: top_fp was previously leaked on this error path
+            fclose(top_fp);
+            closedir(dp);
+            return -3;
+        }
+
+        if (globals.verbose) {
+            printf("+ %s\n", rec->d_name);
+        }
+        // Add record to top level index
+        fprintf(top_fp, "<a href=\"%s/\">%s</a><br/>\n", rec->d_name, rec->d_name);
+
+        char dpath[PATH_MAX * 2];
+        memset(dpath, 0, sizeof(dpath));
+        sprintf(dpath, "%s/%s", ctx->storage.wheel_artifact_dir, rec->d_name);
+        struct StrList *packages = listdir(dpath);
+        if (!packages) {
+            closedir(dp);
+            fclose(top_fp);
+            fclose(bottom_fp);
+            return -4;
+        }
+
+        for (size_t i = 0; i < strlist_count(packages); i++) {
+            char *package = strlist_item(packages, i);
+            if (!endswith(package, ".whl")) {
+                continue;
+            }
+            if (globals.verbose) {
+                printf("`- %s\n", package);
+            }
+            // Write record to bottom level index
+            fprintf(bottom_fp, "<a href=\"%s\">%s</a><br/>\n", package, package);
+        }
+        fclose(bottom_fp);
+
+        guard_strlist_free(&packages);
+    }
+    closedir(dp);
+    fclose(top_fp);
+    return 0;
+}
diff --git a/src/lib/core/delivery_show.c b/src/lib/core/delivery_show.c
new file mode 100644
index 0000000..adfa1be
--- /dev/null
+++ b/src/lib/core/delivery_show.c
@@ -0,0 +1,117 @@
+#include "delivery.h"
+
+// Print a short label/value table of debug-relevant global configuration.
+void delivery_debug_show(struct Delivery *ctx) {
+    const struct { const char *label; const char *value; } rows[] = {
+        {"System configuration directory:", globals.sysconfdir},
+        {"Mission directory:", ctx->storage.mission_dir},
+        {"Testing enabled:", globals.enable_testing ? "Yes" : "No"},
+        {"Docker image builds enabled:", globals.enable_docker ? "Yes" : "No"},
+        {"Artifact uploading enabled:", globals.enable_artifactory ? "Yes" : "No"},
+    };
+
+    printf("\n====DEBUG====\n");
+    for (size_t i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
+        printf("%-20s %-10s\n", rows[i].label, rows[i].value);
+    }
+}
+
+// Print the delivery metadata summary (name, mission, version, RC level,
+// ancestry). In verbose mode the debug configuration table is emitted first.
+void delivery_meta_show(struct Delivery *ctx) {
+    if (globals.verbose) {
+        delivery_debug_show(ctx);
+    }
+
+    printf("\n====DELIVERY====\n");
+    printf("%-20s %-10s\n", "Target Python:", ctx->meta.python);
+    printf("%-20s %-10s\n", "Name:", ctx->meta.name);
+    printf("%-20s %-10s\n", "Mission:", ctx->meta.mission);
+    if (ctx->meta.codename) {
+        printf("%-20s %-10s\n", "Codename:", ctx->meta.codename);
+    }
+    if (ctx->meta.version) {
+        // BUG FIX: label was missing the trailing colon used by every other row
+        printf("%-20s %-10s\n", "Version:", ctx->meta.version);
+    }
+    if (!ctx->meta.final) {
+        // RC level only applies to non-final deliveries
+        printf("%-20s %-10d\n", "RC Level:", ctx->meta.rc);
+    }
+    printf("%-20s %-10s\n", "Final Release:", ctx->meta.final ? "Yes" : "No");
+    printf("%-20s %-10s\n", "Based On:", ctx->meta.based_on ? ctx->meta.based_on : "New");
+}
+
+// Print a sorted, merged package listing (primary + deferred), skipping empty
+// tokens and option-like entries beginning with "-". Prints "N/A" when both
+// lists are empty.
+static void show_package_set(struct StrList *primary, struct StrList *deferred) {
+    if (!strlist_count(primary) && !strlist_count(deferred)) {
+        printf("%21s%s\n", "", "N/A");
+        return;
+    }
+
+    struct StrList *merged = strlist_init();
+    if (strlist_count(primary)) {
+        strlist_append_strlist(merged, primary);
+    }
+    if (strlist_count(deferred)) {
+        strlist_append_strlist(merged, deferred);
+    }
+    strlist_sort(merged, STASIS_SORT_ALPHA);
+
+    for (size_t i = 0; i < strlist_count(merged); i++) {
+        char *token = strlist_item(merged, i);
+        if (isempty(token) || isblank(*token) || startswith(token, "-")) {
+            continue;
+        }
+        printf("%21s%s\n", "", token);
+    }
+    guard_strlist_free(&merged);
+}
+
+// Print the conda install prefix followed by the native (conda) and Python
+// (pip) package sets. The two sets previously used duplicated inline logic;
+// both now share show_package_set().
+void delivery_conda_show(struct Delivery *ctx) {
+    printf("\n====CONDA====\n");
+    printf("%-20s %-10s\n", "Prefix:", ctx->storage.conda_install_prefix);
+
+    puts("Native Packages:");
+    show_package_set(ctx->conda.conda_packages, ctx->conda.conda_packages_defer);
+
+    puts("Python Packages:");
+    show_package_set(ctx->conda.pip_packages, ctx->conda.pip_packages_defer);
+}
+
+// Print name, version, and repository for every populated test record.
+void delivery_tests_show(struct Delivery *ctx) {
+    const size_t limit = sizeof(ctx->tests) / sizeof(ctx->tests[0]);
+
+    printf("\n====TESTS====\n");
+    for (size_t i = 0; i < limit; i++) {
+        const struct Test *rec = &ctx->tests[i];
+        // Unused slots have no name; skip them
+        if (!rec->name) {
+            continue;
+        }
+        printf("%-20s %-20s %s\n", rec->name, rec->version, rec->repository);
+    }
+}
+
+// Print the delivery's runtime environment, sorted alphabetically.
+// Operates on a copy of ctx->runtime.environ so the original order is kept.
+void delivery_runtime_show(struct Delivery *ctx) {
+    printf("\n====RUNTIME====\n");
+    struct StrList *rt = strlist_copy(ctx->runtime.environ);
+    if (!rt) {
+        // no data
+        return;
+    }
+    strlist_sort(rt, STASIS_SORT_ALPHA);
+    size_t total = strlist_count(rt);
+    for (size_t i = 0; i < total; i++) {
+        char *item = strlist_item(rt, i);
+        if (!item) {
+            // not supposed to occur
+            // BUG FIX: format string has two %zu conversions but only "i" was
+            // passed; "total" was missing from the argument list
+            msg(STASIS_MSG_WARN | STASIS_MSG_L1, "Encountered unexpected NULL at record %zu of %zu of runtime array.\n", i, total);
+            // BUG FIX: the copy was leaked on this early return
+            guard_strlist_free(&rt);
+            return;
+        }
+        printf("%s\n", item);
+    }
+    // BUG FIX: the copy was leaked on the normal exit path as well
+    guard_strlist_free(&rt);
+}
+
diff --git a/src/lib/core/delivery_test.c b/src/lib/core/delivery_test.c
new file mode 100644
index 0000000..cb78f64
--- /dev/null
+++ b/src/lib/core/delivery_test.c
@@ -0,0 +1,295 @@
+#include "delivery.h"
+
+// Clone each test's repository, render its scripted tasks, queue them into
+// three multiprocessing pools (setup, parallel, serial), then execute the
+// pools in that order. Fatal problems terminate the program via exit(1) or
+// COE_CHECK_ABORT. Also installs the conda re-activation workaround string
+// consumed elsewhere via globals.workaround.conda_reactivate.
+void delivery_tests_run(struct Delivery *ctx) {
+ // Pool indices; pools are joined in SETUP, PARALLEL, SERIAL order below
+ static const int SETUP = 0;
+ static const int PARALLEL = 1;
+ static const int SERIAL = 2;
+ struct MultiProcessingPool *pool[3];
+ struct Process proc;
+ memset(&proc, 0, sizeof(proc));
+
+ // Allocate (or reset) the shared re-activation command buffer
+ if (!globals.workaround.conda_reactivate) {
+ globals.workaround.conda_reactivate = calloc(PATH_MAX, sizeof(*globals.workaround.conda_reactivate));
+ } else {
+ memset(globals.workaround.conda_reactivate, 0, PATH_MAX);
+ }
+ // Test blocks always run with xtrace enabled. Disable, and reenable it. Conda's wrappers produce an incredible
+ // amount of debug information.
+ snprintf(globals.workaround.conda_reactivate, PATH_MAX - 1, "\nset +x; mamba activate ${CONDA_DEFAULT_ENV}; set -x\n");
+
+ if (!ctx->tests[0].name) {
+ msg(STASIS_MSG_WARN | STASIS_MSG_L2, "no tests are defined!\n");
+ } else {
+ // Initialize the three pools; all share the same status interval
+ pool[PARALLEL] = mp_pool_init("parallel", ctx->storage.tmpdir);
+ if (!pool[PARALLEL]) {
+ perror("mp_pool_init/parallel");
+ exit(1);
+ }
+ pool[PARALLEL]->status_interval = globals.pool_status_interval;
+
+ pool[SERIAL] = mp_pool_init("serial", ctx->storage.tmpdir);
+ if (!pool[SERIAL]) {
+ perror("mp_pool_init/serial");
+ exit(1);
+ }
+ pool[SERIAL]->status_interval = globals.pool_status_interval;
+
+ pool[SETUP] = mp_pool_init("setup", ctx->storage.tmpdir);
+ if (!pool[SETUP]) {
+ perror("mp_pool_init/setup");
+ exit(1);
+ }
+ pool[SETUP]->status_interval = globals.pool_status_interval;
+
+ // Test block scripts shall exit non-zero on error.
+ // This will fail a test block immediately if "string" is not found in file.txt:
+ // grep string file.txt
+ //
+ // And this is how to avoid that scenario:
+ // #1:
+ // if ! grep string file.txt; then
+ // # handle error
+ // fi
+ //
+ // #2:
+ // grep string file.txt || handle error
+ //
+ // #3:
+ // # Use ':' as a NO-OP if/when the result doesn't matter
+ // grep string file.txt || :
+ const char *runner_cmd_fmt = "set -e -x\n%s\n";
+
+ // Iterate over our test records, retrieving the source code for each package, and assigning its scripted tasks
+ // to the appropriate processing pool
+ for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
+ struct Test *test = &ctx->tests[i];
+ if (!test->name && !test->repository && !test->script) {
+ // skip unused test records
+ continue;
+ }
+ msg(STASIS_MSG_L2, "Loading tests for %s %s\n", test->name, test->version);
+ if (!test->script || !strlen(test->script)) {
+ msg(STASIS_MSG_WARN | STASIS_MSG_L3, "Nothing to do. To fix, declare a 'script' in section: [test:%s]\n",
+ test->name);
+ continue;
+ }
+
+ // NOTE(review): unbounded sprintf into a PATH_MAX buffer; assumes
+ // build_sources_dir + repo basename stays under PATH_MAX — confirm
+ char destdir[PATH_MAX];
+ sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(test->repository));
+
+ // Always start from a fresh clone
+ if (!access(destdir, F_OK)) {
+ msg(STASIS_MSG_L3, "Purging repository %s\n", destdir);
+ if (rmtree(destdir)) {
+ COE_CHECK_ABORT(1, "Unable to remove repository\n");
+ }
+ }
+ msg(STASIS_MSG_L3, "Cloning repository %s\n", test->repository);
+ if (!git_clone(&proc, test->repository, destdir, test->version)) {
+ // Record the clone's describe/HEAD info for later reporting
+ test->repository_info_tag = strdup(git_describe(destdir));
+ test->repository_info_ref = strdup(git_rev_parse(destdir, "HEAD"));
+ } else {
+ COE_CHECK_ABORT(1, "Unable to clone repository\n");
+ }
+
+ if (test->repository_remove_tags && strlist_count(test->repository_remove_tags)) {
+ filter_repo_tags(destdir, test->repository_remove_tags);
+ }
+
+ if (pushd(destdir)) {
+ COE_CHECK_ABORT(1, "Unable to enter repository directory\n");
+ } else {
+ char *cmd = calloc(strlen(test->script) + STASIS_BUFSIZ, sizeof(*cmd));
+ if (!cmd) {
+ SYSERROR("Unable to allocate test script buffer: %s", strerror(errno));
+ exit(1);
+ }
+
+ msg(STASIS_MSG_L3, "Queuing task for %s\n", test->name);
+ memset(&proc, 0, sizeof(proc));
+
+ // Expand template strings in the test script
+ strcpy(cmd, test->script);
+ char *cmd_rendered = tpl_render(cmd);
+ if (cmd_rendered) {
+ if (strcmp(cmd_rendered, cmd) != 0) {
+ strcpy(cmd, cmd_rendered);
+ // NOTE(review): drops the final character of the rendered
+ // script — presumably stripping a trailing newline; confirm
+ cmd[strlen(cmd_rendered) ? strlen(cmd_rendered) - 1 : 0] = 0;
+ }
+ guard_free(cmd_rendered);
+ } else {
+ SYSERROR("An error occurred while rendering the following:\n%s", cmd);
+ exit(1);
+ }
+
+ if (test->disable) {
+ // NOTE(review): format string has no conversion for test->name;
+ // the extra argument is silently ignored
+ msg(STASIS_MSG_L2, "Script execution disabled by configuration\n", test->name);
+ guard_free(cmd);
+ continue;
+ }
+
+ // Route the task to the parallel pool unless parallelism is
+ // disabled globally or for this specific test
+ char *runner_cmd = NULL;
+ char pool_name[100] = "parallel";
+ struct MultiProcessingTask *task = NULL;
+ int selected = PARALLEL;
+ if (!globals.enable_parallel || !test->parallel) {
+ selected = SERIAL;
+ memset(pool_name, 0, sizeof(pool_name));
+ strcpy(pool_name, "serial");
+ }
+
+ if (asprintf(&runner_cmd, runner_cmd_fmt, cmd) < 0) {
+ SYSERROR("Unable to allocate memory for runner command: %s", strerror(errno));
+ exit(1);
+ }
+ task = mp_pool_task(pool[selected], test->name, destdir, runner_cmd);
+ if (!task) {
+ SYSERROR("Failed to add task to %s pool: %s", pool_name, runner_cmd);
+ popd();
+ if (!globals.continue_on_error) {
+ guard_free(runner_cmd);
+ tpl_free();
+ delivery_free(ctx);
+ globals_free();
+ }
+ exit(1);
+ }
+ guard_free(runner_cmd);
+ guard_free(cmd);
+ popd();
+
+ }
+ }
+
+ // Configure "script_setup" tasks
+ // Directories should exist now, so no need to go through initializing everything all over again.
+ for (size_t i = 0; i < sizeof(ctx->tests) / sizeof(ctx->tests[0]); i++) {
+ struct Test *test = &ctx->tests[i];
+ if (test->script_setup) {
+ char destdir[PATH_MAX];
+ sprintf(destdir, "%s/%s", ctx->storage.build_sources_dir, path_basename(test->repository));
+ if (access(destdir, F_OK)) {
+ SYSERROR("%s: %s", destdir, strerror(errno));
+ exit(1);
+ }
+ if (!pushd(destdir)) {
+ const size_t cmd_len = strlen(test->script_setup) + STASIS_BUFSIZ;
+ char *cmd = calloc(cmd_len, sizeof(*cmd));
+ if (!cmd) {
+ SYSERROR("Unable to allocate test script_setup buffer: %s", strerror(errno));
+ exit(1);
+ }
+
+ // Same render-then-truncate flow as the main script above
+ strncpy(cmd, test->script_setup, cmd_len - 1);
+ char *cmd_rendered = tpl_render(cmd);
+ if (cmd_rendered) {
+ if (strcmp(cmd_rendered, cmd) != 0) {
+ strncpy(cmd, cmd_rendered, cmd_len - 1);
+ cmd[strlen(cmd_rendered) ? strlen(cmd_rendered) - 1 : 0] = 0;
+ }
+ guard_free(cmd_rendered);
+ } else {
+ SYSERROR("An error occurred while rendering the following:\n%s", cmd);
+ exit(1);
+ }
+
+ struct MultiProcessingTask *task = NULL;
+ char *runner_cmd = NULL;
+ if (asprintf(&runner_cmd, runner_cmd_fmt, cmd) < 0) {
+ SYSERROR("Unable to allocate memory for runner command: %s", strerror(errno));
+ exit(1);
+ }
+
+ task = mp_pool_task(pool[SETUP], test->name, destdir, runner_cmd);
+ if (!task) {
+ SYSERROR("Failed to add task %s to setup pool: %s", test->name, runner_cmd);
+ popd();
+ if (!globals.continue_on_error) {
+ guard_free(runner_cmd);
+ tpl_free();
+ delivery_free(ctx);
+ globals_free();
+ }
+ exit(1);
+ }
+ guard_free(runner_cmd);
+ guard_free(cmd);
+ popd();
+ } else {
+ SYSERROR("Failed to change directory: %s\n", destdir);
+ exit(1);
+ }
+ }
+ }
+
+ size_t opt_flags = 0;
+ if (globals.parallel_fail_fast) {
+ opt_flags |= MP_POOL_FAIL_FAST;
+ }
+
+ // Execute all queued tasks
+ for (size_t p = 0; p < sizeof(pool) / sizeof(*pool); p++) {
+ int pool_status;
+ long jobs = globals.cpu_limit;
+
+ if (!pool[p]->num_used) {
+ // Skip empty pool
+ continue;
+ }
+
+ // Setup tasks run sequentially
+ if (p == (size_t) SETUP || p == (size_t) SERIAL) {
+ jobs = 1;
+ }
+
+ // Run tasks in the pool
+ // 1. Setup (builds)
+ // 2. Parallel (fast jobs)
+ // 3. Serial (long jobs)
+ pool_status = mp_pool_join(pool[p], jobs, opt_flags);
+
+ // On error show a summary of the current pool, and die
+ if (pool_status != 0) {
+ mp_pool_show_summary(pool[p]);
+ COE_CHECK_ABORT(true, "Task failure");
+ }
+ }
+
+ // All tasks were successful
+ for (size_t p = 0; p < sizeof(pool) / sizeof(*pool); p++) {
+ if (pool[p]->num_used) {
+ // Only show pools that actually had jobs to run
+ mp_pool_show_summary(pool[p]);
+ }
+ mp_pool_free(&pool[p]);
+ }
+ }
+}
+
+// Pretty-print every JUnit XML file found in the results directory, in place.
+// Non-XML entries and "."/".." are skipped. Individual rewrite failures are
+// reported as warnings and do not abort the scan.
+// Returns 0 on success; -1 if the results directory cannot be opened.
+int delivery_fixup_test_results(struct Delivery *ctx) {
+    DIR *dp = opendir(ctx->storage.results_dir);
+    if (!dp) {
+        perror(ctx->storage.results_dir);
+        return -1;
+    }
+
+    struct dirent *rec;
+    while ((rec = readdir(dp)) != NULL) {
+        if (!strcmp(rec->d_name, ".") || !strcmp(rec->d_name, "..") || !endswith(rec->d_name, ".xml")) {
+            continue;
+        }
+
+        char path[PATH_MAX];
+        memset(path, 0, sizeof(path));
+        // BUG FIX: was an unbounded sprintf; results_dir plus the entry name
+        // can exceed PATH_MAX
+        snprintf(path, sizeof(path) - 1, "%s/%s", ctx->storage.results_dir, rec->d_name);
+
+        msg(STASIS_MSG_L3, "%s\n", rec->d_name);
+        if (xml_pretty_print_in_place(path, STASIS_XML_PRETTY_PRINT_PROG, STASIS_XML_PRETTY_PRINT_ARGS)) {
+            msg(STASIS_MSG_L3 | STASIS_MSG_WARN, "Failed to rewrite file '%s'\n", rec->d_name);
+        }
+    }
+
+    closedir(dp);
+    return 0;
+}
+
diff --git a/src/docker.c b/src/lib/core/docker.c
index da7c1ce..5834ef9 100644
--- a/src/docker.c
+++ b/src/lib/core/docker.c
@@ -1,4 +1,3 @@
-#include "core.h"
#include "docker.h"
@@ -44,8 +43,9 @@ int docker_script(const char *image, char *data, unsigned flags) {
do {
memset(buffer, 0, sizeof(buffer));
- fgets(buffer, sizeof(buffer) - 1, infile);
- fputs(buffer, outfile);
+ if (fgets(buffer, sizeof(buffer) - 1, infile) != NULL) {
+ fputs(buffer, outfile);
+ }
} while (!feof(infile));
fclose(infile);
diff --git a/src/download.c b/src/lib/core/download.c
index f83adda..bfb323e 100644
--- a/src/download.c
+++ b/src/lib/core/download.c
@@ -2,8 +2,6 @@
// Created by jhunk on 10/5/23.
//
-#include <string.h>
-#include <stdlib.h>
#include "download.h"
size_t download_writer(void *fp, size_t size, size_t nmemb, void *stream) {
diff --git a/src/envctl.c b/src/lib/core/envctl.c
index 78dd760..9037d9d 100644
--- a/src/envctl.c
+++ b/src/lib/core/envctl.c
@@ -1,5 +1,4 @@
#include "envctl.h"
-#include "core.h"
struct EnvCtl *envctl_init() {
struct EnvCtl *result;
diff --git a/src/environment.c b/src/lib/core/environment.c
index 924fbf8..580062c 100644
--- a/src/environment.c
+++ b/src/lib/core/environment.c
@@ -305,7 +305,7 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) {
// Handle literal statement "$$var"
// Value becomes "$var" (unexpanded)
if (strncmp(&input[i], delim_literal, strlen(delim_literal)) == 0) {
- strncat(expanded, &delim, 1);
+ strncat(expanded, &delim, 2);
i += strlen(delim_literal);
// Ignore opening brace
if (input[i] == '{') {
@@ -349,7 +349,7 @@ char *runtime_expand_var(RuntimeEnv *env, char *input) {
continue;
}
// Append expanded environment variable to output
- strncat(expanded, tmp, strlen(tmp));
+ strncat(expanded, tmp, STASIS_BUFSIZ - 1);
if (env) {
guard_free(tmp);
}
diff --git a/src/github.c b/src/lib/core/github.c
index 36e2e7c..c5e4534 100644
--- a/src/github.c
+++ b/src/lib/core/github.c
@@ -2,6 +2,7 @@
#include <stdlib.h>
#include <string.h>
#include "core.h"
+#include "github.h"
struct GHContent {
char *data;
diff --git a/src/globals.c b/src/lib/core/globals.c
index 1e27959..83465f1 100644
--- a/src/globals.c
+++ b/src/lib/core/globals.c
@@ -1,6 +1,7 @@
#include <stdlib.h>
#include <stdbool.h>
#include "core.h"
+#include "envctl.h"
const char *VERSION = "1.0.0";
const char *AUTHOR = "Joseph Hunkeler";
@@ -25,19 +26,22 @@ const char *BANNER =
"Association of Universities for Research in Astronomy (AURA)\n";
struct STASIS_GLOBAL globals = {
- .verbose = false,
- .continue_on_error = false,
- .always_update_base_environment = false,
- .conda_fresh_start = true,
- .conda_install_prefix = NULL,
- .conda_packages = NULL,
- .pip_packages = NULL,
- .tmpdir = NULL,
- .enable_docker = true,
- .enable_artifactory = true,
- .enable_artifactory_build_info = true,
- .enable_testing = true,
- .enable_rewrite_spec_stage_2 = true,
+ .verbose = false, ///< Toggle verbose mode
+ .continue_on_error = false, ///< Do not stop program on error
+ .always_update_base_environment = false, ///< Run "conda update --all" after installing Conda
+ .conda_fresh_start = true, ///< Remove/reinstall Conda at startup
+ .conda_install_prefix = NULL, ///< Path to install Conda
+ .conda_packages = NULL, ///< Conda packages to install
+ .pip_packages = NULL, ///< Python packages to install
+ .tmpdir = NULL, ///< Path to store temporary data
+ .enable_docker = true, ///< Toggle docker usage
+ .enable_artifactory = true, ///< Toggle artifactory server usage
+ .enable_artifactory_build_info = true, ///< Toggle build-info uploads
+ .enable_testing = true, ///< Toggle [test] block "script" execution. "script_setup" always executes.
+ .enable_rewrite_spec_stage_2 = true, ///< Leave template stings in output files
+ .enable_parallel = true, ///< Toggle testing in parallel
+ .parallel_fail_fast = false, ///< Kill ALL multiprocessing tasks immediately on error
+ .pool_status_interval = 30, ///< Report "Task is running"
};
void globals_free() {
@@ -55,7 +59,6 @@ void globals_free() {
guard_free(globals.jfrog.jfrog_artifactory_base_url);
guard_free(globals.jfrog.jfrog_artifactory_product);
guard_free(globals.jfrog.remote_filename);
- guard_free(globals.workaround.tox_posargs);
guard_free(globals.workaround.conda_reactivate);
if (globals.envctl) {
envctl_free(&globals.envctl);
diff --git a/src/ini.c b/src/lib/core/ini.c
index e98b409..d44e1cc 100644
--- a/src/ini.c
+++ b/src/lib/core/ini.c
@@ -319,10 +319,10 @@ int ini_data_append(struct INIFILE **ini, char *section_name, char *key, char *v
}
struct INIData **tmp = realloc(section->data, (section->data_count + 1) * sizeof(**section->data));
- if (tmp != section->data) {
- section->data = tmp;
- } else if (!tmp) {
+ if (tmp == NULL) {
return 1;
+ } else {
+ section->data = tmp;
}
if (!ini_data_get((*ini), section_name, key)) {
struct INIData **data = section->data;
@@ -350,11 +350,11 @@ int ini_data_append(struct INIFILE **ini, char *section_name, char *key, char *v
size_t value_len_new = value_len_old + value_len;
char *value_tmp = NULL;
value_tmp = realloc(data->value, value_len_new + 2);
- if (value_tmp != data->value) {
- data->value = value_tmp;
- } else if (!value_tmp) {
+ if (!value_tmp) {
SYSERROR("Unable to increase data->value size to %zu bytes", value_len_new + 2);
return -1;
+ } else {
+ data->value = value_tmp;
}
strcat(data->value, value);
}
@@ -393,9 +393,9 @@ int ini_setval(struct INIFILE **ini, unsigned type, char *section_name, char *ke
int ini_section_create(struct INIFILE **ini, char *key) {
struct INISection **tmp = realloc((*ini)->section, ((*ini)->section_count + 1) * sizeof(**(*ini)->section));
- if (!tmp) {
+ if (tmp == NULL) {
return 1;
- } else if (tmp != (*ini)->section) {
+ } else {
(*ini)->section = tmp;
}
diff --git a/src/junitxml.c b/src/lib/core/junitxml.c
index 9c7e5b4..c7d0834 100644
--- a/src/junitxml.c
+++ b/src/lib/core/junitxml.c
@@ -37,9 +37,9 @@ void junitxml_testsuite_free(struct JUNIT_Testsuite **testsuite) {
static int testsuite_append_testcase(struct JUNIT_Testsuite **testsuite, struct JUNIT_Testcase *testcase) {
struct JUNIT_Testsuite *suite = (*testsuite);
struct JUNIT_Testcase **tmp = realloc(suite->testcase, (suite->_tc_alloc + 1 ) * sizeof(*testcase));
- if (!tmp) {
+ if (tmp == NULL) {
return -1;
- } else if (tmp != suite->testcase) {
+ } else {
suite->testcase = tmp;
}
suite->testcase[suite->_tc_inuse] = testcase;
diff --git a/src/lib/core/multiprocessing.c b/src/lib/core/multiprocessing.c
new file mode 100644
index 0000000..484c566
--- /dev/null
+++ b/src/lib/core/multiprocessing.c
@@ -0,0 +1,449 @@
+#include "core.h"
+#include "multiprocessing.h"
+
+/// The sum of all tasks started by mp_task()
+size_t mp_global_task_count = 0;
+
+// Return a pointer to the next unused task slot in the pool.
+// NOTE(review): no bounds check here; the caller must verify that
+// num_used < num_alloc before committing the slot (mp_pool_task does this).
+static struct MultiProcessingTask *mp_pool_next_available(struct MultiProcessingPool *pool) {
+ return &pool->task[pool->num_used];
+}
+
+// Child-process half of mp_task_fork(): enter the task's working directory,
+// record the start time, redirect stdout/stderr into the task's log file,
+// write a log header, then replace the process image with bash running the
+// task's generated script. On success this never returns.
+// Returns -1 if the log file cannot be opened; also "returns" execvp's
+// failure result — the caller is responsible for terminating the child in
+// that case.
+// NOTE(review): "pool" is not referenced in this body.
+int child(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) {
+ FILE *fp_log = NULL;
+
+ // The task starts inside the requested working directory
+ if (chdir(task->working_dir)) {
+ perror(task->working_dir);
+ exit(1);
+ }
+
+ // Record the task start time
+ if (clock_gettime(CLOCK_REALTIME, &task->time_data.t_start) < 0) {
+ perror("clock_gettime");
+ exit(1);
+ }
+
+ // Redirect stdout and stderr to the log file
+ fflush(stdout);
+ fflush(stderr);
+ // Set log file name (appended to the directory prefix set by mp_pool_task)
+ sprintf(task->log_file + strlen(task->log_file), "task-%zu-%d.log", mp_global_task_count, task->parent_pid);
+ fp_log = freopen(task->log_file, "w+", stdout);
+ if (!fp_log) {
+ fprintf(stderr, "unable to open '%s' for writing: %s\n", task->log_file, strerror(errno));
+ return -1;
+ }
+ // stderr follows stdout into the log
+ dup2(fileno(stdout), fileno(stderr));
+
+ // Generate timestamp for log header
+ time_t t = time(NULL);
+ char *timebuf = ctime(&t);
+ if (timebuf) {
+ // strip line feed from timestamp
+ timebuf[strlen(timebuf) ? strlen(timebuf) - 1 : 0] = 0;
+ }
+
+ // Generate log header
+ fprintf(fp_log, "# STARTED: %s\n", timebuf ? timebuf : "unknown");
+ fprintf(fp_log, "# PID: %d\n", task->parent_pid);
+ fprintf(fp_log, "# WORKDIR: %s\n", task->working_dir);
+ fprintf(fp_log, "# COMMAND:\n%s\n", task->cmd);
+ fprintf(fp_log, "# OUTPUT:\n");
+ // Commit header to log file / clean up
+ fflush(fp_log);
+
+ // Execute task
+ fflush(stdout);
+ fflush(stderr);
+ char *args[] = {"bash", "--norc", task->parent_script, (char *) NULL};
+ // NOTE(review): if execvp fails, this returns into the forked child's copy
+ // of the caller — the caller must exit the child explicitly
+ return execvp("/bin/bash", args);
+}
+
+// Parent-process half of mp_task_fork(): announce the task, record the
+// child's PID in the (shared) task record, bump the global task counter, and
+// perform a non-blocking waitpid to surface immediate failures.
+// Returns 0 on success, -1 if waitpid fails.
+int parent(struct MultiProcessingPool *pool, struct MultiProcessingTask *task, pid_t pid, int *child_status) {
+    printf("[%s:%s] Task started (pid: %d)\n", pool->ident, task->ident, pid);
+
+    // Give the child process access to our PID value
+    task->pid = pid;
+    task->parent_pid = pid;
+    mp_global_task_count++;
+
+    // Non-blocking probe of the newly started child's status
+    const pid_t rc = waitpid(pid, child_status, WUNTRACED | WCONTINUED | WNOHANG);
+    if (rc < 0) {
+        perror("waitpid failed");
+        return -1;
+    }
+    return 0;
+}
+
+// Fork and launch a pool task. The child either replaces itself with the
+// task's script via child()/execvp, or exits on failure; the parent records
+// bookkeeping via parent().
+// Returns parent()'s status, or -1 if fork() fails.
+static int mp_task_fork(struct MultiProcessingPool *pool, struct MultiProcessingTask *task) {
+    int child_status = 0;
+    pid_t pid = fork();
+    if (pid == -1) {
+        return -1;
+    } else if (pid == 0) {
+        child(pool, task);
+        // BUG FIX: child() only returns on failure (freopen or execvp error).
+        // Previously the child process fell through and continued executing
+        // parent() and the caller's code path. Terminate it explicitly.
+        _exit(1);
+    }
+    return parent(pool, task, pid, &child_status);
+}
+
+// Claim the next free slot in the pool and populate it: identity, log file
+// prefix, working directory, a temporary bash script containing "cmd", and a
+// shared-memory copy of the command text for the child's log header.
+// Returns the populated slot, or NULL when the pool is full or an allocation
+// or temp-file step fails.
+struct MultiProcessingTask *mp_pool_task(struct MultiProcessingPool *pool, const char *ident, char *working_dir, char *cmd) {
+    struct MultiProcessingTask *slot = mp_pool_next_available(pool);
+    if (pool->num_used != pool->num_alloc) {
+        pool->num_used++;
+    } else {
+        fprintf(stderr, "Maximum number of tasks reached\n");
+        return NULL;
+    }
+
+    // Set default status to "error"
+    slot->status = -1;
+
+    // Set task identifier string
+    memset(slot->ident, 0, sizeof(slot->ident));
+    strncpy(slot->ident, ident, sizeof(slot->ident) - 1);
+
+    // Set log file path
+    // BUG FIX: was memset(slot->log_file, 0, sizeof(*slot->log_file)), which
+    // zeroes a single byte and leaves the rest of the buffer uninitialized
+    // before strcat() appends to it
+    memset(slot->log_file, 0, sizeof(slot->log_file));
+    strcat(slot->log_file, pool->log_root);
+    strcat(slot->log_file, "/");
+
+    // Set working directory
+    if (isempty(working_dir)) {
+        strcpy(slot->working_dir, ".");
+    } else {
+        strncpy(slot->working_dir, working_dir, PATH_MAX - 1);
+    }
+
+    // Create a temporary file to act as our intermediate command script
+    FILE *tp = NULL;
+    char *t_name = xmkstemp(&tp, "w");
+    if (!t_name || !tp) {
+        return NULL;
+    }
+
+    // Set the script's permissions so that only the calling user can use it
+    // This should help prevent eavesdropping if keys are applied in plain-text
+    // somewhere.
+    chmod(t_name, 0700);
+
+    // Record the script path
+    memset(slot->parent_script, 0, sizeof(slot->parent_script));
+    strncpy(slot->parent_script, t_name, PATH_MAX - 1);
+    guard_free(t_name);
+
+    // Populate the script
+    fprintf(tp, "#!/bin/bash\n%s\n", cmd);
+    fflush(tp);
+    fclose(tp);
+
+    // Record the command(s) in memory shared with the child process
+    slot->cmd_len = (strlen(cmd) * sizeof(*cmd)) + 1;
+    slot->cmd = mmap(NULL, slot->cmd_len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    if (slot->cmd == MAP_FAILED) {
+        // BUG FIX: mmap's result was used unchecked
+        perror("mmap");
+        slot->cmd = NULL;
+        return NULL;
+    }
+    memset(slot->cmd, 0, slot->cmd_len);
+    strncpy(slot->cmd, cmd, slot->cmd_len - 1);
+
+    return slot;
+}
+
+// Compute (t_stop - t_start) as a normalized timespec. Modeled after the
+// timersub() macro in time.h, but at nanosecond rather than microsecond
+// resolution.
+static void get_task_duration(struct MultiProcessingTask *task, struct timespec *result) {
+    const struct timespec *begin = &task->time_data.t_start;
+    const struct timespec *end = &task->time_data.t_stop;
+
+    result->tv_sec = end->tv_sec - begin->tv_sec;
+    result->tv_nsec = end->tv_nsec - begin->tv_nsec;
+    // Borrow a second when the nanosecond component underflows
+    if (result->tv_nsec < 0) {
+        result->tv_sec -= 1;
+        result->tv_nsec += 1000000000L;
+    }
+}
+
+// Print a banner-framed table summarizing every used task slot in the pool:
+// final status (DONE/TERM/FAIL), PID, duration in whole seconds, and ident.
+void mp_pool_show_summary(struct MultiProcessingPool *pool) {
+    print_banner("=", 79);
+    printf("Pool execution summary for \"%s\"\n", pool->ident);
+    print_banner("=", 79);
+    printf("STATUS PID DURATION IDENT\n");
+
+    for (size_t i = 0; i < pool->num_used; i++) {
+        struct MultiProcessingTask *task = &pool->task[i];
+
+        // TERM: killed by a signal; FAIL: non-zero exit; DONE: clean exit
+        const char *status_str;
+        if (task->signaled_by) {
+            status_str = "TERM";
+        } else if (task->status) {
+            status_str = "FAIL";
+        } else {
+            status_str = "DONE";
+        }
+
+        struct timespec duration;
+        get_task_duration(task, &duration);
+        long diff = duration.tv_sec + duration.tv_nsec / 1000000000L;
+        printf("%-4s %10d %7lds %-10s\n", status_str, task->parent_pid, diff, task->ident);
+    }
+    puts("");
+}
+
+// Copy the contents of a task's log file to "stream", followed by a trailing
+// newline. Returns 0 on success, -1 if the log file cannot be opened.
+static int show_log_contents(FILE *stream, struct MultiProcessingTask *task) {
+    FILE *fp = fopen(task->log_file, "r");
+    if (!fp) {
+        return -1;
+    }
+
+    char line[BUFSIZ] = {0};
+    while (fgets(line, sizeof(line) - 1, fp) != NULL) {
+        fprintf(stream, "%s", line);
+        memset(line, 0, sizeof(line));
+    }
+    fprintf(stream, "\n");
+
+    fclose(fp);
+    return 0;
+}
+
+// Send "signum" to every live task in the pool, reap each signaled child,
+// record its terminating signal and stop time, and remove the task's log
+// file and temporary script. Tasks whose pid is not positive are treated as
+// not running. Returns 0 normally; -1 only if a task slot pointer is NULL.
+int mp_pool_kill(struct MultiProcessingPool *pool, int signum) {
+ printf("Sending signal %d to pool '%s'\n", signum, pool->ident);
+ for (size_t i = 0; i < pool->num_used; i++) {
+ struct MultiProcessingTask *slot = &pool->task[i];
+ if (!slot) {
+ return -1;
+ }
+ // Kill tasks in progress
+ if (slot->pid > 0) {
+ int status;
+ printf("Sending signal %d to task '%s' (pid: %d)\n", signum, slot->ident, slot->pid);
+ status = kill(slot->pid, signum);
+ // ESRCH means the process is already gone; not an error here
+ if (status && errno != ESRCH) {
+ fprintf(stderr, "Task '%s' (pid: %d) did not respond: %s\n", slot->ident, slot->pid, strerror(errno));
+ } else {
+ // Wait for process to handle the signal, then set the status accordingly
+ if (waitpid(slot->pid, &status, 0) >= 0) {
+ slot->signaled_by = WTERMSIG(status);
+ // Record the task stop time
+ if (clock_gettime(CLOCK_REALTIME, &slot->time_data.t_stop) < 0) {
+ perror("clock_gettime");
+ exit(1);
+ }
+ // We are short-circuiting the normal flow, and the process is now dead, so mark it as such
+ slot->pid = MP_POOL_PID_UNUSED;
+ }
+ }
+ }
+ // Best-effort cleanup of per-task artifacts
+ if (!access(slot->log_file, F_OK)) {
+ remove(slot->log_file);
+ }
+ if (!access(slot->parent_script, F_OK)) {
+ remove(slot->parent_script);
+ }
+ }
+ return 0;
+}
+
+// Run the pool's queued tasks, at most "jobs" at a time, polling once per
+// second. Tasks are launched lazily (status == -1 means "not started yet")
+// within a sliding window [lower_i, upper_i). For each finished task the log
+// is printed and its log file and script are removed. With MP_POOL_FAIL_FAST
+// set in "flags", the first failure kills the remaining tasks.
+// Returns the number of failed tasks (0 == success), -1 if waitpid fails,
+// or -2 when fail-fast tore the pool down.
+int mp_pool_join(struct MultiProcessingPool *pool, size_t jobs, size_t flags) {
+ int status = 0;
+ int failures = 0;
+ size_t tasks_complete = 0;
+ size_t lower_i = 0;
+ size_t upper_i = jobs;
+
+ do {
+ size_t hang_check = 0;
+ // Clamp the window's upper bound to the number of queued tasks
+ if (upper_i >= pool->num_used) {
+ size_t x = upper_i - pool->num_used;
+ upper_i -= (size_t) x;
+ }
+
+ for (size_t i = lower_i; i < upper_i; i++) {
+ struct MultiProcessingTask *slot = &pool->task[i];
+ // Launch any task in the window that has not started yet
+ if (slot->status == -1) {
+ if (mp_task_fork(pool, slot)) {
+ fprintf(stderr, "%s: mp_task_fork failed\n", slot->ident);
+ // Bring the whole process group down on fork failure
+ kill(0, SIGTERM);
+ }
+ }
+
+ // Has the child been processed already?
+ if (slot->pid == MP_POOL_PID_UNUSED) {
+ // Child is already used up, skip it
+ hang_check++;
+ if (hang_check >= pool->num_used) {
+ // If you join a pool that's already finished it will spin
+ // forever. This protects the program from entering an
+ // infinite loop.
+ fprintf(stderr, "%s is deadlocked\n", pool->ident);
+ failures++;
+ goto pool_deadlocked;
+ }
+ continue;
+ }
+
+ // Is the process finished?
+ pid_t pid = waitpid(slot->pid, &status, WNOHANG | WUNTRACED | WCONTINUED);
+ int task_ended = WIFEXITED(status);
+ int task_ended_by_signal = WIFSIGNALED(status);
+ int task_stopped = WIFSTOPPED(status);
+ int task_continued = WIFCONTINUED(status);
+ int status_exit = WEXITSTATUS(status);
+ int status_signal = WTERMSIG(status);
+ int status_stopped = WSTOPSIG(status);
+
+ // Update status
+ // NOTE(review): assigned even when pid == 0 (still running), i.e.
+ // from a "status" that waitpid did not update this iteration —
+ // confirm this is intentional
+ slot->status = status_exit;
+ slot->signaled_by = status_signal;
+
+ char progress[1024] = {0};
+ if (pid > 0) {
+ double percent = ((double) (tasks_complete + 1) / (double) pool->num_used) * 100;
+ snprintf(progress, sizeof(progress) - 1, "[%s:%s] [%3.1f%%]", pool->ident, slot->ident, percent);
+
+ // The process ended in one the following ways
+ // Note: SIGSTOP nor SIGCONT will not increment the tasks_complete counter
+ if (task_stopped) {
+ printf("%s Task was suspended (%d)\n", progress, status_stopped);
+ continue;
+ } else if (task_continued) {
+ printf("%s Task was resumed\n", progress);
+ continue;
+ } else if (task_ended_by_signal) {
+ printf("%s Task ended by signal %d (%s)\n", progress, status_signal, strsignal(status_signal));
+ tasks_complete++;
+ } else if (task_ended) {
+ printf("%s Task ended (status: %d)\n", progress, status_exit);
+ tasks_complete++;
+ } else {
+ fprintf(stderr, "%s Task state is unknown (0x%04X)\n", progress, status);
+ }
+
+ // Show the log (always)
+ if (show_log_contents(stdout, slot)) {
+ perror(slot->log_file);
+ }
+
+ // Record the task stop time
+ if (clock_gettime(CLOCK_REALTIME, &slot->time_data.t_stop) < 0) {
+ perror("clock_gettime");
+ exit(1);
+ }
+
+ // Any non-zero raw wait status (exit code or signal) counts as failure
+ if (status >> 8 != 0 || (status & 0xff) != 0) {
+ fprintf(stderr, "%s Task failed\n", progress);
+ failures++;
+
+ if (flags & MP_POOL_FAIL_FAST && pool->num_used > 1) {
+ mp_pool_kill(pool, SIGTERM);
+ return -2;
+ }
+ } else {
+ printf("%s Task finished\n", progress);
+ }
+
+ // Clean up logs and scripts left behind by the task
+ if (remove(slot->log_file)) {
+ fprintf(stderr, "%s Unable to remove log file: '%s': %s\n", progress, slot->parent_script, strerror(errno));
+ }
+ if (remove(slot->parent_script)) {
+ fprintf(stderr, "%s Unable to remove temporary script '%s': %s\n", progress, slot->parent_script, strerror(errno));
+ }
+
+ // Update progress and tell the poller to ignore the PID. The process is gone.
+ slot->pid = MP_POOL_PID_UNUSED;
+ } else if (pid < 0) {
+ fprintf(stderr, "waitpid failed: %s\n", strerror(errno));
+ return -1;
+ } else {
+ // Track the number of seconds elapsed for each task.
+ // When a task has executed for longer than status_intervals, print a status update
+ // _seconds represents the time between intervals, not the total runtime of the task
+ slot->_seconds = time(NULL) - slot->_now;
+ if (slot->_seconds > pool->status_interval) {
+ slot->_now = time(NULL);
+ slot->_seconds = 0;
+ }
+ if (slot->_seconds == 0) {
+ printf("[%s:%s] Task is running (pid: %d)\n", pool->ident, slot->ident, slot->parent_pid);
+ }
+ }
+ }
+
+ if (tasks_complete == pool->num_used) {
+ break;
+ }
+
+ // Slide the window forward once everything in it has completed
+ if (tasks_complete == upper_i) {
+ lower_i += jobs;
+ upper_i += jobs;
+ }
+
+ // Poll again after a short delay
+ sleep(1);
+ } while (1);
+
+ pool_deadlocked:
+ puts("");
+ return failures;
+}
+
+
+// Create a multiprocessing pool in anonymous shared memory so that forked
+// children can update their task records. "ident" names the pool; "log_root"
+// is created (mode 0700) and receives each task's log file.
+// Returns the pool, or NULL on invalid arguments or mmap/mkdir failure.
+struct MultiProcessingPool *mp_pool_init(const char *ident, const char *log_root) {
+    struct MultiProcessingPool *pool;
+
+    if (!ident || !log_root) {
+        // Pool must have an ident string
+        // log_root must be set
+        return NULL;
+    }
+
+    // The pool is shared with children
+    pool = mmap(NULL, sizeof(*pool), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    if (pool == MAP_FAILED) {
+        // BUG FIX: the pool mapping was previously used unchecked
+        perror("mmap");
+        return NULL;
+    }
+
+    // Set pool identity string
+    memset(pool->ident, 0, sizeof(pool->ident));
+    strncpy(pool->ident, ident, sizeof(pool->ident) - 1);
+
+    // Set logging base directory
+    memset(pool->log_root, 0, sizeof(pool->log_root));
+    strncpy(pool->log_root, log_root, sizeof(pool->log_root) - 1);
+    pool->num_used = 0;
+    pool->num_alloc = MP_POOL_TASK_MAX;
+
+    // Create the log directory (an existing directory is fine)
+    if (mkdirs(log_root, 0700) < 0) {
+        if (errno != EEXIST) {
+            perror(log_root);
+            mp_pool_free(&pool);
+            return NULL;
+        }
+    }
+
+    // Task array is shared with children
+    pool->task = mmap(NULL, (pool->num_alloc + 1) * sizeof(*pool->task), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    if (pool->task == MAP_FAILED) {
+        perror("mmap");
+        mp_pool_free(&pool);
+        return NULL;
+    }
+
+    return pool;
+}
+
+// Release a pool created by mp_pool_init(): unmap each task's shared command
+// buffer, the task array, and the pool itself, then NULL the caller's pointer.
+void mp_pool_free(struct MultiProcessingPool **pool) {
+    // Unmap all pool tasks
+    if ((*pool)->task) {
+        // BUG FIX: previously only the first task's cmd mapping was unmapped
+        // (and a dead, empty for-loop preceded this block); each task's cmd is
+        // its own anonymous mapping created by mp_pool_task()
+        for (size_t i = 0; i < (*pool)->num_used; i++) {
+            struct MultiProcessingTask *task = &(*pool)->task[i];
+            if (task->cmd) {
+                if (munmap(task->cmd, task->cmd_len) < 0) {
+                    perror("munmap");
+                }
+                task->cmd = NULL;
+            }
+        }
+        if (munmap((*pool)->task, sizeof(*(*pool)->task) * (*pool)->num_alloc) < 0) {
+            perror("munmap");
+        }
+    }
+    // Unmap the pool itself
+    if ((*pool)) {
+        if (munmap((*pool), sizeof(*(*pool))) < 0) {
+            perror("munmap");
+        }
+        (*pool) = NULL;
+    }
+}
diff --git a/src/lib/core/package.c b/src/lib/core/package.c
new file mode 100644
index 0000000..e34673b
--- /dev/null
+++ b/src/lib/core/package.c
@@ -0,0 +1,41 @@
+#include <stdlib.h>
+#include "package.h"
+#include "core.h"
+
+struct Package *stasis_package_init() {
+ struct Package *result;
+ result = calloc(1, sizeof(*result));
+ return result;
+}
+
+void stasis_package_set_name(struct Package *pkg, const char *name) {
+ if (pkg->meta.name) {
+ guard_free(pkg->meta.name);
+ }
+ pkg->meta.name = strdup(name);
+}
+
+void stasis_package_set_version(struct Package *pkg, const char *version) {
+ if (pkg->meta.version) {
+ guard_free(pkg->meta.version);
+ }
+ pkg->meta.version = strdup(version);
+}
+
+void stasis_package_set_version_spec(struct Package *pkg, const char *version_spec) {
+ if (pkg->meta.version_spec) {
+ guard_free(pkg->meta.version_spec);
+ }
+ pkg->meta.version_spec = strdup(version_spec);
+}
+
+void stasis_package_set_uri(struct Package *pkg, const char *uri) {
+ if (pkg->source.uri) {
+ guard_free(pkg->source.uri);
+ }
+ pkg->source.uri = strdup(uri);
+}
+
+void stasis_package_set_handler(struct Package *pkg, unsigned handler) {
+ pkg->source.handler = handler;
+} \ No newline at end of file
diff --git a/src/recipe.c b/src/lib/core/recipe.c
index e51fde6..833908c 100644
--- a/src/recipe.c
+++ b/src/lib/core/recipe.c
@@ -16,7 +16,7 @@ int recipe_clone(char *recipe_dir, char *url, char *gitref, char **result) {
return -1;
}
}
- strncpy(*result, destdir, PATH_MAX - 1);
+ strncpy(*result, destdir, PATH_MAX);
if (!access(destdir, F_OK)) {
if (!strcmp(destdir, "/")) {
diff --git a/src/relocation.c b/src/lib/core/relocation.c
index 852aca4..852aca4 100644
--- a/src/relocation.c
+++ b/src/lib/core/relocation.c
diff --git a/src/rules.c b/src/lib/core/rules.c
index e42ee07..e42ee07 100644
--- a/src/rules.c
+++ b/src/lib/core/rules.c
diff --git a/src/str.c b/src/lib/core/str.c
index 6afbf73..868a6c7 100644
--- a/src/str.c
+++ b/src/lib/core/str.c
@@ -175,7 +175,7 @@ char *join_ex(char *separator, ...) {
}
// Initialize array
- argv = calloc(argc + 1, sizeof(char *));
+ argv = calloc(argc + 1, sizeof(char **));
if (argv == NULL) {
perror("join_ex calloc failed");
return NULL;
@@ -196,8 +196,9 @@ char *join_ex(char *separator, ...) {
char **tmp = realloc(argv, (argc + 1) * sizeof(char *));
if (tmp == NULL) {
perror("join_ex realloc failed");
+ guard_free(argv);
return NULL;
- } else if (tmp != argv) {
+ } else {
argv = tmp;
}
size += strlen(current) + separator_len;
@@ -223,21 +224,35 @@ char *join_ex(char *separator, ...) {
}
char *substring_between(char *sptr, const char *delims) {
+ char delim_open[255] = {0};
+ char delim_close[255] = {0};
if (sptr == NULL || delims == NULL) {
return NULL;
}
// Ensure we have enough delimiters to continue
size_t delim_count = strlen(delims);
- if (delim_count < 2 || delim_count % 2) {
+ if (delim_count < 2 || delim_count % 2 || (delim_count > (sizeof(delim_open) - 1)) != 0) {
return NULL;
}
+ size_t delim_take = delim_count / 2;
- char delim_open[255] = {0};
- strncpy(delim_open, delims, delim_count / 2);
+ // How else am I supposed to consume the first and last n chars of the string? Give me a break.
+ // warning: ‘__builtin___strncpy_chk’ specified bound depends on the length of the source argument
+ // ---
+ //strncpy(delim_open, delims, delim_take);
+ size_t i = 0;
+ while (i < delim_take && i < sizeof(delim_open)) {
+ delim_open[i] = delims[i];
+ i++;
+ }
- char delim_close[255] = {0};
- strcpy(delim_close, &delims[delim_count / 2]);
+ //strncpy(delim_close, &delims[delim_take], delim_take);
+ i = 0;
+ while (i < delim_take && i < sizeof(delim_close)) {
+ delim_close[i] = delims[i + delim_take];
+ i++;
+ }
// Create pointers to the delimiters
char *start = strstr(sptr, delim_open);
@@ -569,7 +584,7 @@ char **strdup_array(char **array) {
for (elems = 0; array[elems] != NULL; elems++);
// Create new array
- result = calloc(elems + 1, sizeof(result));
+ result = calloc(elems + 1, sizeof(*result));
for (size_t i = 0; i < elems; i++) {
result[i] = strdup(array[i]);
}
diff --git a/src/strlist.c b/src/lib/core/strlist.c
index de76744..f0bffa8 100644
--- a/src/strlist.c
+++ b/src/lib/core/strlist.c
@@ -2,6 +2,7 @@
* String array convenience functions
* @file strlist.c
*/
+#include "download.h"
#include "strlist.h"
#include "utils.h"
@@ -331,7 +332,7 @@ void strlist_set(struct StrList **pStrList, size_t index, char *value) {
}
memset((*pStrList)->data[index], '\0', strlen(value) + 1);
- strncpy((*pStrList)->data[index], value, strlen(value));
+ strcpy((*pStrList)->data[index], value);
}
}
diff --git a/src/system.c b/src/lib/core/system.c
index a564769..4e605ec 100644
--- a/src/system.c
+++ b/src/lib/core/system.c
@@ -46,11 +46,19 @@ int shell(struct Process *proc, char *args) {
if (strlen(proc->f_stdout)) {
fp_out = freopen(proc->f_stdout, "w+", stdout);
+ if (!fp_out) {
+ fprintf(stderr, "Unable to redirect stdout to %s: %s\n", proc->f_stdout, strerror(errno));
+ exit(1);
+ }
}
if (strlen(proc->f_stderr)) {
if (!proc->redirect_stderr) {
fp_err = freopen(proc->f_stderr, "w+", stderr);
+ if (!fp_err) {
+ fprintf(stderr, "Unable to redirect stderr to %s: %s\n", proc->f_stderr, strerror(errno));
+ exit(1);
+ }
}
}
@@ -59,7 +67,10 @@ int shell(struct Process *proc, char *args) {
fclose(fp_err);
fclose(stderr);
}
- dup2(fileno(stdout), fileno(stderr));
+ if (dup2(fileno(stdout), fileno(stderr)) < 0) {
+ fprintf(stderr, "Unable to redirect stderr to stdout: %s\n", strerror(errno));
+ exit(1);
+ }
}
return execl("/bin/bash", "bash", "--norc", t_name, (char *) NULL);
diff --git a/src/template.c b/src/lib/core/template.c
index a412fa8..a412fa8 100644
--- a/src/template.c
+++ b/src/lib/core/template.c
diff --git a/src/template_func_proto.c b/src/lib/core/template_func_proto.c
index 3cf66e4..3305b4d 100644
--- a/src/template_func_proto.c
+++ b/src/lib/core/template_func_proto.c
@@ -1,4 +1,6 @@
#include "template_func_proto.h"
+#include "delivery.h"
+#include "github.h"
int get_github_release_notes_tplfunc_entrypoint(void *frame, void *data_out) {
int result;
@@ -74,7 +76,10 @@ int get_junitxml_file_entrypoint(void *frame, void *data_out) {
const struct Delivery *ctx = (const struct Delivery *) f->data_in;
char cwd[PATH_MAX] = {0};
- getcwd(cwd, PATH_MAX - 1);
+ if (!getcwd(cwd, PATH_MAX - 1)) {
+ SYSERROR("unable to determine current working directory: %s", strerror(errno));
+ return -1;
+ }
char nametmp[PATH_MAX] = {0};
strcpy(nametmp, cwd);
char *name = path_basename(nametmp);
@@ -96,7 +101,10 @@ int get_basetemp_dir_entrypoint(void *frame, void *data_out) {
const struct Delivery *ctx = (const struct Delivery *) f->data_in;
char cwd[PATH_MAX] = {0};
- getcwd(cwd, PATH_MAX - 1);
+ if (!getcwd(cwd, PATH_MAX - 1)) {
+ SYSERROR("unable to determine current working directory: %s", strerror(errno));
+ return -1;
+ }
char nametmp[PATH_MAX] = {0};
strcpy(nametmp, cwd);
char *name = path_basename(nametmp);
@@ -109,4 +117,44 @@ int get_basetemp_dir_entrypoint(void *frame, void *data_out) {
sprintf(*output, "%s/truth-%s-%s", ctx->storage.tmpdir, name, ctx->info.release_name);
return result;
+}
+
+int tox_run_entrypoint(void *frame, void *data_out) {
+ char **output = (char **) data_out;
+ struct tplfunc_frame *f = (struct tplfunc_frame *) frame;
+ const struct Delivery *ctx = (const struct Delivery *) f->data_in;
+
+ // Apply workaround for tox positional arguments
+ char *toxconf = NULL;
+ if (!access("tox.ini", F_OK)) {
+ if (!fix_tox_conf("tox.ini", &toxconf)) {
+ msg(STASIS_MSG_L3, "Fixing tox positional arguments\n");
+ *output = calloc(STASIS_BUFSIZ, sizeof(**output));
+ if (!*output) {
+ return -1;
+ }
+ char *basetemp_path = NULL;
+ if (get_basetemp_dir_entrypoint(f, &basetemp_path)) {
+ return -2;
+ }
+ char *jxml_path = NULL;
+ if (get_junitxml_file_entrypoint(f, &jxml_path)) {
+ guard_free(basetemp_path);
+ return -3;
+ }
+ const char *tox_target = f->argv[0].t_char_ptr;
+ const char *pytest_args = f->argv[1].t_char_ptr;
+ if (isempty(toxconf) || !strcmp(toxconf, "/")) {
+ SYSERROR("Unsafe toxconf path: '%s'", toxconf);
+ guard_free(basetemp_path);
+ guard_free(jxml_path);
+ return -4;
+ }
+ snprintf(*output, STASIS_BUFSIZ - 1, "\npip install tox && (tox -e py%s%s -c %s --root . -- --basetemp=\"%s\" --junitxml=\"%s\" %s ; rm -f '%s')\n", ctx->meta.python_compact, tox_target, toxconf, basetemp_path, jxml_path, pytest_args ? pytest_args : "", toxconf);
+
+ guard_free(jxml_path);
+ guard_free(basetemp_path);
+ }
+ }
+ return 0;
} \ No newline at end of file
diff --git a/src/utils.c b/src/lib/core/utils.c
index c0b3733..89950df 100644
--- a/src/utils.c
+++ b/src/lib/core/utils.c
@@ -1,5 +1,6 @@
#include <stdarg.h>
#include "core.h"
+#include "utils.h"
char *dirstack[STASIS_DIRSTACK_MAX];
const ssize_t dirstack_max = sizeof(dirstack) / sizeof(dirstack[0]);
@@ -34,7 +35,7 @@ int popd() {
int rmtree(char *_path) {
int status = 0;
char path[PATH_MAX] = {0};
- strncpy(path, _path, sizeof(path));
+ strncpy(path, _path, sizeof(path) - 1);
DIR *dir;
struct dirent *d_entity;
@@ -122,10 +123,10 @@ char *expandpath(const char *_path) {
}
// Construct the new path
- strncat(result, home, PATH_MAX - 1);
+ strncat(result, home, sizeof(result) - strlen(result) - 1);
if (sep) {
- strncat(result, DIR_SEP, PATH_MAX - 1);
- strncat(result, ptmp, PATH_MAX - 1);
+ strncat(result, DIR_SEP, sizeof(result) - strlen(result) - 1);
+ strncat(result, ptmp, sizeof(result) - strlen(result) - 1);
}
return strdup(result);
@@ -315,7 +316,7 @@ int git_clone(struct Process *proc, char *url, char *destdir, char *gitref) {
}
static char command[PATH_MAX];
- sprintf(command, "%s clone --recursive %s", program, url);
+ sprintf(command, "%s clone -c advice.detachedHead=false --recursive %s", program, url);
if (destdir && access(destdir, F_OK) < 0) {
sprintf(command + strlen(command), " %s", destdir);
result = shell(proc, command);
@@ -444,7 +445,10 @@ void msg(unsigned type, char *fmt, ...) {
void debug_shell() {
msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "ENTERING STASIS DEBUG SHELL\n" STASIS_COLOR_RESET);
- system("/bin/bash -c 'PS1=\"(STASIS DEBUG) \\W $ \" bash --norc --noprofile'");
+ if (system("/bin/bash -c 'PS1=\"(STASIS DEBUG) \\W $ \" bash --norc --noprofile'") < 0) {
+ SYSERROR("unable to spawn debug shell: %s", strerror(errno));
+ exit(errno);
+ }
msg(STASIS_MSG_L1 | STASIS_MSG_WARN, "EXITING STASIS DEBUG SHELL\n" STASIS_COLOR_RESET);
exit(255);
}
@@ -465,12 +469,23 @@ char *xmkstemp(FILE **fp, const char *mode) {
fd = mkstemp(t_name);
*fp = fdopen(fd, mode);
if (!*fp) {
+ // unable to open, die
if (fd > 0)
close(fd);
*fp = NULL;
return NULL;
}
+
char *path = strdup(t_name);
+ if (!path) {
+ // strdup failed, die
+ if (*fp) {
+ // close the file handle
+ fclose(*fp);
+ *fp = NULL;
+ }
+ // fall through. path is NULL.
+ }
return path;
}
@@ -800,3 +815,6 @@ int mkdirs(const char *_path, mode_t mode) {
return status;
}
+char *find_version_spec(char *str) {
+ return strpbrk(str, "@~=<>!");
+}
diff --git a/src/wheel.c b/src/lib/core/wheel.c
index b96df57..4692d0a 100644
--- a/src/wheel.c
+++ b/src/lib/core/wheel.c
@@ -1,6 +1,6 @@
#include "wheel.h"
-struct Wheel *get_wheel_file(const char *basepath, const char *name, char *to_match[], unsigned match_mode) {
+struct Wheel *get_wheel_info(const char *basepath, const char *name, char *to_match[], unsigned match_mode) {
DIR *dp;
struct dirent *rec;
struct Wheel *result = NULL;
@@ -47,13 +47,41 @@ struct Wheel *get_wheel_file(const char *basepath, const char *name, char *to_ma
}
result = calloc(1, sizeof(*result));
+ if (!result) {
+ SYSERROR("Unable to allocate %zu bytes for wheel struct", sizeof(*result));
+ closedir(dp);
+ return NULL;
+ }
+
result->path_name = realpath(package_path, NULL);
+ if (!result->path_name) {
+ SYSERROR("Unable to resolve absolute path to %s: %s", filename, strerror(errno));
+ wheel_free(&result);
+ closedir(dp);
+ return NULL;
+ }
result->file_name = strdup(rec->d_name);
+ if (!result->file_name) {
+ SYSERROR("Unable to allocate bytes for %s: %s", rec->d_name, strerror(errno));
+ wheel_free(&result);
+ closedir(dp);
+ return NULL;
+ }
size_t parts_total;
char **parts = split(filename, "-", 0);
+ if (!parts) {
+ // This shouldn't happen unless a wheel file is present in the
+ // directory with a malformed file name, or we've managed to
+ // exhaust the system's memory
+ SYSERROR("%s has no '-' separators! (Delete this file and try again)", filename);
+ wheel_free(&result);
+ closedir(dp);
+ return NULL;
+ }
+
for (parts_total = 0; parts[parts_total] != NULL; parts_total++);
- if (parts_total < 6) {
+ if (parts_total == 5) {
// no build tag
result->distribution = strdup(parts[0]);
result->version = strdup(parts[1]);
@@ -61,7 +89,7 @@ struct Wheel *get_wheel_file(const char *basepath, const char *name, char *to_ma
result->python_tag = strdup(parts[2]);
result->abi_tag = strdup(parts[3]);
result->platform_tag = strdup(parts[4]);
- } else {
+ } else if (parts_total == 6) {
// has build tag
result->distribution = strdup(parts[0]);
result->version = strdup(parts[1]);
@@ -69,6 +97,13 @@ struct Wheel *get_wheel_file(const char *basepath, const char *name, char *to_ma
result->python_tag = strdup(parts[3]);
result->abi_tag = strdup(parts[4]);
result->platform_tag = strdup(parts[5]);
+ } else {
+ SYSERROR("Unknown wheel name format: %s. Expected 5 or 6 strings "
+ "separated by '-', but got %zu instead", filename, parts_total);
+ GENERIC_ARRAY_FREE(parts);
+ wheel_free(&result);
+ closedir(dp);
+ return NULL;
}
GENERIC_ARRAY_FREE(parts);
break;
@@ -76,3 +111,16 @@ struct Wheel *get_wheel_file(const char *basepath, const char *name, char *to_ma
closedir(dp);
return result;
}
+
+void wheel_free(struct Wheel **wheel) {
+ struct Wheel *w = (*wheel);
+ guard_free(w->path_name);
+ guard_free(w->file_name);
+ guard_free(w->distribution);
+ guard_free(w->version);
+ guard_free(w->build_tag);
+ guard_free(w->python_tag);
+ guard_free(w->abi_tag);
+ guard_free(w->platform_tag);
+ guard_free(w);
+}
diff --git a/stasis.ini b/stasis.ini
index 875ca26..ba3331a 100644
--- a/stasis.ini
+++ b/stasis.ini
@@ -10,7 +10,7 @@ always_update_base_environment = false
conda_fresh_start = true
; (string) Install conda in a custom prefix
-; DEFAULT: Conda will be installed under stasis/conda
+; DEFAULT: Conda will be installed under stasis/tools/conda
; NOTE: conda_fresh_start will automatically be set to "false"
;conda_install_prefix = /path/to/conda
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 62f58a9..f4380e0 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -5,8 +5,8 @@ include_directories(
find_program(BASH_PROGRAM bash)
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/tests)
set(CTEST_BINARY_DIRECTORY ${PROJECT_BINARY_DIR}/tests)
-set(nix_gnu_cflags -Wno-error -Wno-unused-parameter -Wno-discarded-qualifiers)
-set(nix_clang_cflags -Wno-unused-parameter -Wno-incompatible-pointer-types-discards-qualifiers)
+set(nix_gnu_cflags -Wno-format-truncation -Wno-error -Wno-unused-parameter -Wno-unused-result -Wno-discarded-qualifiers)
+set(nix_clang_cflags -Wno-format-truncation -Wno-unused-parameter -Wno-unused-result -Wno-incompatible-pointer-types-discards-qualifiers)
set(win_msvc_cflags /Wall)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/data/generic.ini ${CMAKE_CURRENT_BINARY_DIR} COPYONLY)
diff --git a/tests/data/generic.ini b/tests/data/generic.ini
index c1e5c9c..fd67ed7 100644
--- a/tests/data/generic.ini
+++ b/tests/data/generic.ini
@@ -17,6 +17,7 @@ installer_baseurl = https://github.com/conda-forge/miniforge/releases/download/{
;conda_packages =
pip_packages =
firewatch==0.0.4
+ tweakwcs==0.8.8
[runtime]
@@ -25,8 +26,21 @@ PYTHONUNBUFFERED = 1
[test:firewatch]
repository = https://github.com/astroconda/firewatch
-script =
+script_setup =
pip install -e '.'
+script =
+ firewatch -c conda-forge -p ${STASIS_CONDA_PLATFORM_SUBDIR} | grep -E ' python-[0-9]'
+
+
+[test:tweakwcs]
+repository = https://github.com/spacetelescope/tweakwcs
+script_setup =
+ pip install -e '.[test]'
+script =
+ pytest \
+ -r fEsx \
+ --basetemp="{{ func:basetemp_dir() }}" \
+ --junitxml="{{ func:junitxml_file() }}"
[deploy:artifactory:delivery]
@@ -36,7 +50,7 @@ dest = {{ meta.mission }}/{{ info.build_name }}/
[deploy:docker]
-;registry = bytesalad.stsci.edu
+registry = bytesalad.stsci.edu
image_compression = zstd -v -9 -c
build_args =
SNAPSHOT_INPUT={{ info.release_name }}.yml
diff --git a/tests/rt_generic.sh b/tests/rt_generic.sh
index 6da953d..6e4454c 100644
--- a/tests/rt_generic.sh
+++ b/tests/rt_generic.sh
@@ -6,10 +6,16 @@ if [ -n "$GITHUB_TOKEN" ] && [ -z "$STASIS_GH_TOKEN"]; then
else
export STASIS_GH_TOKEN="anonymous"
fi
+python_versions=(
+ 3.10
+ 3.11
+ 3.12
+)
topdir=$(pwd)
ws="rt_workspace"
+rm -rf "$ws"
mkdir -p "$ws"
ws="$(realpath $ws)"
@@ -28,9 +34,12 @@ popd
pushd "$ws"
type -P stasis
type -P stasis_indexer
+ retcode=0
- stasis --no-docker --no-artifactory --unbuffered -v "$topdir"/generic.ini
- retcode=$?
+ for py_version in "${python_versions[@]}"; do
+ stasis --python "$py_version" --no-docker --no-artifactory --unbuffered -v "$topdir"/generic.ini
+ retcode+=$?
+ done
set +x
@@ -54,7 +63,7 @@ pushd "$ws"
for cond in "${fail_on_main[@]}"; do
if grep --color -H -n "$cond" "$x" >&2; then
echo "ERROR DETECTED IN $x!" >&2
- retcode=2
+ retcode+=1
fi
done
done
@@ -94,6 +103,8 @@ pushd "$ws"
done
popd
-rm -rf "$ws"
+if [ -z "$RT_KEEP_WORKSPACE" ]; then
+ rm -rf "$ws"
+fi
exit $retcode \ No newline at end of file
diff --git a/tests/test_artifactory.c b/tests/test_artifactory.c
index 1a21f0e..2c732fa 100644
--- a/tests/test_artifactory.c
+++ b/tests/test_artifactory.c
@@ -1,4 +1,6 @@
#include "testing.h"
+#include "artifactory.h"
+#include "delivery.h"
// Import private functions from core
extern int delivery_init_platform(struct Delivery *ctx);
diff --git a/tests/test_conda.c b/tests/test_conda.c
index 72217fc..2ed869a 100644
--- a/tests/test_conda.c
+++ b/tests/test_conda.c
@@ -1,4 +1,6 @@
#include "testing.h"
+#include "conda.h"
+#include "delivery.h"
char cwd_start[PATH_MAX];
char cwd_workspace[PATH_MAX];
@@ -38,8 +40,8 @@ struct Delivery ctx;
void test_conda_installation() {
char *install_url = calloc(255, sizeof(install_url));
- delivery_get_installer_url(&ctx, install_url);
- delivery_get_installer(&ctx, install_url);
+ delivery_get_conda_installer_url(&ctx, install_url);
+ delivery_get_conda_installer(&ctx, install_url);
delivery_install_conda(ctx.conda.installer_path, ctx.storage.conda_install_prefix);
STASIS_ASSERT_FATAL(access(ctx.storage.conda_install_prefix, F_OK) == 0, "conda was not installed correctly");
STASIS_ASSERT_FATAL(conda_activate(ctx.storage.conda_install_prefix, "base") == 0, "unable to activate base environment");
diff --git a/tests/test_docker.c b/tests/test_docker.c
index 04a73aa..6eec53c 100644
--- a/tests/test_docker.c
+++ b/tests/test_docker.c
@@ -1,4 +1,6 @@
#include "testing.h"
+#include "docker.h"
+
struct DockerCapabilities cap_suite;
void test_docker_capable() {
diff --git a/tests/test_download.c b/tests/test_download.c
index cee7683..ad8724e 100644
--- a/tests/test_download.c
+++ b/tests/test_download.c
@@ -1,4 +1,5 @@
#include "testing.h"
+#include "download.h"
void test_download() {
struct testcase {
diff --git a/tests/test_ini.c b/tests/test_ini.c
index 2579e21..e4a7808 100644
--- a/tests/test_ini.c
+++ b/tests/test_ini.c
@@ -86,11 +86,13 @@ void test_ini_setval_getval() {
STASIS_ASSERT(ini_getval(ini, "default", "a", INIVAL_TYPE_STR, render_mode, &val) == 0, "failed to get value");
STASIS_ASSERT(strcmp(val.as_char_p, "a") != 0, "unexpected value loaded from modified variable");
STASIS_ASSERT(strcmp(val.as_char_p, "changed") == 0, "unexpected value loaded from modified variable");
+ guard_free(val.as_char_p);
STASIS_ASSERT(ini_setval(&ini, INI_SETVAL_APPEND, "default", "a", " twice") == 0, "failed to set value");
STASIS_ASSERT(ini_getval(ini, "default", "a", INIVAL_TYPE_STR, render_mode, &val) == 0, "failed to get value");
STASIS_ASSERT(strcmp(val.as_char_p, "changed") != 0, "unexpected value loaded from modified variable");
STASIS_ASSERT(strcmp(val.as_char_p, "changed twice") == 0, "unexpected value loaded from modified variable");
+ guard_free(val.as_char_p);
ini_free(&ini);
remove(filename);
}
diff --git a/tests/test_junitxml.c b/tests/test_junitxml.c
index 9b2181e..7111249 100644
--- a/tests/test_junitxml.c
+++ b/tests/test_junitxml.c
@@ -1,4 +1,5 @@
#include "testing.h"
+#include "junitxml.h"
void test_junitxml_testsuite_read() {
struct JUNIT_Testsuite *testsuite;
diff --git a/tests/test_multiprocessing.c b/tests/test_multiprocessing.c
new file mode 100644
index 0000000..b9cd309
--- /dev/null
+++ b/tests/test_multiprocessing.c
@@ -0,0 +1,127 @@
+#include "testing.h"
+#include "multiprocessing.h"
+
+static struct MultiProcessingPool *pool;
+char *commands[] = {
+ "sleep 1; true",
+ "sleep 2; uname -a",
+ "sleep 3; /bin/echo hello world",
+ "sleep 4; true",
+ "sleep 5; uname -a",
+ "sleep 6; /bin/echo hello world",
+};
+
+void test_mp_pool_init() {
+ STASIS_ASSERT((pool = mp_pool_init("mypool", "mplogs")) != NULL, "Pool initialization failed");
+ STASIS_ASSERT_FATAL(pool != NULL, "Should not be NULL");
+ STASIS_ASSERT(pool->num_alloc == MP_POOL_TASK_MAX, "Wrong number of default records");
+ STASIS_ASSERT(pool->num_used == 0, "Wrong number of used records");
+ STASIS_ASSERT(strcmp(pool->log_root, "mplogs") == 0, "Wrong log root directory");
+ STASIS_ASSERT(strcmp(pool->ident, "mypool") == 0, "Wrong identity");
+
+ int data_bad_total = 0;
+ for (size_t i = 0; i < pool->num_alloc; i++) {
+ int data_bad = 0;
+ struct MultiProcessingTask *task = &pool->task[i];
+
+ data_bad += task->status == 0 ? 0 : 1;
+ data_bad += task->pid == 0 ? 0 : 1;
+ data_bad += task->parent_pid == 0 ? 0 : 1;
+ data_bad += task->signaled_by == 0 ? 0 : 1;
+ data_bad += task->time_data.t_start.tv_nsec == 0 ? 0 : 1;
+ data_bad += task->time_data.t_start.tv_sec == 0 ? 0 : 1;
+ data_bad += task->time_data.t_stop.tv_nsec == 0 ? 0 : 1;
+ data_bad += task->time_data.t_stop.tv_sec == 0 ? 0 : 1;
+ data_bad += (int) strlen(task->ident) == 0 ? 0 : 1;
+ data_bad += (int) strlen(task->parent_script) == 0 ? 0 : 1;
+ if (data_bad) {
+ SYSERROR("%s.task[%zu] has garbage values!", pool->ident, i);
+ SYSERROR(" ident: %s", task->ident);
+ SYSERROR(" status: %d", task->status);
+ SYSERROR(" pid: %d", task->pid);
+ SYSERROR(" parent_pid: %d", task->parent_pid);
+ SYSERROR(" signaled_by: %d", task->signaled_by);
+ SYSERROR(" t_start.tv_nsec: %ld", task->time_data.t_start.tv_nsec);
+ SYSERROR(" t_start.tv_sec: %ld", task->time_data.t_start.tv_sec);
+ SYSERROR(" t_stop.tv_nsec: %ld", task->time_data.t_stop.tv_nsec);
+ SYSERROR(" t_stop.tv_sec: %ld", task->time_data.t_stop.tv_sec);
+ data_bad_total++;
+ }
+ }
+ STASIS_ASSERT(data_bad_total == 0, "Task array is not pristine");
+ mp_pool_free(&pool);
+}
+
+void test_mp_task() {
+ pool = mp_pool_init("mypool", "mplogs");
+
+ if (pool) {
+ for (size_t i = 0; i < sizeof(commands) / sizeof(*commands); i++) {
+ struct MultiProcessingTask *task;
+ char task_name[100] = {0};
+ sprintf(task_name, "mytask%zu", i);
+ STASIS_ASSERT_FATAL((task = mp_pool_task(pool, task_name, NULL, commands[i])) != NULL, "Task should not be NULL");
+ STASIS_ASSERT(task->pid == MP_POOL_PID_UNUSED, "PID should be non-zero at this point");
+ STASIS_ASSERT(task->parent_pid == MP_POOL_PID_UNUSED, "Parent PID should be non-zero");
+ STASIS_ASSERT(task->status == -1, "Status should be -1 (not started yet)");
+ STASIS_ASSERT(strcmp(task->ident, task_name) == 0, "Wrong task identity");
+ STASIS_ASSERT(strstr(task->log_file, pool->log_root) != NULL, "Log file path must be in log_root");
+ }
+ }
+}
+
+void test_mp_pool_join() {
+ STASIS_ASSERT(mp_pool_join(pool, get_cpu_count(), 0) == 0, "Pool tasks should have not have failed");
+ for (size_t i = 0; i < pool->num_used; i++) {
+ struct MultiProcessingTask *task = &pool->task[i];
+ STASIS_ASSERT(task->pid == MP_POOL_PID_UNUSED, "Task should be marked as unused");
+ STASIS_ASSERT(task->status == 0, "Task status should be zero (success)");
+ }
+}
+
+void test_mp_pool_free() {
+ mp_pool_free(&pool);
+ STASIS_ASSERT(pool == NULL, "Should be NULL");
+}
+
+void test_mp_pool_workflow() {
+ struct testcase {
+ const char *input_cmd;
+ int input_join_flags;
+ int expected_result;
+ int expected_status;
+ int expected_signal;
+ };
+ struct testcase tc[] = {
+ {.input_cmd = "true && kill $$", .input_join_flags = 0, .expected_result = 1, .expected_status = 0, .expected_signal = SIGTERM},
+ {.input_cmd = "false || kill $$", .input_join_flags = 0, .expected_result = 1, .expected_status = 0, .expected_signal = SIGTERM},
+ {.input_cmd = "true", .input_join_flags = 0,.expected_result = 0, .expected_status = 0, .expected_signal = 0},
+ {.input_cmd = "false", .input_join_flags = 0, .expected_result = 1, .expected_status = 1, .expected_signal = 0},
+ };
+ for (size_t i = 0; i < sizeof(tc) / sizeof(*tc); i++) {
+ struct testcase *test = &tc[i];
+ struct MultiProcessingPool *p;
+ struct MultiProcessingTask *task;
+ STASIS_ASSERT((p = mp_pool_init("workflow", "mplogs")) != NULL, "Failed to initialize pool");
+ STASIS_ASSERT((task = mp_pool_task(p, "task", NULL, (char *) test->input_cmd)) != NULL, "Failed to queue task");
+ STASIS_ASSERT(mp_pool_join(p, get_cpu_count(), test->input_join_flags) == test->expected_result, "Unexpected result");
+ STASIS_ASSERT(task->status == test->expected_status, "Unexpected status");
+ STASIS_ASSERT(task->signaled_by == test->expected_signal, "Unexpected signal");
+ STASIS_ASSERT(task->pid == MP_POOL_PID_UNUSED, "Unexpected PID. Should be marked UNUSED.");
+ mp_pool_show_summary(p);
+ mp_pool_free(&p);
+ }
+}
+
+int main(int argc, char *argv[]) {
+ STASIS_TEST_BEGIN_MAIN();
+ STASIS_TEST_FUNC *tests[] = {
+ test_mp_pool_init,
+ test_mp_task,
+ test_mp_pool_join,
+ test_mp_pool_free,
+ test_mp_pool_workflow,
+ };
+ STASIS_TEST_RUN(tests);
+ STASIS_TEST_END_MAIN();
+}
diff --git a/tests/test_recipe.c b/tests/test_recipe.c
index 8e2c470..7c55cd5 100644
--- a/tests/test_recipe.c
+++ b/tests/test_recipe.c
@@ -1,4 +1,6 @@
#include "testing.h"
+#include "relocation.h"
+#include "recipe.h"
static void make_local_recipe(const char *localdir) {
char path[PATH_MAX] = {0};
diff --git a/tests/test_str.c b/tests/test_str.c
index 85c3b78..4991c1c 100644
--- a/tests/test_str.c
+++ b/tests/test_str.c
@@ -79,6 +79,7 @@ void test_strdup_array_and_strcmp_array() {
for (size_t outer = 0; outer < sizeof(tc) / sizeof(*tc); outer++) {
char **result = strdup_array((char **) tc[outer].data);
STASIS_ASSERT(strcmp_array((const char **) result, tc[outer].expected) == 0, "array members were different");
+ GENERIC_ARRAY_FREE(result);
}
const struct testcase tc_bad[] = {
@@ -94,6 +95,7 @@ void test_strdup_array_and_strcmp_array() {
for (size_t outer = 0; outer < sizeof(tc_bad) / sizeof(*tc_bad); outer++) {
char **result = strdup_array((char **) tc_bad[outer].data);
STASIS_ASSERT(strcmp_array((const char **) result, tc_bad[outer].expected) != 0, "array members were identical");
+ GENERIC_ARRAY_FREE(result);
}
}
@@ -242,7 +244,7 @@ void test_join_ex() {
};
for (size_t i = 0; i < sizeof(tc) / sizeof(*tc); i++) {
char *result;
- result = join_ex(tc[i].delim, "a", "b", "c", "d", "e", NULL);
+ result = join_ex((char *) tc[i].delim, "a", "b", "c", "d", "e", NULL);
STASIS_ASSERT(strcmp(result ? result : "", tc[i].expected) == 0, "failed to join array");
guard_free(result);
}
diff --git a/tests/test_wheel.c b/tests/test_wheel.c
index 99ac97c..6818b22 100644
--- a/tests/test_wheel.c
+++ b/tests/test_wheel.c
@@ -1,4 +1,5 @@
#include "testing.h"
+#include "wheel.h"
void test_get_wheel_file() {
struct testcase {
@@ -50,12 +51,12 @@ void test_get_wheel_file() {
},
};
- struct Wheel *doesnotexist = get_wheel_file("doesnotexist", "doesnotexist-0.0.1-py2.py3-none-any.whl", (char *[]) {"not", NULL}, WHEEL_MATCH_ANY);
+ struct Wheel *doesnotexist = get_wheel_info("doesnotexist", "doesnotexist-0.0.1-py2.py3-none-any.whl", (char *[]) {"not", NULL}, WHEEL_MATCH_ANY);
STASIS_ASSERT(doesnotexist == NULL, "returned non-NULL on error");
for (size_t i = 0; i < sizeof(tc) / sizeof(*tc); i++) {
struct testcase *test = &tc[i];
- struct Wheel *wheel = get_wheel_file(".", test->expected.distribution, (char *[]) {(char *) test->expected.version, NULL}, WHEEL_MATCH_ANY);
+ struct Wheel *wheel = get_wheel_info(".", test->expected.distribution, (char *[]) {(char *) test->expected.version, NULL}, WHEEL_MATCH_ANY);
STASIS_ASSERT(wheel != NULL, "result should not be NULL!");
STASIS_ASSERT(wheel->file_name && strcmp(wheel->file_name, test->expected.file_name) == 0, "mismatched file name");
STASIS_ASSERT(wheel->version && strcmp(wheel->version, test->expected.version) == 0, "mismatched version");
@@ -67,6 +68,7 @@ void test_get_wheel_file() {
STASIS_ASSERT(strcmp(wheel->build_tag, test->expected.build_tag) == 0,
"mismatched build tag (optional arbitrary string)");
}
+ wheel_free(&wheel);
}
}