summaryrefslogtreecommitdiff
path: root/src/build.rs
diff options
context:
space:
mode:
authorDawid Rycerz <dawid@rycerz.xyz>2026-01-22 22:07:32 +0100
committerDawid Rycerz <dawid@rycerz.xyz>2026-02-10 18:44:26 +0100
commit064a1d01c5c14f5ecc032fa9b8346a4a88b893f6 (patch)
treea2023f9ccd297ed8a41a3a0cc5699c2add09244d /src/build.rs
witryna 0.1.0 — initial releasev0.1.0
Minimalist Git-based static site deployment orchestrator. Webhook-triggered builds in Podman/Docker containers with atomic symlink publishing, SIGHUP hot-reload, and zero-downtime deploys. See README.md for usage, CHANGELOG.md for details.
Diffstat (limited to 'src/build.rs')
-rw-r--r--src/build.rs843
1 file changed, 843 insertions, 0 deletions
diff --git a/src/build.rs b/src/build.rs
new file mode 100644
index 0000000..e887f64
--- /dev/null
+++ b/src/build.rs
@@ -0,0 +1,843 @@
+use anyhow::{Context as _, Result};
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use std::process::Stdio;
+use std::time::{Duration, Instant};
+use tokio::io::{AsyncWrite, AsyncWriteExt as _, BufWriter};
+use tokio::process::Command;
+use tracing::{debug, info};
+
+use crate::repo_config::RepoConfig;
+
/// Optional container resource limits and network mode.
///
/// Passed from `SiteConfig` to `execute()` to inject `--memory`, `--cpus`,
/// `--pids-limit`, and `--network` flags into the container command.
#[derive(Debug)]
pub struct ContainerOptions {
    /// Memory limit passed verbatim to `--memory` (e.g. "512m"); `None` = unlimited.
    pub memory: Option<String>,
    /// CPU quota passed to `--cpus`; `None` = unlimited.
    pub cpus: Option<f64>,
    /// Process-count ceiling passed to `--pids-limit`; `None` = runtime default.
    pub pids_limit: Option<u32>,
    /// Container network mode, formatted into `--network=<mode>` (default "bridge").
    pub network: String,
    /// Optional subdirectory of the clone used as the container workdir
    /// (joined under `/workspace`); `None` = repository root.
    pub workdir: Option<String>,
}
+
+impl Default for ContainerOptions {
+ fn default() -> Self {
+ Self {
+ memory: None,
+ cpus: None,
+ pids_limit: None,
+ network: "bridge".to_owned(),
+ workdir: None,
+ }
+ }
+}
+
/// Default timeout for build operations.
/// Callers may pass a different `timeout` to `execute()`.
pub const BUILD_TIMEOUT_DEFAULT: Duration = Duration::from_secs(600); // 10 minutes

/// Size of the in-memory tail buffer for stderr (last 1 KB).
/// Used for `BuildFailure::Display` after streaming to disk.
const STDERR_TAIL_SIZE: usize = 1024;
+
/// Result of a build execution.
///
/// Stdout and stderr are streamed to temporary files on disk during the build.
/// Callers should pass these paths to `logs::save_build_log()` for composition.
#[derive(Debug)]
pub struct BuildResult {
    /// Path to the temp file holding the build's captured stdout.
    pub stdout_file: PathBuf,
    /// Path to the temp file holding the build's captured stderr.
    pub stderr_file: PathBuf,
    /// Wall-clock duration of the build, measured from just before arg assembly.
    pub duration: Duration,
}
+
/// Error from a failed build command.
///
/// Carries structured exit code and file paths to captured output.
/// `last_stderr` holds the last 1 KB of stderr for the `Display` impl.
#[derive(Debug)]
pub struct BuildFailure {
    /// Process exit code; -1 when the process was terminated by a signal
    /// (no code available).
    pub exit_code: i32,
    /// Path to the temp file holding captured stdout.
    pub stdout_file: PathBuf,
    /// Path to the temp file holding captured stderr.
    pub stderr_file: PathBuf,
    /// Last `STDERR_TAIL_SIZE` bytes of stderr, lossily decoded as UTF-8.
    pub last_stderr: String,
    /// Elapsed time until the failure was observed.
    pub duration: Duration,
}
+
+impl std::fmt::Display for BuildFailure {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "build failed with exit code {}: {}",
+ self.exit_code,
+ self.last_stderr.trim()
+ )
+ }
+}
+
+impl std::error::Error for BuildFailure {}
+
/// Writer that duplicates all writes to both a primary and secondary writer.
///
/// Used for `--verbose` mode: streams build output to both a temp file (primary)
/// and stderr (secondary) simultaneously.
pub(crate) struct TeeWriter<W> {
    // Authoritative destination; its result is what poll_write returns.
    primary: W,
    // Best-effort mirror (process stderr); its write errors are ignored.
    secondary: tokio::io::Stderr,
}

impl<W: AsyncWrite + Unpin> TeeWriter<W> {
    /// Pair a primary writer with a stderr mirror. `const` so it can be
    /// constructed with no runtime cost.
    pub(crate) const fn new(primary: W, secondary: tokio::io::Stderr) -> Self {
        Self { primary, secondary }
    }
}
+
impl<W: AsyncWrite + Unpin> AsyncWrite for TeeWriter<W> {
    // Poll contract: the primary's result is authoritative; the secondary is
    // mirrored best-effort and its outcome never affects the return value.
    fn poll_write(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &[u8],
    ) -> std::task::Poll<std::io::Result<usize>> {
        // Write to primary first
        let poll = std::pin::Pin::new(&mut self.primary).poll_write(cx, buf);
        if let std::task::Poll::Ready(Ok(n)) = &poll {
            // Best-effort write to secondary (stderr) — same bytes.
            // NOTE(review): if the secondary returns Pending or a short write,
            // those bytes are silently dropped from the mirror (the primary
            // file still gets them). Looks intentional for verbose console
            // output — confirm that lossy mirroring is acceptable.
            let _ = std::pin::Pin::new(&mut self.secondary).poll_write(cx, &buf[..*n]);
        }
        poll
    }

    fn poll_flush(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        // Flush the mirror best-effort, then report the primary's flush result.
        let _ = std::pin::Pin::new(&mut self.secondary).poll_flush(cx);
        std::pin::Pin::new(&mut self.primary).poll_flush(cx)
    }

    fn poll_shutdown(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        // Shut down the mirror best-effort, then report the primary's result.
        let _ = std::pin::Pin::new(&mut self.secondary).poll_shutdown(cx);
        std::pin::Pin::new(&mut self.primary).poll_shutdown(cx)
    }
}
+
+/// Execute a containerized build for a site.
+///
+/// Stdout and stderr are streamed to the provided temporary files on disk
+/// instead of being buffered in memory. This removes unbounded memory usage
+/// for container builds.
+///
+/// # Arguments
+/// * `runtime` - Container runtime to use ("podman" or "docker")
+/// * `clone_dir` - Path to the cloned repository
+/// * `repo_config` - Build configuration from witryna.yaml
+/// * `cache_volumes` - Pairs of (`container_path`, `host_path`) for persistent cache mounts
+/// * `env` - User-defined environment variables to pass into the container via `--env`
+/// * `options` - Optional container resource limits and network mode
+/// * `stdout_file` - Temp file path for captured stdout
+/// * `stderr_file` - Temp file path for captured stderr
+/// * `timeout` - Maximum duration before killing the build
+/// * `verbose` - When true, also stream build output to stderr in real-time
+///
+/// # Errors
+///
+/// Returns an error if the container command times out, fails to execute,
+/// or exits with a non-zero status code (as a [`BuildFailure`]).
+///
+/// # Security
+/// - Uses typed arguments (no shell interpolation) per OWASP guidelines
+/// - Mounts clone directory as read-write (needed for build output)
+/// - Runs with minimal capabilities
+#[allow(clippy::implicit_hasher, clippy::too_many_arguments)]
+pub async fn execute(
+ runtime: &str,
+ clone_dir: &Path,
+ repo_config: &RepoConfig,
+ cache_volumes: &[(String, PathBuf)],
+ env: &HashMap<String, String>,
+ options: &ContainerOptions,
+ stdout_file: &Path,
+ stderr_file: &Path,
+ timeout: Duration,
+ verbose: bool,
+) -> Result<BuildResult> {
+ info!(
+ image = %repo_config.image,
+ command = %repo_config.command,
+ path = %clone_dir.display(),
+ "executing container build"
+ );
+
+ let start = Instant::now();
+
+ // Build args dynamically to support optional cache volumes
+ let mut args = vec![
+ "run".to_owned(),
+ "--rm".to_owned(),
+ "--volume".to_owned(),
+ format!("{}:/workspace:Z", clone_dir.display()),
+ ];
+
+ // Add cache volume mounts
+ for (container_path, host_path) in cache_volumes {
+ args.push("--volume".to_owned());
+ args.push(format!("{}:{}:Z", host_path.display(), container_path));
+ }
+
+ // Add user-defined environment variables
+ for (key, value) in env {
+ args.push("--env".to_owned());
+ args.push(format!("{key}={value}"));
+ }
+
+ let workdir = match &options.workdir {
+ Some(subdir) => format!("/workspace/{subdir}"),
+ None => "/workspace".to_owned(),
+ };
+ args.extend(["--workdir".to_owned(), workdir, "--cap-drop=ALL".to_owned()]);
+
+ if runtime == "podman" {
+ args.push("--userns=keep-id".to_owned());
+ } else {
+ // Docker: container runs as root but workspace is owned by host UID.
+ // DAC_OVERRIDE lets root bypass file permission checks.
+ // Podman doesn't need this because --userns=keep-id maps to the host UID.
+ args.push("--cap-add=DAC_OVERRIDE".to_owned());
+ }
+
+ // Resource limits
+ if let Some(memory) = &options.memory {
+ args.push("--memory".to_owned());
+ args.push(memory.clone());
+ }
+ if let Some(cpus) = options.cpus {
+ args.push("--cpus".to_owned());
+ args.push(cpus.to_string());
+ }
+ if let Some(pids) = options.pids_limit {
+ args.push("--pids-limit".to_owned());
+ args.push(pids.to_string());
+ }
+
+ // Network mode
+ args.push(format!("--network={}", options.network));
+
+ args.extend([
+ repo_config.image.clone(),
+ "sh".to_owned(),
+ "-c".to_owned(),
+ repo_config.command.clone(),
+ ]);
+
+ // Spawn with piped stdout/stderr for streaming (OWASP: no shell interpolation)
+ let mut child = Command::new(runtime)
+ .args(&args)
+ .kill_on_drop(true)
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .spawn()
+ .context("failed to spawn container build")?;
+
+ let stdout_pipe = child
+ .stdout
+ .take()
+ .ok_or_else(|| anyhow::anyhow!("missing stdout pipe"))?;
+ let stderr_pipe = child
+ .stderr
+ .take()
+ .ok_or_else(|| anyhow::anyhow!("missing stderr pipe"))?;
+
+ let stdout_file_writer = BufWriter::new(
+ tokio::fs::File::create(stdout_file)
+ .await
+ .with_context(|| format!("failed to create {}", stdout_file.display()))?,
+ );
+ let stderr_file_writer = BufWriter::new(
+ tokio::fs::File::create(stderr_file)
+ .await
+ .with_context(|| format!("failed to create {}", stderr_file.display()))?,
+ );
+
+ if verbose {
+ let mut stdout_tee = TeeWriter::new(stdout_file_writer, tokio::io::stderr());
+ let mut stderr_tee = TeeWriter::new(stderr_file_writer, tokio::io::stderr());
+ run_build_process(
+ child,
+ stdout_pipe,
+ stderr_pipe,
+ &mut stdout_tee,
+ &mut stderr_tee,
+ start,
+ stdout_file,
+ stderr_file,
+ clone_dir,
+ "container",
+ timeout,
+ )
+ .await
+ } else {
+ let mut stdout_writer = stdout_file_writer;
+ let mut stderr_writer = stderr_file_writer;
+ run_build_process(
+ child,
+ stdout_pipe,
+ stderr_pipe,
+ &mut stdout_writer,
+ &mut stderr_writer,
+ start,
+ stdout_file,
+ stderr_file,
+ clone_dir,
+ "container",
+ timeout,
+ )
+ .await
+ }
+}
+
+/// Copy from reader to writer, keeping the last `tail_size` bytes in memory.
+/// Returns `(total_bytes_copied, tail_buffer)`.
+///
+/// When `tail_size` is 0, skips tail tracking entirely (used for stdout
+/// where we don't need a tail). The tail buffer is used to provide a
+/// meaningful error message in `BuildFailure::Display` without reading
+/// the entire stderr file back into memory.
+#[allow(clippy::indexing_slicing)] // buf[..n] bounded by read() return value
+pub(crate) async fn copy_with_tail<R, W>(
+ mut reader: R,
+ mut writer: W,
+ tail_size: usize,
+) -> std::io::Result<(u64, Vec<u8>)>
+where
+ R: tokio::io::AsyncRead + Unpin,
+ W: tokio::io::AsyncWrite + Unpin,
+{
+ use tokio::io::AsyncReadExt as _;
+
+ let mut buf = [0_u8; 8192];
+ let mut total: u64 = 0;
+ let mut tail: Vec<u8> = Vec::new();
+
+ loop {
+ let n = reader.read(&mut buf).await?;
+ if n == 0 {
+ break;
+ }
+ writer.write_all(&buf[..n]).await?;
+ total += n as u64;
+
+ if tail_size > 0 {
+ tail.extend_from_slice(&buf[..n]);
+ if tail.len() > tail_size {
+ let excess = tail.len() - tail_size;
+ tail.drain(..excess);
+ }
+ }
+ }
+
+ Ok((total, tail))
+}
+
+/// Shared build-process loop: stream stdout/stderr through writers, handle timeout and exit status.
+#[allow(clippy::too_many_arguments)]
+async fn run_build_process<W1, W2>(
+ mut child: tokio::process::Child,
+ stdout_pipe: tokio::process::ChildStdout,
+ stderr_pipe: tokio::process::ChildStderr,
+ stdout_writer: &mut W1,
+ stderr_writer: &mut W2,
+ start: Instant,
+ stdout_file: &Path,
+ stderr_file: &Path,
+ clone_dir: &Path,
+ label: &str,
+ timeout: Duration,
+) -> Result<BuildResult>
+where
+ W1: AsyncWrite + Unpin,
+ W2: AsyncWrite + Unpin,
+{
+ #[allow(clippy::large_futures)]
+ let Ok((stdout_res, stderr_res, wait_res)) = tokio::time::timeout(timeout, async {
+ let (stdout_res, stderr_res, wait_res) = tokio::join!(
+ copy_with_tail(stdout_pipe, &mut *stdout_writer, 0),
+ copy_with_tail(stderr_pipe, &mut *stderr_writer, STDERR_TAIL_SIZE),
+ child.wait(),
+ );
+ (stdout_res, stderr_res, wait_res)
+ })
+ .await
+ else {
+ let _ = child.kill().await;
+ anyhow::bail!("{label} build timed out after {}s", timeout.as_secs());
+ };
+
+ stdout_res.context("failed to stream stdout")?;
+ let (_, stderr_tail) = stderr_res.context("failed to stream stderr")?;
+ stdout_writer.flush().await?;
+ stderr_writer.flush().await?;
+
+ let status = wait_res.context(format!("{label} build I/O error"))?;
+ let last_stderr = String::from_utf8_lossy(&stderr_tail).into_owned();
+
+ if !status.success() {
+ let exit_code = status.code().unwrap_or(-1);
+ debug!(exit_code, "{label} build failed");
+ return Err(BuildFailure {
+ exit_code,
+ stdout_file: stdout_file.to_path_buf(),
+ stderr_file: stderr_file.to_path_buf(),
+ last_stderr,
+ duration: start.elapsed(),
+ }
+ .into());
+ }
+
+ let duration = start.elapsed();
+ debug!(path = %clone_dir.display(), ?duration, "{label} build completed");
+ Ok(BuildResult {
+ stdout_file: stdout_file.to_path_buf(),
+ stderr_file: stderr_file.to_path_buf(),
+ duration,
+ })
+}
+
+#[cfg(test)]
+#[allow(
+ clippy::unwrap_used,
+ clippy::indexing_slicing,
+ clippy::large_futures,
+ clippy::print_stderr
+)]
+mod tests {
+ use super::*;
+ use crate::test_support::{cleanup, temp_dir};
+ use tokio::fs;
+ use tokio::process::Command as TokioCommand;
+
+ /// Check if a container runtime is available and its daemon is running.
+ async fn container_runtime_available(runtime: &str) -> bool {
+ TokioCommand::new(runtime)
+ .args(["info"])
+ .stdout(Stdio::null())
+ .stderr(Stdio::null())
+ .status()
+ .await
+ .map(|s| s.success())
+ .unwrap_or(false)
+ }
+
+ /// Get the first available container runtime.
+ async fn get_runtime() -> Option<String> {
+ for runtime in &["podman", "docker"] {
+ if container_runtime_available(runtime).await {
+ return Some((*runtime).to_owned());
+ }
+ }
+ None
+ }
+
+ // --- copy_with_tail() unit tests ---
+
+ #[tokio::test]
+ async fn copy_with_tail_small_input() {
+ let input = b"hello";
+ let mut output = Vec::new();
+ let (total, tail) = copy_with_tail(&input[..], &mut output, 1024).await.unwrap();
+ assert_eq!(total, 5);
+ assert_eq!(tail, b"hello");
+ assert_eq!(output, b"hello");
+ }
+
+ #[tokio::test]
+ async fn copy_with_tail_large_input() {
+ // Input larger than tail_size — only last N bytes kept
+ let input: Vec<u8> = (0_u8..=255).cycle().take(2048).collect();
+ let mut output = Vec::new();
+ let (total, tail) = copy_with_tail(&input[..], &mut output, 512).await.unwrap();
+ assert_eq!(total, 2048);
+ assert_eq!(tail.len(), 512);
+ assert_eq!(&tail[..], &input[2048 - 512..]);
+ assert_eq!(output, input);
+ }
+
+ #[tokio::test]
+ async fn copy_with_tail_zero_tail() {
+ let input = b"data";
+ let mut output = Vec::new();
+ let (total, tail) = copy_with_tail(&input[..], &mut output, 0).await.unwrap();
+ assert_eq!(total, 4);
+ assert!(tail.is_empty());
+ assert_eq!(output, b"data");
+ }
+
    // --- ContainerOptions workdir tests ---

    /// `options.workdir` should make the container command run from the given
    /// subdirectory of the clone, not the repository root.
    #[tokio::test]
    async fn execute_custom_workdir_runs_from_subdir() {
        // Tier 2: skipped gracefully when no container runtime is installed.
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-workdir-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        // Create a subdirectory with a marker file
        let subdir = temp.join("packages").join("frontend");
        fs::create_dir_all(&subdir).await.unwrap();
        fs::write(subdir.join("marker.txt"), "subdir-marker")
            .await
            .unwrap();

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            // Relative path: only resolvable if the workdir is the subdir.
            command: "cat marker.txt".to_owned(),
            public: "dist".to_owned(),
        };

        let options = ContainerOptions {
            workdir: Some("packages/frontend".to_owned()),
            ..ContainerOptions::default()
        };

        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &options,
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_ok(), "build should succeed: {result:?}");
        let stdout = fs::read_to_string(&stdout_tmp).await.unwrap();
        assert!(
            stdout.contains("subdir-marker"),
            "should read marker from subdir, got: {stdout}"
        );

        cleanup(&temp).await;
    }
+
    // --- execute() container tests (Tier 2) ---

    /// Happy path: the command's stdout is streamed into the stdout temp file.
    #[tokio::test]
    async fn execute_simple_command_success() {
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            command: "echo 'hello world'".to_owned(),
            public: "dist".to_owned(),
        };

        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_ok(), "build should succeed: {result:?}");
        let stdout = fs::read_to_string(&stdout_tmp).await.unwrap();
        assert!(stdout.contains("hello world"));

        cleanup(&temp).await;
    }

    /// Files written inside /workspace must appear in the host clone dir
    /// (the volume is mounted read-write).
    #[tokio::test]
    async fn execute_creates_output_files() {
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            command: "mkdir -p dist && echo 'content' > dist/index.html".to_owned(),
            public: "dist".to_owned(),
        };

        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_ok(), "build should succeed: {result:?}");

        // Verify output file was created
        let output_file = temp.join("dist/index.html");
        assert!(output_file.exists(), "output file should exist");

        let content = fs::read_to_string(&output_file).await.unwrap();
        assert!(content.contains("content"));

        cleanup(&temp).await;
    }
+
    /// Non-zero exit must surface as an error whose message carries the code.
    #[tokio::test]
    async fn execute_failing_command_returns_error() {
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            command: "exit 1".to_owned(),
            public: "dist".to_owned(),
        };

        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_err(), "build should fail");
        let err = result.unwrap_err().to_string();
        // Exercises BuildFailure::Display formatting.
        assert!(err.contains("exit code 1"));

        cleanup(&temp).await;
    }

    /// The stderr tail captured during streaming must appear in the error message.
    #[tokio::test]
    async fn execute_command_with_stderr() {
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            command: "echo 'error message' >&2 && exit 1".to_owned(),
            public: "dist".to_owned(),
        };

        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_err(), "build should fail");
        let err = result.unwrap_err().to_string();
        assert!(err.contains("error message"));

        cleanup(&temp).await;
    }
+
    /// A pull failure for a nonexistent image must surface as an error.
    #[tokio::test]
    async fn execute_invalid_image_returns_error() {
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        let repo_config = RepoConfig {
            image: "nonexistent-image-xyz-12345:latest".to_owned(),
            command: "echo hello".to_owned(),
            public: "dist".to_owned(),
        };

        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_err(), "build should fail for invalid image");

        cleanup(&temp).await;
    }

    /// With no workdir override, the command must run from /workspace
    /// (i.e. the clone root) and see files placed there.
    #[tokio::test]
    async fn execute_workdir_is_correct() {
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        // Create a file in the temp dir to verify we can see it
        fs::write(temp.join("marker.txt"), "test-marker")
            .await
            .unwrap();

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            command: "cat marker.txt".to_owned(),
            public: "dist".to_owned(),
        };

        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_ok(), "build should succeed: {result:?}");
        let stdout = fs::read_to_string(&stdout_tmp).await.unwrap();
        assert!(stdout.contains("test-marker"));

        cleanup(&temp).await;
    }
+
    /// Spawning a nonexistent runtime binary must fail fast with an error
    /// (no container runtime needed, so this test never skips).
    #[tokio::test]
    async fn execute_invalid_runtime_returns_error() {
        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            command: "echo hello".to_owned(),
            public: "dist".to_owned(),
        };

        let result = execute(
            "nonexistent-runtime-xyz",
            &temp,
            &repo_config,
            &[],
            &HashMap::new(),
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_err(), "build should fail for invalid runtime");

        cleanup(&temp).await;
    }

    /// Entries in `env` must be injected via `--env` and visible to the command.
    #[tokio::test]
    async fn execute_with_env_vars_passes_to_container() {
        let Some(runtime) = get_runtime().await else {
            eprintln!("Skipping test: no container runtime available");
            return;
        };

        let temp = temp_dir("build-test").await;
        let stdout_tmp = temp.join("stdout.tmp");
        let stderr_tmp = temp.join("stderr.tmp");

        let repo_config = RepoConfig {
            image: "alpine:latest".to_owned(),
            command: "printenv MY_VAR".to_owned(),
            public: "dist".to_owned(),
        };

        let env = HashMap::from([("MY_VAR".to_owned(), "my_value".to_owned())]);
        let result = execute(
            &runtime,
            &temp,
            &repo_config,
            &[],
            &env,
            &ContainerOptions::default(),
            &stdout_tmp,
            &stderr_tmp,
            BUILD_TIMEOUT_DEFAULT,
            false,
        )
        .await;

        assert!(result.is_ok(), "build should succeed: {result:?}");
        let stdout = fs::read_to_string(&stdout_tmp).await.unwrap();
        assert!(
            stdout.contains("my_value"),
            "stdout should contain env var value, got: {stdout}",
        );

        cleanup(&temp).await;
    }
+}