diff options
| author | Dawid Rycerz <dawid@rycerz.xyz> | 2026-02-15 21:27:00 +0100 |
|---|---|---|
| committer | Dawid Rycerz <dawid@rycerz.xyz> | 2026-02-15 21:27:00 +0100 |
| commit | ce0dbf6b249956700c6a1705bf4ad85a09d53e8c (patch) | |
| tree | d7c3236807cfbf75d7f3a355eb5df5a5e2cc4ad7 /src | |
| parent | 064a1d01c5c14f5ecc032fa9b8346a4a88b893f6 (diff) | |
Switch, cleanup, and status CLI commands. Persistent build state via
state.json. Post-deploy hooks on success and failure with
WITRYNA_BUILD_STATUS. Dependency diet (axum→tiny_http, clap→argh,
tracing→log). Drop built-in rate limiting. Nix flake with NixOS module.
Arch Linux PKGBUILD. Centralized version management.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Diffstat (limited to 'src')
| -rw-r--r-- | src/build.rs | 131 | ||||
| -rw-r--r-- | src/build_guard.rs | 43 | ||||
| -rw-r--r-- | src/cleanup.rs | 92 | ||||
| -rw-r--r-- | src/cli.rs | 303 | ||||
| -rw-r--r-- | src/config.rs | 73 | ||||
| -rw-r--r-- | src/git.rs | 71 | ||||
| -rw-r--r-- | src/hook.rs | 45 | ||||
| -rw-r--r-- | src/lib.rs | 9 | ||||
| -rw-r--r-- | src/logger.rs | 121 | ||||
| -rw-r--r-- | src/logs.rs | 291 | ||||
| -rw-r--r-- | src/main.rs | 370 | ||||
| -rw-r--r-- | src/pipeline.rs | 234 | ||||
| -rw-r--r-- | src/polling.rs | 72 | ||||
| -rw-r--r-- | src/publish.rs | 25 | ||||
| -rw-r--r-- | src/server.rs | 1026 | ||||
| -rw-r--r-- | src/state.rs | 311 | ||||
| -rw-r--r-- | src/test_support.rs | 23 | ||||
| -rw-r--r-- | src/time.rs | 222 |
18 files changed, 2097 insertions, 1365 deletions
diff --git a/src/build.rs b/src/build.rs index e887f64..b56e680 100644 --- a/src/build.rs +++ b/src/build.rs @@ -1,11 +1,11 @@ use anyhow::{Context as _, Result}; +use log::{debug, info}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::process::Stdio; use std::time::{Duration, Instant}; use tokio::io::{AsyncWrite, AsyncWriteExt as _, BufWriter}; use tokio::process::Command; -use tracing::{debug, info}; use crate::repo_config::RepoConfig; @@ -82,13 +82,13 @@ impl std::error::Error for BuildFailure {} /// /// Used for `--verbose` mode: streams build output to both a temp file (primary) /// and stderr (secondary) simultaneously. -pub(crate) struct TeeWriter<W> { +pub struct TeeWriter<W> { primary: W, secondary: tokio::io::Stderr, } impl<W: AsyncWrite + Unpin> TeeWriter<W> { - pub(crate) const fn new(primary: W, secondary: tokio::io::Stderr) -> Self { + pub const fn new(primary: W, secondary: tokio::io::Stderr) -> Self { Self { primary, secondary } } } @@ -125,56 +125,18 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for TeeWriter<W> { } } -/// Execute a containerized build for a site. -/// -/// Stdout and stderr are streamed to the provided temporary files on disk -/// instead of being buffered in memory. This removes unbounded memory usage -/// for container builds. 
-/// -/// # Arguments -/// * `runtime` - Container runtime to use ("podman" or "docker") -/// * `clone_dir` - Path to the cloned repository -/// * `repo_config` - Build configuration from witryna.yaml -/// * `cache_volumes` - Pairs of (`container_path`, `host_path`) for persistent cache mounts -/// * `env` - User-defined environment variables to pass into the container via `--env` -/// * `options` - Optional container resource limits and network mode -/// * `stdout_file` - Temp file path for captured stdout -/// * `stderr_file` - Temp file path for captured stderr -/// * `timeout` - Maximum duration before killing the build -/// * `verbose` - When true, also stream build output to stderr in real-time -/// -/// # Errors -/// -/// Returns an error if the container command times out, fails to execute, -/// or exits with a non-zero status code (as a [`BuildFailure`]). +/// Build the container CLI arguments for a build invocation. /// -/// # Security -/// - Uses typed arguments (no shell interpolation) per OWASP guidelines -/// - Mounts clone directory as read-write (needed for build output) -/// - Runs with minimal capabilities -#[allow(clippy::implicit_hasher, clippy::too_many_arguments)] -pub async fn execute( +/// Assembles the full `run --rm ...` argument list including volume mounts, +/// environment variables, resource limits, and the build command. 
+fn build_container_args( runtime: &str, clone_dir: &Path, repo_config: &RepoConfig, cache_volumes: &[(String, PathBuf)], env: &HashMap<String, String>, options: &ContainerOptions, - stdout_file: &Path, - stderr_file: &Path, - timeout: Duration, - verbose: bool, -) -> Result<BuildResult> { - info!( - image = %repo_config.image, - command = %repo_config.command, - path = %clone_dir.display(), - "executing container build" - ); - - let start = Instant::now(); - - // Build args dynamically to support optional cache volumes +) -> Vec<String> { let mut args = vec![ "run".to_owned(), "--rm".to_owned(), @@ -182,13 +144,13 @@ pub async fn execute( format!("{}:/workspace:Z", clone_dir.display()), ]; - // Add cache volume mounts + // Cache volume mounts for (container_path, host_path) in cache_volumes { args.push("--volume".to_owned()); args.push(format!("{}:{}:Z", host_path.display(), container_path)); } - // Add user-defined environment variables + // User-defined environment variables for (key, value) in env { args.push("--env".to_owned()); args.push(format!("{key}={value}")); @@ -203,9 +165,6 @@ pub async fn execute( if runtime == "podman" { args.push("--userns=keep-id".to_owned()); } else { - // Docker: container runs as root but workspace is owned by host UID. - // DAC_OVERRIDE lets root bypass file permission checks. - // Podman doesn't need this because --userns=keep-id maps to the host UID. args.push("--cap-add=DAC_OVERRIDE".to_owned()); } @@ -233,6 +192,59 @@ pub async fn execute( repo_config.command.clone(), ]); + args +} + +/// Execute a containerized build for a site. +/// +/// Stdout and stderr are streamed to the provided temporary files on disk +/// instead of being buffered in memory. This removes unbounded memory usage +/// for container builds. 
+/// +/// # Arguments +/// * `runtime` - Container runtime to use ("podman" or "docker") +/// * `clone_dir` - Path to the cloned repository +/// * `repo_config` - Build configuration from witryna.yaml +/// * `cache_volumes` - Pairs of (`container_path`, `host_path`) for persistent cache mounts +/// * `env` - User-defined environment variables to pass into the container via `--env` +/// * `options` - Optional container resource limits and network mode +/// * `stdout_file` - Temp file path for captured stdout +/// * `stderr_file` - Temp file path for captured stderr +/// * `timeout` - Maximum duration before killing the build +/// * `verbose` - When true, also stream build output to stderr in real-time +/// +/// # Errors +/// +/// Returns an error if the container command times out, fails to execute, +/// or exits with a non-zero status code (as a [`BuildFailure`]). +/// +/// # Security +/// - Uses typed arguments (no shell interpolation) per OWASP guidelines +/// - Mounts clone directory as read-write (needed for build output) +/// - Runs with minimal capabilities +#[allow(clippy::implicit_hasher, clippy::too_many_arguments)] +pub async fn execute( + runtime: &str, + clone_dir: &Path, + repo_config: &RepoConfig, + cache_volumes: &[(String, PathBuf)], + env: &HashMap<String, String>, + options: &ContainerOptions, + stdout_file: &Path, + stderr_file: &Path, + timeout: Duration, + verbose: bool, +) -> Result<BuildResult> { + info!( + "executing container build: image={} command={} path={}", + repo_config.image, + repo_config.command, + clone_dir.display() + ); + + let start = Instant::now(); + let args = build_container_args(runtime, clone_dir, repo_config, cache_volumes, env, options); + // Spawn with piped stdout/stderr for streaming (OWASP: no shell interpolation) let mut child = Command::new(runtime) .args(&args) @@ -265,7 +277,7 @@ pub async fn execute( if verbose { let mut stdout_tee = TeeWriter::new(stdout_file_writer, tokio::io::stderr()); let mut stderr_tee = 
TeeWriter::new(stderr_file_writer, tokio::io::stderr()); - run_build_process( + Box::pin(run_build_process( child, stdout_pipe, stderr_pipe, @@ -277,12 +289,12 @@ pub async fn execute( clone_dir, "container", timeout, - ) + )) .await } else { let mut stdout_writer = stdout_file_writer; let mut stderr_writer = stderr_file_writer; - run_build_process( + Box::pin(run_build_process( child, stdout_pipe, stderr_pipe, @@ -294,7 +306,7 @@ pub async fn execute( clone_dir, "container", timeout, - ) + )) .await } } @@ -307,7 +319,7 @@ pub async fn execute( /// meaningful error message in `BuildFailure::Display` without reading /// the entire stderr file back into memory. #[allow(clippy::indexing_slicing)] // buf[..n] bounded by read() return value -pub(crate) async fn copy_with_tail<R, W>( +pub async fn copy_with_tail<R, W>( mut reader: R, mut writer: W, tail_size: usize, @@ -386,7 +398,7 @@ where if !status.success() { let exit_code = status.code().unwrap_or(-1); - debug!(exit_code, "{label} build failed"); + debug!("{label} build failed: exit_code={exit_code}"); return Err(BuildFailure { exit_code, stdout_file: stdout_file.to_path_buf(), @@ -398,7 +410,10 @@ where } let duration = start.elapsed(); - debug!(path = %clone_dir.display(), ?duration, "{label} build completed"); + debug!( + "{label} build completed: path={} duration={duration:?}", + clone_dir.display() + ); Ok(BuildResult { stdout_file: stdout_file.to_path_buf(), stderr_file: stderr_file.to_path_buf(), diff --git a/src/build_guard.rs b/src/build_guard.rs index 0c7fed3..dd67bea 100644 --- a/src/build_guard.rs +++ b/src/build_guard.rs @@ -1,33 +1,35 @@ -use dashmap::DashSet; -use std::sync::Arc; +use std::collections::HashSet; +use std::sync::{Arc, Mutex}; /// Manages per-site build scheduling: immediate execution and a depth-1 queue. /// /// When a build is already in progress, a single rebuild can be queued. /// Subsequent requests while a rebuild is already queued are collapsed (no-op). 
pub struct BuildScheduler { - pub in_progress: DashSet<String>, - pub queued: DashSet<String>, + pub in_progress: Mutex<HashSet<String>>, + pub queued: Mutex<HashSet<String>>, } impl BuildScheduler { #[must_use] pub fn new() -> Self { Self { - in_progress: DashSet::new(), - queued: DashSet::new(), + in_progress: Mutex::new(HashSet::new()), + queued: Mutex::new(HashSet::new()), } } /// Queue a rebuild for a site that is currently building. /// Returns `true` if newly queued, `false` if already queued (collapse). pub(crate) fn try_queue(&self, site_name: &str) -> bool { - self.queued.insert(site_name.to_owned()) + #[allow(clippy::unwrap_used)] + self.queued.lock().unwrap().insert(site_name.to_owned()) } /// Check and clear queued rebuild. Returns `true` if there was one. pub(crate) fn take_queued(&self, site_name: &str) -> bool { - self.queued.remove(site_name).is_some() + #[allow(clippy::unwrap_used)] + self.queued.lock().unwrap().remove(site_name) } } @@ -47,7 +49,13 @@ pub(crate) struct BuildGuard { impl BuildGuard { pub(crate) fn try_acquire(site_name: String, scheduler: &Arc<BuildScheduler>) -> Option<Self> { - if scheduler.in_progress.insert(site_name.clone()) { + #[allow(clippy::unwrap_used)] + let inserted = scheduler + .in_progress + .lock() + .unwrap() + .insert(site_name.clone()); + if inserted { Some(Self { site_name, scheduler: Arc::clone(scheduler), @@ -60,7 +68,12 @@ impl BuildGuard { impl Drop for BuildGuard { fn drop(&mut self) { - self.scheduler.in_progress.remove(&self.site_name); + #[allow(clippy::unwrap_used)] + self.scheduler + .in_progress + .lock() + .unwrap() + .remove(&self.site_name); } } @@ -74,7 +87,7 @@ mod tests { let scheduler = Arc::new(BuildScheduler::new()); let guard = BuildGuard::try_acquire("my-site".to_owned(), &scheduler); assert!(guard.is_some()); - assert!(scheduler.in_progress.contains("my-site")); + assert!(scheduler.in_progress.lock().unwrap().contains("my-site")); } #[test] @@ -90,10 +103,10 @@ mod tests { let scheduler = 
Arc::new(BuildScheduler::new()); { let _guard = BuildGuard::try_acquire("my-site".to_owned(), &scheduler); - assert!(scheduler.in_progress.contains("my-site")); + assert!(scheduler.in_progress.lock().unwrap().contains("my-site")); } // Guard dropped — lock released - assert!(!scheduler.in_progress.contains("my-site")); + assert!(!scheduler.in_progress.lock().unwrap().contains("my-site")); let again = BuildGuard::try_acquire("my-site".to_owned(), &scheduler); assert!(again.is_some()); } @@ -102,7 +115,7 @@ mod tests { fn scheduler_try_queue_succeeds() { let scheduler = BuildScheduler::new(); assert!(scheduler.try_queue("my-site")); - assert!(scheduler.queued.contains("my-site")); + assert!(scheduler.queued.lock().unwrap().contains("my-site")); } #[test] @@ -117,7 +130,7 @@ mod tests { let scheduler = BuildScheduler::new(); scheduler.try_queue("my-site"); assert!(scheduler.take_queued("my-site")); - assert!(!scheduler.queued.contains("my-site")); + assert!(!scheduler.queued.lock().unwrap().contains("my-site")); } #[test] diff --git a/src/cleanup.rs b/src/cleanup.rs index ced8320..b2b068b 100644 --- a/src/cleanup.rs +++ b/src/cleanup.rs @@ -1,6 +1,7 @@ +use crate::state; use anyhow::{Context as _, Result}; +use log::{debug, info, warn}; use std::path::Path; -use tracing::{debug, info, warn}; /// Result of a cleanup operation. 
#[derive(Debug, Default)] @@ -35,7 +36,7 @@ pub async fn cleanup_old_builds( ) -> Result<CleanupResult> { // If max_to_keep is 0, keep all builds if max_to_keep == 0 { - debug!(%site_name, "max_builds_to_keep is 0, skipping cleanup"); + debug!("[{site_name}] max_builds_to_keep is 0, skipping cleanup"); return Ok(CleanupResult::default()); } @@ -44,7 +45,7 @@ pub async fn cleanup_old_builds( // Check if builds directory exists if !builds_dir.exists() { - debug!(%site_name, "builds directory does not exist, skipping cleanup"); + debug!("[{site_name}] builds directory does not exist, skipping cleanup"); return Ok(CleanupResult::default()); } @@ -59,10 +60,14 @@ pub async fn cleanup_old_builds( // Calculate how many to remove let to_remove = build_timestamps.len().saturating_sub(max_to_keep as usize); if to_remove == 0 { - debug!(%site_name, count = build_timestamps.len(), max = max_to_keep, "no builds to remove"); + debug!( + "[{site_name}] no builds to remove: count={} max={max_to_keep}", + build_timestamps.len() + ); } // Remove oldest builds (they're at the end after reverse sort) + let mut removed_timestamps = Vec::new(); for timestamp in build_timestamps.iter().skip(max_to_keep as usize) { let build_path = builds_dir.join(timestamp); let log_path = site_log_dir.join(format!("{timestamp}.log")); @@ -70,11 +75,15 @@ pub async fn cleanup_old_builds( // Remove build directory match tokio::fs::remove_dir_all(&build_path).await { Ok(()) => { - debug!(path = %build_path.display(), "removed old build"); + debug!("removed old build: {}", build_path.display()); result.builds_removed += 1; + removed_timestamps.push(timestamp.clone()); } Err(e) => { - warn!(path = %build_path.display(), error = %e, "failed to remove old build"); + warn!( + "failed to remove old build: path={} error={e}", + build_path.display() + ); } } @@ -82,11 +91,14 @@ pub async fn cleanup_old_builds( if log_path.exists() { match tokio::fs::remove_file(&log_path).await { Ok(()) => { - debug!(path = 
%log_path.display(), "removed old log"); + debug!("removed old log: {}", log_path.display()); result.logs_removed += 1; } Err(e) => { - warn!(path = %log_path.display(), error = %e, "failed to remove old log"); + warn!( + "failed to remove old log: path={} error={e}", + log_path.display() + ); } } } @@ -95,18 +107,24 @@ pub async fn cleanup_old_builds( let hook_log_path = site_log_dir.join(format!("{timestamp}-hook.log")); match tokio::fs::remove_file(&hook_log_path).await { Ok(()) => { - debug!(path = %hook_log_path.display(), "removed old hook log"); + debug!("removed old hook log: {}", hook_log_path.display()); result.logs_removed += 1; } Err(e) if e.kind() == std::io::ErrorKind::NotFound => { // Not every build has a hook — silently skip } Err(e) => { - warn!(path = %hook_log_path.display(), error = %e, "failed to remove old hook log"); + warn!( + "failed to remove old hook log: path={} error={e}", + hook_log_path.display() + ); } } } + // Prune removed builds from state.json + state::remove_builds(base_dir, site_name, &removed_timestamps).await; + // Remove orphaned temp files (crash recovery) if site_log_dir.exists() && let Ok(mut entries) = tokio::fs::read_dir(&site_log_dir).await @@ -117,10 +135,13 @@ pub async fn cleanup_old_builds( let path = entry.path(); match tokio::fs::remove_file(&path).await { Ok(()) => { - debug!(path = %path.display(), "removed orphaned temp file"); + debug!("removed orphaned temp file: {}", path.display()); } Err(e) => { - warn!(path = %path.display(), error = %e, "failed to remove orphaned temp file"); + warn!( + "failed to remove orphaned temp file: path={} error={e}", + path.display() + ); } } } @@ -129,10 +150,8 @@ pub async fn cleanup_old_builds( if result.builds_removed > 0 || result.logs_removed > 0 { info!( - %site_name, - builds_removed = result.builds_removed, - logs_removed = result.logs_removed, - "cleanup completed" + "[{site_name}] cleanup completed: builds_removed={} logs_removed={}", + result.builds_removed, 
result.logs_removed ); } @@ -142,7 +161,11 @@ pub async fn cleanup_old_builds( /// List all build timestamps in a builds directory. /// /// Returns directory names that look like timestamps, excluding 'current' symlink. -async fn list_build_timestamps(builds_dir: &Path) -> Result<Vec<String>> { +/// +/// # Errors +/// +/// Returns an error if the builds directory cannot be read or entries cannot be inspected. +pub async fn list_build_timestamps(builds_dir: &Path) -> Result<Vec<String>> { let mut timestamps = Vec::new(); let mut entries = tokio::fs::read_dir(builds_dir) @@ -176,7 +199,8 @@ async fn list_build_timestamps(builds_dir: &Path) -> Result<Vec<String>> { /// Check if a string looks like a valid timestamp format. /// /// Expected format: YYYYMMDD-HHMMSS-microseconds (e.g., 20260126-143000-123456) -fn looks_like_timestamp(s: &str) -> bool { +#[must_use] +pub fn looks_like_timestamp(s: &str) -> bool { let parts: Vec<&str> = s.split('-').collect(); let [date, time, micros, ..] = parts.as_slice() else { return false; @@ -410,6 +434,38 @@ mod tests { } #[tokio::test] + async fn cleanup_does_not_delete_state_json() { + let base_dir = temp_dir("cleanup-test").await; + let log_dir = base_dir.join("logs"); + let site = "test-site"; + + // Create 3 builds (keep 1 → remove 2) + for ts in &[ + "20260126-100000-000001", + "20260126-100000-000002", + "20260126-100000-000003", + ] { + create_build_and_log(&base_dir, &log_dir, site, ts).await; + } + + // Write a state.json in the builds dir + let state_path = base_dir.join("builds").join(site).join("state.json"); + fs::write(&state_path, r#"{"status":"success"}"#) + .await + .unwrap(); + + let result = cleanup_old_builds(&base_dir, &log_dir, site, 1).await; + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.builds_removed, 2); + + // state.json must still exist + assert!(state_path.exists(), "state.json must not be deleted"); + + cleanup(&base_dir).await; + } + + #[tokio::test] async fn 
cleanup_removes_orphaned_tmp_files() { let base_dir = temp_dir("cleanup-test").await; let log_dir = base_dir.join("logs"); @@ -1,57 +1,114 @@ -use clap::{Parser, Subcommand}; +use argh::FromArgs; use std::path::PathBuf; -/// Witryna - minimalist Git-based static site deployment orchestrator -#[derive(Debug, Parser)] -#[command( - name = "witryna", - version, - author, - about = "Minimalist Git-based static site deployment orchestrator", - long_about = "Minimalist Git-based static site deployment orchestrator.\n\n\ - Witryna listens for webhook HTTP requests, pulls the corresponding Git \ - repository (with automatic Git LFS fetch and submodule initialization), \ - runs a user-defined build command inside an ephemeral container and \ - publishes the resulting assets via atomic symlink switching.\n\n\ - A health-check endpoint is available at GET /health (returns 200 OK).\n\n\ - Witryna does not serve files, terminate TLS, or manage DNS. \ - It is designed to sit behind a reverse proxy (Nginx, Caddy, etc.).", - subcommand_required = true, - arg_required_else_help = true -)] +/// Minimalist Git-based static site deployment orchestrator +#[derive(Debug, FromArgs)] pub struct Cli { - /// Path to the configuration file. 
- /// If not specified, searches: ./witryna.toml, $XDG_CONFIG_HOME/witryna/witryna.toml, /etc/witryna/witryna.toml - #[arg(long, global = true, value_name = "FILE")] - pub config: Option<PathBuf>, - - #[command(subcommand)] + #[argh(subcommand)] pub command: Command, } -#[derive(Debug, Subcommand)] +#[derive(Debug, FromArgs)] +#[argh(subcommand)] pub enum Command { - /// Start the deployment server (foreground) - Serve, - /// Validate configuration file and print summary - Validate, - /// Trigger a one-off build for a site (synchronous, no server) - Run { - /// Site name (as defined in witryna.toml) - site: String, - /// Stream full build output to stderr in real-time - #[arg(long, short)] - verbose: bool, - }, - /// Show deployment status for configured sites - Status { - /// Show last 10 deployments for a single site - #[arg(long, short)] - site: Option<String>, - /// Output in JSON format - #[arg(long)] - json: bool, - }, + Serve(ServeCmd), + Validate(ValidateCmd), + Run(RunCmd), + Status(StatusCmd), + Switch(SwitchCmd), + Cleanup(CleanupCmd), +} + +impl Command { + #[must_use] + pub fn config(&self) -> Option<&std::path::Path> { + match self { + Self::Serve(c) => c.config.as_deref(), + Self::Validate(c) => c.config.as_deref(), + Self::Run(c) => c.config.as_deref(), + Self::Status(c) => c.config.as_deref(), + Self::Switch(c) => c.config.as_deref(), + Self::Cleanup(c) => c.config.as_deref(), + } + } +} + +/// Start the deployment server (foreground) +#[derive(Debug, FromArgs)] +#[argh(subcommand, name = "serve")] +pub struct ServeCmd { + /// path to configuration file + #[argh(option)] + pub config: Option<PathBuf>, +} + +/// Validate configuration file and print summary +#[derive(Debug, FromArgs)] +#[argh(subcommand, name = "validate")] +pub struct ValidateCmd { + /// path to configuration file + #[argh(option)] + pub config: Option<PathBuf>, +} + +/// Trigger a one-off build for a site (synchronous, no server) +#[derive(Debug, FromArgs)] +#[argh(subcommand, 
name = "run")] +pub struct RunCmd { + /// path to configuration file + #[argh(option)] + pub config: Option<PathBuf>, + /// site name (as defined in witryna.toml) + #[argh(positional)] + pub site: String, + /// stream full build output to stderr in real-time + #[argh(switch, short = 'v')] + pub verbose: bool, +} + +/// Show deployment status for configured sites +#[derive(Debug, FromArgs)] +#[argh(subcommand, name = "status")] +pub struct StatusCmd { + /// path to configuration file + #[argh(option)] + pub config: Option<PathBuf>, + /// site name (if omitted, shows all sites) + #[argh(positional)] + pub site: Option<String>, + /// output in JSON format + #[argh(switch)] + pub json: bool, +} + +/// Switch the active build symlink (rollback) +#[derive(Debug, FromArgs)] +#[argh(subcommand, name = "switch")] +pub struct SwitchCmd { + /// path to configuration file + #[argh(option)] + pub config: Option<PathBuf>, + /// site name (as defined in witryna.toml) + #[argh(positional)] + pub site: String, + /// build timestamp to switch to (from `witryna status <site>`) + #[argh(positional)] + pub build: String, +} + +/// Remove old builds and logs +#[derive(Debug, FromArgs)] +#[argh(subcommand, name = "cleanup")] +pub struct CleanupCmd { + /// path to configuration file + #[argh(option)] + pub config: Option<PathBuf>, + /// site name (if omitted, cleans all sites) + #[argh(positional)] + pub site: Option<String>, + /// number of builds to keep per site (overrides `max_builds_to_keep`) + #[argh(option)] + pub keep: Option<u32>, } #[cfg(test)] @@ -61,74 +118,168 @@ mod tests { #[test] fn run_parses_site_name() { - let cli = Cli::try_parse_from(["witryna", "run", "my-site"]).unwrap(); + let cli = Cli::from_args(&["witryna"], &["run", "my-site"]).unwrap(); match cli.command { - Command::Run { site, verbose } => { - assert_eq!(site, "my-site"); - assert!(!verbose); + Command::Run(cmd) => { + assert_eq!(cmd.site, "my-site"); + assert!(!cmd.verbose); } - _ => panic!("expected Run 
command"), + _ => unreachable!("expected Run command"), } } #[test] fn run_parses_verbose_flag() { - let cli = Cli::try_parse_from(["witryna", "run", "my-site", "--verbose"]).unwrap(); + let cli = Cli::from_args(&["witryna"], &["run", "my-site", "--verbose"]).unwrap(); match cli.command { - Command::Run { site, verbose } => { - assert_eq!(site, "my-site"); - assert!(verbose); + Command::Run(cmd) => { + assert_eq!(cmd.site, "my-site"); + assert!(cmd.verbose); } - _ => panic!("expected Run command"), + _ => unreachable!("expected Run command"), } } #[test] fn status_parses_without_flags() { - let cli = Cli::try_parse_from(["witryna", "status"]).unwrap(); + let cli = Cli::from_args(&["witryna"], &["status"]).unwrap(); match cli.command { - Command::Status { site, json } => { - assert!(site.is_none()); - assert!(!json); + Command::Status(cmd) => { + assert!(cmd.site.is_none()); + assert!(!cmd.json); } - _ => panic!("expected Status command"), + _ => unreachable!("expected Status command"), } } #[test] fn status_parses_site_filter() { - let cli = Cli::try_parse_from(["witryna", "status", "--site", "my-site"]).unwrap(); + let cli = Cli::from_args(&["witryna"], &["status", "my-site"]).unwrap(); match cli.command { - Command::Status { site, json } => { - assert_eq!(site.as_deref(), Some("my-site")); - assert!(!json); + Command::Status(cmd) => { + assert_eq!(cmd.site.as_deref(), Some("my-site")); + assert!(!cmd.json); } - _ => panic!("expected Status command"), + _ => unreachable!("expected Status command"), } } #[test] fn status_parses_json_flag() { - let cli = Cli::try_parse_from(["witryna", "status", "--json"]).unwrap(); + let cli = Cli::from_args(&["witryna"], &["status", "--json"]).unwrap(); + match cli.command { + Command::Status(cmd) => { + assert!(cmd.site.is_none()); + assert!(cmd.json); + } + _ => unreachable!("expected Status command"), + } + } + + #[test] + fn cleanup_parses_without_args() { + let cli = Cli::from_args(&["witryna"], &["cleanup"]).unwrap(); + 
match cli.command { + Command::Cleanup(cmd) => { + assert!(cmd.site.is_none()); + assert!(cmd.keep.is_none()); + } + _ => unreachable!("expected Cleanup command"), + } + } + + #[test] + fn cleanup_parses_site_name() { + let cli = Cli::from_args(&["witryna"], &["cleanup", "my-site"]).unwrap(); + match cli.command { + Command::Cleanup(cmd) => { + assert_eq!(cmd.site.as_deref(), Some("my-site")); + assert!(cmd.keep.is_none()); + } + _ => unreachable!("expected Cleanup command"), + } + } + + #[test] + fn cleanup_parses_keep_flag() { + let cli = Cli::from_args(&["witryna"], &["cleanup", "--keep", "3"]).unwrap(); + match cli.command { + Command::Cleanup(cmd) => { + assert!(cmd.site.is_none()); + assert_eq!(cmd.keep, Some(3)); + } + _ => unreachable!("expected Cleanup command"), + } + } + + #[test] + fn cleanup_parses_site_and_keep() { + let cli = Cli::from_args(&["witryna"], &["cleanup", "my-site", "--keep", "3"]).unwrap(); + match cli.command { + Command::Cleanup(cmd) => { + assert_eq!(cmd.site.as_deref(), Some("my-site")); + assert_eq!(cmd.keep, Some(3)); + } + _ => unreachable!("expected Cleanup command"), + } + } + + #[test] + fn switch_parses_site_and_build() { + let cli = Cli::from_args( + &["witryna"], + &["switch", "my-site", "20260126-143000-123456"], + ) + .unwrap(); + match cli.command { + Command::Switch(cmd) => { + assert_eq!(cmd.site, "my-site"); + assert_eq!(cmd.build, "20260126-143000-123456"); + assert!(cmd.config.is_none()); + } + _ => unreachable!("expected Switch command"), + } + } + + #[test] + fn switch_parses_config_flag() { + let cli = Cli::from_args( + &["witryna"], + &[ + "switch", + "--config", + "/etc/witryna.toml", + "my-site", + "20260126-143000-123456", + ], + ) + .unwrap(); match cli.command { - Command::Status { site, json } => { - assert!(site.is_none()); - assert!(json); + Command::Switch(cmd) => { + assert_eq!(cmd.site, "my-site"); + assert_eq!(cmd.build, "20260126-143000-123456"); + assert_eq!( + cmd.config, + 
Some(std::path::PathBuf::from("/etc/witryna.toml")) + ); } - _ => panic!("expected Status command"), + _ => unreachable!("expected Switch command"), } } #[test] fn config_flag_is_optional() { - let cli = Cli::try_parse_from(["witryna", "status"]).unwrap(); - assert!(cli.config.is_none()); + let cli = Cli::from_args(&["witryna"], &["status"]).unwrap(); + assert!(cli.command.config().is_none()); } #[test] fn config_flag_explicit_path() { let cli = - Cli::try_parse_from(["witryna", "--config", "/etc/witryna.toml", "status"]).unwrap(); - assert_eq!(cli.config, Some(PathBuf::from("/etc/witryna.toml"))); + Cli::from_args(&["witryna"], &["status", "--config", "/etc/witryna.toml"]).unwrap(); + assert_eq!( + cli.command.config(), + Some(std::path::Path::new("/etc/witryna.toml")) + ); } } diff --git a/src/config.rs b/src/config.rs index 63f3447..d79e91c 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,20 +1,16 @@ use crate::repo_config; use anyhow::{Context as _, Result, bail}; +use log::LevelFilter; use serde::{Deserialize, Deserializer}; use std::collections::{HashMap, HashSet}; use std::net::SocketAddr; use std::path::{Component, PathBuf}; use std::time::Duration; -use tracing::level_filters::LevelFilter; fn default_log_dir() -> PathBuf { PathBuf::from("/var/log/witryna") } -const fn default_rate_limit() -> u32 { - 10 -} - const fn default_max_builds_to_keep() -> u32 { 5 } @@ -50,8 +46,6 @@ pub struct Config { #[serde(default = "default_log_dir")] pub log_dir: PathBuf, pub log_level: String, - #[serde(default = "default_rate_limit")] - pub rate_limit_per_minute: u32, #[serde(default = "default_max_builds_to_keep")] pub max_builds_to_keep: u32, /// Optional global git operation timeout (e.g., "2m", "5m"). @@ -109,7 +103,7 @@ pub struct SiteConfig { #[serde(default)] pub cache_dirs: Option<Vec<String>>, /// Optional post-deploy hook command (array form, no shell). - /// Runs after successful symlink switch. Non-fatal on failure. 
+ /// Runs after every build (success or failure). Non-fatal on failure. #[serde(default)] pub post_deploy: Option<Vec<String>>, /// Optional environment variables passed to container builds and post-deploy hooks. @@ -265,17 +259,14 @@ impl Config { ) })?; } else if let Some(path) = &site.webhook_token_file { - site.webhook_token = tokio::fs::read_to_string(path) - .await - .with_context(|| { - format!( - "site '{}': failed to read webhook_token_file '{}'", - site.name, - path.display() - ) - })? - .trim() - .to_owned(); + let token_content = tokio::fs::read_to_string(path).await.with_context(|| { + format!( + "site '{}': failed to read webhook_token_file '{}'", + site.name, + path.display() + ) + })?; + token_content.trim().clone_into(&mut site.webhook_token); } } Ok(()) @@ -284,7 +275,6 @@ impl Config { fn validate(&self) -> Result<()> { self.validate_listen_address()?; self.validate_log_level()?; - self.validate_rate_limit()?; self.validate_git_timeout()?; self.validate_container_runtime()?; self.validate_sites()?; @@ -295,13 +285,12 @@ impl Config { if let Some(timeout) = self.git_timeout { if timeout < MIN_GIT_TIMEOUT { bail!( - "git_timeout is too short ({:?}): minimum is {}s", - timeout, + "git_timeout is too short ({timeout:?}): minimum is {}s", MIN_GIT_TIMEOUT.as_secs() ); } if timeout > MAX_GIT_TIMEOUT { - bail!("git_timeout is too long ({:?}): maximum is 1h", timeout,); + bail!("git_timeout is too long ({timeout:?}): maximum is 1h"); } } Ok(()) @@ -333,13 +322,6 @@ impl Config { Ok(()) } - fn validate_rate_limit(&self) -> Result<()> { - if self.rate_limit_per_minute == 0 { - bail!("rate_limit_per_minute must be greater than 0"); - } - Ok(()) - } - fn validate_sites(&self) -> Result<()> { let mut seen_names = HashSet::new(); @@ -369,12 +351,12 @@ impl Config { #[must_use] pub fn log_level_filter(&self) -> LevelFilter { match self.log_level.to_lowercase().as_str() { - "trace" => LevelFilter::TRACE, - "debug" => LevelFilter::DEBUG, - "warn" => 
LevelFilter::WARN, - "error" => LevelFilter::ERROR, + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, // Catch-all: covers "info" and the unreachable default after validation. - _ => LevelFilter::INFO, + _ => LevelFilter::Info, } } @@ -921,27 +903,6 @@ sites = [] } #[test] - fn zero_rate_limit_rejected() { - let toml = r#" -listen_address = "127.0.0.1:8080" -container_runtime = "podman" -base_dir = "/var/lib/witryna" -log_level = "info" -rate_limit_per_minute = 0 -sites = [] -"#; - let config: Config = toml::from_str(toml).unwrap(); - let result = config.validate(); - assert!(result.is_err()); - assert!( - result - .unwrap_err() - .to_string() - .contains("rate_limit_per_minute") - ); - } - - #[test] fn duplicate_site_names() { let toml = r#" listen_address = "127.0.0.1:8080" @@ -1,8 +1,8 @@ use anyhow::{Context as _, Result, bail}; +use log::{debug, error, info, warn}; use std::path::Path; use std::time::Duration; use tokio::process::Command; -use tracing::{debug, error, info, warn}; /// Default timeout for git operations (used when not configured). 
pub const GIT_TIMEOUT_DEFAULT: Duration = Duration::from_secs(60); @@ -104,10 +104,15 @@ pub async fn sync_repo( pull(clone_dir, branch, timeout, depth).await?; } else if let Err(e) = clone(repo_url, branch, clone_dir, timeout, depth).await { if clone_dir.exists() { - warn!(path = %clone_dir.display(), "cleaning up partial clone after failure"); + warn!( + "cleaning up partial clone after failure: {}", + clone_dir.display() + ); if let Err(cleanup_err) = tokio::fs::remove_dir_all(clone_dir).await { - error!(path = %clone_dir.display(), error = %cleanup_err, - "failed to clean up partial clone"); + error!( + "failed to clean up partial clone: path={} error={cleanup_err}", + clone_dir.display() + ); } } return Err(e); @@ -142,12 +147,18 @@ pub async fn has_remote_changes( ) -> Result<bool> { // If clone directory doesn't exist, treat as "needs update" if !clone_dir.exists() { - debug!(path = %clone_dir.display(), "clone directory does not exist, needs initial clone"); + debug!( + "clone directory does not exist, needs initial clone: {}", + clone_dir.display() + ); return Ok(true); } // Fetch from remote (update refs only, no working tree changes) - debug!(path = %clone_dir.display(), branch, "fetching remote refs"); + debug!( + "fetching remote refs: path={} branch={branch}", + clone_dir.display() + ); let depth_str = depth.to_string(); let mut fetch_args = vec!["fetch"]; if depth > 0 { @@ -165,10 +176,8 @@ pub async fn has_remote_changes( let remote_head = get_commit_hash(clone_dir, &remote_ref).await?; debug!( - path = %clone_dir.display(), - local = %local_head, - remote = %remote_head, - "comparing commits" + "comparing commits: path={} local={local_head} remote={remote_head}", + clone_dir.display() ); Ok(local_head != remote_head) @@ -198,7 +207,10 @@ async fn clone( timeout: Duration, depth: u32, ) -> Result<()> { - info!(repo_url, branch, path = %clone_dir.display(), "cloning repository"); + info!( + "cloning repository: repo={repo_url} branch={branch} 
path={}", + clone_dir.display() + ); // Create parent directory if needed if let Some(parent) = clone_dir.parent() { @@ -218,12 +230,15 @@ async fn clone( args.push(clone_dir_str.as_str()); run_git(&args, None, timeout, "git clone").await?; - debug!(path = %clone_dir.display(), "clone completed"); + debug!("clone completed: {}", clone_dir.display()); Ok(()) } async fn pull(clone_dir: &Path, branch: &str, timeout: Duration, depth: u32) -> Result<()> { - info!(branch, path = %clone_dir.display(), "pulling latest changes"); + info!( + "pulling latest changes: branch={branch} path={}", + clone_dir.display() + ); // Fetch from origin (shallow or full depending on depth) let depth_str = depth.to_string(); @@ -245,7 +260,7 @@ async fn pull(clone_dir: &Path, branch: &str, timeout: Duration, depth: u32) -> ) .await?; - debug!(path = %clone_dir.display(), "pull completed"); + debug!("pull completed: {}", clone_dir.display()); Ok(()) } @@ -292,7 +307,7 @@ async fn has_lfs_pointers(clone_dir: &Path) -> Result<bool> { continue; }; if content.starts_with(LFS_POINTER_SIGNATURE) { - debug!(file = %file_path, "found LFS pointer"); + debug!("found LFS pointer: {file_path}"); return Ok(true); } } @@ -310,7 +325,7 @@ async fn is_lfs_available() -> bool { } async fn lfs_pull(clone_dir: &Path) -> Result<()> { - info!(path = %clone_dir.display(), "fetching LFS objects"); + info!("fetching LFS objects: {}", clone_dir.display()); run_git( &["lfs", "pull"], @@ -320,7 +335,7 @@ async fn lfs_pull(clone_dir: &Path) -> Result<()> { ) .await?; - debug!(path = %clone_dir.display(), "LFS pull completed"); + debug!("LFS pull completed: {}", clone_dir.display()); Ok(()) } @@ -334,11 +349,14 @@ async fn lfs_pull(clone_dir: &Path) -> Result<()> { async fn maybe_fetch_lfs(clone_dir: &Path) -> Result<()> { // Step 1: Quick check for LFS configuration if !has_lfs_configured(clone_dir).await { - debug!(path = %clone_dir.display(), "no LFS configuration found"); + debug!("no LFS configuration found: {}", 
clone_dir.display()); return Ok(()); } - info!(path = %clone_dir.display(), "LFS configured, checking for pointers"); + info!( + "LFS configured, checking for pointers: {}", + clone_dir.display() + ); // Step 2: Scan for actual pointer files match has_lfs_pointers(clone_dir).await { @@ -346,12 +364,12 @@ async fn maybe_fetch_lfs(clone_dir: &Path) -> Result<()> { // Pointers found, need to fetch } Ok(false) => { - debug!(path = %clone_dir.display(), "no LFS pointers found"); + debug!("no LFS pointers found: {}", clone_dir.display()); return Ok(()); } Err(e) => { // If scan fails, try to fetch anyway (conservative approach) - debug!(error = %e, "LFS pointer scan failed, attempting fetch"); + debug!("LFS pointer scan failed, attempting fetch: {e}"); } } @@ -384,11 +402,11 @@ async fn maybe_init_submodules( is_pull: bool, ) -> Result<()> { if !has_submodules(clone_dir).await { - debug!(path = %clone_dir.display(), "no submodules configured"); + debug!("no submodules configured: {}", clone_dir.display()); return Ok(()); } - info!(path = %clone_dir.display(), "submodules detected, initializing"); + info!("submodules detected, initializing: {}", clone_dir.display()); // On pull, sync URLs first (handles upstream submodule URL changes) if is_pull { @@ -419,7 +437,10 @@ async fn maybe_init_submodules( ) .await?; - debug!(path = %clone_dir.display(), "submodule initialization completed"); + debug!( + "submodule initialization completed: {}", + clone_dir.display() + ); Ok(()) } @@ -1045,7 +1066,7 @@ mod tests { } /// Create a parent repo with a submodule wired up. - /// Returns (parent_url, submodule_url). + /// Returns `(parent_url, submodule_url)`. async fn create_repo_with_submodule(temp: &Path, branch: &str) -> (String, String) { // 1. 
Create bare submodule repo with a file let sub_bare = temp.join("sub.git"); diff --git a/src/hook.rs b/src/hook.rs index 53e1e18..6cb3823 100644 --- a/src/hook.rs +++ b/src/hook.rs @@ -1,4 +1,5 @@ use crate::build::copy_with_tail; +use log::debug; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::process::Stdio; @@ -6,7 +7,6 @@ use std::time::{Duration, Instant}; use tokio::io::AsyncWriteExt as _; use tokio::io::BufWriter; use tokio::process::Command; -use tracing::debug; #[cfg(not(test))] const HOOK_TIMEOUT: Duration = Duration::from_secs(30); @@ -47,6 +47,7 @@ pub async fn run_post_deploy_hook( build_dir: &Path, public_dir: &Path, timestamp: &str, + build_status: &str, env: &HashMap<String, String>, stdout_file: &Path, stderr_file: &Path, @@ -84,6 +85,7 @@ pub async fn run_post_deploy_hook( .env("WITRYNA_BUILD_DIR", build_dir.as_os_str()) .env("WITRYNA_PUBLIC_DIR", public_dir.as_os_str()) .env("WITRYNA_BUILD_TIMESTAMP", timestamp) + .env("WITRYNA_BUILD_STATUS", build_status) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn(); @@ -106,7 +108,7 @@ pub async fn run_post_deploy_hook( } }; - debug!(cmd = ?command, "hook process spawned"); + debug!("hook process spawned: {command:?}"); let (last_stderr, exit_code, success) = stream_hook_output(&mut child, stdout_file, stderr_file).await; @@ -231,6 +233,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -255,6 +258,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -277,6 +281,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -305,6 +310,7 @@ mod tests { tmp.path(), &public_dir, "20260202-120000-000000", + "success", &env, &stdout_tmp, &stderr_tmp, @@ -315,6 +321,7 @@ mod tests { let stdout = fs::read_to_string(&stdout_tmp).await.unwrap(); 
assert!(stdout.contains("WITRYNA_SITE=my-site")); assert!(stdout.contains("WITRYNA_BUILD_TIMESTAMP=20260202-120000-000000")); + assert!(stdout.contains("WITRYNA_BUILD_STATUS=success")); assert!(stdout.contains("WITRYNA_BUILD_DIR=")); assert!(stdout.contains("WITRYNA_PUBLIC_DIR=")); assert!(stdout.contains("PATH=")); @@ -336,6 +343,7 @@ mod tests { "WITRYNA_BUILD_DIR", "WITRYNA_PUBLIC_DIR", "WITRYNA_BUILD_TIMESTAMP", + "WITRYNA_BUILD_STATUS", "MY_VAR", "DEPLOY_TARGET", ] @@ -357,6 +365,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -380,6 +389,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -404,6 +414,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -432,6 +443,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -460,6 +472,7 @@ mod tests { tmp.path(), &tmp.path().join("current"), "ts", + "success", &HashMap::new(), &stdout_tmp, &stderr_tmp, @@ -472,6 +485,33 @@ mod tests { } #[tokio::test] + async fn hook_env_build_status_failed() { + let tmp = TempDir::new().unwrap(); + let stdout_tmp = tmp.path().join("stdout.tmp"); + let stderr_tmp = tmp.path().join("stderr.tmp"); + + let result = run_post_deploy_hook( + &cmd(&["env"]), + "test-site", + tmp.path(), + &tmp.path().join("current"), + "ts", + "failed", + &HashMap::new(), + &stdout_tmp, + &stderr_tmp, + ) + .await; + + assert!(result.success); + let stdout = fs::read_to_string(&stdout_tmp).await.unwrap(); + assert!( + stdout.contains("WITRYNA_BUILD_STATUS=failed"), + "WITRYNA_BUILD_STATUS should be 'failed'" + ); + } + + #[tokio::test] async fn hook_user_env_does_not_override_reserved() { let tmp = TempDir::new().unwrap(); let stdout_tmp = tmp.path().join("stdout.tmp"); @@ -484,6 +524,7 @@ mod tests { tmp.path(), 
&tmp.path().join("current"), "ts", + "success", &env, &stdout_tmp, &stderr_tmp, @@ -3,19 +3,22 @@ //! This crate exposes modules for use by the binary and integration tests. //! It is not intended for external consumption and has no stability guarantees. -pub mod build; +pub(crate) mod build; pub mod build_guard; pub mod cleanup; pub mod cli; pub mod config; pub mod git; -pub mod hook; +pub(crate) mod hook; +pub mod logger; pub mod logs; pub mod pipeline; pub mod polling; pub mod publish; -pub mod repo_config; +pub(crate) mod repo_config; pub mod server; +pub mod state; +pub mod time; #[cfg(any(test, feature = "integration"))] pub mod test_support; diff --git a/src/logger.rs b/src/logger.rs new file mode 100644 index 0000000..8d9611b --- /dev/null +++ b/src/logger.rs @@ -0,0 +1,121 @@ +use log::{LevelFilter, Log, Metadata, Record}; +use std::io::Write as _; + +pub struct Logger { + show_timestamp: bool, +} + +impl Logger { + /// Initialize the global logger. + /// + /// `level` is the configured log level (from config or CLI). + /// `RUST_LOG` env var overrides it if set and valid. + /// Timestamps are omitted when `JOURNAL_STREAM` is set (systemd/journald). + /// + /// # Panics + /// + /// Panics if called more than once (the global logger can only be set once). 
+ pub fn init(level: LevelFilter) { + let effective_level = std::env::var("RUST_LOG") + .ok() + .and_then(|s| parse_level(&s)) + .unwrap_or(level); + + let show_timestamp = std::env::var_os("JOURNAL_STREAM").is_none(); + + let logger = Self { show_timestamp }; + log::set_boxed_logger(Box::new(logger)).expect("logger already initialized"); + log::set_max_level(effective_level); + } +} + +fn parse_level(s: &str) -> Option<LevelFilter> { + match s.trim().to_lowercase().as_str() { + "trace" => Some(LevelFilter::Trace), + "debug" => Some(LevelFilter::Debug), + "info" => Some(LevelFilter::Info), + "warn" => Some(LevelFilter::Warn), + "error" => Some(LevelFilter::Error), + "off" => Some(LevelFilter::Off), + _ => None, + } +} + +impl Log for Logger { + fn enabled(&self, metadata: &Metadata) -> bool { + metadata.level() <= log::max_level() + } + + fn log(&self, record: &Record) { + if !self.enabled(record.metadata()) { + return; + } + let mut stderr = std::io::stderr().lock(); + if self.show_timestamp { + let now = crate::time::format_log_timestamp(std::time::SystemTime::now()); + let _ = writeln!(stderr, "{now} {:>5} {}", record.level(), record.args()); + } else { + let _ = writeln!(stderr, "{:>5} {}", record.level(), record.args()); + } + } + + fn flush(&self) { + let _ = std::io::stderr().flush(); + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + + #[test] + fn parse_level_valid_levels() { + assert_eq!(parse_level("trace"), Some(LevelFilter::Trace)); + assert_eq!(parse_level("debug"), Some(LevelFilter::Debug)); + assert_eq!(parse_level("info"), Some(LevelFilter::Info)); + assert_eq!(parse_level("warn"), Some(LevelFilter::Warn)); + assert_eq!(parse_level("error"), Some(LevelFilter::Error)); + } + + #[test] + fn parse_level_case_insensitive() { + assert_eq!(parse_level("DEBUG"), Some(LevelFilter::Debug)); + assert_eq!(parse_level("Debug"), Some(LevelFilter::Debug)); + assert_eq!(parse_level("dEbUg"), Some(LevelFilter::Debug)); + } + + 
#[test] + fn parse_level_invalid_returns_none() { + assert_eq!(parse_level("invalid"), None); + assert_eq!(parse_level(""), None); + assert_eq!(parse_level("verbose"), None); + } + + #[test] + fn parse_level_off() { + assert_eq!(parse_level("off"), Some(LevelFilter::Off)); + } + + #[test] + fn parse_level_trimmed() { + assert_eq!(parse_level(" info "), Some(LevelFilter::Info)); + assert_eq!(parse_level("\tdebug\n"), Some(LevelFilter::Debug)); + } + + #[test] + fn enabled_respects_max_level() { + let logger = Logger { + show_timestamp: true, + }; + + // Set max level to Warn for this test + log::set_max_level(LevelFilter::Warn); + + let warn_meta = log::MetadataBuilder::new().level(log::Level::Warn).build(); + let debug_meta = log::MetadataBuilder::new().level(log::Level::Debug).build(); + + assert!(logger.enabled(&warn_meta)); + assert!(!logger.enabled(&debug_meta)); + } +} diff --git a/src/logs.rs b/src/logs.rs index bddcc9d..e25262a 100644 --- a/src/logs.rs +++ b/src/logs.rs @@ -1,9 +1,9 @@ use anyhow::{Context as _, Result}; +use log::debug; use std::path::{Path, PathBuf}; use std::time::Duration; use tokio::io::AsyncWriteExt as _; use tokio::process::Command; -use tracing::{debug, warn}; use crate::hook::HookResult; @@ -107,9 +107,9 @@ pub async fn save_build_log( let _ = tokio::fs::remove_file(stderr_file).await; debug!( - path = %log_file.display(), - site = %meta.site_name, - "build log saved" + "[{}] build log saved: {}", + meta.site_name, + log_file.display() ); Ok(log_file) @@ -239,11 +239,7 @@ pub async fn save_hook_log( let _ = tokio::fs::remove_file(&hook_result.stdout_file).await; let _ = tokio::fs::remove_file(&hook_result.stderr_file).await; - debug!( - path = %log_file.display(), - site = %site_name, - "hook log saved" - ); + debug!("[{site_name}] hook log saved: {}", log_file.display()); Ok(log_file) } @@ -271,17 +267,6 @@ fn format_hook_log_header(site_name: &str, timestamp: &str, result: &HookResult) ) } -/// Parsed header from a build log file. 
-#[derive(Debug, Clone, serde::Serialize)] -pub struct ParsedLogHeader { - pub site_name: String, - pub timestamp: String, - pub git_commit: String, - pub image: String, - pub duration: String, - pub status: String, -} - /// Combined deployment status (build + optional hook). #[derive(Debug, Clone, serde::Serialize)] pub struct DeploymentStatus { @@ -291,182 +276,7 @@ pub struct DeploymentStatus { pub duration: String, pub status: String, pub log: String, -} - -/// Parse the header section of a build log file. -/// -/// Expects lines like: -/// ```text -/// === BUILD LOG === -/// Site: my-site -/// Timestamp: 20260126-143000-123456 -/// Git Commit: abc123d -/// Image: node:20-alpine -/// Duration: 45s -/// Status: success -/// ``` -/// -/// Returns `None` if the header is malformed. -#[must_use] -pub fn parse_log_header(content: &str) -> Option<ParsedLogHeader> { - let mut site_name = None; - let mut timestamp = None; - let mut git_commit = None; - let mut image = None; - let mut duration = None; - let mut status = None; - - for line in content.lines().take(10) { - if let Some(val) = line.strip_prefix("Site: ") { - site_name = Some(val.to_owned()); - } else if let Some(val) = line.strip_prefix("Timestamp: ") { - timestamp = Some(val.to_owned()); - } else if let Some(val) = line.strip_prefix("Git Commit: ") { - git_commit = Some(val.to_owned()); - } else if let Some(val) = line.strip_prefix("Image: ") { - image = Some(val.to_owned()); - } else if let Some(val) = line.strip_prefix("Duration: ") { - duration = Some(val.to_owned()); - } else if let Some(val) = line.strip_prefix("Status: ") { - status = Some(val.to_owned()); - } - } - - Some(ParsedLogHeader { - site_name: site_name?, - timestamp: timestamp?, - git_commit: git_commit.unwrap_or_else(|| "unknown".to_owned()), - image: image.unwrap_or_else(|| "unknown".to_owned()), - duration: duration?, - status: status?, - }) -} - -/// Parse the status line from a hook log. 
-/// -/// Returns `Some(true)` for success, `Some(false)` for failure, -/// `None` if the content cannot be parsed. -#[must_use] -pub fn parse_hook_status(content: &str) -> Option<bool> { - for line in content.lines().take(10) { - if let Some(val) = line.strip_prefix("Status: ") { - return Some(val == "success"); - } - } - None -} - -/// List build log files for a site, sorted newest-first. -/// -/// Returns `(timestamp, path)` pairs. Excludes `*-hook.log` and `*.tmp` files. -/// -/// # Errors -/// -/// Returns an error if the directory cannot be read (except for not-found, -/// which returns an empty list). -pub async fn list_site_logs(log_dir: &Path, site_name: &str) -> Result<Vec<(String, PathBuf)>> { - let site_log_dir = log_dir.join(site_name); - - if !site_log_dir.is_dir() { - return Ok(Vec::new()); - } - - let mut entries = tokio::fs::read_dir(&site_log_dir) - .await - .with_context(|| format!("failed to read log directory: {}", site_log_dir.display()))?; - - let mut logs = Vec::new(); - - while let Some(entry) = entries.next_entry().await? { - let name = entry.file_name(); - let name_str = name.to_string_lossy(); - - // Skip hook logs and temp files - if name_str.ends_with("-hook.log") || name_str.ends_with(".tmp") { - continue; - } - - if let Some(timestamp) = name_str.strip_suffix(".log") { - logs.push((timestamp.to_owned(), entry.path())); - } - } - - // Sort descending (newest first) — timestamps are lexicographically sortable - logs.sort_by(|a, b| b.0.cmp(&a.0)); - - Ok(logs) -} - -/// Get the deployment status for a single build log. -/// -/// Reads the build log header and checks for an accompanying hook log -/// to determine overall deployment status. -/// -/// # Errors -/// -/// Returns an error if the build log cannot be read. 
-pub async fn get_deployment_status( - log_dir: &Path, - site_name: &str, - timestamp: &str, - log_path: &Path, -) -> Result<DeploymentStatus> { - let content = tokio::fs::read_to_string(log_path) - .await - .with_context(|| format!("failed to read build log: {}", log_path.display()))?; - - let header = parse_log_header(&content); - - let (git_commit, duration, build_status) = match &header { - Some(h) => (h.git_commit.clone(), h.duration.clone(), h.status.clone()), - None => { - warn!(path = %log_path.display(), "malformed build log header"); - ( - "unknown".to_owned(), - "-".to_owned(), - "(parse error)".to_owned(), - ) - } - }; - - // Check for accompanying hook log - let hook_log_path = log_dir - .join(site_name) - .join(format!("{timestamp}-hook.log")); - - let status = if hook_log_path.is_file() { - match tokio::fs::read_to_string(&hook_log_path).await { - Ok(hook_content) => match parse_hook_status(&hook_content) { - Some(true) => { - if build_status.starts_with("failed") { - build_status - } else { - "success".to_owned() - } - } - Some(false) => { - if build_status.starts_with("failed") { - build_status - } else { - "hook failed".to_owned() - } - } - None => build_status, - }, - Err(_) => build_status, - } - } else { - build_status - }; - - Ok(DeploymentStatus { - site_name: site_name.to_owned(), - timestamp: timestamp.to_owned(), - git_commit, - duration, - status, - log: log_path.to_string_lossy().to_string(), - }) + pub current_build: String, } #[cfg(test)] @@ -827,93 +637,4 @@ mod tests { cleanup(&base_dir).await; } - - // --- parse_log_header tests --- - - #[test] - fn parse_log_header_success() { - let content = "\ -=== BUILD LOG === -Site: my-site -Timestamp: 20260126-143000-123456 -Git Commit: abc123d -Image: node:20-alpine -Duration: 45s -Status: success - -=== STDOUT === -build output -"; - let header = parse_log_header(content).unwrap(); - assert_eq!(header.site_name, "my-site"); - assert_eq!(header.timestamp, "20260126-143000-123456"); - 
assert_eq!(header.git_commit, "abc123d"); - assert_eq!(header.image, "node:20-alpine"); - assert_eq!(header.duration, "45s"); - assert_eq!(header.status, "success"); - } - - #[test] - fn parse_log_header_failed_build() { - let content = "\ -=== BUILD LOG === -Site: fail-site -Timestamp: 20260126-160000-000000 -Git Commit: def456 -Image: node:18 -Duration: 2m 0s -Status: failed (exit code: 42): build error -"; - let header = parse_log_header(content).unwrap(); - assert_eq!(header.status, "failed (exit code: 42): build error"); - assert_eq!(header.duration, "2m 0s"); - } - - #[test] - fn parse_log_header_unknown_commit() { - let content = "\ -=== BUILD LOG === -Site: test-site -Timestamp: 20260126-150000-000000 -Git Commit: unknown -Image: alpine:latest -Duration: 5s -Status: success -"; - let header = parse_log_header(content).unwrap(); - assert_eq!(header.git_commit, "unknown"); - } - - #[test] - fn parse_log_header_malformed() { - let content = "This is not a valid log file\nSome random text\n"; - let header = parse_log_header(content); - assert!(header.is_none()); - } - - #[test] - fn parse_hook_status_success() { - let content = "\ -=== HOOK LOG === -Site: test-site -Timestamp: 20260202-120000-000000 -Command: touch marker -Duration: 1s -Status: success -"; - assert_eq!(parse_hook_status(content), Some(true)); - } - - #[test] - fn parse_hook_status_failed() { - let content = "\ -=== HOOK LOG === -Site: test-site -Timestamp: 20260202-120000-000000 -Command: false -Duration: 0s -Status: failed (exit code 1) -"; - assert_eq!(parse_hook_status(content), Some(false)); - } } diff --git a/src/main.rs b/src/main.rs index b153297..ea0b033 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,65 +1,62 @@ use anyhow::{Context as _, Result, bail}; -use clap::Parser as _; -use tracing::{info, warn}; -use tracing_subscriber::EnvFilter; +use log::{info, warn}; use witryna::cli::{Cli, Command}; use witryna::config; use witryna::logs::{self, DeploymentStatus}; -use 
witryna::{pipeline, server}; +use witryna::state::BuildEntry; +use witryna::{cleanup, pipeline, publish, server, state}; #[tokio::main] async fn main() -> Result<()> { - let cli = Cli::parse(); - let config_path = config::discover_config(cli.config.as_deref())?; + let cli: Cli = argh::from_env(); + let config_path = config::discover_config(cli.command.config())?; match cli.command { - Command::Serve => run_serve(config_path).await, - Command::Validate => run_validate(config_path).await, - Command::Run { site, verbose } => run_run(config_path, site, verbose).await, - Command::Status { site, json } => run_status(config_path, site, json).await, + Command::Serve(_) => run_serve(config_path).await, + Command::Validate(_) => run_validate(config_path).await, + Command::Run(cmd) => Box::pin(run_run(config_path, cmd.site, cmd.verbose)).await, + Command::Status(cmd) => run_status(config_path, cmd.site, cmd.json).await, + Command::Switch(cmd) => run_switch(config_path, cmd.site, cmd.build).await, + Command::Cleanup(cmd) => run_cleanup(config_path, cmd.site, cmd.keep).await, } } async fn run_serve(config_path: std::path::PathBuf) -> Result<()> { let config = config::Config::load(&config_path).await?; - // Initialize tracing with configured log level + // Initialize logger with configured log level // RUST_LOG env var takes precedence if set - let filter = EnvFilter::try_from_default_env() - .unwrap_or_else(|_| EnvFilter::new(config.log_level_filter().to_string())); - tracing_subscriber::fmt().with_env_filter(filter).init(); + witryna::logger::Logger::init(config.log_level_filter()); info!( - listen_address = %config.listen_address, - container_runtime = %config.container_runtime, - base_dir = %config.base_dir.display(), - log_dir = %config.log_dir.display(), - log_level = %config.log_level, - sites_count = config.sites.len(), - "loaded configuration" + "loaded configuration: listen={} runtime={} base_dir={} log_dir={} log_level={} sites={}", + config.listen_address, + 
config.container_runtime, + config.base_dir.display(), + config.log_dir.display(), + config.log_level, + config.sites.len(), ); for site in &config.sites { if site.webhook_token.is_empty() { warn!( - name = %site.name, - "webhook authentication disabled (no token configured)" + "[{}] webhook authentication disabled (no token configured)", + site.name, ); } if let Some(interval) = site.poll_interval { info!( - name = %site.name, - repo_url = %site.repo_url, - branch = %site.branch, - poll_interval_secs = interval.as_secs(), - "configured site with polling" + "[{}] configured site with polling: repo={} branch={} poll_interval_secs={}", + site.name, + site.repo_url, + site.branch, + interval.as_secs(), ); } else { info!( - name = %site.name, - repo_url = %site.repo_url, - branch = %site.branch, - "configured site (webhook-only)" + "[{}] configured site (webhook-only): repo={} branch={}", + site.name, site.repo_url, site.branch, ); } } @@ -89,13 +86,13 @@ async fn run_run(config_path: std::path::PathBuf, site_name: String, verbose: bo })? 
.clone(); - // Initialize tracing: compact stderr, DEBUG when verbose - let level = if verbose { "debug" } else { "info" }; - let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(level)); - tracing_subscriber::fmt() - .with_env_filter(filter) - .with_writer(std::io::stderr) - .init(); + // Initialize logger: DEBUG when verbose + let level = if verbose { + log::LevelFilter::Debug + } else { + log::LevelFilter::Info + }; + witryna::logger::Logger::init(level); eprintln!( "Building site: {} (repo: {}, branch: {})", @@ -106,7 +103,7 @@ async fn run_run(config_path: std::path::PathBuf, site_name: String, verbose: bo .git_timeout .unwrap_or(witryna::git::GIT_TIMEOUT_DEFAULT); - let result = pipeline::run_build( + let result = Box::pin(pipeline::run_build( &site_name, &site, &config.base_dir, @@ -115,7 +112,7 @@ async fn run_run(config_path: std::path::PathBuf, site_name: String, verbose: bo config.max_builds_to_keep, git_timeout, verbose, - ) + )) .await?; eprintln!( @@ -142,36 +139,38 @@ async fn run_status( bail!("site '{}' not found in {}", name, config_path.display()); } + let sites: Vec<&str> = match &site_filter { + Some(name) => vec![name.as_str()], + None => config.sites.iter().map(|s| s.name.as_str()).collect(), + }; + let mut statuses: Vec<DeploymentStatus> = Vec::new(); - match &site_filter { - Some(name) => { - // Show last 10 deployments for a single site - let site_logs = logs::list_site_logs(&config.log_dir, name).await?; - for (ts, path) in site_logs.into_iter().take(10) { - let ds = logs::get_deployment_status(&config.log_dir, name, &ts, &path).await?; - statuses.push(ds); - } + for site_name in &sites { + let st = state::load_state(&config.base_dir, site_name).await; + + if st.builds.is_empty() { + statuses.push(DeploymentStatus { + site_name: (*site_name).to_owned(), + timestamp: "-".to_owned(), + git_commit: "-".to_owned(), + duration: "-".to_owned(), + status: "-".to_owned(), + log: "(no builds)".to_owned(), + 
current_build: String::new(), + }); + continue; } - None => { - // Show latest deployment for each site - for site in &config.sites { - let site_logs = logs::list_site_logs(&config.log_dir, &site.name).await?; - if let Some((ts, path)) = site_logs.into_iter().next() { - let ds = logs::get_deployment_status(&config.log_dir, &site.name, &ts, &path) - .await?; - statuses.push(ds); - } else { - statuses.push(DeploymentStatus { - site_name: site.name.clone(), - timestamp: "-".to_owned(), - git_commit: "-".to_owned(), - duration: "-".to_owned(), - status: "-".to_owned(), - log: "(no builds)".to_owned(), - }); - } - } + + // Single-site filter: show all builds. Overview: show only latest. + let builds = if site_filter.is_some() { + st.builds.iter().collect::<Vec<_>>() + } else { + st.builds.iter().take(1).collect::<Vec<_>>() + }; + + for entry in builds { + statuses.push(build_entry_to_status(site_name, entry, &st.current)); } } @@ -187,6 +186,153 @@ async fn run_status( Ok(()) } +#[allow(clippy::print_stderr)] // CLI output goes to stderr +async fn run_switch( + config_path: std::path::PathBuf, + site_name: String, + build_timestamp: String, +) -> Result<()> { + let config = config::Config::load(&config_path).await?; + + if config.find_site(&site_name).is_none() { + bail!( + "site '{}' not found in {}", + site_name, + config_path.display() + ); + } + + let builds_dir = config.base_dir.join("builds").join(&site_name); + + if !builds_dir.exists() { + bail!("no builds found for site '{site_name}'"); + } + + if !cleanup::looks_like_timestamp(&build_timestamp) { + bail!("'{build_timestamp}' is not a valid build timestamp"); + } + + let build_dir = builds_dir.join(&build_timestamp); + if !build_dir.is_dir() { + let available = cleanup::list_build_timestamps(&builds_dir).await?; + if available.is_empty() { + bail!("no builds found for site '{site_name}'"); + } + let mut sorted = available; + sorted.sort_by(|a, b| b.cmp(a)); + bail!( + "build '{}' not found for site 
'{}'\navailable builds:\n {}", + build_timestamp, + site_name, + sorted.join("\n ") + ); + } + + let current_link = builds_dir.join("current"); + publish::atomic_symlink_update(&build_dir, ¤t_link).await?; + state::set_current(&config.base_dir, &site_name, &build_timestamp).await; + + eprintln!("switched {site_name} to build {build_timestamp}"); + Ok(()) +} + +#[allow(clippy::print_stderr)] // CLI output goes to stderr +async fn run_cleanup( + config_path: std::path::PathBuf, + site_filter: Option<String>, + keep: Option<u32>, +) -> Result<()> { + let config = config::Config::load(&config_path).await?; + + if let Some(name) = &site_filter + && config.find_site(name).is_none() + { + bail!("site '{}' not found in {}", name, config_path.display()); + } + + if keep == Some(0) { + bail!("--keep 0 would delete all builds; refusing"); + } + + let max_to_keep = keep.unwrap_or(config.max_builds_to_keep); + + if max_to_keep == 0 { + eprintln!("cleanup disabled (max_builds_to_keep is 0; use --keep N to override)"); + return Ok(()); + } + + let sites: Vec<&str> = match &site_filter { + Some(name) => vec![name.as_str()], + None => config.sites.iter().map(|s| s.name.as_str()).collect(), + }; + + let mut total_builds: u32 = 0; + let mut total_logs: u32 = 0; + + for site_name in &sites { + let result = + cleanup::cleanup_old_builds(&config.base_dir, &config.log_dir, site_name, max_to_keep) + .await + .with_context(|| format!("cleanup failed for site '{site_name}'"))?; + + if result.builds_removed > 0 || result.logs_removed > 0 { + eprintln!( + "{site_name}: removed {} build(s), {} log(s)", + result.builds_removed, result.logs_removed + ); + } else { + eprintln!("{site_name}: nothing to clean"); + } + + total_builds += result.builds_removed; + total_logs += result.logs_removed; + } + + if sites.len() > 1 { + eprintln!("total: {total_builds} build(s), {total_logs} log(s) removed"); + } + + Ok(()) +} + +/// Convert a `BuildEntry` to a `DeploymentStatus` for display. 
+/// +/// For "building" entries, computes elapsed time from `started_at`. +fn build_entry_to_status(site_name: &str, entry: &BuildEntry, current: &str) -> DeploymentStatus { + let duration = if entry.status == "building" { + elapsed_since(&entry.started_at) + } else { + entry.duration.clone() + }; + + let git_commit = if entry.git_commit.is_empty() { + "-".to_owned() + } else { + entry.git_commit.clone() + }; + + DeploymentStatus { + site_name: site_name.to_owned(), + timestamp: entry.timestamp.clone(), + git_commit, + duration, + status: entry.status.clone(), + log: entry.log.clone(), + current_build: current.to_owned(), + } +} + +/// Compute human-readable elapsed time from an ISO 8601 timestamp. +fn elapsed_since(started_at: &str) -> String { + let Some(start) = witryna::time::parse_rfc3339(started_at) else { + return "-".to_owned(); + }; + let Ok(elapsed) = start.elapsed() else { + return "-".to_owned(); + }; + logs::format_duration(elapsed) +} + fn format_status_table(statuses: &[DeploymentStatus]) -> String { use std::fmt::Write as _; @@ -200,14 +346,19 @@ fn format_status_table(statuses: &[DeploymentStatus]) -> String { let mut out = String::new(); let _ = writeln!( out, - "{:<site_width$} {:<11} {:<7} {:<8} {:<24} LOG", + " {:<site_width$} {:<11} {:<7} {:<8} {:<24} LOG", "SITE", "STATUS", "COMMIT", "DURATION", "TIMESTAMP" ); for s in statuses { + let marker = if !s.current_build.is_empty() && s.timestamp == s.current_build { + "+" + } else { + " " + }; let _ = writeln!( out, - "{:<site_width$} {:<11} {:<7} {:<8} {:<24} {}", + "{marker} {:<site_width$} {:<11} {:<7} {:<8} {:<24} {}", s.site_name, s.status, s.git_commit, s.duration, s.timestamp, s.log ); } @@ -247,7 +398,6 @@ mod tests { base_dir: PathBuf::from("/var/lib/witryna"), log_dir: PathBuf::from("/var/log/witryna"), log_level: "info".to_owned(), - rate_limit_per_minute: 10, max_builds_to_keep: 5, git_timeout: None, sites, @@ -342,6 +492,7 @@ mod tests { duration: duration.to_owned(), status: 
status.to_owned(), log: log.to_owned(), + current_build: String::new(), } } @@ -419,4 +570,81 @@ mod tests { let output = format_status_table(&statuses); assert!(output.contains("hook failed")); } + + #[test] + fn format_status_table_current_build_marker() { + let mut ds = test_deployment( + "my-site", + "success", + "abc123d", + "45s", + "20260126-143000-123456", + "/logs/my-site/20260126-143000-123456.log", + ); + ds.current_build = "20260126-143000-123456".to_owned(); + let output = format_status_table(&[ds]); + + // The matching row should start with "+" + let data_line = output.lines().nth(1).unwrap(); + assert!( + data_line.starts_with('+'), + "row should start with '+', got: {data_line}" + ); + } + + #[test] + fn format_status_table_no_marker_when_no_current() { + let ds = test_deployment( + "my-site", + "success", + "abc123d", + "45s", + "20260126-143000-123456", + "/logs/my-site/20260126-143000-123456.log", + ); + let output = format_status_table(&[ds]); + + let data_line = output.lines().nth(1).unwrap(); + assert!( + data_line.starts_with(' '), + "row should start with space when no current_build, got: {data_line}" + ); + } + + #[test] + fn format_status_table_marker_only_on_matching_row() { + let mut ds1 = test_deployment( + "my-site", + "success", + "abc123d", + "45s", + "20260126-143000-123456", + "/logs/1.log", + ); + ds1.current_build = "20260126-143000-123456".to_owned(); + + let mut ds2 = test_deployment( + "my-site", + "failed", + "def4567", + "30s", + "20260126-150000-000000", + "/logs/2.log", + ); + ds2.current_build = "20260126-143000-123456".to_owned(); + + let output = format_status_table(&[ds1, ds2]); + let lines: Vec<&str> = output.lines().collect(); + + assert!( + lines[1].starts_with('+'), + "matching row should have +: {}", + lines[1] + ); + assert!( + lines[2].starts_with(' '), + "non-matching row should have space: {}", + lines[2] + ); + } } diff --git a/src/pipeline.rs b/src/pipeline.rs index 5827ad7..21857a8 100644 --- 
a/src/pipeline.rs +++ b/src/pipeline.rs @@ -1,11 +1,12 @@ use crate::config::SiteConfig; use crate::logs::{BuildExitStatus, BuildLogMeta}; -use crate::{build, cleanup, git, hook, logs, publish, repo_config}; +use crate::state::BuildEntry; +use crate::{build, cleanup, git, hook, logs, publish, repo_config, state}; use anyhow::Result; -use chrono::Utc; +use log::{error, info, warn}; +use std::collections::HashMap; use std::path::{Path, PathBuf}; -use std::time::{Duration, Instant}; -use tracing::{error, info, warn}; +use std::time::{Duration, Instant, SystemTime}; /// Result of a successful pipeline run. pub struct PipelineResult { @@ -37,13 +38,35 @@ pub async fn run_build( git_timeout: Duration, verbose: bool, ) -> Result<PipelineResult> { - let timestamp = Utc::now().format("%Y%m%d-%H%M%S-%f").to_string(); + let now = SystemTime::now(); + let timestamp = crate::time::format_build_timestamp(now); let start_time = Instant::now(); + let started_at = crate::time::format_rfc3339(now); + let log_path_str = log_dir + .join(site_name) + .join(format!("{timestamp}.log")) + .to_string_lossy() + .to_string(); + + // Write "building" state + state::push_build( + base_dir, + site_name, + BuildEntry { + status: "building".to_owned(), + timestamp: timestamp.clone(), + started_at: started_at.clone(), + git_commit: String::new(), + duration: String::new(), + log: log_path_str.clone(), + }, + ) + .await; let clone_dir = base_dir.join("clones").join(site_name); // 1. 
Sync git repository - info!(%site_name, "syncing repository"); + info!("[{site_name}] syncing repository"); if let Err(e) = git::sync_repo( &site.repo_url, &site.branch, @@ -53,7 +76,8 @@ ) .await { - error!(%site_name, error = %e, "git sync failed"); + error!("[{site_name}] git sync failed: {e}"); + let duration = start_time.elapsed(); save_build_log_for_error( log_dir, site_name, @@ -64,6 +88,7 @@ &e.to_string(), ) .await; + update_final_state(base_dir, site_name, "failed", "", duration).await; return Err(e.context("git sync failed")); } @@ -80,17 +105,26 @@ { Ok(config) => config, Err(e) => { - error!(%site_name, error = %e, "failed to load repo config"); + error!("[{site_name}] failed to load repo config: {e}"); + let duration = start_time.elapsed(); save_build_log_for_error( log_dir, site_name, &timestamp, start_time, - git_commit, + git_commit.clone(), "config-load", &e.to_string(), ) .await; + update_final_state( + base_dir, + site_name, + "failed", + git_commit.as_deref().unwrap_or(""), + duration, + ) + .await; return Err(e.context("failed to load repo config")); } }; @@ -103,7 +137,10 @@ let sanitized = crate::config::sanitize_cache_dir_name(dir); let host_path = base_dir.join("cache").join(site_name).join(&sanitized); if let Err(e) = tokio::fs::create_dir_all(&host_path).await { - error!(%site_name, path = %host_path.display(), error = %e, "failed to create cache directory"); + error!( + "[{site_name}] failed to create cache directory: path={} {e}", + host_path.display() + ); anyhow::bail!("failed to create cache directory: {e}"); } volumes.push((dir.clone(), host_path)); @@ -112,7 +149,7 @@ .iter() .map(|(c, h)| format!("{}:{}", h.display(), c)) .collect(); - info!(%site_name, mounts = ?mount_list, "mounting cache volumes"); + info!("[{site_name}] mounting cache volumes: {mount_list:?}"); volumes } _ => Vec::new(), @@ -121,7 +158,7 @@ pub 
async fn run_build( // 4. Execute build — stream output to temp files let site_log_dir = log_dir.join(site_name); if let Err(e) = tokio::fs::create_dir_all(&site_log_dir).await { - error!(%site_name, error = %e, "failed to create log directory"); + error!("[{site_name}] failed to create log directory: {e}"); anyhow::bail!("failed to create log directory: {e}"); } let stdout_tmp = site_log_dir.join(format!("{timestamp}-stdout.tmp")); @@ -136,8 +173,11 @@ network: site.container_network.clone(), workdir: site.container_workdir.clone(), }; - info!(%site_name, image = %repo_config.image, "running container build"); - let build_result = build::execute( + info!( + "[{site_name}] running container build: image={}", + repo_config.image + ); + let build_result = Box::pin(build::execute( container_runtime, &clone_dir, &repo_config, @@ -148,7 +188,7 @@ &stderr_tmp, timeout, verbose, - ) + )) .await; // Determine exit status and extract temp file paths @@ -207,7 +247,7 @@ match logs::save_build_log(log_dir, &meta, &build_stdout_file, &build_stderr_file).await { Ok(path) => path, Err(e) => { - error!(%site_name, error = %e, "failed to save build log"); + error!("[{site_name}] failed to save build log: {e}"); let _ = tokio::fs::remove_file(&build_stdout_file).await; let _ = tokio::fs::remove_file(&build_stderr_file).await; // Non-fatal for log save — continue if build succeeded @@ -215,14 +255,37 @@ } }; - // If build failed, return error + // If build failed, run hook (if configured) then return error if let Err(e) = build_result { - error!(%site_name, "build failed"); + error!("[{site_name}] build failed"); + run_hook_if_configured( + site, + site_name, + &clone_dir, + base_dir, + log_dir, + &site_log_dir, + &timestamp, + "failed", + &env, + ) + .await; + update_final_state( + base_dir, + site_name, + "failed", + git_commit.as_deref().unwrap_or(""), + start_time.elapsed(), + ) + .await; 
return Err(e); } // 5. Publish assets (with same timestamp as log) - info!(%site_name, public = %repo_config.public, "publishing assets"); + info!( + "[{site_name}] publishing assets: public={}", + repo_config.public + ); let publish_result = publish::publish( base_dir, site_name, @@ -233,53 +296,50 @@ .await?; info!( - %site_name, - build_dir = %publish_result.build_dir.display(), - timestamp = %publish_result.timestamp, - "deployment completed successfully" + "[{site_name}] deployment completed successfully: build_dir={} timestamp={}", + publish_result.build_dir.display(), + publish_result.timestamp ); // 6. Run post-deploy hook (non-fatal) - if let Some(hook_cmd) = &site.post_deploy { - info!(%site_name, "running post-deploy hook"); - let hook_stdout_tmp = site_log_dir.join(format!("{timestamp}-hook-stdout.tmp")); - let hook_stderr_tmp = site_log_dir.join(format!("{timestamp}-hook-stderr.tmp")); - let public_dir = base_dir.join("builds").join(site_name).join("current"); - - let hook_result = hook::run_post_deploy_hook( - hook_cmd, - site_name, - &publish_result.build_dir, - &public_dir, - &timestamp, - &env, - &hook_stdout_tmp, - &hook_stderr_tmp, - ) - .await; - - if let Err(e) = logs::save_hook_log(log_dir, site_name, &timestamp, &hook_result).await { - error!(%site_name, error = %e, "failed to save hook log"); - let _ = tokio::fs::remove_file(&hook_stdout_tmp).await; - let _ = tokio::fs::remove_file(&hook_stderr_tmp).await; - } - - if hook_result.success { - info!(%site_name, "post-deploy hook completed"); + let mut final_status = "success"; + if let Some(hook_success) = run_hook_if_configured( + site, + site_name, + &publish_result.build_dir, + base_dir, + log_dir, + &site_log_dir, + &timestamp, + "success", + &env, + ) + .await + { + if hook_success { + info!("[{site_name}] post-deploy hook completed"); } else { - warn!( - %site_name, - exit_code = ?hook_result.exit_code, - "post-deploy hook failed (non-fatal)" - ); + warn!("[{site_name}] 
post-deploy hook failed (non-fatal)"); + final_status = "hook failed"; } } + // Write final state + set current build + update_final_state( + base_dir, + site_name, + final_status, + git_commit.as_deref().unwrap_or(""), + start_time.elapsed(), + ) + .await; + state::set_current(base_dir, site_name, &timestamp).await; + // 7. Cleanup old builds (non-fatal if it fails) if let Err(e) = cleanup::cleanup_old_builds(base_dir, log_dir, site_name, max_builds_to_keep).await { - warn!(%site_name, error = %e, "cleanup failed (non-fatal)"); + warn!("[{site_name}] cleanup failed (non-fatal): {e}"); } let duration = start_time.elapsed(); @@ -291,6 +351,68 @@ }) } +/// Run the post-deploy hook if configured. Returns `Some(success)` if the hook +/// ran, or `None` if no hook is configured. +#[allow(clippy::too_many_arguments)] +async fn run_hook_if_configured( + site: &SiteConfig, + site_name: &str, + build_dir: &Path, + base_dir: &Path, + log_dir: &Path, + site_log_dir: &Path, + timestamp: &str, + build_status: &str, + env: &HashMap<String, String>, +) -> Option<bool> { + let hook_cmd = site.post_deploy.as_ref()?; + info!("[{site_name}] running post-deploy hook (build_status={build_status})"); + + let hook_stdout_tmp = site_log_dir.join(format!("{timestamp}-hook-stdout.tmp")); + let hook_stderr_tmp = site_log_dir.join(format!("{timestamp}-hook-stderr.tmp")); + let public_dir = base_dir.join("builds").join(site_name).join("current"); + + let hook_result = Box::pin(hook::run_post_deploy_hook( + hook_cmd, + site_name, + build_dir, + &public_dir, + timestamp, + build_status, + env, + &hook_stdout_tmp, + &hook_stderr_tmp, + )) + .await; + + if let Err(e) = logs::save_hook_log(log_dir, site_name, timestamp, &hook_result).await { + error!("[{site_name}] failed to save hook log: {e}"); + let _ = tokio::fs::remove_file(&hook_stdout_tmp).await; + let _ = tokio::fs::remove_file(&hook_stderr_tmp).await; + } + + Some(hook_result.success) +} + +/// Update the latest build 
entry with final status, commit, and duration. Best-effort. +async fn update_final_state( + base_dir: &Path, + site_name: &str, + status: &str, + git_commit: &str, + duration: Duration, +) { + let s = status.to_owned(); + let c = git_commit.to_owned(); + let d = logs::format_duration(duration); + state::update_latest_build(base_dir, site_name, |e| { + e.status = s; + e.git_commit = c; + e.duration = d; + }) + .await; +} + /// Save a build log for errors that occur before the build starts. async fn save_build_log_for_error( log_dir: &Path, @@ -321,7 +443,7 @@ async fn save_build_log_for_error( let _ = tokio::fs::File::create(&stderr_tmp).await; if let Err(e) = logs::save_build_log(log_dir, &meta, &stdout_tmp, &stderr_tmp).await { - error!(site_name, error = %e, "failed to save build log"); + error!("[{site_name}] failed to save build log: {e}"); let _ = tokio::fs::remove_file(&stdout_tmp).await; let _ = tokio::fs::remove_file(&stderr_tmp).await; } diff --git a/src/polling.rs b/src/polling.rs index 6c25326..c06cfad 100644 --- a/src/polling.rs +++ b/src/polling.rs @@ -7,18 +7,17 @@ use crate::build_guard::BuildGuard; use crate::config::SiteConfig; use crate::git; use crate::server::AppState; +use log::{debug, error, info}; use std::collections::HashMap; use std::hash::{Hash as _, Hasher as _}; use std::sync::Arc; use std::time::Duration; -use tokio::sync::RwLock; -use tokio_util::sync::CancellationToken; -use tracing::{debug, error, info}; +use tokio::sync::{RwLock, watch}; /// Manages polling tasks for all sites. pub struct PollingManager { - /// Map of `site_name` -> cancellation token for active polling tasks - tasks: Arc<RwLock<HashMap<String, CancellationToken>>>, + /// Map of `site_name` -> cancellation sender for active polling tasks + tasks: Arc<RwLock<HashMap<String, watch::Sender<()>>>>, } impl PollingManager { @@ -31,10 +30,19 @@ impl PollingManager { /// Start polling tasks for sites with `poll_interval` configured. 
/// Call this on startup and after SIGHUP reload. + /// + /// # Panics + /// + /// Panics if the config `RwLock` is poisoned. pub async fn start_polling(&self, state: AppState) { - let config = state.config.read().await; - - for site in &config.sites { + let sites: Vec<_> = state + .config + .read() + .expect("config lock poisoned") + .sites + .clone(); + + for site in &sites { if let Some(interval) = site.poll_interval { self.spawn_poll_task(state.clone(), site.clone(), interval) .await; @@ -47,38 +55,37 @@ impl PollingManager { pub async fn stop_all(&self) { let mut tasks = self.tasks.write().await; - for (site_name, token) in tasks.drain() { - info!(site = %site_name, "stopping polling task"); - token.cancel(); + for (site_name, tx) in tasks.drain() { + info!("[{site_name}] stopping polling task"); + let _ = tx.send(()); } } /// Spawn a single polling task for a site. async fn spawn_poll_task(&self, state: AppState, site: SiteConfig, interval: Duration) { let site_name = site.name.clone(); - let token = CancellationToken::new(); + let (cancel_tx, cancel_rx) = watch::channel(()); - // Store the cancellation token + // Store the cancellation sender { let mut tasks = self.tasks.write().await; - tasks.insert(site_name.clone(), token.clone()); + tasks.insert(site_name.clone(), cancel_tx); } info!( - site = %site_name, - interval_secs = interval.as_secs(), - "starting polling task" + "[{site_name}] starting polling task: interval_secs={}", + interval.as_secs() ); // Spawn the polling loop let tasks = Arc::clone(&self.tasks); tokio::spawn(async move { #[allow(clippy::large_futures)] - poll_loop(state, site, interval, token.clone()).await; + poll_loop(state, site, interval, cancel_rx).await; // Remove from active tasks when done tasks.write().await.remove(&site_name); - debug!(site = %site_name, "polling task ended"); + debug!("[{site_name}] polling task ended"); }); } } @@ -94,29 +101,32 @@ async fn poll_loop( state: AppState, site: SiteConfig, interval: Duration, - 
cancel_token: CancellationToken, + mut cancel_rx: watch::Receiver<()>, ) { let site_name = &site.name; // Initial delay before first poll (avoid thundering herd on startup) let initial_delay = calculate_initial_delay(site_name, interval); - debug!(site = %site_name, delay_secs = initial_delay.as_secs(), "initial poll delay"); + debug!( + "[{site_name}] initial poll delay: {} secs", + initial_delay.as_secs() + ); tokio::select! { () = tokio::time::sleep(initial_delay) => {} - () = cancel_token.cancelled() => return, + _ = cancel_rx.changed() => return, } loop { - debug!(site = %site_name, "polling for changes"); + debug!("[{site_name}] polling for changes"); // 1. Acquire build lock before any git operation let Some(guard) = BuildGuard::try_acquire(site_name.clone(), &state.build_scheduler) else { - debug!(site = %site_name, "build in progress, skipping poll cycle"); + debug!("[{site_name}] build in progress, skipping poll cycle"); tokio::select! { () = tokio::time::sleep(interval) => {} - () = cancel_token.cancelled() => { - info!(site = %site_name, "polling cancelled"); + _ = cancel_rx.changed() => { + info!("[{site_name}] polling cancelled"); return; } } @@ -125,7 +135,7 @@ async fn poll_loop( // Get current config (might have changed via SIGHUP) let (base_dir, git_timeout) = { - let config = state.config.read().await; + let config = state.config.read().expect("config lock poisoned"); ( config.base_dir.clone(), config.git_timeout.unwrap_or(git::GIT_TIMEOUT_DEFAULT), @@ -144,14 +154,14 @@ async fn poll_loop( { Ok(changed) => changed, Err(e) => { - error!(site = %site_name, error = %e, "failed to check for changes"); + error!("[{site_name}] failed to check for changes: {e}"); false } }; if has_changes { // 3a. 
Keep guard alive — move into build pipeline - info!(site = %site_name, "new commits detected, triggering build"); + info!("[{site_name}] new commits detected, triggering build"); #[allow(clippy::large_futures)] crate::server::run_build_pipeline( state.clone(), @@ -168,8 +178,8 @@ async fn poll_loop( // 4. Sleep (lock is NOT held here in either branch) tokio::select! { () = tokio::time::sleep(interval) => {} - () = cancel_token.cancelled() => { - info!(site = %site_name, "polling cancelled"); + _ = cancel_rx.changed() => { + info!("[{site_name}] polling cancelled"); return; } } diff --git a/src/publish.rs b/src/publish.rs index 338a136..f4862c4 100644 --- a/src/publish.rs +++ b/src/publish.rs @@ -1,6 +1,6 @@ use anyhow::{Context as _, Result, bail}; +use log::{debug, info, warn}; use std::path::{Path, PathBuf}; -use tracing::{debug, info}; /// Result of a successful publish operation. #[derive(Debug)] @@ -52,9 +52,9 @@ pub async fn publish( let current_link = site_builds_dir.join("current"); info!( - source = %source_dir.display(), - destination = %build_dir.display(), - "publishing assets" + "publishing assets: source={} destination={}", + source_dir.display(), + build_dir.display() ); // 3. 
Create builds directory structure @@ -76,9 +76,9 @@ pub async fn publish( atomic_symlink_update(&build_dir, &current_link).await?; debug!( - build_dir = %build_dir.display(), - symlink = %current_link.display(), - "publish completed" + "publish completed: build_dir={} symlink={}", + build_dir.display(), + current_link.display() ); Ok(PublishResult { @@ -109,7 +109,7 @@ async fn copy_dir_contents(src: &Path, dst: &Path) -> Result<()> { // SEC-002: reject symlinks in build output to prevent symlink attacks let metadata = tokio::fs::symlink_metadata(&entry_path).await?; if metadata.file_type().is_symlink() { - tracing::warn!(path = %entry_path.display(), "skipping symlink in build output"); + warn!("skipping symlink in build output: {}", entry_path.display()); continue; } @@ -144,7 +144,11 @@ async fn copy_dir_contents(src: &Path, dst: &Path) -> Result<()> { /// 2. Rename temp to final: {`link_path}.tmp` -> {`link_path`} /// /// The rename operation is atomic on POSIX filesystems. -async fn atomic_symlink_update(target: &Path, link_path: &Path) -> Result<()> { +/// +/// # Errors +/// +/// Returns an error if the temporary symlink cannot be created or the atomic rename fails. 
+pub async fn atomic_symlink_update(target: &Path, link_path: &Path) -> Result<()> { let temp_link = link_path.with_extension("tmp"); // Remove any stale temp symlink from previous failed attempts @@ -168,11 +172,10 @@ async fn atomic_symlink_update(target: &Path, link_path: &Path) -> Result<()> { mod tests { use super::*; use crate::test_support::{cleanup, temp_dir}; - use chrono::Utc; use tokio::fs; fn test_timestamp() -> String { - Utc::now().format("%Y%m%d-%H%M%S-%f").to_string() + crate::time::format_build_timestamp(std::time::SystemTime::now()) } #[tokio::test] diff --git a/src/server.rs b/src/server.rs index e31a1e4..a2aef5c 100644 --- a/src/server.rs +++ b/src/server.rs @@ -2,24 +2,11 @@ use crate::build_guard::{BuildGuard, BuildScheduler}; use crate::config::{Config, SiteConfig}; use crate::polling::PollingManager; use anyhow::Result; -use axum::{ - Json, Router, - extract::{DefaultBodyLimit, Path, State}, - http::{HeaderMap, StatusCode}, - response::IntoResponse, - routing::{get, post}, -}; -use governor::clock::DefaultClock; -use governor::state::keyed::DashMapStateStore; -use governor::{Quota, RateLimiter}; -use std::num::NonZeroU32; +use log::{error, info, warn}; use std::path::PathBuf; -use std::sync::Arc; -use subtle::ConstantTimeEq as _; -use tokio::net::TcpListener; +use std::sync::{Arc, RwLock}; +use tiny_http::{Header, Method, Request, Response, Server}; use tokio::signal::unix::{SignalKind, signal}; -use tokio::sync::RwLock; -use tracing::{error, info, warn}; #[derive(serde::Serialize)] struct ErrorResponse { @@ -36,114 +23,118 @@ struct HealthResponse { status: &'static str, } -fn error_response(status: StatusCode, error: &'static str) -> impl IntoResponse { - (status, Json(ErrorResponse { error })) +fn json_response(status: u16, body: &str) -> Response<std::io::Cursor<Vec<u8>>> { + let data = body.as_bytes().to_vec(); + Response::from_data(data) + .with_status_code(status) + .with_header(Header::from_bytes("Content-Type", 
"application/json").expect("valid header")) } -type TokenRateLimiter = RateLimiter<String, DashMapStateStore<String>, DefaultClock>; +fn empty_response(status: u16) -> Response<std::io::Empty> { + Response::empty(status) +} #[derive(Clone)] pub struct AppState { pub config: Arc<RwLock<Config>>, pub config_path: Arc<PathBuf>, pub build_scheduler: Arc<BuildScheduler>, - pub rate_limiter: Arc<TokenRateLimiter>, pub polling_manager: Arc<PollingManager>, } -pub fn create_router(state: AppState) -> Router { - Router::new() - .route("/health", get(health_handler)) - .route("/{site_name}", post(deploy_handler)) - .layer(DefaultBodyLimit::max(1024 * 1024)) // 1MB limit - .with_state(state) -} - -async fn health_handler() -> impl IntoResponse { - Json(HealthResponse { status: "ok" }) -} - -/// Extract Bearer token from Authorization header. -fn extract_bearer_token(headers: &HeaderMap) -> Option<&str> { +/// Extract Bearer token from `tiny_http` headers. +fn extract_bearer_token(headers: &[Header]) -> Option<&str> { headers - .get("authorization") - .and_then(|v| v.to_str().ok()) - .and_then(|v| v.strip_prefix("Bearer ")) + .iter() + .find(|h| h.field.equiv("Authorization")) + .and_then(|h| h.value.as_str().strip_prefix("Bearer ")) } fn validate_token(provided: &str, expected: &str) -> bool { - let provided_bytes = provided.as_bytes(); - let expected_bytes = expected.as_bytes(); + let a = provided.as_bytes(); + let b = expected.as_bytes(); + + // Constant-time comparison — OWASP requirement. + // Length check is not constant-time, but token length is not secret + // (same early-return approach as subtle::ConstantTimeEq for slices). 
+ if a.len() != b.len() { + return false; + } - // Constant-time comparison - OWASP requirement - provided_bytes.ct_eq(expected_bytes).into() + let mut acc: u8 = 0; + for (x, y) in a.iter().zip(b.iter()) { + acc |= x ^ y; + } + acc == 0 } -async fn deploy_handler( - State(state): State<AppState>, - Path(site_name): Path<String>, - headers: HeaderMap, -) -> impl IntoResponse { - info!(%site_name, "deployment request received"); - - // Find the site first to avoid information leakage - let site = { - let config = state.config.read().await; - if let Some(site) = config.find_site(&site_name) { - site.clone() - } else { - info!(%site_name, "site not found"); - return error_response(StatusCode::NOT_FOUND, "not_found").into_response(); - } - }; - - // Validate Bearer token (skip if auth disabled for this site) - if site.webhook_token.is_empty() { - // Auth disabled — rate limit by site name instead - if state.rate_limiter.check_key(&site_name).is_err() { - info!(%site_name, "rate limit exceeded"); - return error_response(StatusCode::TOO_MANY_REQUESTS, "rate_limit_exceeded") - .into_response(); - } - } else { - let Some(token) = extract_bearer_token(&headers) else { - info!(%site_name, "missing or malformed authorization header"); - return error_response(StatusCode::UNAUTHORIZED, "unauthorized").into_response(); - }; +/// Check if path is a single segment (e.g., "/my-site"). +fn is_site_path(path: &str) -> bool { + path.starts_with('/') && path.len() > 1 && !path[1..].contains('/') +} - if !validate_token(token, &site.webhook_token) { - info!(%site_name, "invalid token"); - return error_response(StatusCode::UNAUTHORIZED, "unauthorized").into_response(); - } +/// Handle POST `/{site_name}`. 
+fn handle_deploy( + request: Request, + site_name: &str, + state: &AppState, + handle: &tokio::runtime::Handle, +) { + info!("[{site_name}] deployment request received"); + + // Find site + let site = state + .config + .read() + .expect("config lock poisoned") + .find_site(site_name) + .cloned(); + let Some(site) = site else { + info!("[{site_name}] site not found"); + let body = serde_json::to_string(&ErrorResponse { error: "not_found" }) + .expect("static JSON serialization"); + let _ = request.respond(json_response(404, &body)); + return; + }; - // Rate limit check (per token) - if state.rate_limiter.check_key(&token.to_owned()).is_err() { - info!(%site_name, "rate limit exceeded"); - return error_response(StatusCode::TOO_MANY_REQUESTS, "rate_limit_exceeded") - .into_response(); + // Auth check (if configured) + if !site.webhook_token.is_empty() { + let token_valid = extract_bearer_token(request.headers()) + .is_some_and(|token| validate_token(token, &site.webhook_token)); + + if !token_valid { + info!("[{site_name}] unauthorized request"); + let body = serde_json::to_string(&ErrorResponse { + error: "unauthorized", + }) + .expect("static JSON serialization"); + let _ = request.respond(json_response(401, &body)); + return; } } // Try immediate build - let Some(guard) = BuildGuard::try_acquire(site_name.clone(), &state.build_scheduler) else { + let Some(guard) = BuildGuard::try_acquire(site_name.to_owned(), &state.build_scheduler) else { // Build in progress — try to queue - if state.build_scheduler.try_queue(&site_name) { - info!(%site_name, "build queued"); - return ( - StatusCode::ACCEPTED, - Json(QueuedResponse { status: "queued" }), - ) - .into_response(); + if state.build_scheduler.try_queue(site_name) { + info!("[{site_name}] build queued"); + let body = serde_json::to_string(&QueuedResponse { status: "queued" }) + .expect("static JSON serialization"); + let _ = request.respond(json_response(202, &body)); + return; } // Already queued — collapse - 
info!(%site_name, "build already queued, collapsing"); - return StatusCode::ACCEPTED.into_response(); + info!("[{site_name}] build already queued, collapsing"); + let _ = request.respond(empty_response(202)); + return; }; - info!(%site_name, "deployment accepted"); + info!("[{site_name}] deployment accepted"); // Spawn async build pipeline with queue drain loop - tokio::spawn(async move { + let state = state.clone(); + let site_name = site_name.to_owned(); + handle.spawn(async move { let mut current_site = site; let mut current_guard = guard; loop { @@ -160,9 +151,15 @@ async fn deploy_handler( if !state.build_scheduler.take_queued(&site_name) { break; } - info!(%site_name, "processing queued rebuild"); - let Some(new_site) = state.config.read().await.find_site(&site_name).cloned() else { - warn!(%site_name, "site removed from config, skipping queued rebuild"); + info!("[{site_name}] processing queued rebuild"); + let Some(new_site) = state + .config + .read() + .expect("config lock poisoned") + .find_site(&site_name) + .cloned() + else { + warn!("[{site_name}] site removed from config, skipping queued rebuild"); break; }; let Some(new_guard) = @@ -175,7 +172,43 @@ async fn deploy_handler( } }); - StatusCode::ACCEPTED.into_response() + let _ = request.respond(empty_response(202)); +} + +/// Main request loop (runs on `std::thread`). 
+#[allow(clippy::needless_pass_by_value)] // ownership required by std::thread::spawn callers +pub(crate) fn handle_requests( + server: Arc<Server>, + state: AppState, + handle: tokio::runtime::Handle, +) { + for request in server.incoming_requests() { + let path = request.url().split('?').next().unwrap_or("").to_owned(); + let method = request.method().clone(); + + match (method, path.as_str()) { + (Method::Get, "/health") => { + let body = serde_json::to_string(&HealthResponse { status: "ok" }) + .expect("static JSON serialization"); + let _ = request.respond(json_response(200, &body)); + } + (_, "/health") => { + let _ = request.respond(empty_response(405)); + } + (Method::Post, _) if is_site_path(&path) => { + let site_name = &path[1..]; + handle_deploy(request, site_name, &state, &handle); + } + (_, _) if is_site_path(&path) => { + let _ = request.respond(empty_response(405)); + } + _ => { + let body = serde_json::to_string(&ErrorResponse { error: "not_found" }) + .expect("static JSON serialization"); + let _ = request.respond(json_response(404, &body)); + } + } + } } /// Run the complete build pipeline: git sync → build → publish. 
@@ -187,7 +220,7 @@ pub(crate) async fn run_build_pipeline( _guard: BuildGuard, ) { let (base_dir, log_dir, container_runtime, max_builds_to_keep, git_timeout) = { - let config = state.config.read().await; + let config = state.config.read().expect("config lock poisoned"); ( config.base_dir.clone(), config.log_dir.clone(), @@ -213,14 +246,13 @@ pub(crate) async fn run_build_pipeline( { Ok(result) => { info!( - %site_name, - build_dir = %result.build_dir.display(), - duration_secs = result.duration.as_secs(), - "pipeline completed" + "[{site_name}] pipeline completed: build_dir={} duration_secs={}", + result.build_dir.display(), + result.duration.as_secs() ); } Err(e) => { - error!(%site_name, error = %e, "pipeline failed"); + error!("[{site_name}] pipeline failed: {e}"); } } } @@ -239,38 +271,41 @@ pub(crate) fn setup_sighup_handler(state: AppState) { let config_path = state.config_path.as_ref(); match Config::load(config_path).await { Ok(new_config) => { - let old_sites_count = state.config.read().await.sites.len(); + let old_sites_count = state + .config + .read() + .expect("config lock poisoned") + .sites + .len(); let new_sites_count = new_config.sites.len(); // Check for non-reloadable changes and capture old values let (old_listen, old_base, old_log_dir, old_log_level) = { - let old_config = state.config.read().await; + let old_config = state.config.read().expect("config lock poisoned"); if old_config.listen_address != new_config.listen_address { warn!( - old = %old_config.listen_address, - new = %new_config.listen_address, - "listen_address changed but cannot be reloaded (restart required)" + "listen_address changed but cannot be reloaded (restart required): old={} new={}", + old_config.listen_address, new_config.listen_address ); } if old_config.base_dir != new_config.base_dir { warn!( - old = %old_config.base_dir.display(), - new = %new_config.base_dir.display(), - "base_dir changed but cannot be reloaded (restart required)" + "base_dir changed but cannot 
be reloaded (restart required): old={} new={}", + old_config.base_dir.display(), + new_config.base_dir.display() ); } if old_config.log_dir != new_config.log_dir { warn!( - old = %old_config.log_dir.display(), - new = %new_config.log_dir.display(), - "log_dir changed but cannot be reloaded (restart required)" + "log_dir changed but cannot be reloaded (restart required): old={} new={}", + old_config.log_dir.display(), + new_config.log_dir.display() ); } if old_config.log_level != new_config.log_level { warn!( - old = %old_config.log_level, - new = %new_config.log_level, - "log_level changed but cannot be reloaded (restart required)" + "log_level changed but cannot be reloaded (restart required): old={} new={}", + old_config.log_level, new_config.log_level ); } ( @@ -289,7 +324,7 @@ pub(crate) fn setup_sighup_handler(state: AppState) { final_config.log_level = old_log_level; // Apply the merged configuration - *state.config.write().await = final_config; + *state.config.write().expect("config lock poisoned") = final_config; // Restart polling tasks with new configuration info!("restarting polling tasks"); @@ -297,12 +332,11 @@ pub(crate) fn setup_sighup_handler(state: AppState) { state.polling_manager.start_polling(state.clone()).await; info!( - old_sites_count, - new_sites_count, "configuration reloaded successfully" + "configuration reloaded successfully: old_sites_count={old_sites_count} new_sites_count={new_sites_count}" ); } Err(e) => { - error!(error = %e, "failed to reload configuration, keeping current config"); + error!("failed to reload configuration, keeping current config: {e}"); } } } @@ -315,28 +349,14 @@ pub(crate) fn setup_sighup_handler(state: AppState) { /// /// Returns an error if the TCP listener cannot bind or the server encounters /// a fatal I/O error. -/// -/// # Panics -/// -/// Panics if `rate_limit_per_minute` is zero. This is unreachable after -/// successful config validation. 
pub async fn run(config: Config, config_path: PathBuf) -> Result<()> { let addr = config.parsed_listen_address(); - #[allow(clippy::expect_used)] // validated by Config::validate_rate_limit() - let quota = Quota::per_minute( - NonZeroU32::new(config.rate_limit_per_minute) - .expect("rate_limit_per_minute must be greater than 0"), - ); - let rate_limiter = Arc::new(RateLimiter::dashmap(quota)); - let polling_manager = Arc::new(PollingManager::new()); - let state = AppState { config: Arc::new(RwLock::new(config)), config_path: Arc::new(config_path), build_scheduler: Arc::new(BuildScheduler::new()), - rate_limiter, - polling_manager, + polling_manager: Arc::new(PollingManager::new()), }; // Setup SIGHUP handler for configuration hot-reload @@ -345,37 +365,54 @@ pub async fn run(config: Config, config_path: PathBuf) -> Result<()> { // Start polling tasks for sites with poll_interval configured state.polling_manager.start_polling(state.clone()).await; - let listener = TcpListener::bind(addr).await?; - info!(%addr, "server listening"); + let server = Arc::new(Server::http(addr).map_err(|e| anyhow::anyhow!("failed to bind: {e}"))?); + info!("server listening on {addr}"); - run_with_listener(state, listener, async { + // Shutdown handler: signal → unblock server + let shutdown_server = Arc::clone(&server); + tokio::spawn(async move { let mut sigterm = signal(SignalKind::terminate()).expect("failed to setup SIGTERM handler"); let mut sigint = signal(SignalKind::interrupt()).expect("failed to setup SIGINT handler"); tokio::select! { _ = sigterm.recv() => info!("received SIGTERM, shutting down"), _ = sigint.recv() => info!("received SIGINT, shutting down"), } + shutdown_server.unblock(); + }); + + // Run HTTP loop on blocking thread + let handle = tokio::runtime::Handle::current(); + tokio::task::spawn_blocking(move || { + handle_requests(server, state, handle); }) - .await + .await?; + + Ok(()) } -/// Run the server on an already-bound listener with a custom shutdown signal. 
+/// Run the server with a pre-built Server, shutting down when `shutdown_signal` resolves. /// -/// This is the core server loop used by both production (`run`) and integration tests. -/// Production delegates here after binding the listener and setting up SIGHUP handlers. -/// Tests call this via `test_support::run_server` with their own listener and shutdown channel. -pub(crate) async fn run_with_listener( +/// Used by integration tests via [`test_support::run_server`]. +/// Returns a `std::thread::JoinHandle` for the request-handling thread. +#[cfg(any(test, feature = "integration"))] +pub(crate) fn run_with_server( state: AppState, - listener: TcpListener, + server: Arc<Server>, shutdown_signal: impl std::future::Future<Output = ()> + Send + 'static, -) -> Result<()> { - let router = create_router(state); +) -> std::thread::JoinHandle<()> { + let handle = tokio::runtime::Handle::current(); - axum::serve(listener, router) - .with_graceful_shutdown(shutdown_signal) - .await?; + // Shutdown: wait for signal, then unblock + let shutdown_server = Arc::clone(&server); + tokio::spawn(async move { + shutdown_signal.await; + shutdown_server.unblock(); + }); - Ok(()) + // Spawn request handler on std::thread, return handle for joining + std::thread::spawn(move || { + handle_requests(server, state, handle); + }) } #[cfg(test)] @@ -383,23 +420,13 @@ pub(crate) async fn run_with_listener( mod tests { use super::*; use crate::config::{BuildOverrides, SiteConfig}; - use axum::body::Body; - use axum::http::{Request, StatusCode}; - use axum::response::Response; use std::path::PathBuf; - use tower::ServiceExt as _; fn test_state(config: Config) -> AppState { - test_state_with_rate_limit(config, 1000) // High limit for most tests - } - - fn test_state_with_rate_limit(config: Config, rate_limit: u32) -> AppState { - let quota = Quota::per_minute(NonZeroU32::new(rate_limit).unwrap()); AppState { config: Arc::new(RwLock::new(config)), config_path: 
Arc::new(PathBuf::from("witryna.toml")), build_scheduler: Arc::new(BuildScheduler::new()), - rate_limiter: Arc::new(RateLimiter::dashmap(quota)), polling_manager: Arc::new(PollingManager::new()), } } @@ -411,7 +438,6 @@ mod tests { base_dir: PathBuf::from("/var/lib/witryna"), log_dir: PathBuf::from("/var/log/witryna"), log_level: "info".to_owned(), - rate_limit_per_minute: 10, max_builds_to_keep: 5, git_timeout: None, sites: vec![], @@ -445,221 +471,169 @@ mod tests { } } + /// Start a test server on a random port, returning the server handle, state, and port. + fn test_server(config: Config) -> (Arc<Server>, AppState, u16) { + let state = test_state(config); + let server = Arc::new(Server::http("127.0.0.1:0").unwrap()); + let port = match server.server_addr() { + tiny_http::ListenAddr::IP(a) => a.port(), + _ => unreachable!("expected IP address"), + }; + let handle = tokio::runtime::Handle::current(); + let server_clone = server.clone(); + let state_clone = state.clone(); + std::thread::spawn(move || handle_requests(server_clone, state_clone, handle)); + (server, state, port) + } + #[tokio::test] async fn health_endpoint_returns_ok() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - - let response: Response = router - .oneshot( - Request::builder() - .uri("/health") - .body(Body::empty()) - .unwrap(), - ) + let (server, _state, port) = test_server(test_config_with_sites()); + let resp = reqwest::get(format!("http://127.0.0.1:{port}/health")) .await .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - let body = axum::body::to_bytes(response.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 200); + let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["status"], "ok"); + server.unblock(); } #[tokio::test] - async fn unknown_site_post_returns_not_found() { - let state = 
test_state(test_config()); - let router = create_router(state); - - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/nonexistent") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); + async fn json_responses_have_content_type_header() { + let (server, _state, port) = test_server(test_config_with_sites()); + let resp = reqwest::get(format!("http://127.0.0.1:{port}/health")) + .await + .unwrap(); + assert_eq!( + resp.headers() + .get("content-type") + .unwrap() + .to_str() + .unwrap(), + "application/json" + ); + server.unblock(); + } - assert_eq!(response.status(), StatusCode::NOT_FOUND); - let body = axum::body::to_bytes(response.into_body(), 1024) + #[tokio::test] + async fn unknown_site_post_returns_not_found() { + let (server, _state, port) = test_server(test_config()); + let client = reqwest::Client::new(); + let resp = client + .post(format!("http://127.0.0.1:{port}/nonexistent")) + .send() .await .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 404); + let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["error"], "not_found"); + server.unblock(); } #[tokio::test] async fn deploy_known_site_with_valid_token_returns_accepted() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::ACCEPTED); - let body = axum::body::to_bytes(response.into_body(), 1024) + let (server, _state, port) = test_server(test_config_with_sites()); + let client = reqwest::Client::new(); + let resp = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .header("Authorization", "Bearer secret-token") + .send() .await .unwrap(); - 
assert!(body.is_empty()); + assert_eq!(resp.status().as_u16(), 202); + server.unblock(); } #[tokio::test] async fn deploy_missing_auth_header_returns_unauthorized() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let body = axum::body::to_bytes(response.into_body(), 1024) + let (server, _state, port) = test_server(test_config_with_sites()); + let client = reqwest::Client::new(); + let resp = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .send() .await .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 401); + let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["error"], "unauthorized"); + server.unblock(); } #[tokio::test] async fn deploy_invalid_token_returns_unauthorized() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer wrong-token") - .body(Body::empty()) - .unwrap(), - ) + let (server, _state, port) = test_server(test_config_with_sites()); + let client = reqwest::Client::new(); + let resp = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .header("Authorization", "Bearer wrong-token") + .send() .await .unwrap(); - - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let body = axum::body::to_bytes(response.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 401); + let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["error"], "unauthorized"); + server.unblock(); } 
#[tokio::test] async fn deploy_malformed_auth_header_returns_unauthorized() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - + let (server, _state, port) = test_server(test_config_with_sites()); + let client = reqwest::Client::new(); // Test without "Bearer " prefix - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "secret-token") - .body(Body::empty()) - .unwrap(), - ) + let resp = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .header("Authorization", "secret-token") + .send() .await .unwrap(); - - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let body = axum::body::to_bytes(response.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 401); + let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["error"], "unauthorized"); + server.unblock(); } #[tokio::test] async fn deploy_basic_auth_returns_unauthorized() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - + let (server, _state, port) = test_server(test_config_with_sites()); + let client = reqwest::Client::new(); // Test Basic auth instead of Bearer - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Basic dXNlcjpwYXNz") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let body = axum::body::to_bytes(response.into_body(), 1024) + let resp = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .header("Authorization", "Basic dXNlcjpwYXNz") + .send() .await .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 401); + let json: serde_json::Value = resp.json().await.unwrap(); 
assert_eq!(json["error"], "unauthorized"); + server.unblock(); } #[tokio::test] async fn deploy_get_method_not_allowed() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - - let response: Response = router - .oneshot( - Request::builder() - .method("GET") - .uri("/my-site") - .body(Body::empty()) - .unwrap(), - ) + let (server, _state, port) = test_server(test_config_with_sites()); + let resp = reqwest::get(format!("http://127.0.0.1:{port}/my-site")) .await .unwrap(); - - assert_eq!(response.status(), StatusCode::METHOD_NOT_ALLOWED); + assert_eq!(resp.status().as_u16(), 405); + server.unblock(); } #[tokio::test] async fn deploy_unknown_site_with_token_returns_not_found() { - let state = test_state(test_config_with_sites()); - let router = create_router(state); - - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/unknown-site") - .header("Authorization", "Bearer any-token") - .body(Body::empty()) - .unwrap(), - ) + let (server, _state, port) = test_server(test_config_with_sites()); + let client = reqwest::Client::new(); + let resp = client + .post(format!("http://127.0.0.1:{port}/unknown-site")) + .header("Authorization", "Bearer any-token") + .send() .await .unwrap(); - // Returns 404 before checking token (site lookup first) - assert_eq!(response.status(), StatusCode::NOT_FOUND); - let body = axum::body::to_bytes(response.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 404); + let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["error"], "not_found"); + server.unblock(); } fn test_config_with_two_sites() -> Config { @@ -669,7 +643,6 @@ mod tests { base_dir: PathBuf::from("/var/lib/witryna"), log_dir: PathBuf::from("/var/log/witryna"), log_level: "info".to_owned(), - rate_limit_per_minute: 10, max_builds_to_keep: 5, git_timeout: None, sites: vec![ @@ -721,290 
+694,92 @@ mod tests { #[tokio::test] async fn deploy_concurrent_same_site_gets_queued() { - let state = test_state(test_config_with_sites()); - let router = create_router(state.clone()); - - // First request should succeed (immediate build) - let response1: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response1.status(), StatusCode::ACCEPTED); - let body1 = axum::body::to_bytes(response1.into_body(), 1024) - .await - .unwrap(); - assert!(body1.is_empty()); - - // Second request to same site should be queued (202 with body) - let response2: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response2.status(), StatusCode::ACCEPTED); - let body2 = axum::body::to_bytes(response2.into_body(), 1024) + let (server, state, port) = test_server(test_config_with_sites()); + let client = reqwest::Client::new(); + + // Pre-mark site as building to simulate an in-progress build + state + .build_scheduler + .in_progress + .lock() + .unwrap() + .insert("my-site".to_owned()); + + // First request to same site should be queued (202 with body) + let resp1 = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .header("Authorization", "Bearer secret-token") + .send() .await .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body2).unwrap(); + assert_eq!(resp1.status().as_u16(), 202); + let json: serde_json::Value = resp1.json().await.unwrap(); assert_eq!(json["status"], "queued"); - // Third request should be collapsed (202, no body) - let response3: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - 
.unwrap(), - ) - .await - .unwrap(); - assert_eq!(response3.status(), StatusCode::ACCEPTED); - let body3 = axum::body::to_bytes(response3.into_body(), 1024) + // Second request should be collapsed (202, no body) + let resp2 = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .header("Authorization", "Bearer secret-token") + .send() .await .unwrap(); - assert!(body3.is_empty()); + assert_eq!(resp2.status().as_u16(), 202); + + server.unblock(); } #[tokio::test] async fn deploy_concurrent_different_sites_both_succeed() { - let state = test_state(test_config_with_two_sites()); - let router = create_router(state.clone()); + let (server, _state, port) = test_server(test_config_with_two_sites()); + let client = reqwest::Client::new(); // First site deployment - let response1: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/site-one") - .header("Authorization", "Bearer token-one") - .body(Body::empty()) - .unwrap(), - ) + let resp1 = client + .post(format!("http://127.0.0.1:{port}/site-one")) + .header("Authorization", "Bearer token-one") + .send() .await .unwrap(); - assert_eq!(response1.status(), StatusCode::ACCEPTED); + assert_eq!(resp1.status().as_u16(), 202); // Second site deployment should also succeed - let response2: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/site-two") - .header("Authorization", "Bearer token-two") - .body(Body::empty()) - .unwrap(), - ) + let resp2 = client + .post(format!("http://127.0.0.1:{port}/site-two")) + .header("Authorization", "Bearer token-two") + .send() .await .unwrap(); - assert_eq!(response2.status(), StatusCode::ACCEPTED); + assert_eq!(resp2.status().as_u16(), 202); + + server.unblock(); } #[tokio::test] async fn deploy_site_in_progress_checked_after_auth() { - let state = test_state(test_config_with_sites()); + let (server, state, port) = test_server(test_config_with_sites()); // Pre-mark site as building state .build_scheduler .in_progress + 
.lock() + .unwrap() .insert("my-site".to_owned()); - let router = create_router(state); + let client = reqwest::Client::new(); // Request with wrong token should return 401 (auth checked before build status) - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer wrong-token") - .body(Body::empty()) - .unwrap(), - ) + let resp = client + .post(format!("http://127.0.0.1:{port}/my-site")) + .header("Authorization", "Bearer wrong-token") + .send() .await .unwrap(); - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let body = axum::body::to_bytes(response.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(resp.status().as_u16(), 401); + let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["error"], "unauthorized"); - } - #[tokio::test] - async fn rate_limit_exceeded_returns_429() { - // Create state with rate limit of 2 per minute - let state = test_state_with_rate_limit(test_config_with_sites(), 2); - let router = create_router(state); - - // First request should succeed - let response1: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response1.status(), StatusCode::ACCEPTED); - - // Second request should succeed (or 409 if build in progress) - let response2: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - // Could be 202 or 409 depending on timing - assert!( - response2.status() == StatusCode::ACCEPTED - || response2.status() == StatusCode::CONFLICT - ); - - // Third request should hit rate limit - let response3: Response = router - .oneshot( - 
Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response3.status(), StatusCode::TOO_MANY_REQUESTS); - let body = axum::body::to_bytes(response3.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - assert_eq!(json["error"], "rate_limit_exceeded"); - } - - #[tokio::test] - async fn rate_limit_different_tokens_independent() { - // Create state with rate limit of 1 per minute - let state = test_state_with_rate_limit(test_config_with_two_sites(), 1); - let router = create_router(state); - - // First request with token-one should succeed - let response1: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/site-one") - .header("Authorization", "Bearer token-one") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response1.status(), StatusCode::ACCEPTED); - - // Second request with token-one should hit rate limit - let response2: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/site-one") - .header("Authorization", "Bearer token-one") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response2.status(), StatusCode::TOO_MANY_REQUESTS); - let body = axum::body::to_bytes(response2.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - assert_eq!(json["error"], "rate_limit_exceeded"); - - // Request with different token should still succeed - let response3: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/site-two") - .header("Authorization", "Bearer token-two") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response3.status(), StatusCode::ACCEPTED); - } - - #[tokio::test] - async fn rate_limit_checked_after_auth() { - // Create state with rate 
limit of 1 per minute - let state = test_state_with_rate_limit(test_config_with_sites(), 1); - let router = create_router(state); - - // First valid request exhausts rate limit - let response1: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer secret-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response1.status(), StatusCode::ACCEPTED); - - // Request with invalid token should return 401, not 429 - // (auth is checked before rate limit) - let response2: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/my-site") - .header("Authorization", "Bearer wrong-token") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response2.status(), StatusCode::UNAUTHORIZED); - let body = axum::body::to_bytes(response2.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - assert_eq!(json["error"], "unauthorized"); + server.unblock(); } #[tokio::test] @@ -1016,7 +791,6 @@ mod tests { base_dir: PathBuf::from("/var/lib/witryna"), log_dir: PathBuf::from("/var/log/witryna"), log_level: "info".to_owned(), - rate_limit_per_minute: 10, max_builds_to_keep: 5, git_timeout: None, sites: vec![SiteConfig { @@ -1052,7 +826,6 @@ mod tests { base_dir: PathBuf::from("/tmp/new-base"), log_dir: PathBuf::from("/tmp/new-logs"), log_level: "debug".to_owned(), - rate_limit_per_minute: 20, max_builds_to_keep: 10, git_timeout: None, sites: vec![SiteConfig { @@ -1080,7 +853,7 @@ mod tests { // Apply the same merge logic used in setup_sighup_handler let (old_listen, old_base, old_log_dir, old_log_level) = { - let old_config = state.config.read().await; + let old_config = state.config.read().unwrap(); ( old_config.listen_address.clone(), old_config.base_dir.clone(), @@ -1095,21 +868,30 @@ mod tests { final_config.log_dir = old_log_dir; final_config.log_level = old_log_level; - 
*state.config.write().await = final_config; - - // Verify non-reloadable fields are preserved - let config = state.config.read().await; - assert_eq!(config.listen_address, "127.0.0.1:8080"); - assert_eq!(config.base_dir, PathBuf::from("/var/lib/witryna")); - assert_eq!(config.log_dir, PathBuf::from("/var/log/witryna")); - assert_eq!(config.log_level, "info"); - - // Verify reloadable fields are updated - assert_eq!(config.container_runtime, "docker"); - assert_eq!(config.rate_limit_per_minute, 20); - assert_eq!(config.max_builds_to_keep, 10); - assert_eq!(config.sites.len(), 1); - assert_eq!(config.sites[0].name, "new-site"); + *state.config.write().unwrap() = final_config; + + // Verify non-reloadable fields are preserved and reloadable fields are updated + let (listen, base, log_d, log_l, runtime, max_builds, sites_len, site_name) = { + let config = state.config.read().unwrap(); + ( + config.listen_address.clone(), + config.base_dir.clone(), + config.log_dir.clone(), + config.log_level.clone(), + config.container_runtime.clone(), + config.max_builds_to_keep, + config.sites.len(), + config.sites[0].name.clone(), + ) + }; + assert_eq!(listen, "127.0.0.1:8080"); + assert_eq!(base, PathBuf::from("/var/lib/witryna")); + assert_eq!(log_d, PathBuf::from("/var/log/witryna")); + assert_eq!(log_l, "info"); + assert_eq!(runtime, "docker"); + assert_eq!(max_builds, 10); + assert_eq!(sites_len, 1); + assert_eq!(site_name, "new-site"); } fn test_config_with_disabled_auth() -> Config { @@ -1140,80 +922,34 @@ mod tests { #[tokio::test] async fn deploy_disabled_auth_returns_accepted() { - let state = test_state(test_config_with_disabled_auth()); - let router = create_router(state); + let (server, _state, port) = test_server(test_config_with_disabled_auth()); + let client = reqwest::Client::new(); // Request without Authorization header should succeed - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/open-site") - .body(Body::empty()) - 
.unwrap(), - ) + let resp = client + .post(format!("http://127.0.0.1:{port}/open-site")) + .send() .await .unwrap(); + assert_eq!(resp.status().as_u16(), 202); - assert_eq!(response.status(), StatusCode::ACCEPTED); + server.unblock(); } #[tokio::test] async fn deploy_disabled_auth_ignores_token() { - let state = test_state(test_config_with_disabled_auth()); - let router = create_router(state); + let (server, _state, port) = test_server(test_config_with_disabled_auth()); + let client = reqwest::Client::new(); // Request WITH a Bearer token should also succeed (token ignored) - let response: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/open-site") - .header("Authorization", "Bearer any-token") - .body(Body::empty()) - .unwrap(), - ) + let resp = client + .post(format!("http://127.0.0.1:{port}/open-site")) + .header("Authorization", "Bearer any-token") + .send() .await .unwrap(); + assert_eq!(resp.status().as_u16(), 202); - assert_eq!(response.status(), StatusCode::ACCEPTED); - } - - #[tokio::test] - async fn deploy_disabled_auth_rate_limited_by_site_name() { - let state = test_state_with_rate_limit(test_config_with_disabled_auth(), 1); - let router = create_router(state); - - // First request should succeed - let response1: Response = router - .clone() - .oneshot( - Request::builder() - .method("POST") - .uri("/open-site") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response1.status(), StatusCode::ACCEPTED); - - // Second request should hit rate limit (keyed by site name) - let response2: Response = router - .oneshot( - Request::builder() - .method("POST") - .uri("/open-site") - .body(Body::empty()) - .unwrap(), - ) - .await - .unwrap(); - assert_eq!(response2.status(), StatusCode::TOO_MANY_REQUESTS); - let body = axum::body::to_bytes(response2.into_body(), 1024) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - assert_eq!(json["error"], 
"rate_limit_exceeded"); + server.unblock(); } } diff --git a/src/state.rs b/src/state.rs new file mode 100644 index 0000000..be4e981 --- /dev/null +++ b/src/state.rs @@ -0,0 +1,311 @@ +use anyhow::Result; +use log::warn; +use std::path::Path; + +/// A single build record within the site state. +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct BuildEntry { + /// Build phase: "building", "success", "failed", "hook failed". + pub status: String, + /// Build timestamp (YYYYMMDD-HHMMSS-microseconds). + pub timestamp: String, + /// ISO 8601 UTC when the build started (for elapsed time calculation). + pub started_at: String, + /// Short git commit hash, or empty string if unknown. + pub git_commit: String, + /// Human-readable duration ("45s", "2m 30s"), empty while building. + pub duration: String, + /// Path to the build log file. + pub log: String, +} + +/// Persistent per-site build state, written to `{base_dir}/builds/{site}/state.json`. +/// +/// Contains the full build history and the currently active build timestamp. +/// The CLI `status` command reads only this file — no log parsing needed. +#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)] +pub struct SiteState { + /// Timestamp of the currently active build (empty if none). + pub current: String, + /// All builds, newest first. + pub builds: Vec<BuildEntry>, +} + +/// Atomically write site state to `{base_dir}/builds/{site_name}/state.json`. +/// +/// Uses temp-file + rename for atomic writes. Creates parent directories +/// if they don't exist. Errors are non-fatal — callers should log and continue. +/// +/// # Errors +/// +/// Returns an error if directory creation, JSON serialization, or the atomic write/rename fails. 
+pub async fn save_state(base_dir: &Path, site_name: &str, state: &SiteState) -> Result<()> {
+    let builds_dir = base_dir.join("builds").join(site_name);
+    tokio::fs::create_dir_all(&builds_dir).await?;
+
+    let state_path = builds_dir.join("state.json");
+    let tmp_path = builds_dir.join("state.json.tmp");
+
+    // Write to a sibling temp file, then rename: a rename within one directory
+    // replaces state.json in a single step, so readers never see a partial file.
+    // NOTE(review): no fsync before the rename and the temp name is fixed, so two
+    // concurrent writers for the same site could race — confirm callers serialize
+    // writes per site.
+    let json = serde_json::to_string_pretty(state)?;
+    tokio::fs::write(&tmp_path, json.as_bytes()).await?;
+    tokio::fs::rename(&tmp_path, &state_path).await?;
+
+    Ok(())
+}
+
+/// Load site state from `{base_dir}/builds/{site_name}/state.json`.
+///
+/// Returns the default empty state if the file is missing or cannot be parsed.
+pub async fn load_state(base_dir: &Path, site_name: &str) -> SiteState {
+    let state_path = base_dir.join("builds").join(site_name).join("state.json");
+
+    // A missing file is the normal first-run case — return the default silently.
+    let Ok(content) = tokio::fs::read_to_string(&state_path).await else {
+        return SiteState::default();
+    };
+
+    // A malformed file is logged but not fatal: the build history resets to empty
+    // rather than blocking deploys.
+    match serde_json::from_str(&content) {
+        Ok(state) => state,
+        Err(e) => {
+            warn!(
+                "[{site_name}] malformed state.json: {e} (path={})",
+                state_path.display()
+            );
+            SiteState::default()
+        }
+    }
+}
+
+/// Add a new build entry to the front of the builds list. Best-effort.
+pub async fn push_build(base_dir: &Path, site_name: &str, entry: BuildEntry) {
+    // Read-modify-write; not atomic across concurrent callers (see save_state note).
+    let mut state = load_state(base_dir, site_name).await;
+    state.builds.insert(0, entry);
+    if let Err(e) = save_state(base_dir, site_name, &state).await {
+        warn!("[{site_name}] failed to write state after push_build: {e}");
+    }
+}
+
+/// Update the most recent build entry in-place. Best-effort.
+///
+/// Does nothing if the builds list is empty.
+pub async fn update_latest_build(
+    base_dir: &Path,
+    site_name: &str,
+    updater: impl FnOnce(&mut BuildEntry),
+) {
+    let mut state = load_state(base_dir, site_name).await;
+    // builds[0] is the newest entry (push_build prepends), so "latest" == first.
+    if let Some(entry) = state.builds.first_mut() {
+        updater(entry);
+        if let Err(e) = save_state(base_dir, site_name, &state).await {
+            warn!("[{site_name}] failed to write state after update_latest_build: {e}");
+        }
+    }
+}
+
+/// Set the currently active build timestamp. Best-effort.
+pub async fn set_current(base_dir: &Path, site_name: &str, timestamp: &str) {
+    let mut state = load_state(base_dir, site_name).await;
+    state.current = timestamp.to_owned();
+    if let Err(e) = save_state(base_dir, site_name, &state).await {
+        warn!("[{site_name}] failed to write state after set_current: {e}");
+    }
+}
+
+/// Remove build entries whose timestamps match any in `timestamps`. Best-effort.
+pub async fn remove_builds(base_dir: &Path, site_name: &str, timestamps: &[String]) {
+    if timestamps.is_empty() {
+        return;
+    }
+    let mut state = load_state(base_dir, site_name).await;
+    let before = state.builds.len();
+    state.builds.retain(|b| !timestamps.contains(&b.timestamp));
+    if state.builds.len() == before {
+        return; // nothing changed
+    }
+    // NOTE(review): `current` is left untouched even if the removed set includes
+    // the current timestamp — confirm cleanup never prunes the active build.
+    if let Err(e) = save_state(base_dir, site_name, &state).await {
+        warn!("[{site_name}] failed to write state after remove_builds: {e}");
+    }
+}
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod tests {
+    use super::*;
+    use crate::test_support::{cleanup, temp_dir};
+
+    // Canonical entry used by every test; individual tests override `timestamp`.
+    fn test_entry() -> BuildEntry {
+        BuildEntry {
+            status: "building".to_owned(),
+            timestamp: "20260210-120000-000000".to_owned(),
+            started_at: "2026-02-10T12:00:00Z".to_owned(),
+            git_commit: "abc123d".to_owned(),
+            duration: String::new(),
+            log: "/var/log/witryna/my-site/20260210-120000-000000.log".to_owned(),
+        }
+    }
+
+    fn test_state() -> SiteState {
+        SiteState {
+            current: String::new(),
+            builds: vec![test_entry()],
+        }
+    }
+
+    #[tokio::test]
+    async fn save_and_load_roundtrip() {
+        let
base_dir = temp_dir("state-test").await;
+        let state = test_state();
+
+        save_state(&base_dir, "my-site", &state).await.unwrap();
+
+        let loaded = load_state(&base_dir, "my-site").await;
+        assert_eq!(loaded.builds.len(), 1);
+        let b = &loaded.builds[0];
+        assert_eq!(b.status, "building");
+        assert_eq!(b.timestamp, "20260210-120000-000000");
+        assert_eq!(b.started_at, "2026-02-10T12:00:00Z");
+        assert_eq!(b.git_commit, "abc123d");
+        assert_eq!(b.duration, "");
+        assert!(b.log.contains("20260210-120000-000000.log"));
+        assert_eq!(loaded.current, "");
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn load_state_missing_file_returns_default() {
+        let base_dir = temp_dir("state-test").await;
+
+        let loaded = load_state(&base_dir, "nonexistent").await;
+        assert!(loaded.builds.is_empty());
+        assert_eq!(loaded.current, "");
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn load_state_malformed_json_returns_default() {
+        let base_dir = temp_dir("state-test").await;
+        let state_dir = base_dir.join("builds").join("bad-site");
+        tokio::fs::create_dir_all(&state_dir).await.unwrap();
+        tokio::fs::write(state_dir.join("state.json"), "not valid json{{{")
+            .await
+            .unwrap();
+
+        let loaded = load_state(&base_dir, "bad-site").await;
+        assert!(loaded.builds.is_empty());
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn save_state_atomic_no_tmp_left() {
+        let base_dir = temp_dir("state-test").await;
+        let state = test_state();
+
+        save_state(&base_dir, "my-site", &state).await.unwrap();
+
+        let tmp_path = base_dir
+            .join("builds")
+            .join("my-site")
+            .join("state.json.tmp");
+        assert!(!tmp_path.exists(), "temp file should not remain");
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn push_build_prepends() {
+        let base_dir = temp_dir("state-test").await;
+
+        let entry1 = test_entry();
+        push_build(&base_dir, "my-site", entry1).await;
+
+        let mut entry2 = test_entry();
+        entry2.timestamp = "20260210-130000-000000".to_owned();
+        push_build(&base_dir, "my-site", entry2).await;
+
+        let loaded = load_state(&base_dir, "my-site").await;
+        assert_eq!(loaded.builds.len(), 2);
+        assert_eq!(loaded.builds[0].timestamp, "20260210-130000-000000");
+        assert_eq!(loaded.builds[1].timestamp, "20260210-120000-000000");
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn update_latest_build_modifies_first() {
+        let base_dir = temp_dir("state-test").await;
+
+        push_build(&base_dir, "my-site", test_entry()).await;
+
+        update_latest_build(&base_dir, "my-site", |e| {
+            e.status = "success".to_owned();
+            e.duration = "30s".to_owned();
+        })
+        .await;
+
+        let loaded = load_state(&base_dir, "my-site").await;
+        assert_eq!(loaded.builds[0].status, "success");
+        assert_eq!(loaded.builds[0].duration, "30s");
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn set_current_updates_field() {
+        let base_dir = temp_dir("state-test").await;
+
+        push_build(&base_dir, "my-site", test_entry()).await;
+        set_current(&base_dir, "my-site", "20260210-120000-000000").await;
+
+        let loaded = load_state(&base_dir, "my-site").await;
+        assert_eq!(loaded.current, "20260210-120000-000000");
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn remove_builds_prunes_entries() {
+        let base_dir = temp_dir("state-test").await;
+
+        let mut e1 = test_entry();
+        e1.timestamp = "20260210-100000-000000".to_owned();
+        let mut e2 = test_entry();
+        e2.timestamp = "20260210-110000-000000".to_owned();
+        let mut e3 = test_entry();
+        e3.timestamp = "20260210-120000-000000".to_owned();
+
+        push_build(&base_dir, "my-site", e3).await;
+        push_build(&base_dir, "my-site", e2).await;
+        push_build(&base_dir, "my-site", e1).await;
+
+        remove_builds(
+            &base_dir,
+            "my-site",
+            &[
+                "20260210-100000-000000".to_owned(),
+                "20260210-120000-000000".to_owned(),
+            ],
+        )
+        .await;
+
+        let loaded = load_state(&base_dir, "my-site").await;
+        assert_eq!(loaded.builds.len(), 1);
+        assert_eq!(loaded.builds[0].timestamp,
"20260210-110000-000000");
+
+        cleanup(&base_dir).await;
+    }
+
+    #[tokio::test]
+    async fn remove_builds_empty_list_is_noop() {
+        let base_dir = temp_dir("state-test").await;
+
+        push_build(&base_dir, "my-site", test_entry()).await;
+        remove_builds(&base_dir, "my-site", &[]).await;
+
+        let loaded = load_state(&base_dir, "my-site").await;
+        assert_eq!(loaded.builds.len(), 1);
+
+        cleanup(&base_dir).await;
+    }
+}
diff --git a/src/test_support.rs b/src/test_support.rs
index 8f2d2bf..d6a0a96 100644
--- a/src/test_support.rs
+++ b/src/test_support.rs
@@ -7,24 +7,21 @@
 #![allow(clippy::unwrap_used, clippy::expect_used)]
 
-use crate::server::{AppState, run_with_listener};
-use anyhow::Result;
+use crate::server::{AppState, run_with_server};
 use std::path::{Path, PathBuf};
-use tokio::net::TcpListener;
+use std::sync::Arc;
+use tiny_http::Server;
 
-/// Start the HTTP server on the given listener, shutting down when `shutdown` resolves.
+/// Start the HTTP server, returning a `JoinHandle` for the request-handling thread.
 ///
-/// The server behaves identically to production — same middleware, same handlers.
-///
-/// # Errors
-///
-/// Returns an error if the server encounters a fatal I/O error.
-pub async fn run_server(
+/// The server shuts down when `shutdown` resolves (calls `server.unblock()`).
+/// Callers should join the handle after triggering shutdown.
+pub fn run_server(
     state: AppState,
-    listener: TcpListener,
+    server: Arc<Server>,
     shutdown: impl std::future::Future<Output = ()> + Send + 'static,
-) -> Result<()> {
-    run_with_listener(state, listener, shutdown).await
+) -> std::thread::JoinHandle<()> {
+    run_with_server(state, server, shutdown)
 }
 
 /// Install the SIGHUP configuration-reload handler for `state`.
diff --git a/src/time.rs b/src/time.rs
new file mode 100644
index 0000000..2e084a8
--- /dev/null
+++ b/src/time.rs
@@ -0,0 +1,222 @@
+use std::time::{SystemTime, UNIX_EPOCH};
+
+/// Format as `YYYYMMDD-HHMMSS-ffffff` (build directories and log files).
+#[must_use]
+pub fn format_build_timestamp(t: SystemTime) -> String {
+    // duration_since() fails for t < UNIX_EPOCH; unwrap_or_default() yields
+    // Duration::ZERO, so any pre-1970 time formats as the epoch itself.
+    let dur = t.duration_since(UNIX_EPOCH).unwrap_or_default();
+    let (year, month, day, hour, min, sec) = epoch_to_civil(dur.as_secs());
+    let us = dur.subsec_micros();
+    format!("{year:04}{month:02}{day:02}-{hour:02}{min:02}{sec:02}-{us:06}")
+}
+
+/// Format as `YYYY-MM-DDTHH:MM:SSZ` (state.json `started_at`, second precision).
+#[must_use]
+pub fn format_rfc3339(t: SystemTime) -> String {
+    // Same pre-epoch clamp as format_build_timestamp above.
+    let dur = t.duration_since(UNIX_EPOCH).unwrap_or_default();
+    let (year, month, day, hour, min, sec) = epoch_to_civil(dur.as_secs());
+    format!("{year:04}-{month:02}-{day:02}T{hour:02}:{min:02}:{sec:02}Z")
+}
+
+/// Format as `YYYY-MM-DDTHH:MM:SS.mmmZ` (logger console output, millisecond precision, UTC).
+#[must_use]
+pub fn format_log_timestamp(t: SystemTime) -> String {
+    let dur = t.duration_since(UNIX_EPOCH).unwrap_or_default();
+    let (year, month, day, hour, min, sec) = epoch_to_civil(dur.as_secs());
+    let ms = dur.subsec_millis();
+    format!("{year:04}-{month:02}-{day:02}T{hour:02}:{min:02}:{sec:02}.{ms:03}Z")
+}
+
+/// Parse `YYYY-MM-DDTHH:MM:SSZ` back to `SystemTime`. Only handles `Z` suffix.
+#[must_use]
+pub fn parse_rfc3339(s: &str) -> Option<SystemTime> {
+    // Accept exactly "YYYY-MM-DDTHH:MM:SSZ" (19 chars + 'Z'); offsets rejected.
+    let s = s.strip_suffix('Z')?;
+    if s.len() != 19 {
+        return None;
+    }
+    let bytes = s.as_bytes();
+    if bytes.get(4) != Some(&b'-')
+        || bytes.get(7) != Some(&b'-')
+        || bytes.get(10) != Some(&b'T')
+        || bytes.get(13) != Some(&b':')
+        || bytes.get(16) != Some(&b':')
+    {
+        return None;
+    }
+    let year: u16 = s.get(0..4)?.parse().ok()?;
+    let month: u8 = s.get(5..7)?.parse().ok()?;
+    let day: u8 = s.get(8..10)?.parse().ok()?;
+    let hour: u8 = s.get(11..13)?.parse().ok()?;
+    let min: u8 = s.get(14..16)?.parse().ok()?;
+    let sec: u8 = s.get(17..19)?.parse().ok()?;
+    if !(1..=12).contains(&month) || hour > 23 || min > 59 || sec > 59 {
+        return None;
+    }
+    // Validate the day against the actual month length (leap-year aware).
+    // A flat 1..=31 check would accept impossible dates like 2026-02-31, which
+    // civil_to_epoch silently wraps into the following month, breaking the
+    // parse/format roundtrip invariant.
+    let leap = year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
+    let max_day = match month {
+        2 => {
+            if leap {
+                29
+            } else {
+                28
+            }
+        }
+        4 | 6 | 9 | 11 => 30,
+        _ => 31,
+    };
+    if day < 1 || day > max_day {
+        return None;
+    }
+    let epoch = civil_to_epoch(year, month, day, hour, min, sec);
+    Some(UNIX_EPOCH + std::time::Duration::from_secs(epoch))
+}
+
+/// Convert Unix epoch seconds to (year, month, day, hour, minute, second).
+/// Uses Howard Hinnant's `civil_from_days` algorithm.
+///
+/// # Safety (casts)
+/// All `as` casts are bounded by the civil-date algorithm:
+/// year fits u16 (0–9999), month/day/h/m/s fit u8, day-count fits i64.
+#[expect(
+    clippy::cast_possible_truncation,
+    clippy::cast_possible_wrap,
+    clippy::cast_sign_loss,
+    reason = "Hinnant civil_from_days algorithm: values bounded by calendar math"
+)]
+const fn epoch_to_civil(secs: u64) -> (u16, u8, u8, u8, u8, u8) {
+    let day_secs = secs % 86400;
+    // 719_468 shifts the day count so day 0 is 0000-03-01: a March-based year puts
+    // the leap day at the end, which simplifies the month/era arithmetic below.
+    let days = (secs / 86400) as i64 + 719_468;
+    // One era = 400 years = 146_097 days (the Gregorian cycle).
+    let era = days.div_euclid(146_097);
+    let doe = days.rem_euclid(146_097) as u64; // day of era [0, 146096]
+    let yoe = (doe - doe / 1460 + doe / 36524 - doe / 146_096) / 365; // year of era
+    let year = (yoe as i64) + era * 400;
+    let doy = doe - (365 * yoe + yoe / 4 - yoe / 100); // day of year [0, 365]
+    let mp = (5 * doy + 2) / 153; // [0, 11]
+    let day = (doy - (153 * mp + 2) / 5 + 1) as u8;
+    // mp counts from March (0 = Mar); convert back to the civil 1..=12 numbering.
+    let month = if mp < 10 { mp + 3 } else { mp - 9 } as u8;
+    let year = if month <= 2 { year + 1 } else { year } as u16;
+    let hour = (day_secs / 3600) as u8;
+    let min = ((day_secs % 3600) / 60) as u8;
+    let sec = (day_secs % 60) as u8;
+    (year, month, day, hour, min, sec)
+}
+
+/// Convert (year, month, day, hour, minute, second) to Unix epoch seconds.
+///
+/// # Safety (casts)
+/// All `as` casts are bounded by the civil-date algorithm:
+/// year fits i64, month/day/h/m/s fit u64, `doe` fits i64 (0–146096).
+/// Final `as u64` is non-negative for all valid civil dates.
+#[expect(
+    clippy::cast_possible_wrap,
+    clippy::cast_sign_loss,
+    reason = "Hinnant civil_from_days algorithm: values bounded by calendar math"
+)]
+const fn civil_to_epoch(year: u16, month: u8, day: u8, hour: u8, min: u8, sec: u8) -> u64 {
+    // Inverse of epoch_to_civil: same March-based year trick (Hinnant days_from_civil).
+    let year = (year as i64) - (month <= 2) as i64;
+    let era = year.div_euclid(400);
+    let yoe = year.rem_euclid(400) as u64; // year of era [0, 399]
+    let m_adj = if month > 2 {
+        (month as u64) - 3
+    } else {
+        (month as u64) + 9
+    }; // [0, 11]
+    let doy = (153 * m_adj + 2) / 5 + (day as u64) - 1; // day of year [0, 365]
+    let doe = yoe * 365 + yoe / 4 - yoe / 100 + doy; // day of era [0, 146096]
+    let days = (era * 146_097 + doe as i64 - 719_468) as u64;
+    days * 86400 + (hour as u64) * 3600 + (min as u64) * 60 + (sec as u64)
+}
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod tests {
+    use super::*;
+    use std::time::Duration;
+
+    #[test]
+    fn format_build_timestamp_unix_epoch() {
+        assert_eq!(format_build_timestamp(UNIX_EPOCH), "19700101-000000-000000");
+    }
+
+    #[test]
+    fn format_build_timestamp_format() {
+        let s = format_build_timestamp(SystemTime::now());
+        let parts: Vec<&str> = s.split('-').collect();
+        assert_eq!(parts.len(), 3, "expected 3 dash-separated parts, got: {s}");
+        assert_eq!(parts[0].len(), 8, "date part should be 8 digits");
+        assert_eq!(parts[1].len(), 6, "time part should be 6 digits");
+        assert_eq!(parts[2].len(), 6, "micros part should be 6 digits");
+        assert!(parts.iter().all(|p| p.chars().all(|c| c.is_ascii_digit())));
+    }
+
+    #[test]
+    fn format_rfc3339_unix_epoch() {
+        assert_eq!(format_rfc3339(UNIX_EPOCH), "1970-01-01T00:00:00Z");
+    }
+
+    #[test]
+    fn format_log_timestamp_unix_epoch() {
+        assert_eq!(format_log_timestamp(UNIX_EPOCH), "1970-01-01T00:00:00.000Z");
+    }
+
+    #[test]
+    fn format_rfc3339_known_date() {
+        // 2024-02-29T12:30:45Z (leap year)
+        let secs = civil_to_epoch(2024, 2, 29, 12, 30, 45);
+        let t = UNIX_EPOCH + Duration::from_secs(secs);
+        assert_eq!(format_rfc3339(t), "2024-02-29T12:30:45Z");
+    }
+
+    #[test]
+    fn parse_rfc3339_roundtrip() {
+        let now = SystemTime::now();
+        let s = format_rfc3339(now);
+        let parsed = parse_rfc3339(&s).unwrap();
+        // Roundtrip loses sub-second precision, so compare formatted output
+        assert_eq!(format_rfc3339(parsed), s);
+    }
+
+    #[test]
+    fn parse_rfc3339_valid() {
+        let t = parse_rfc3339("2026-02-13T14:30:00Z").unwrap();
+        assert_eq!(format_rfc3339(t), "2026-02-13T14:30:00Z");
+    }
+
+    #[test]
+    fn parse_rfc3339_rejects_plus_offset() {
+        assert!(parse_rfc3339("2026-02-13T14:30:00+00:00").is_none());
+    }
+
+    #[test]
+    fn parse_rfc3339_rejects_garbage() {
+        assert!(parse_rfc3339("not-a-date").is_none());
+        assert!(parse_rfc3339("").is_none());
+        assert!(parse_rfc3339("2026-13-01T00:00:00Z").is_none()); // month 13
+        assert!(parse_rfc3339("2026-00-01T00:00:00Z").is_none()); // month 0
+    }
+
+    #[test]
+    fn epoch_to_civil_roundtrip() {
+        let dates: &[(u16, u8, u8, u8, u8, u8)] = &[
+            (1970, 1, 1, 0, 0, 0),
+            (2000, 1, 1, 0, 0, 0),
+            (2024, 2, 29, 23, 59, 59), // leap year
+            (2024, 12, 31, 12, 0, 0),
+            (2026, 2, 13, 14, 30, 0),
+        ];
+        for &(y, m, d, h, min, s) in dates {
+            let epoch = civil_to_epoch(y, m, d, h, min, s);
+            let (y2, m2, d2, h2, min2, s2) = epoch_to_civil(epoch);
+            assert_eq!(
+                (y, m, d, h, min, s),
+                (y2, m2, d2, h2, min2, s2),
+                "roundtrip failed for {y}-{m:02}-{d:02}T{h:02}:{min:02}:{s:02}Z"
+            );
+        }
+    }
+
+    #[test]
+    fn format_log_timestamp_millisecond_precision() {
+        let t = UNIX_EPOCH + Duration::from_millis(1_234_567_890_123);
+        let s = format_log_timestamp(t);
+        assert!(s.ends_with("Z"));
+        assert!(s.contains('.'));
+        // The milliseconds portion should be 3 digits
+        let dot_pos = s.find('.').unwrap();
+        assert_eq!(&s[dot_pos + 4..], "Z");
+    }
+
+    #[test]
+    fn format_build_timestamp_microsecond_precision() {
+        let t = UNIX_EPOCH + Duration::from_micros(1_234_567_890_123_456);
+        let s = format_build_timestamp(t);
+        // Last 6 chars before end should be microseconds
+        let parts: Vec<&str> = s.split('-').collect();
+        assert_eq!(parts.len(), 3);
+        assert_eq!(parts[2].len(), 6);
+    }
+}
|
