summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDawid Rycerz <dawid@rycerz.xyz>2026-02-15 21:27:00 +0100
committerDawid Rycerz <dawid@rycerz.xyz>2026-02-15 21:27:00 +0100
commitce0dbf6b249956700c6a1705bf4ad85a09d53e8c (patch)
treed7c3236807cfbf75d7f3a355eb5df5a5e2cc4ad7
parent064a1d01c5c14f5ecc032fa9b8346a4a88b893f6 (diff)
feat: witryna 0.2.0HEADv0.2.0main
Switch, cleanup, and status CLI commands. Persistent build state via state.json. Post-deploy hooks on success and failure with WITRYNA_BUILD_STATUS. Dependency diet (axum→tiny_http, clap→argh, tracing→log). Drop built-in rate limiting. Nix flake with NixOS module. Arch Linux PKGBUILD. Centralized version management. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
-rw-r--r--.gitignore7
-rw-r--r--AGENTS.md53
-rw-r--r--CHANGELOG.md55
-rw-r--r--Cargo.lock913
-rw-r--r--Cargo.toml33
-rw-r--r--Justfile67
-rw-r--r--README.md38
-rw-r--r--arch/PKGBUILD70
-rw-r--r--arch/witryna.install15
-rw-r--r--arch/witryna.sysusers1
-rw-r--r--arch/witryna.tmpfiles5
-rw-r--r--build.rs150
-rw-r--r--examples/caddy/Caddyfile13
-rwxr-xr-xexamples/hooks/caddy-deploy.sh11
-rw-r--r--examples/nginx/witryna.conf8
-rw-r--r--examples/witryna.toml3
-rw-r--r--examples/witryna.yaml1
-rw-r--r--flake.lock27
-rw-r--r--flake.nix68
-rw-r--r--man/witryna.1131
-rw-r--r--man/witryna.toml.524
-rw-r--r--nix/module.nix68
-rw-r--r--scripts/witryna.service2
-rw-r--r--src/build.rs131
-rw-r--r--src/build_guard.rs43
-rw-r--r--src/cleanup.rs92
-rw-r--r--src/cli.rs303
-rw-r--r--src/config.rs73
-rw-r--r--src/git.rs71
-rw-r--r--src/hook.rs45
-rw-r--r--src/lib.rs9
-rw-r--r--src/logger.rs121
-rw-r--r--src/logs.rs291
-rw-r--r--src/main.rs370
-rw-r--r--src/pipeline.rs234
-rw-r--r--src/polling.rs72
-rw-r--r--src/publish.rs25
-rw-r--r--src/server.rs1026
-rw-r--r--src/state.rs311
-rw-r--r--src/test_support.rs23
-rw-r--r--src/time.rs222
-rw-r--r--tests/integration/cache.rs2
-rw-r--r--tests/integration/cleanup.rs3
-rw-r--r--tests/integration/cli_cleanup.rs341
-rw-r--r--tests/integration/cli_run.rs16
-rw-r--r--tests/integration/cli_status.rs544
-rw-r--r--tests/integration/cli_switch.rs330
-rw-r--r--tests/integration/cli_validate.rs137
-rw-r--r--tests/integration/concurrent.rs8
-rw-r--r--tests/integration/env_vars.rs4
-rw-r--r--tests/integration/harness.rs77
-rw-r--r--tests/integration/hooks.rs148
-rw-r--r--tests/integration/main.rs4
-rw-r--r--tests/integration/polling.rs1
-rw-r--r--tests/integration/rate_limit.rs114
-rw-r--r--tests/integration/sighup.rs34
56 files changed, 4449 insertions, 2539 deletions
diff --git a/.gitignore b/.gitignore
index 0abb5a0..0fa88b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,5 +23,12 @@ Thumbs.db
SPRINT.md
ARCHITECTURE.md
+# Nix
+/result
+
+# Arch packaging build artifacts
+/arch/src/
+/arch/witryna/
+
# Temporary files
/tmp/
diff --git a/AGENTS.md b/AGENTS.md
index 97b3aab..62292a7 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -22,9 +22,11 @@ This project follows a **minimal philosophy**: software should be simple, minima
witryna serve # Start the deployment server
witryna validate # Validate config and print summary
witryna run <site> [-v] # One-off build (synchronous)
-witryna status [-s <site>] [--json] # Deployment status
+witryna status [site] [--json] # Deployment status
+witryna switch <site> <build> # Switch active build (rollback)
+witryna cleanup [site] [--keep N] # Remove old builds/logs (manual)
# Config discovery: ./witryna.toml → $XDG_CONFIG_HOME/witryna/witryna.toml → /etc/witryna/witryna.toml
-# Override with: witryna --config /path/to/witryna.toml <command>
+# Override with: witryna <command> --config /path/to/witryna.toml
```
### Development
@@ -37,10 +39,8 @@ just test # Run unit tests
just test-integration # Run integration tests (Tier 1 + Tier 2)
just test-integration-serial # Integration tests with --test-threads=1 (for SIGHUP)
just test-all # All lints + unit tests + integration tests
+just lint-picky # Pedantic + nursery clippy lints (warnings only, not gated)
just pre-commit # Mirrors lefthook pre-commit checks
-just man-1 # View witryna(1) man page (needs cargo build first)
-just man-5 # View witryna.toml(5) man page
-
# Cargo (direct)
cargo build # Build the project
cargo run # Run the application
@@ -51,7 +51,7 @@ cargo check # Type-check without building
### Core Components
-1. **HTTP Server (axum)**: Listens on localhost, handles webhook POST requests
+1. **HTTP Server (tiny_http)**: Listens on localhost, handles webhook POST requests
2. **Site Manager**: Manages site configurations from `witryna.toml`
3. **Build Executor**: Runs containerized builds via Podman/Docker
4. **Asset Publisher**: Atomic symlink switching for zero-downtime deployments
@@ -68,7 +68,8 @@ cargo check # Type-check without building
├── clones/{site-name}/ # Git repository clones
├── builds/{site-name}/
│ ├── {timestamp}/ # Timestamped build outputs
-│ └── current -> {latest} # Symlink to current build
+│ ├── current -> {latest} # Symlink to current build
+│ └── state.json # Current build state (building/success/failed)
└── cache/{site-name}/ # Persistent build caches
/var/log/witryna/
@@ -83,7 +84,6 @@ cargo check # Type-check without building
- `202 Accepted` - Build triggered (immediate or queued)
- `401 Unauthorized` - Invalid token (only when `webhook_token` is configured; `{"error": "unauthorized"}`)
- `404 Not Found` - Unknown site (`{"error": "not_found"}`)
- - `429 Too Many Requests` - Rate limit exceeded (`{"error": "rate_limit_exceeded"}`)
### System Diagram
@@ -118,6 +118,7 @@ Upon receiving a valid webhook request, Witryna executes asynchronously:
1. **Acquire Lock / Queue:** Per-site non-blocking lock. If a build is in progress, the request is queued (depth-1, latest-wins). Queued rebuilds run after the current build completes.
2. **Determine Paths:** Construct clone/build paths from `base_dir` and `site_name`.
+2b. **Write Build State:** Write `state.json` with `"building"` status (enables `witryna status` to show in-progress builds).
3. **Fetch Source Code:** `git clone` if first time, `git pull` otherwise.
3b. **Initialize Submodules:** If `.gitmodules` exists, run `git submodule sync --recursive` (pull only) then `git submodule update --init --recursive [--depth N]`.
4. **Parse Repository Config:** Read build config (`.witryna.yaml` / `witryna.yaml` / custom `config_file`) or use `witryna.toml` overrides.
@@ -136,7 +137,8 @@ Upon receiving a valid webhook request, Witryna executes asynchronously:
node:20-alpine sh -c "npm install && npm run build"
```
6. **Publish Assets:** Copy built `public` dir to timestamped directory, atomically switch symlink via `ln -sfn`.
-6b. **Post-Deploy Hook (Optional):** Run `post_deploy` command with `WITRYNA_SITE`, `WITRYNA_BUILD_DIR`, `WITRYNA_BUILD_TIMESTAMP` env vars. 30s timeout, non-fatal on failure.
+6b. **Post-Deploy Hook (Optional):** Run `post_deploy` command with `WITRYNA_SITE`, `WITRYNA_BUILD_DIR`, `WITRYNA_PUBLIC_DIR`, `WITRYNA_BUILD_TIMESTAMP`, `WITRYNA_BUILD_STATUS` env vars. Hooks run on both success and failure. 30s timeout, non-fatal on failure.
+6c. **Update Build State:** Write `state.json` with final status (`"success"`, `"failed"`, or `"hook failed"`).
7. **Release Lock:** Release the per-site lock.
8. **Log Outcome:** Log success or failure.
@@ -186,7 +188,7 @@ cargo test --features integration overrides # Build config override tests
#### Test Tiers
-- **Tier 1 (no container runtime needed):** health, auth (401), 404, concurrent build (409), rate limit (429), edge cases, SIGHUP
+- **Tier 1 (no container runtime needed):** health, auth (401), 404, concurrent build (409), edge cases, SIGHUP
- **Tier 2 (requires podman or docker):** deploy, logs, cleanup, overrides, polling
Tests that require git or a container runtime automatically skip with an explicit message (e.g., `SKIPPED: no container runtime (podman/docker) found`) when the dependency is missing.
@@ -212,7 +214,6 @@ tests/integration/
not_found.rs # 404: unknown site
deploy.rs # Full build pipeline (Tier 2)
concurrent.rs # 409 via DashSet injection (Tier 1)
- rate_limit.rs # 429 with isolated server (Tier 1)
logs.rs # Build log verification (Tier 2)
cleanup.rs # Old build cleanup (Tier 2)
sighup.rs # SIGHUP reload (#[serial], Tier 1)
@@ -222,13 +223,16 @@ tests/integration/
cache.rs # Cache directory persistence (Tier 2)
env_vars.rs # Environment variable passing (Tier 2)
cli_run.rs # witryna run command (Tier 2)
- cli_status.rs # witryna status command (Tier 1)
+ cli_status.rs # witryna status command + state.json (Tier 1)
+ cli_switch.rs # witryna switch command (Tier 1)
+ cli_cleanup.rs # witryna cleanup command (Tier 1)
+ cli_validate.rs # witryna validate command (Tier 1)
hooks.rs # Post-deploy hooks (Tier 2)
```
#### Test Categories
-- **Core pipeline** — health, auth (401), 404, deployment (202), concurrent build rejection (409), rate limiting (429)
+- **Core pipeline** — health, auth (401), 404, deployment (202), concurrent build rejection (409)
- **FEAT-001** — SIGHUP config hot-reload
- **FEAT-002** — build config overrides from `witryna.toml` (complete and partial)
- **FEAT-003** — periodic repository polling, new commit detection
@@ -253,12 +257,11 @@ Follow OWASP best practices for all HTTP endpoints:
- Limit request body size to prevent DoS
3. **Rate Limiting**
- - Implement rate limiting per token/IP to prevent abuse
- - Return `429 Too Many Requests` when exceeded
+ - Delegated to the reverse proxy (Caddy, nginx). See `examples/caddy/Caddyfile` and `examples/nginx/witryna.conf` for configuration.
4. **Error Handling**
- Never expose internal error details in responses
- - Log detailed errors server-side with `tracing`
+ - Log detailed errors server-side with `log`
- Return generic error messages to clients
5. **Command Injection Prevention**
@@ -273,10 +276,14 @@ Follow OWASP best practices for all HTTP endpoints:
- Configurable working directory: `container_workdir` (relative path, no traversal)
- Podman: rootless via `--userns=keep-id`; Docker: `--cap-add=DAC_OVERRIDE` for workspace access
+## Backlog & Sprint Tracking
+
+`SPRINT.md` (gitignored) contains the project backlog, current sprint tasks, and sprint history. Always check `SPRINT.md` first when asked "what's next", what remains for a milestone, or what to work on.
+
## Conventions
- Use `anyhow` for error handling with context
-- Use `tracing` macros for logging (`info!`, `debug!`, `error!`)
+- Use `log` macros for logging (`info!`, `debug!`, `error!`)
- Async-first: prefer `tokio::fs` over `std::fs`
- Use `DashSet` for concurrent build tracking
- `SPRINT.md` is gitignored — update it after each task to track progress, but **never commit it**
@@ -285,15 +292,15 @@ Follow OWASP best practices for all HTTP endpoints:
## Branching
-- Implement each new feature or task on a **dedicated branch** named after the task ID (e.g., `cli-002-man-pages`, `pkg-001-cargo-deb`)
-- Branch from `main` before starting work: `git checkout -b <branch-name> main`
-- Keep the branch focused on a single task — do not mix unrelated changes
-- Merge back to `main` only after the task is complete and tests pass
-- Do not delete the branch until the merge is confirmed
+- **Only `main` is pushed to the remote.** `main` has a clean, linear history — one squash commit per feature/milestone.
+- Day-to-day work happens on a **`staging`** branch (or feature branches off staging). These are never pushed.
+- When work is complete and tests pass, squash-merge `staging` into `main` with a single descriptive commit.
+- Feature branches (if used) branch from `staging`, not `main`.
+- Do not push work-in-progress branches — Git is decentralized; only publish finished work.
## Commit Rules
-**IMPORTANT:** Before completing any task, run `just test-all` to verify everything passes, then run `/commit-smart` to commit changes.
+**IMPORTANT:** Before completing any task, run `just test-all` and `just lint-picky` to verify everything passes, then run `/commit-smart` to commit changes.
- Only commit files modified in the current session
- Use atomic commits with descriptive messages
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 18cc413..8a3a593 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,54 @@
# Changelog
+## 0.2.0 — 2026-02-15
+
+### Features
+
+- **Manual build switching**: `witryna switch <site> <build>` command for
+ instant rollback to any previous build via atomic symlink swap
+- **Manual cleanup**: `witryna cleanup [site] [--keep N]` subcommand for
+ pruning old builds and logs
+- **Persistent build state**: `state.json` as single source of truth for
+ build status, timestamps, and git commit hash
+- **Active build marker**: `witryna status` shows `+` next to sites with
+ builds in progress
+- **Hook build status**: `WITRYNA_BUILD_STATUS` environment variable
+ (`"success"` or `"failed"`) passed to post-deploy hooks
+
+### Breaking Changes
+
+- **Post-deploy hooks now run on all builds.** Previously hooks only ran
+ after successful builds. They now also run after build failures, with the
+ new `WITRYNA_BUILD_STATUS` environment variable set to `"success"` or
+ `"failed"`. Update existing hooks to check `WITRYNA_BUILD_STATUS` if they
+ should only run on success (e.g. web server reload scripts).
+
+- **Removed built-in rate limiting.** The `rate_limit_per_minute` and
+ `trust_proxy` configuration options have been removed. Rate limiting
+ is now delegated to the reverse proxy (Caddy, nginx). See
+ `examples/caddy/Caddyfile` and `examples/nginx/witryna.conf` for
+ configuration examples.
+
+- **Dependency overhaul.** HTTP server replaced (`axum` → `tiny_http`),
+ CLI parser replaced (`clap` → `argh`), logging replaced (`tracing` →
+ `log`). Configuration format is unchanged but the binary is significantly
+ smaller and has fewer transitive dependencies (192 → 139 crates, −28%).
+
+### Internal
+
+- Replace `tracing` with `log` + custom `Logger` for smaller binary
+- `Box::pin` large futures to reduce async stack usage
+- Centralized version management via `Cargo.toml`
+- Drop `chrono`, `dashmap`, `tokio-util`, `subtle` dependencies
+- MSRV set to Rust 1.85
+
+### Packaging
+
+- **Nix flake** with NixOS module and VM integration test
+- **Arch Linux PKGBUILD** with packaging scripts
+- Add `git` and `git-lfs` to deb/rpm package dependencies
+- New integration tests for `validate` and `cleanup` CLI commands
+
## 0.1.0 — 2026-02-10
Initial release.
@@ -11,12 +60,12 @@ symlink switching.
### Features
-- **HTTP webhook server** (axum) with bearer token auth, rate limiting,
- and JSON error responses
+- **HTTP webhook server** (tiny_http) with bearer token auth and JSON error
+ responses
- **Git integration**: clone, fetch, shallow/full depth, automatic
submodule initialization, LFS support
- **Containerized builds** via Podman or Docker with security hardening
- (`--cap-drop=ALL`, `--network=none` default, resource limits)
+ (`--cap-drop=ALL`, `--network=bridge` default, resource limits)
- **Atomic publishing** via timestamped directories and symlink switching
- **Post-deploy hooks** with environment variables (`WITRYNA_SITE`,
`WITRYNA_BUILD_DIR`, `WITRYNA_PUBLIC_DIR`, `WITRYNA_BUILD_TIMESTAMP`)
diff --git a/Cargo.lock b/Cargo.lock
index 70e45f7..6a66265 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3,78 +3,47 @@
version = 4
[[package]]
-name = "aho-corasick"
-version = "1.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
-dependencies = [
- "memchr",
-]
-
-[[package]]
-name = "android_system_properties"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "anstream"
-version = "0.6.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a"
-dependencies = [
- "anstyle",
- "anstyle-parse",
- "anstyle-query",
- "anstyle-wincon",
- "colorchoice",
- "is_terminal_polyfill",
- "utf8parse",
-]
-
-[[package]]
-name = "anstyle"
-version = "1.0.13"
+name = "anyhow"
+version = "1.0.101"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78"
+checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea"
[[package]]
-name = "anstyle-parse"
-version = "0.2.7"
+name = "argh"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
+checksum = "7f384d96bfd3c0b3c41f24dae69ee9602c091d64fc432225cf5295b5abbe0036"
dependencies = [
- "utf8parse",
+ "argh_derive",
+ "argh_shared",
]
[[package]]
-name = "anstyle-query"
-version = "1.1.5"
+name = "argh_derive"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc"
+checksum = "938e5f66269c1f168035e29ed3fb437b084e476465e9314a0328f4005d7be599"
dependencies = [
- "windows-sys 0.61.2",
+ "argh_shared",
+ "proc-macro2",
+ "quote",
+ "syn",
]
[[package]]
-name = "anstyle-wincon"
-version = "3.0.11"
+name = "argh_shared"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d"
+checksum = "5127f8a5bc1cfb0faf1f6248491452b8a5b6901068d8da2d47cbb285986ae683"
dependencies = [
- "anstyle",
- "once_cell_polyfill",
- "windows-sys 0.61.2",
+ "serde",
]
[[package]]
-name = "anyhow"
-version = "1.0.100"
+name = "ascii"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"
+checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16"
[[package]]
name = "atomic-waker"
@@ -83,64 +52,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
[[package]]
-name = "autocfg"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
-
-[[package]]
-name = "axum"
-version = "0.8.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8"
-dependencies = [
- "axum-core",
- "bytes",
- "form_urlencoded",
- "futures-util",
- "http",
- "http-body",
- "http-body-util",
- "hyper",
- "hyper-util",
- "itoa",
- "matchit",
- "memchr",
- "mime",
- "percent-encoding",
- "pin-project-lite",
- "serde_core",
- "serde_json",
- "serde_path_to_error",
- "serde_urlencoded",
- "sync_wrapper",
- "tokio",
- "tower",
- "tower-layer",
- "tower-service",
- "tracing",
-]
-
-[[package]]
-name = "axum-core"
-version = "0.5.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
-dependencies = [
- "bytes",
- "futures-core",
- "http",
- "http-body",
- "http-body-util",
- "mime",
- "pin-project-lite",
- "sync_wrapper",
- "tower-layer",
- "tower-service",
- "tracing",
-]
-
-[[package]]
name = "base64"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -148,9 +59,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "bitflags"
-version = "2.10.0"
+version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"
+checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
[[package]]
name = "bumpalo"
@@ -166,9 +77,9 @@ checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
[[package]]
name = "cc"
-version = "1.2.53"
+version = "1.2.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "755d2fce177175ffca841e9a06afdb2c4ab0f593d53b4dee48147dfaade85932"
+checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2"
dependencies = [
"find-msvc-tools",
"shlex",
@@ -187,79 +98,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
-name = "chrono"
-version = "0.4.43"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118"
-dependencies = [
- "iana-time-zone",
- "js-sys",
- "num-traits",
- "wasm-bindgen",
- "windows-link",
-]
-
-[[package]]
-name = "clap"
-version = "4.5.56"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a75ca66430e33a14957acc24c5077b503e7d374151b2b4b3a10c83b4ceb4be0e"
-dependencies = [
- "clap_builder",
- "clap_derive",
-]
-
-[[package]]
-name = "clap_builder"
-version = "4.5.56"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "793207c7fa6300a0608d1080b858e5fdbe713cdc1c8db9fb17777d8a13e63df0"
-dependencies = [
- "anstream",
- "anstyle",
- "clap_lex",
- "strsim",
-]
-
-[[package]]
-name = "clap_derive"
-version = "4.5.55"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5"
-dependencies = [
- "heck",
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "clap_lex"
-version = "0.7.7"
+name = "chunked_transfer"
+version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32"
+checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901"
[[package]]
-name = "clap_mangen"
-version = "0.2.31"
+name = "core-foundation"
+version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "439ea63a92086df93893164221ad4f24142086d535b3a0957b9b9bea2dc86301"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
dependencies = [
- "clap",
- "roff",
+ "core-foundation-sys",
+ "libc",
]
[[package]]
-name = "colorchoice"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
-
-[[package]]
name = "core-foundation"
-version = "0.9.4"
+version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
+checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6"
dependencies = [
"core-foundation-sys",
"libc",
@@ -272,26 +130,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
-name = "crossbeam-utils"
-version = "0.8.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
-
-[[package]]
-name = "dashmap"
-version = "6.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf"
-dependencies = [
- "cfg-if",
- "crossbeam-utils",
- "hashbrown 0.14.5",
- "lock_api",
- "once_cell",
- "parking_lot_core",
-]
-
-[[package]]
name = "displaydoc"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -335,9 +173,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
name = "find-msvc-tools"
-version = "0.1.8"
+version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db"
+checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582"
[[package]]
name = "fnv"
@@ -346,6 +184,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
+name = "foldhash"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
+
+[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -371,24 +215,24 @@ dependencies = [
[[package]]
name = "futures-channel"
-version = "0.3.31"
+version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
+checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d"
dependencies = [
"futures-core",
]
[[package]]
name = "futures-core"
-version = "0.3.31"
+version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
+checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d"
[[package]]
name = "futures-executor"
-version = "0.3.31"
+version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
+checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d"
dependencies = [
"futures-core",
"futures-task",
@@ -397,33 +241,25 @@ dependencies = [
[[package]]
name = "futures-sink"
-version = "0.3.31"
+version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
+checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893"
[[package]]
name = "futures-task"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
-
-[[package]]
-name = "futures-timer"
-version = "3.0.3"
+version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24"
+checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393"
[[package]]
name = "futures-util"
-version = "0.3.31"
+version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
+checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6"
dependencies = [
"futures-core",
- "futures-sink",
"futures-task",
"pin-project-lite",
- "pin-utils",
"slab",
]
@@ -440,39 +276,15 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.3.4"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
+checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec"
dependencies = [
"cfg-if",
- "js-sys",
"libc",
"r-efi",
"wasip2",
- "wasm-bindgen",
-]
-
-[[package]]
-name = "governor"
-version = "0.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be93b4ec2e4710b04d9264c0c7350cdd62a8c20e5e4ac732552ebb8f0debe8eb"
-dependencies = [
- "cfg-if",
- "dashmap",
- "futures-sink",
- "futures-timer",
- "futures-util",
- "getrandom 0.3.4",
- "no-std-compat",
- "nonzero_ext",
- "parking_lot",
- "portable-atomic",
- "quanta",
- "rand",
- "smallvec",
- "spinning_top",
- "web-time",
+ "wasip3",
]
[[package]]
@@ -496,9 +308,12 @@ dependencies = [
[[package]]
name = "hashbrown"
-version = "0.14.5"
+version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
+checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
+dependencies = [
+ "foldhash",
+]
[[package]]
name = "hashbrown"
@@ -577,7 +392,6 @@ dependencies = [
"http",
"http-body",
"httparse",
- "httpdate",
"itoa",
"pin-project-lite",
"pin-utils",
@@ -620,14 +434,13 @@ dependencies = [
[[package]]
name = "hyper-util"
-version = "0.1.19"
+version = "0.1.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f"
+checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0"
dependencies = [
"base64",
"bytes",
"futures-channel",
- "futures-core",
"futures-util",
"http",
"http-body",
@@ -645,30 +458,6 @@ dependencies = [
]
[[package]]
-name = "iana-time-zone"
-version = "0.1.64"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb"
-dependencies = [
- "android_system_properties",
- "core-foundation-sys",
- "iana-time-zone-haiku",
- "js-sys",
- "log",
- "wasm-bindgen",
- "windows-core",
-]
-
-[[package]]
-name = "iana-time-zone-haiku"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
-dependencies = [
- "cc",
-]
-
-[[package]]
name = "icu_collections"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -750,6 +539,12 @@ dependencies = [
]
[[package]]
+name = "id-arena"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954"
+
+[[package]]
name = "idna"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -778,6 +573,8 @@ checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
dependencies = [
"equivalent",
"hashbrown 0.16.1",
+ "serde",
+ "serde_core",
]
[[package]]
@@ -797,12 +594,6 @@ dependencies = [
]
[[package]]
-name = "is_terminal_polyfill"
-version = "1.70.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695"
-
-[[package]]
name = "itoa"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -819,16 +610,16 @@ dependencies = [
]
[[package]]
-name = "lazy_static"
-version = "1.5.0"
+name = "leb128fmt"
+version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
[[package]]
name = "libc"
-version = "0.2.180"
+version = "0.2.182"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
+checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112"
[[package]]
name = "linux-raw-sys"
@@ -858,25 +649,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
-name = "matchers"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
-dependencies = [
- "regex-automata",
-]
-
-[[package]]
-name = "matchit"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
-
-[[package]]
name = "memchr"
-version = "2.7.6"
+version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"
+checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
[[package]]
name = "mime"
@@ -897,9 +673,9 @@ dependencies = [
[[package]]
name = "native-tls"
-version = "0.2.14"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e"
+checksum = "9d5d26952a508f321b4d3d2e80e78fc2603eaefcdf0c30783867f19586518bdc"
dependencies = [
"libc",
"log",
@@ -925,48 +701,12 @@ dependencies = [
]
[[package]]
-name = "no-std-compat"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c"
-
-[[package]]
-name = "nonzero_ext"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21"
-
-[[package]]
-name = "nu-ansi-term"
-version = "0.50.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
-dependencies = [
- "windows-sys 0.61.2",
-]
-
-[[package]]
-name = "num-traits"
-version = "0.2.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
-dependencies = [
- "autocfg",
-]
-
-[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
-name = "once_cell_polyfill"
-version = "1.70.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe"
-
-[[package]]
name = "openssl"
version = "0.10.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -994,9 +734,9 @@ dependencies = [
[[package]]
name = "openssl-probe"
-version = "0.1.6"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
+checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
[[package]]
name = "openssl-sys"
@@ -1058,12 +798,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
[[package]]
-name = "portable-atomic"
-version = "1.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950"
-
-[[package]]
name = "potential_utf"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1073,12 +807,13 @@ dependencies = [
]
[[package]]
-name = "ppv-lite86"
-version = "0.2.21"
+name = "prettyplease"
+version = "0.2.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
+checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
- "zerocopy",
+ "proc-macro2",
+ "syn",
]
[[package]]
@@ -1091,25 +826,10 @@ dependencies = [
]
[[package]]
-name = "quanta"
-version = "0.12.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7"
-dependencies = [
- "crossbeam-utils",
- "libc",
- "once_cell",
- "raw-cpuid",
- "wasi",
- "web-sys",
- "winapi",
-]
-
-[[package]]
name = "quote"
-version = "1.0.43"
+version = "1.0.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a"
+checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4"
dependencies = [
"proc-macro2",
]
@@ -1121,44 +841,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
-name = "rand"
-version = "0.9.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
-dependencies = [
- "rand_chacha",
- "rand_core",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.9.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c"
-dependencies = [
- "getrandom 0.3.4",
-]
-
-[[package]]
-name = "raw-cpuid"
-version = "11.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186"
-dependencies = [
- "bitflags",
-]
-
-[[package]]
name = "redox_syscall"
version = "0.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1168,23 +850,6 @@ dependencies = [
]
[[package]]
-name = "regex-automata"
-version = "0.4.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-syntax",
-]
-
-[[package]]
-name = "regex-syntax"
-version = "0.8.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
-
-[[package]]
name = "reqwest"
version = "0.12.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1239,12 +904,6 @@ dependencies = [
]
[[package]]
-name = "roff"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3"
-
-[[package]]
name = "rustix"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1298,9 +957,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "ryu"
-version = "1.0.22"
+version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"
+checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f"
[[package]]
name = "scc"
@@ -1334,12 +993,12 @@ checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca"
[[package]]
name = "security-framework"
-version = "2.11.1"
+version = "3.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
+checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38"
dependencies = [
"bitflags",
- "core-foundation",
+ "core-foundation 0.10.1",
"core-foundation-sys",
"libc",
"security-framework-sys",
@@ -1347,15 +1006,21 @@ dependencies = [
[[package]]
name = "security-framework-sys"
-version = "2.15.0"
+version = "2.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0"
+checksum = "321c8673b092a9a42605034a9879d73cb79101ed5fd117bc9a597b89b4e9e61a"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
+name = "semver"
+version = "1.0.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
+
+[[package]]
name = "serde"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1399,17 +1064,6 @@ dependencies = [
]
[[package]]
-name = "serde_path_to_error"
-version = "0.1.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457"
-dependencies = [
- "itoa",
- "serde",
- "serde_core",
-]
-
-[[package]]
name = "serde_spanned"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1470,15 +1124,6 @@ dependencies = [
]
[[package]]
-name = "sharded-slab"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
-dependencies = [
- "lazy_static",
-]
-
-[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1496,9 +1141,9 @@ dependencies = [
[[package]]
name = "slab"
-version = "0.4.11"
+version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
+checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5"
[[package]]
name = "smallvec"
@@ -1508,36 +1153,21 @@ checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
[[package]]
name = "socket2"
-version = "0.6.1"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881"
+checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0"
dependencies = [
"libc",
"windows-sys 0.60.2",
]
[[package]]
-name = "spinning_top"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300"
-dependencies = [
- "lock_api",
-]
-
-[[package]]
name = "stable_deref_trait"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
[[package]]
-name = "strsim"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
-
-[[package]]
name = "subtle"
version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1545,9 +1175,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
-version = "2.0.114"
+version = "2.0.116"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a"
+checksum = "3df424c70518695237746f84cede799c9c58fcb37450d7b23716568cc8bc69cb"
dependencies = [
"proc-macro2",
"quote",
@@ -1576,12 +1206,12 @@ dependencies = [
[[package]]
name = "system-configuration"
-version = "0.6.1"
+version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
+checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b"
dependencies = [
"bitflags",
- "core-foundation",
+ "core-foundation 0.9.4",
"system-configuration-sys",
]
@@ -1597,24 +1227,27 @@ dependencies = [
[[package]]
name = "tempfile"
-version = "3.24.0"
+version = "3.25.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c"
+checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1"
dependencies = [
"fastrand",
- "getrandom 0.3.4",
+ "getrandom 0.4.1",
"once_cell",
"rustix",
"windows-sys 0.61.2",
]
[[package]]
-name = "thread_local"
-version = "1.1.9"
+name = "tiny_http"
+version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185"
+checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82"
dependencies = [
- "cfg-if",
+ "ascii",
+ "chunked_transfer",
+ "httpdate",
+ "log",
]
[[package]]
@@ -1689,9 +1322,9 @@ dependencies = [
[[package]]
name = "toml"
-version = "0.9.11+spec-1.1.0"
+version = "0.9.12+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46"
+checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863"
dependencies = [
"indexmap",
"serde_core",
@@ -1713,9 +1346,9 @@ dependencies = [
[[package]]
name = "toml_parser"
-version = "1.0.6+spec-1.1.0"
+version = "1.0.8+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44"
+checksum = "0742ff5ff03ea7e67c8ae6c93cac239e0d9784833362da3f9a9c1da8dfefcbdc"
dependencies = [
"winnow",
]
@@ -1739,7 +1372,6 @@ dependencies = [
"tokio",
"tower-layer",
"tower-service",
- "tracing",
]
[[package]]
@@ -1778,60 +1410,17 @@ version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
dependencies = [
- "log",
"pin-project-lite",
- "tracing-attributes",
"tracing-core",
]
[[package]]
-name = "tracing-attributes"
-version = "0.1.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
name = "tracing-core"
version = "0.1.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a"
dependencies = [
"once_cell",
- "valuable",
-]
-
-[[package]]
-name = "tracing-log"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
-dependencies = [
- "log",
- "once_cell",
- "tracing-core",
-]
-
-[[package]]
-name = "tracing-subscriber"
-version = "0.3.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e"
-dependencies = [
- "matchers",
- "nu-ansi-term",
- "once_cell",
- "regex-automata",
- "sharded-slab",
- "smallvec",
- "thread_local",
- "tracing",
- "tracing-core",
- "tracing-log",
]
[[package]]
@@ -1842,9 +1431,15 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "unicode-ident"
-version = "1.0.22"
+version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
+checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
[[package]]
name = "unsafe-libyaml"
@@ -1877,18 +1472,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
-name = "utf8parse"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
-
-[[package]]
-name = "valuable"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
-
-[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1911,11 +1494,20 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasip2"
-version = "1.0.2+wasi-0.2.9"
+version = "1.0.1+wasi-0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
+dependencies = [
+ "wit-bindgen 0.46.0",
+]
+
+[[package]]
+name = "wasip3"
+version = "0.4.0+wasi-0.3.0-rc-2026-01-06"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5"
+checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5"
dependencies = [
- "wit-bindgen",
+ "wit-bindgen 0.51.0",
]
[[package]]
@@ -1978,80 +1570,47 @@ dependencies = [
]
[[package]]
-name = "web-sys"
-version = "0.3.85"
+name = "wasm-encoder"
+version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598"
+checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319"
dependencies = [
- "js-sys",
- "wasm-bindgen",
+ "leb128fmt",
+ "wasmparser",
]
[[package]]
-name = "web-time"
-version = "1.1.0"
+name = "wasm-metadata"
+version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
+checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909"
dependencies = [
- "js-sys",
- "wasm-bindgen",
-]
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "windows-core"
-version = "0.62.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb"
-dependencies = [
- "windows-implement",
- "windows-interface",
- "windows-link",
- "windows-result",
- "windows-strings",
+ "anyhow",
+ "indexmap",
+ "wasm-encoder",
+ "wasmparser",
]
[[package]]
-name = "windows-implement"
-version = "0.60.2"
+name = "wasmparser"
+version = "0.244.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
+checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe"
dependencies = [
- "proc-macro2",
- "quote",
- "syn",
+ "bitflags",
+ "hashbrown 0.15.5",
+ "indexmap",
+ "semver",
]
[[package]]
-name = "windows-interface"
-version = "0.59.3"
+name = "web-sys"
+version = "0.3.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
+checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598"
dependencies = [
- "proc-macro2",
- "quote",
- "syn",
+ "js-sys",
+ "wasm-bindgen",
]
[[package]]
@@ -2253,36 +1812,116 @@ checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829"
[[package]]
name = "wit-bindgen"
+version = "0.46.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
+
+[[package]]
+name = "wit-bindgen"
version = "0.51.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5"
+dependencies = [
+ "wit-bindgen-rust-macro",
+]
+
+[[package]]
+name = "wit-bindgen-core"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc"
+dependencies = [
+ "anyhow",
+ "heck",
+ "wit-parser",
+]
+
+[[package]]
+name = "wit-bindgen-rust"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21"
+dependencies = [
+ "anyhow",
+ "heck",
+ "indexmap",
+ "prettyplease",
+ "syn",
+ "wasm-metadata",
+ "wit-bindgen-core",
+ "wit-component",
+]
+
+[[package]]
+name = "wit-bindgen-rust-macro"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a"
+dependencies = [
+ "anyhow",
+ "prettyplease",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wit-bindgen-core",
+ "wit-bindgen-rust",
+]
+
+[[package]]
+name = "wit-component"
+version = "0.244.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2"
+dependencies = [
+ "anyhow",
+ "bitflags",
+ "indexmap",
+ "log",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "wasm-encoder",
+ "wasm-metadata",
+ "wasmparser",
+ "wit-parser",
+]
+
+[[package]]
+name = "wit-parser"
+version = "0.244.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736"
+dependencies = [
+ "anyhow",
+ "id-arena",
+ "indexmap",
+ "log",
+ "semver",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "unicode-xid",
+ "wasmparser",
+]
[[package]]
name = "witryna"
-version = "0.1.0"
+version = "0.2.0"
dependencies = [
"anyhow",
- "axum",
- "chrono",
- "clap",
- "clap_mangen",
- "dashmap",
- "governor",
+ "argh",
"humantime",
+ "log",
"nix",
"reqwest",
"serde",
"serde_json",
"serde_yaml_ng",
"serial_test",
- "subtle",
"tempfile",
+ "tiny_http",
"tokio",
- "tokio-util",
"toml",
- "tower",
- "tracing",
- "tracing-subscriber",
]
[[package]]
@@ -2315,26 +1954,6 @@ dependencies = [
]
[[package]]
-name = "zerocopy"
-version = "0.8.33"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd"
-dependencies = [
- "zerocopy-derive",
-]
-
-[[package]]
-name = "zerocopy-derive"
-version = "0.8.33"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
name = "zerofrom"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2396,6 +2015,6 @@ dependencies = [
[[package]]
name = "zmij"
-version = "1.0.16"
+version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65"
+checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa"
diff --git a/Cargo.toml b/Cargo.toml
index fb8e20d..6fbac26 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,12 +1,15 @@
[package]
name = "witryna"
-version = "0.1.0"
+version = "0.2.0"
edition = "2024"
+rust-version = "1.85"
authors = ["Dawid Rycerz"]
description = "Minimalist Git-based static site deployment orchestrator"
homepage = "https://git.craftknight.com/dawid/witryna"
repository = "https://git.craftknight.com/dawid/witryna.git"
license = "MIT"
+keywords = ["deploy", "static-site", "git", "webhook", "container"]
+categories = ["command-line-utilities", "web-programming"]
[features]
default = []
@@ -14,33 +17,22 @@ integration = []
[dependencies]
anyhow = "1.0.100"
-clap = { version = "4", features = ["derive"] }
-subtle = "2.6"
-axum = "0.8.8"
-chrono = "0.4.43"
-dashmap = "6.1.0"
-governor = "0.8"
+argh = "0.1"
+tiny_http = "0.12"
serde = { version = "1.0.228", features = ["derive"] }
serde_yaml_ng = "0.10"
-tokio = { version = "1.49.0", features = ["rt-multi-thread", "macros", "fs", "process", "net", "signal", "sync", "time", "io-util", "io-std"] }
+tokio = { version = "1.49.0", features = ["rt-multi-thread", "macros", "fs", "process", "signal", "sync", "time", "io-util", "io-std"] }
toml = "0.9.11"
-tracing = "0.1.44"
-tracing-subscriber = { version = "0.3.22", features = ["env-filter"] }
+log = { version = "0.4", features = ["std"] }
humantime = "2.3.0"
-tokio-util = "0.7.18"
serde_json = "1.0"
[dev-dependencies]
-tower = "0.5"
-reqwest = { version = "0.12" }
+reqwest = { version = "0.12", features = ["json"] }
tempfile = "3"
nix = { version = "0.29", features = ["signal"] }
serial_test = "3"
-[build-dependencies]
-clap = { version = "4", features = ["derive"] }
-clap_mangen = "0.2.31"
-
[profile.release]
strip = true
lto = true
@@ -56,13 +48,13 @@ containerized build commands, and publishes static assets via atomic \
symlink switching."""
section = "web"
priority = "optional"
-depends = "$auto, adduser, systemd"
+depends = "$auto, adduser, systemd, git, git-lfs"
recommends = "podman | docker.io"
maintainer-scripts = "debian/"
conf-files = ["/etc/witryna/witryna.toml"]
assets = [
["target/release/witryna", "usr/bin/", "755"],
- ["target/man/witryna.1", "usr/share/man/man1/witryna.1", "644"],
+ ["man/witryna.1", "usr/share/man/man1/witryna.1", "644"],
["man/witryna.toml.5", "usr/share/man/man5/witryna.toml.5", "644"],
["examples/witryna.toml", "etc/witryna/witryna.toml", "644"],
["README.md", "usr/share/doc/witryna/README.md", "644"],
@@ -91,7 +83,7 @@ group = "Applications/Internet"
release = "1"
assets = [
{ source = "target/release/witryna", dest = "/usr/bin/witryna", mode = "755" },
- { source = "target/man/witryna.1", dest = "/usr/share/man/man1/witryna.1", mode = "644", doc = true },
+ { source = "man/witryna.1", dest = "/usr/share/man/man1/witryna.1", mode = "644", doc = true },
{ source = "man/witryna.toml.5", dest = "/usr/share/man/man5/witryna.toml.5", mode = "644", doc = true },
{ source = "examples/witryna.toml", dest = "/etc/witryna/witryna.toml", mode = "644", config = "noreplace" },
{ source = "debian/witryna.service", dest = "/usr/lib/systemd/system/witryna.service", mode = "644" },
@@ -186,6 +178,7 @@ fi"""
systemd = "*"
shadow-utils = "*"
git = "*"
+git-lfs = "*"
[package.metadata.generate-rpm.recommends]
podman = "*"
diff --git a/Justfile b/Justfile
index fb24e36..37add68 100644
--- a/Justfile
+++ b/Justfile
@@ -1,4 +1,5 @@
# Witryna development tasks
+version := `grep '^version' Cargo.toml | head -1 | sed 's/.*"\(.*\)"/\1/'`
# List available recipes
default:
@@ -34,7 +35,41 @@ lint-picky:
cargo clippy --all-targets --all-features -- -W clippy::pedantic -W clippy::nursery -W clippy::unwrap_used -W clippy::expect_used -W clippy::panic -W clippy::indexing_slicing -W clippy::clone_on_ref_ptr -W clippy::print_stdout -W clippy::print_stderr
# Run all lints
-lint: lint-rust lint-yaml lint-secrets
+lint: lint-rust lint-yaml lint-secrets check-version
+
+# --- Version ---
+
+# Verify all files match Cargo.toml version
+check-version:
+ #!/usr/bin/env bash
+ set -euo pipefail
+ v="{{version}}"
+ # Arch pkgver: strip pre-release punctuation (0.2.0-rc.1 → 0.2.0rc1)
+ pkgver=$(echo "$v" | sed 's/-\([a-z]*\)\./\1/')
+ tag="v${v}"
+ ok=true
+ check() { grep -qF "$2" "$1" || { echo "MISMATCH: $1 missing '$2'"; ok=false; }; }
+ check arch/PKGBUILD "pkgver=${pkgver}"
+ check arch/PKGBUILD "_tag=${tag}"
+ check man/witryna.1 "witryna ${v}\""
+ check man/witryna.1 "${tag}"
+ check man/witryna.toml.5 "witryna ${v}\""
+ $ok && echo "All versions match Cargo.toml: ${v}" || { echo "Run 'just bump-version' to fix"; exit 1; }
+
+# Sync version in packaging/doc files to match Cargo.toml
+bump-version:
+ #!/usr/bin/env bash
+ set -euo pipefail
+ v="{{version}}"
+ pkgver=$(echo "$v" | sed 's/-\([a-z]*\)\./\1/')
+ tag="v${v}"
+ echo "Syncing to ${v} (pkgver=${pkgver}, tag=${tag})"
+ sed -i "s/^pkgver=.*/pkgver=${pkgver}/" arch/PKGBUILD
+ sed -i "s/^_tag=.*/_tag=${tag}/" arch/PKGBUILD
+ sed -i "1s/\"witryna [^\"]*\"/\"witryna ${v}\"/" man/witryna.1
+ sed -i "1s/\"witryna [^\"]*\"/\"witryna ${v}\"/" man/witryna.toml.5
+ sed -i '/^\.SH VERSION$/,/^\.SH /{s/^v.*/'"${tag}"'/}' man/witryna.1
+ echo "Done. Remaining manual updates: CHANGELOG.md, README.md"
# --- Testing ---
@@ -59,16 +94,6 @@ test-all: lint test test-integration
build-release:
cargo build --release
-# --- Man pages ---
-
-# View witryna(1) man page (run `cargo build` first)
-man-1:
- man -l target/man/witryna.1
-
-# View witryna.toml(5) man page
-man-5:
- man -l man/witryna.toml.5
-
# --- Packaging ---
# Build Debian package
@@ -100,6 +125,24 @@ inspect-rpm:
echo "" && echo "=== Config files ===" && rpm -qcp "$rpm"
echo "" && echo "=== Scripts ===" && rpm -q --scripts -p "$rpm"
+# Test PKGBUILD locally (requires makepkg)
+build-arch:
+ cd arch && makepkg -sf
+
+# Build Nix package (requires nix with flakes)
+build-nix:
+ nix build .#witryna
+
+# Show contents of built Arch package
+inspect-arch:
+ #!/usr/bin/env bash
+ set -euo pipefail
+ pkg=$(find arch -name 'witryna-*.pkg.tar.zst' 2>/dev/null | head -1)
+ if [[ -z "$pkg" ]]; then echo "No .pkg.tar.zst found — run 'just build-arch' first" >&2; exit 1; fi
+ echo "=== Info ===" && pacman -Qip "$pkg"
+ echo "" && echo "=== Contents ===" && pacman -Qlp "$pkg"
+ echo "" && echo "=== Backup files ===" && pacman -Qip "$pkg" | grep -A99 "^Backup Files"
+
# --- Release ---
RELEASE_HOST := "git@sandcrawler"
@@ -117,7 +160,7 @@ build-tarball: build-release
cp target/release/witryna "$staging/"
cp examples/witryna.toml "$staging/"
cp debian/witryna.service "$staging/"
- cp target/man/witryna.1 "$staging/"
+ cp man/witryna.1 "$staging/"
cp man/witryna.toml.5 "$staging/"
cp README.md "$staging/"
cp examples/hooks/caddy-deploy.sh "$staging/examples/hooks/"
diff --git a/README.md b/README.md
index f9ed799..1001048 100644
--- a/README.md
+++ b/README.md
@@ -20,19 +20,39 @@ Pre-built packages are available at
From a `.deb` package (Debian/Ubuntu):
- curl -LO https://release.craftknight.com/witryna_0.1.0-1_amd64.deb
- sudo dpkg -i witryna_0.1.0-1_amd64.deb
+ curl -LO https://release.craftknight.com/witryna_0.2.0-1_amd64.deb
+ sudo dpkg -i witryna_0.2.0-1_amd64.deb
From an `.rpm` package (Fedora/RHEL):
- curl -LO https://release.craftknight.com/witryna-0.1.0-1.x86_64.rpm
- sudo rpm -i witryna-0.1.0-1.x86_64.rpm
+ curl -LO https://release.craftknight.com/witryna-0.2.0-1.x86_64.rpm
+ sudo rpm -i witryna-0.2.0-1.x86_64.rpm
+
+From PKGBUILD (Arch Linux):
+
+ git clone https://git.craftknight.com/dawid/witryna.git
+ cd witryna/arch && makepkg -si
+
+With Nix (flake):
+
+ nix profile install git+https://git.craftknight.com/dawid/witryna
+
+NixOS module:
+
+ # In your flake.nix inputs:
+ witryna.url = "git+https://git.craftknight.com/dawid/witryna";
+
+ # In your NixOS configuration:
+ services.witryna = {
+ enable = true;
+ configFile = "/etc/witryna/witryna.toml";
+ };
From a tarball (any Linux):
- curl -LO https://release.craftknight.com/witryna-0.1.0-linux-amd64.tar.gz
- tar xzf witryna-0.1.0-linux-amd64.tar.gz
- sudo cp witryna-0.1.0-linux-amd64/witryna /usr/local/bin/
+ curl -LO https://release.craftknight.com/witryna-0.2.0-linux-amd64.tar.gz
+ tar xzf witryna-0.2.0-linux-amd64.tar.gz
+ sudo cp witryna-0.2.0-linux-amd64/witryna /usr/local/bin/
From source:
@@ -99,6 +119,8 @@ override from `/usr/share/doc/witryna/examples/systemd/` to
| `witryna validate` | Validate config and print summary |
| `witryna run <site>` | Run a one-off build (synchronous) |
| `witryna status` | Show deployment status |
+| `witryna switch <site> <build>` | Switch active build (rollback) |
+| `witryna cleanup [site]` | Remove old builds and logs |
## Configuration
@@ -138,6 +160,8 @@ To build distribution packages:
just build-deb # Debian .deb package
just build-rpm # RPM package
+ just build-arch # Arch Linux package
+ just build-nix # Nix package
## Dependencies
diff --git a/arch/PKGBUILD b/arch/PKGBUILD
new file mode 100644
index 0000000..c16df3d
--- /dev/null
+++ b/arch/PKGBUILD
@@ -0,0 +1,70 @@
+# Maintainer: Dawid Rycerz <dawid@craftknight.com>
+pkgname=witryna
+pkgver=0.2.0
+pkgrel=1
+pkgdesc='Minimalist Git-based static site deployment orchestrator'
+url='https://git.craftknight.com/dawid/witryna'
+license=('MIT')
+arch=('x86_64')
+makedepends=('cargo' 'git')
+depends=('gcc-libs' 'glibc' 'git' 'git-lfs')
+optdepends=('podman: container runtime for builds (recommended)'
+ 'docker: alternative container runtime')
+backup=('etc/witryna/witryna.toml')
+install=witryna.install
+_tag=v0.2.0
+source=("$pkgname::git+https://git.craftknight.com/dawid/witryna.git#tag=$_tag"
+ 'witryna.sysusers'
+ 'witryna.tmpfiles'
+ 'witryna.install')
+b2sums=('SKIP' 'SKIP' 'SKIP' 'SKIP')
+
+prepare() {
+ cd "$pkgname"
+ export RUSTUP_TOOLCHAIN=stable
+ cargo fetch --locked --target "$(rustc -vV | sed -n 's/host: //p')"
+}
+
+build() {
+ cd "$pkgname"
+ export RUSTUP_TOOLCHAIN=stable
+ export CARGO_TARGET_DIR=target
+ cargo build --frozen --release
+}
+
+check() {
+ cd "$pkgname"
+ export RUSTUP_TOOLCHAIN=stable
+ export CARGO_TARGET_DIR=target
+ cargo test --frozen
+}
+
+package() {
+ cd "$pkgname"
+
+ # Binary
+ install -Dm0755 target/release/witryna "$pkgdir/usr/bin/witryna"
+
+ # Man pages
+ install -Dm0644 man/witryna.1 "$pkgdir/usr/share/man/man1/witryna.1"
+ install -Dm0644 man/witryna.toml.5 "$pkgdir/usr/share/man/man5/witryna.toml.5"
+
+ # Config
+ install -Dm0644 examples/witryna.toml "$pkgdir/etc/witryna/witryna.toml"
+
+ # Systemd
+ install -Dm0644 debian/witryna.service "$pkgdir/usr/lib/systemd/system/witryna.service"
+ install -Dm0644 "$srcdir/witryna.sysusers" "$pkgdir/usr/lib/sysusers.d/witryna.conf"
+ install -Dm0644 "$srcdir/witryna.tmpfiles" "$pkgdir/usr/lib/tmpfiles.d/witryna.conf"
+
+ # License
+ install -Dm0644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
+
+ # Examples / documentation
+ install -Dm0755 examples/hooks/caddy-deploy.sh "$pkgdir/usr/share/doc/$pkgname/examples/hooks/caddy-deploy.sh"
+ install -Dm0644 examples/caddy/Caddyfile "$pkgdir/usr/share/doc/$pkgname/examples/caddy/Caddyfile"
+ install -Dm0644 examples/nginx/witryna.conf "$pkgdir/usr/share/doc/$pkgname/examples/nginx/witryna.conf"
+ install -Dm0644 examples/witryna.yaml "$pkgdir/usr/share/doc/$pkgname/examples/witryna.yaml"
+ install -Dm0644 examples/systemd/docker.conf "$pkgdir/usr/share/doc/$pkgname/examples/systemd/docker.conf"
+ install -Dm0644 examples/systemd/podman.conf "$pkgdir/usr/share/doc/$pkgname/examples/systemd/podman.conf"
+}
diff --git a/arch/witryna.install b/arch/witryna.install
new file mode 100644
index 0000000..00199e0
--- /dev/null
+++ b/arch/witryna.install
@@ -0,0 +1,15 @@
+post_install() {
+ echo '==> Witryna has been installed.'
+ echo ' 1. Edit /etc/witryna/witryna.toml'
+ echo ' 2. Configure your container runtime:'
+ echo ' Podman: cp /usr/share/doc/witryna/examples/systemd/podman.conf \'
+ echo ' /etc/systemd/system/witryna.service.d/10-runtime.conf'
+ echo ' Docker: cp /usr/share/doc/witryna/examples/systemd/docker.conf \'
+ echo ' /etc/systemd/system/witryna.service.d/10-runtime.conf'
+ echo ' 3. systemctl daemon-reload && systemctl enable --now witryna'
+ echo ' See man witryna(1) and man witryna.toml(5) for documentation.'
+}
+
+post_upgrade() {
+ :
+}
diff --git a/arch/witryna.sysusers b/arch/witryna.sysusers
new file mode 100644
index 0000000..7c202be
--- /dev/null
+++ b/arch/witryna.sysusers
@@ -0,0 +1 @@
+u witryna - "Witryna deployment daemon" /var/lib/witryna /usr/bin/nologin
diff --git a/arch/witryna.tmpfiles b/arch/witryna.tmpfiles
new file mode 100644
index 0000000..a0ab1fe
--- /dev/null
+++ b/arch/witryna.tmpfiles
@@ -0,0 +1,5 @@
+d /var/lib/witryna 0755 witryna witryna -
+d /var/lib/witryna/clones 0755 witryna witryna -
+d /var/lib/witryna/builds 0755 witryna witryna -
+d /var/lib/witryna/cache 0755 witryna witryna -
+d /var/log/witryna 0755 witryna witryna -
diff --git a/build.rs b/build.rs
deleted file mode 100644
index 287f7fb..0000000
--- a/build.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-#[path = "src/cli.rs"]
-mod cli;
-
-use clap::CommandFactory as _;
-use std::io::Write as _;
-
-fn main() -> std::io::Result<()> {
- println!("cargo:rerun-if-changed=src/cli.rs");
-
- #[allow(clippy::expect_used)] // OUT_DIR is always set by cargo during build scripts
- let out_dir = std::env::var("OUT_DIR").expect("OUT_DIR is set by cargo");
- let out_path = std::path::Path::new(&out_dir).join("witryna.1");
-
- let cmd = cli::Cli::command();
- let man = clap_mangen::Man::new(cmd).date(build_date());
-
- let mut buf: Vec<u8> = Vec::new();
-
- // Standard clap-generated sections
- man.render_title(&mut buf)?;
- man.render_name_section(&mut buf)?;
- man.render_synopsis_section(&mut buf)?;
- man.render_description_section(&mut buf)?;
- buf.write_all(SUBCOMMANDS)?;
- man.render_options_section(&mut buf)?;
-
- // Custom roff sections
- buf.write_all(SIGNALS)?;
- buf.write_all(EXIT_STATUS)?;
- buf.write_all(INSTALLATION)?;
- buf.write_all(FILES)?;
- buf.write_all(SEE_ALSO)?;
-
- man.render_version_section(&mut buf)?;
- man.render_authors_section(&mut buf)?;
-
- std::fs::write(&out_path, &buf)?;
-
- // Copy to stable path so cargo-deb and Justfile always find it
- let target_dir = std::path::Path::new("target/man");
- std::fs::create_dir_all(target_dir)?;
- std::fs::copy(&out_path, target_dir.join("witryna.1"))?;
-
- Ok(())
-}
-
-const SUBCOMMANDS: &[u8] = br#".SH "SUBCOMMANDS"
-.TP
-\fBserve\fR
-Start the deployment server (foreground).
-.TP
-\fBvalidate\fR
-Validate configuration file and print summary.
-.TP
-\fBrun\fR \fIsite\fR
-Trigger a one\-off build for a site (synchronous, no server).
-.TP
-\fBstatus\fR
-Show deployment status for configured sites.
-"#;
-
-const SIGNALS: &[u8] = br#".SH "SIGNALS"
-.TP
-\fBSIGHUP\fR
-Reload configuration from \fIwitryna.toml\fR without restarting the server.
-Sites can be added, removed, or modified on the fly.
-Changes to \fBlisten_address\fR, \fBbase_dir\fR, \fBlog_dir\fR,
-and \fBlog_level\fR are detected but require a full restart to take effect;
-a warning is logged when these fields differ.
-.TP
-\fBSIGTERM\fR, \fBSIGINT\fR
-Initiate graceful shutdown.
-In\-progress builds are allowed to finish before the process exits.
-"#;
-
-const EXIT_STATUS: &[u8] = br#".SH "EXIT STATUS"
-.TP
-\fB0\fR
-Clean shutdown after SIGTERM/SIGINT (\fBserve\fR), configuration valid (\fBvalidate\fR),
-build succeeded (\fBrun\fR), or status displayed (\fBstatus\fR).
-Post\-deploy hook failure is non\-fatal and still exits 0.
-.TP
-\fB1\fR
-Startup failure, validation error, build failure, site not found, or configuration error.
-.TP
-\fB2\fR
-Command\-line usage error (unknown flag, missing subcommand, etc.).
-"#;
-
-const INSTALLATION: &[u8] = br#".SH "INSTALLATION"
-When installed via deb or rpm packages, the post\-install script automatically
-detects the available container runtime and configures system\-level access:
-.TP
-\fBDocker\fR
-The \fBwitryna\fR user is added to the \fBdocker\fR group and a systemd
-override is installed at
-\fI/etc/systemd/system/witryna.service.d/10\-runtime.conf\fR
-granting access to the Docker socket.
-.TP
-\fBPodman\fR
-Subordinate UID/GID ranges are allocated (100000\-165535), user lingering is
-enabled, and a systemd override is installed that disables
-\fBRestrictNamespaces\fR and sets \fBXDG_RUNTIME_DIR\fR.
-.PP
-If neither runtime is found at install time, a warning is printed.
-Install a runtime and reinstall the package, or manually copy the appropriate
-override template from \fI/usr/share/doc/witryna/examples/systemd/\fR to
-\fI/etc/systemd/system/witryna.service.d/10\-runtime.conf\fR.
-"#;
-
-const FILES: &[u8] = br#".SH "FILES"
-.TP
-\fI/etc/witryna/witryna.toml\fR
-Conventional configuration path for system\-wide installs.
-The shipped systemd unit passes \fB\-\-config /etc/witryna/witryna.toml\fR
-explicitly; without \fB\-\-config\fR the CLI defaults to \fIwitryna.toml\fR
-in the current working directory.
-.TP
-\fI/var/lib/witryna/clones/<site>/\fR
-Git repository clones.
-.TP
-\fI/var/lib/witryna/builds/<site>/<timestamp>/\fR
-Timestamped build outputs.
-.TP
-\fI/var/lib/witryna/builds/<site>/current\fR
-Symlink to the latest successful build.
-.TP
-\fI/var/lib/witryna/cache/<site>/\fR
-Persistent build cache volumes.
-.TP
-\fI/var/log/witryna/<site>/<timestamp>.log\fR
-Per\-build log files (site, timestamp, git commit, image, duration, status, stdout, stderr).
-.TP
-\fI/var/log/witryna/<site>/<timestamp>\-hook.log\fR
-Post\-deploy hook output (if configured).
-"#;
-
-const SEE_ALSO: &[u8] = br#".SH "SEE ALSO"
-\fBwitryna.toml\fR(5)
-"#;
-
-fn build_date() -> String {
- std::process::Command::new("date")
- .arg("+%Y-%m-%d")
- .output()
- .ok()
- .and_then(|o| String::from_utf8(o.stdout).ok())
- .map(|s| s.trim().to_owned())
- .unwrap_or_default()
-}
diff --git a/examples/caddy/Caddyfile b/examples/caddy/Caddyfile
index b2285f6..6502830 100644
--- a/examples/caddy/Caddyfile
+++ b/examples/caddy/Caddyfile
@@ -10,6 +10,19 @@
import /etc/caddy/sites.d/*.caddy
# Webhook endpoint — reverse proxy to Witryna
+#
+# Rate limiting: Caddy does not include built-in rate limiting.
+# Install the caddy-ratelimit module for per-IP request limiting:
+# https://github.com/mholt/caddy-ratelimit
+#
+# Example with caddy-ratelimit (uncomment after installing the module):
+# rate_limit {
+# zone webhook {
+# key {remote_host}
+# events 10
+# window 1m
+# }
+# }
witryna.example.com {
reverse_proxy 127.0.0.1:8080
diff --git a/examples/hooks/caddy-deploy.sh b/examples/hooks/caddy-deploy.sh
index 7f2173b..ed05f20 100755
--- a/examples/hooks/caddy-deploy.sh
+++ b/examples/hooks/caddy-deploy.sh
@@ -5,8 +5,9 @@
# Supports wildcard hosting domains and custom primary domains with redirects.
#
# Env vars from Witryna (automatic):
-# WITRYNA_SITE — site name
-# WITRYNA_PUBLIC_DIR — stable "current" symlink path (document root)
+# WITRYNA_SITE — site name
+# WITRYNA_PUBLIC_DIR — stable "current" symlink path (document root)
+# WITRYNA_BUILD_STATUS — build outcome: "success" or "failed"
#
# Env vars from [sites.env] in witryna.toml:
# BASE_DOMAIN — wildcard hosting domain (e.g. mywitrynahost.com)
@@ -37,6 +38,12 @@
set -eu
+# Only configure Caddy on successful builds
+if [ "${WITRYNA_BUILD_STATUS:-}" = "failed" ]; then
+ echo "Build failed — skipping Caddy configuration"
+ exit 0
+fi
+
SITES_DIR="${CADDY_SITES_DIR:-/etc/caddy/sites.d}"
CADDY_CONFIG="${CADDY_CONFIG:-/etc/caddy/Caddyfile}"
diff --git a/examples/nginx/witryna.conf b/examples/nginx/witryna.conf
index 5f56ef2..0b92e52 100644
--- a/examples/nginx/witryna.conf
+++ b/examples/nginx/witryna.conf
@@ -7,6 +7,11 @@
# TLS is not configured here — use certbot or similar to add certificates:
# sudo certbot --nginx -d my-site.example.com -d witryna.example.com
+# Rate limiting: 10 requests per minute per source IP for webhook endpoints.
+# Place this directive at the http {} level (outside server blocks) or in a
+# separate file included from nginx.conf.
+limit_req_zone $binary_remote_addr zone=witryna_webhooks:10m rate=10r/m;
+
# Public site — serves your built static files
server {
listen 80;
@@ -36,6 +41,9 @@ server {
deny all;
}
+ limit_req zone=witryna_webhooks burst=5 nodelay;
+ limit_req_status 429;
+
proxy_pass http://127.0.0.1:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
diff --git a/examples/witryna.toml b/examples/witryna.toml
index 6256d63..a76e4bb 100644
--- a/examples/witryna.toml
+++ b/examples/witryna.toml
@@ -6,7 +6,6 @@ container_runtime = "podman"
base_dir = "/var/lib/witryna"
log_dir = "/var/log/witryna"
log_level = "info"
-rate_limit_per_minute = 10
max_builds_to_keep = 5
# git_timeout = "2m" # default: 60s, range: 5s..1h
@@ -48,7 +47,7 @@ max_builds_to_keep = 5
# # command = "npm ci && npm run build"
# # public = "dist"
#
-# # Post-deploy hook (30s timeout, non-fatal)
+# # Post-deploy hook (runs on success and failure, 30s timeout, non-fatal)
# # post_deploy = ["systemctl", "reload", "nginx"]
#
# # Caddy auto-configuration (see examples/hooks/caddy-deploy.sh)
diff --git a/examples/witryna.yaml b/examples/witryna.yaml
index 3d6a09f..3104718 100644
--- a/examples/witryna.yaml
+++ b/examples/witryna.yaml
@@ -1,3 +1,4 @@
+---
# witryna.yaml — per-repository build configuration
# Place this file in the root of your Git repository.
# Supported filenames: .witryna.yaml, .witryna.yml, witryna.yaml, witryna.yml
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 0000000..d5681f4
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,27 @@
+{
+ "nodes": {
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1770843696,
+ "narHash": "sha256-LovWTGDwXhkfCOmbgLVA10bvsi/P8eDDpRudgk68HA8=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "2343bbb58f99267223bc2aac4fc9ea301a155a16",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "nixpkgs": "nixpkgs"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 0000000..42ff254
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,68 @@
+{
+ description = "Witryna - Git-based static site deployment orchestrator";
+
+ inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
+
+ outputs = { self, nixpkgs }:
+ let
+ supportedSystems = [ "x86_64-linux" "aarch64-linux" ];
+ forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
+ in
+ {
+ packages = forAllSystems (system:
+ let pkgs = nixpkgs.legacyPackages.${system}; in
+ {
+ witryna = pkgs.rustPlatform.buildRustPackage {
+ pname = "witryna";
+ version = (builtins.fromTOML (builtins.readFile ./Cargo.toml)).package.version;
+ src = ./.;
+ cargoLock.lockFile = ./Cargo.lock;
+ nativeBuildInputs = [ pkgs.installShellFiles pkgs.pkg-config ];
+ buildInputs = [ pkgs.openssl ];
+ nativeCheckInputs = [ pkgs.git ];
+ postInstall = ''
+ installManPage man/witryna.1
+ installManPage man/witryna.toml.5
+ mkdir -p $out/share/doc/witryna/examples
+ cp -r examples/* $out/share/doc/witryna/examples/
+ cp README.md $out/share/doc/witryna/
+ '';
+ meta = with pkgs.lib; {
+ description = "Minimalist Git-based static site deployment orchestrator";
+ homepage = "https://git.craftknight.com/dawid/witryna";
+ license = licenses.mit;
+ mainProgram = "witryna";
+ };
+ };
+ default = self.packages.${system}.witryna;
+ }
+ );
+
+ nixosModules.witryna = import ./nix/module.nix self;
+ nixosModules.default = self.nixosModules.witryna;
+
+ # NixOS VM test - verifies the module works
+ checks.x86_64-linux.nixos-test = nixpkgs.legacyPackages.x86_64-linux.nixosTest {
+ name = "witryna";
+ nodes.machine = { pkgs, ... }: {
+ imports = [ self.nixosModules.witryna ];
+ environment.systemPackages = [ pkgs.podman ];
+ virtualisation.podman.enable = true;
+ services.witryna = {
+ enable = true;
+ configFile = pkgs.writeText "witryna.toml" ''
+ listen_address = "127.0.0.1:8080"
+ base_dir = "/var/lib/witryna"
+ log_dir = "/var/log/witryna"
+ log_level = "debug"
+ container_runtime = "podman"
+ '';
+ };
+ };
+ testScript = ''
+ machine.wait_for_unit("witryna.service")
+ machine.succeed("curl -sf http://127.0.0.1:8080/health")
+ '';
+ };
+ };
+}
diff --git a/man/witryna.1 b/man/witryna.1
new file mode 100644
index 0000000..473d395
--- /dev/null
+++ b/man/witryna.1
@@ -0,0 +1,131 @@
+.TH witryna 1 2026-02-15 "witryna 0.2.0"
+.SH NAME
+witryna \- Minimalist Git\-based static site deployment orchestrator
+.SH SYNOPSIS
+\fBwitryna\fR <\fIcommand\fR> [\fB\-\-config\fR \fIFILE\fR] [\fIargs\fR]
+.SH DESCRIPTION
+Minimalist Git\-based static site deployment orchestrator.
+.PP
+Witryna listens for webhook HTTP requests, pulls the corresponding Git
+repository (with automatic Git LFS fetch and submodule initialization),
+runs a user\-defined build command inside an ephemeral container and
+publishes the resulting assets via atomic symlink switching.
+.PP
+A health\-check endpoint is available at GET /health (returns 200 OK).
+.PP
+Witryna does not serve files, terminate TLS, or manage DNS.
+It is designed to sit behind a reverse proxy (Nginx, Caddy, etc.).
+.SH "SUBCOMMANDS"
+.TP
+\fBserve\fR [\fB\-\-config\fR \fIFILE\fR]
+Start the deployment server (foreground).
+.TP
+\fBvalidate\fR [\fB\-\-config\fR \fIFILE\fR]
+Validate configuration file and print summary.
+.TP
+\fBrun\fR [\fB\-v\fR|\fB\-\-verbose\fR] [\fB\-\-config\fR \fIFILE\fR] \fIsite\fR
+Trigger a one\-off build for a site (synchronous, no server).
+Use \fB\-v\fR to stream full build output to stderr in real\-time.
+.TP
+\fBstatus\fR [\fB\-\-json\fR] [\fB\-\-config\fR \fIFILE\fR] [\fIsite\fR]
+Show deployment status for configured sites.
+If \fIsite\fR is given, show the last 10 deployments for that site.
+Use \fB\-\-json\fR for machine\-readable JSON output.
+A \fB+\fR marker indicates the active deployment (the build the \fBcurrent\fR symlink points to).
+.TP
+\fBswitch\fR [\fB\-\-config\fR \fIFILE\fR] \fIsite\fR \fIbuild\fR
+Switch the active build for a site. Atomically updates the \fBcurrent\fR symlink
+to point to the specified build timestamp. Use \fBwitryna status\fR \fIsite\fR to
+list available builds with their timestamps. Does not run post\-deploy hooks.
+.TP
+\fBcleanup\fR [\fB\-\-keep\fR \fIN\fR] [\fB\-\-config\fR \fIFILE\fR] [\fIsite\fR]
+Remove old builds and logs.
+If \fIsite\fR is given, clean only that site; otherwise clean all.
+\fB\-\-keep\fR \fIN\fR overrides the \fBmax_builds_to_keep\fR config value.
+.SH OPTIONS
+.TP
+\fB\-\-config\fR \fIFILE\fR
+Path to the configuration file (available on every subcommand).
+If not specified, searches: ./witryna.toml, $XDG_CONFIG_HOME/witryna/witryna.toml,
+/etc/witryna/witryna.toml.
+.TP
+\fB\-\-help\fR
+Print help information.
+.SH "SIGNALS"
+.TP
+\fBSIGHUP\fR
+Reload configuration from \fIwitryna.toml\fR without restarting the server.
+Sites can be added, removed, or modified on the fly.
+Changes to \fBlisten_address\fR, \fBbase_dir\fR, \fBlog_dir\fR,
+and \fBlog_level\fR are detected but require a full restart to take effect;
+a warning is logged when these fields differ.
+.TP
+\fBSIGTERM\fR, \fBSIGINT\fR
+Initiate graceful shutdown.
+In\-progress builds are allowed to finish before the process exits.
+.SH "EXIT STATUS"
+.TP
+\fB0\fR
+Clean shutdown after SIGTERM/SIGINT (\fBserve\fR), configuration valid (\fBvalidate\fR),
+build succeeded (\fBrun\fR), status displayed (\fBstatus\fR), or build switched (\fBswitch\fR).
+Post\-deploy hook failure is non\-fatal and still exits 0.
+Hooks run on both successful and failed builds.
+.TP
+\fB1\fR
+Startup failure, validation error, build failure, site not found, configuration error,
+or command\-line usage error (unknown flag, missing subcommand, etc.).
+.SH "INSTALLATION"
+When installed via deb or rpm packages, the post\-install script automatically
+detects the available container runtime and configures system\-level access:
+.TP
+\fBDocker\fR
+The \fBwitryna\fR user is added to the \fBdocker\fR group and a systemd
+override is installed at
+\fI/etc/systemd/system/witryna.service.d/10\-runtime.conf\fR
+granting access to the Docker socket.
+.TP
+\fBPodman\fR
+Subordinate UID/GID ranges are allocated (100000\-165535), user lingering is
+enabled, and a systemd override is installed that disables
+\fBRestrictNamespaces\fR and sets \fBXDG_RUNTIME_DIR\fR.
+.PP
+If neither runtime is found at install time, a warning is printed.
+Install a runtime and reinstall the package, or manually copy the appropriate
+override template from \fI/usr/share/doc/witryna/examples/systemd/\fR to
+\fI/etc/systemd/system/witryna.service.d/10\-runtime.conf\fR.
+.SH "FILES"
+.TP
+\fI/etc/witryna/witryna.toml\fR
+Conventional configuration path for system\-wide installs.
+The shipped systemd unit passes \fB\-\-config /etc/witryna/witryna.toml\fR
+explicitly; without \fB\-\-config\fR the CLI searches \fI./witryna.toml\fR,
+\fI$XDG_CONFIG_HOME/witryna/witryna.toml\fR, then
+\fI/etc/witryna/witryna.toml\fR, in that order (see OPTIONS).
+.TP
+\fI/var/lib/witryna/clones/<site>/\fR
+Git repository clones.
+.TP
+\fI/var/lib/witryna/builds/<site>/<timestamp>/\fR
+Timestamped build outputs.
+.TP
+\fI/var/lib/witryna/builds/<site>/current\fR
+Symlink to the latest successful build.
+.TP
+\fI/var/lib/witryna/builds/<site>/state.json\fR
+Current build state (building, success, failed, hook\ failed).
+Written atomically at each pipeline phase; read by \fBwitryna status\fR
+to show in\-progress builds with elapsed time.
+.TP
+\fI/var/lib/witryna/cache/<site>/\fR
+Persistent build cache volumes.
+.TP
+\fI/var/log/witryna/<site>/<timestamp>.log\fR
+Per\-build log files (site, timestamp, git commit, image, duration, status, stdout, stderr).
+.TP
+\fI/var/log/witryna/<site>/<timestamp>\-hook.log\fR
+Post\-deploy hook output (if configured).
+.SH "SEE ALSO"
+\fBwitryna.toml\fR(5)
+.SH VERSION
+v0.2.0
+.SH AUTHORS
+Dawid Rycerz
diff --git a/man/witryna.toml.5 b/man/witryna.toml.5
index 29c0331..78a6ece 100644
--- a/man/witryna.toml.5
+++ b/man/witryna.toml.5
@@ -1,11 +1,11 @@
-.TH WITRYNA.TOML 5 "2026-02-10" "witryna 0.1.0" "Witryna Configuration"
+.TH WITRYNA.TOML 5 "2026-02-15" "witryna 0.2.0" "Witryna Configuration"
.SH NAME
witryna.toml \- configuration file for \fBwitryna\fR(1)
.SH DESCRIPTION
\fBwitryna.toml\fR is a TOML file that configures the \fBwitryna\fR static site
deployment orchestrator.
It defines the HTTP listen address, container runtime, directory layout,
-logging, rate limiting, and zero or more site definitions with optional build
+logging, and zero or more site definitions with optional build
overrides, polling intervals, cache volumes, and post\-deploy hooks.
.PP
The file is read at startup and can be reloaded at runtime by sending
@@ -38,13 +38,9 @@ Directory for per\-build log files.
Layout: \fI<log_dir>/<site>/<timestamp>.log\fR
.TP
\fBlog_level\fR = "\fIlevel\fR" (required)
-Tracing verbosity.
+Log verbosity.
Valid values: "trace", "debug", "info", "warn", "error" (case\-insensitive).
Can be overridden at runtime with the \fBRUST_LOG\fR environment variable.
-.TP
-\fBrate_limit_per_minute\fR = \fIn\fR (optional, default: 10)
-Maximum webhook requests per token per minute.
-Exceeding this limit returns HTTP 429.
.PP
\fBNote:\fR The server enforces a hard 1\ MB request body size limit.
This is not configurable and applies to all endpoints.
@@ -54,6 +50,7 @@ Number of timestamped build directories to retain per site.
Older builds and their corresponding log files are removed after each
successful publish.
Set to 0 to disable cleanup (keep all builds).
+See also: \fBwitryna cleanup\fR for manual pruning.
.TP
\fBgit_timeout\fR = "\fIduration\fR" (optional, default: "1m")
Maximum time allowed for each git operation (clone, fetch, reset, submodule update).
@@ -309,7 +306,7 @@ Witryna\-internal values.
.SH POST-DEPLOY HOOKS
.TP
\fBpost_deploy\fR = ["\fIcmd\fR", "\fIarg\fR", ...] (optional)
-Command to execute after a successful symlink switch.
+Command to execute after every build completion (success or failure).
Uses array form (no shell interpolation) for safety.
.RS
The hook receives context exclusively via environment variables:
@@ -327,6 +324,9 @@ Use this as the web server document root.
.TP
\fBWITRYNA_BUILD_TIMESTAMP\fR
Build timestamp (YYYYmmdd-HHMMSS-ffffff).
+.TP
+\fBWITRYNA_BUILD_STATUS\fR
+Build outcome: "success" or "failed".
.PP
The hook runs with a minimal environment: any user\-defined variables
from \fB[sites.env]\fR, followed by PATH, HOME, LANG,
@@ -336,8 +336,9 @@ It is subject to a 30\-second timeout and killed if exceeded.
Output is streamed to disk and logged to
\fI<log_dir>/<site>/<timestamp>\-hook.log\fR.
.PP
-Hook failure is \fBnon\-fatal\fR: the deployment is already live,
-and a warning is logged.
+Hook failure is \fBnon\-fatal\fR and a warning is logged.
+On successful builds the deployment is already live; on failed builds
+the hook still runs but no assets were published.
The exit code is recorded in the hook log.
A log file is written for every hook invocation (success, failure,
timeout, or spawn error).
@@ -393,9 +394,6 @@ log_dir = "/var/log/witryna"
# Tracing verbosity
log_level = "info"
-# Webhook rate limit (per token, per minute)
-rate_limit_per_minute = 10
-
# Keep the 5 most recent builds per site
max_builds_to_keep = 5
diff --git a/nix/module.nix b/nix/module.nix
new file mode 100644
index 0000000..6299ad9
--- /dev/null
+++ b/nix/module.nix
@@ -0,0 +1,68 @@
+flake: { config, lib, pkgs, ... }:
+let
+ cfg = config.services.witryna;
+in
+{
+ options.services.witryna = {
+ enable = lib.mkEnableOption "witryna deployment service";
+
+ package = lib.mkOption {
+ type = lib.types.package;
+ default = flake.packages.${pkgs.stdenv.hostPlatform.system}.witryna;
+ description = "The witryna package to use.";
+ };
+
+ configFile = lib.mkOption {
+ type = lib.types.path;
+ description = "Path to witryna.toml configuration file.";
+ };
+ };
+
+ config = lib.mkIf cfg.enable {
+ users.users.witryna = {
+ isSystemUser = true;
+ group = "witryna";
+ home = "/var/lib/witryna";
+ };
+ users.groups.witryna = {};
+
+ systemd.tmpfiles.rules = [
+ "d /var/lib/witryna 0755 witryna witryna -"
+ "d /var/lib/witryna/clones 0755 witryna witryna -"
+ "d /var/lib/witryna/builds 0755 witryna witryna -"
+ "d /var/lib/witryna/cache 0755 witryna witryna -"
+ "d /var/log/witryna 0755 witryna witryna -"
+ ];
+
+ systemd.services.witryna = {
+ description = "Witryna - Git-based static site deployment orchestrator";
+ after = [ "network-online.target" ];
+ wants = [ "network-online.target" ];
+ wantedBy = [ "multi-user.target" ];
+ serviceConfig = {
+ Type = "simple";
+ User = "witryna";
+ Group = "witryna";
+ ExecStart = "${cfg.package}/bin/witryna serve --config ${cfg.configFile}";
+ ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+ Restart = "on-failure";
+ RestartSec = 5;
+ # Security hardening (mirrors debian/witryna.service)
+ NoNewPrivileges = true;
+ PrivateTmp = true;
+ ProtectSystem = "strict";
+ ProtectKernelTunables = true;
+ ProtectKernelModules = true;
+ ProtectControlGroups = true;
+ RestrictNamespaces = true;
+ RestrictRealtime = true;
+ RestrictSUIDSGID = true;
+ LockPersonality = true;
+ MemoryDenyWriteExecute = true;
+ ReadWritePaths = [ "/var/lib/witryna" "/var/log/witryna" "/run/user" ];
+ LimitNOFILE = 65536;
+ LimitNPROC = 4096;
+ };
+ };
+ };
+}
diff --git a/scripts/witryna.service b/scripts/witryna.service
index 63d7c2f..1585dbc 100644
--- a/scripts/witryna.service
+++ b/scripts/witryna.service
@@ -38,7 +38,7 @@
[Unit]
Description=Witryna - Git-based static site deployment orchestrator
-Documentation=https://github.com/knightdave/witryna
+Documentation=https://git.craftknight.com/dawid/witryna
After=network-online.target
Wants=network-online.target
diff --git a/src/build.rs b/src/build.rs
index e887f64..b56e680 100644
--- a/src/build.rs
+++ b/src/build.rs
@@ -1,11 +1,11 @@
use anyhow::{Context as _, Result};
+use log::{debug, info};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::process::Stdio;
use std::time::{Duration, Instant};
use tokio::io::{AsyncWrite, AsyncWriteExt as _, BufWriter};
use tokio::process::Command;
-use tracing::{debug, info};
use crate::repo_config::RepoConfig;
@@ -82,13 +82,13 @@ impl std::error::Error for BuildFailure {}
///
/// Used for `--verbose` mode: streams build output to both a temp file (primary)
/// and stderr (secondary) simultaneously.
-pub(crate) struct TeeWriter<W> {
+pub struct TeeWriter<W> {
primary: W,
secondary: tokio::io::Stderr,
}
impl<W: AsyncWrite + Unpin> TeeWriter<W> {
- pub(crate) const fn new(primary: W, secondary: tokio::io::Stderr) -> Self {
+ pub const fn new(primary: W, secondary: tokio::io::Stderr) -> Self {
Self { primary, secondary }
}
}
@@ -125,56 +125,18 @@ impl<W: AsyncWrite + Unpin> AsyncWrite for TeeWriter<W> {
}
}
-/// Execute a containerized build for a site.
-///
-/// Stdout and stderr are streamed to the provided temporary files on disk
-/// instead of being buffered in memory. This removes unbounded memory usage
-/// for container builds.
-///
-/// # Arguments
-/// * `runtime` - Container runtime to use ("podman" or "docker")
-/// * `clone_dir` - Path to the cloned repository
-/// * `repo_config` - Build configuration from witryna.yaml
-/// * `cache_volumes` - Pairs of (`container_path`, `host_path`) for persistent cache mounts
-/// * `env` - User-defined environment variables to pass into the container via `--env`
-/// * `options` - Optional container resource limits and network mode
-/// * `stdout_file` - Temp file path for captured stdout
-/// * `stderr_file` - Temp file path for captured stderr
-/// * `timeout` - Maximum duration before killing the build
-/// * `verbose` - When true, also stream build output to stderr in real-time
-///
-/// # Errors
-///
-/// Returns an error if the container command times out, fails to execute,
-/// or exits with a non-zero status code (as a [`BuildFailure`]).
+/// Build the container CLI arguments for a build invocation.
///
-/// # Security
-/// - Uses typed arguments (no shell interpolation) per OWASP guidelines
-/// - Mounts clone directory as read-write (needed for build output)
-/// - Runs with minimal capabilities
-#[allow(clippy::implicit_hasher, clippy::too_many_arguments)]
-pub async fn execute(
+/// Assembles the full `run --rm ...` argument list including volume mounts,
+/// environment variables, resource limits, and the build command.
+fn build_container_args(
runtime: &str,
clone_dir: &Path,
repo_config: &RepoConfig,
cache_volumes: &[(String, PathBuf)],
env: &HashMap<String, String>,
options: &ContainerOptions,
- stdout_file: &Path,
- stderr_file: &Path,
- timeout: Duration,
- verbose: bool,
-) -> Result<BuildResult> {
- info!(
- image = %repo_config.image,
- command = %repo_config.command,
- path = %clone_dir.display(),
- "executing container build"
- );
-
- let start = Instant::now();
-
- // Build args dynamically to support optional cache volumes
+) -> Vec<String> {
let mut args = vec![
"run".to_owned(),
"--rm".to_owned(),
@@ -182,13 +144,13 @@ pub async fn execute(
format!("{}:/workspace:Z", clone_dir.display()),
];
- // Add cache volume mounts
+ // Cache volume mounts
for (container_path, host_path) in cache_volumes {
args.push("--volume".to_owned());
args.push(format!("{}:{}:Z", host_path.display(), container_path));
}
- // Add user-defined environment variables
+ // User-defined environment variables
for (key, value) in env {
args.push("--env".to_owned());
args.push(format!("{key}={value}"));
@@ -203,9 +165,6 @@ pub async fn execute(
if runtime == "podman" {
args.push("--userns=keep-id".to_owned());
} else {
- // Docker: container runs as root but workspace is owned by host UID.
- // DAC_OVERRIDE lets root bypass file permission checks.
- // Podman doesn't need this because --userns=keep-id maps to the host UID.
args.push("--cap-add=DAC_OVERRIDE".to_owned());
}
@@ -233,6 +192,59 @@ pub async fn execute(
repo_config.command.clone(),
]);
+ args
+}
+
+/// Execute a containerized build for a site.
+///
+/// Stdout and stderr are streamed to the provided temporary files on disk
+/// instead of being buffered in memory. This removes unbounded memory usage
+/// for container builds.
+///
+/// # Arguments
+/// * `runtime` - Container runtime to use ("podman" or "docker")
+/// * `clone_dir` - Path to the cloned repository
+/// * `repo_config` - Build configuration from witryna.yaml
+/// * `cache_volumes` - Pairs of (`container_path`, `host_path`) for persistent cache mounts
+/// * `env` - User-defined environment variables to pass into the container via `--env`
+/// * `options` - Optional container resource limits and network mode
+/// * `stdout_file` - Temp file path for captured stdout
+/// * `stderr_file` - Temp file path for captured stderr
+/// * `timeout` - Maximum duration before killing the build
+/// * `verbose` - When true, also stream build output to stderr in real-time
+///
+/// # Errors
+///
+/// Returns an error if the container command times out, fails to execute,
+/// or exits with a non-zero status code (as a [`BuildFailure`]).
+///
+/// # Security
+/// - Uses typed arguments (no shell interpolation) per OWASP guidelines
+/// - Mounts clone directory as read-write (needed for build output)
+/// - Runs with minimal capabilities
+#[allow(clippy::implicit_hasher, clippy::too_many_arguments)]
+pub async fn execute(
+ runtime: &str,
+ clone_dir: &Path,
+ repo_config: &RepoConfig,
+ cache_volumes: &[(String, PathBuf)],
+ env: &HashMap<String, String>,
+ options: &ContainerOptions,
+ stdout_file: &Path,
+ stderr_file: &Path,
+ timeout: Duration,
+ verbose: bool,
+) -> Result<BuildResult> {
+ info!(
+ "executing container build: image={} command={} path={}",
+ repo_config.image,
+ repo_config.command,
+ clone_dir.display()
+ );
+
+ let start = Instant::now();
+ let args = build_container_args(runtime, clone_dir, repo_config, cache_volumes, env, options);
+
// Spawn with piped stdout/stderr for streaming (OWASP: no shell interpolation)
let mut child = Command::new(runtime)
.args(&args)
@@ -265,7 +277,7 @@ pub async fn execute(
if verbose {
let mut stdout_tee = TeeWriter::new(stdout_file_writer, tokio::io::stderr());
let mut stderr_tee = TeeWriter::new(stderr_file_writer, tokio::io::stderr());
- run_build_process(
+ Box::pin(run_build_process(
child,
stdout_pipe,
stderr_pipe,
@@ -277,12 +289,12 @@ pub async fn execute(
clone_dir,
"container",
timeout,
- )
+ ))
.await
} else {
let mut stdout_writer = stdout_file_writer;
let mut stderr_writer = stderr_file_writer;
- run_build_process(
+ Box::pin(run_build_process(
child,
stdout_pipe,
stderr_pipe,
@@ -294,7 +306,7 @@ pub async fn execute(
clone_dir,
"container",
timeout,
- )
+ ))
.await
}
}
@@ -307,7 +319,7 @@ pub async fn execute(
/// meaningful error message in `BuildFailure::Display` without reading
/// the entire stderr file back into memory.
#[allow(clippy::indexing_slicing)] // buf[..n] bounded by read() return value
-pub(crate) async fn copy_with_tail<R, W>(
+pub async fn copy_with_tail<R, W>(
mut reader: R,
mut writer: W,
tail_size: usize,
@@ -386,7 +398,7 @@ where
if !status.success() {
let exit_code = status.code().unwrap_or(-1);
- debug!(exit_code, "{label} build failed");
+ debug!("{label} build failed: exit_code={exit_code}");
return Err(BuildFailure {
exit_code,
stdout_file: stdout_file.to_path_buf(),
@@ -398,7 +410,10 @@ where
}
let duration = start.elapsed();
- debug!(path = %clone_dir.display(), ?duration, "{label} build completed");
+ debug!(
+ "{label} build completed: path={} duration={duration:?}",
+ clone_dir.display()
+ );
Ok(BuildResult {
stdout_file: stdout_file.to_path_buf(),
stderr_file: stderr_file.to_path_buf(),
diff --git a/src/build_guard.rs b/src/build_guard.rs
index 0c7fed3..dd67bea 100644
--- a/src/build_guard.rs
+++ b/src/build_guard.rs
@@ -1,33 +1,35 @@
-use dashmap::DashSet;
-use std::sync::Arc;
+use std::collections::HashSet;
+use std::sync::{Arc, Mutex};
/// Manages per-site build scheduling: immediate execution and a depth-1 queue.
///
/// When a build is already in progress, a single rebuild can be queued.
/// Subsequent requests while a rebuild is already queued are collapsed (no-op).
pub struct BuildScheduler {
- pub in_progress: DashSet<String>,
- pub queued: DashSet<String>,
+ pub in_progress: Mutex<HashSet<String>>,
+ pub queued: Mutex<HashSet<String>>,
}
impl BuildScheduler {
#[must_use]
pub fn new() -> Self {
Self {
- in_progress: DashSet::new(),
- queued: DashSet::new(),
+ in_progress: Mutex::new(HashSet::new()),
+ queued: Mutex::new(HashSet::new()),
}
}
/// Queue a rebuild for a site that is currently building.
/// Returns `true` if newly queued, `false` if already queued (collapse).
pub(crate) fn try_queue(&self, site_name: &str) -> bool {
- self.queued.insert(site_name.to_owned())
+ #[allow(clippy::unwrap_used)]
+ self.queued.lock().unwrap().insert(site_name.to_owned())
}
/// Check and clear queued rebuild. Returns `true` if there was one.
pub(crate) fn take_queued(&self, site_name: &str) -> bool {
- self.queued.remove(site_name).is_some()
+ #[allow(clippy::unwrap_used)]
+ self.queued.lock().unwrap().remove(site_name)
}
}
@@ -47,7 +49,13 @@ pub(crate) struct BuildGuard {
impl BuildGuard {
pub(crate) fn try_acquire(site_name: String, scheduler: &Arc<BuildScheduler>) -> Option<Self> {
- if scheduler.in_progress.insert(site_name.clone()) {
+ #[allow(clippy::unwrap_used)]
+ let inserted = scheduler
+ .in_progress
+ .lock()
+ .unwrap()
+ .insert(site_name.clone());
+ if inserted {
Some(Self {
site_name,
scheduler: Arc::clone(scheduler),
@@ -60,7 +68,12 @@ impl BuildGuard {
impl Drop for BuildGuard {
fn drop(&mut self) {
- self.scheduler.in_progress.remove(&self.site_name);
+ #[allow(clippy::unwrap_used)]
+ self.scheduler
+ .in_progress
+ .lock()
+ .unwrap()
+ .remove(&self.site_name);
}
}
@@ -74,7 +87,7 @@ mod tests {
let scheduler = Arc::new(BuildScheduler::new());
let guard = BuildGuard::try_acquire("my-site".to_owned(), &scheduler);
assert!(guard.is_some());
- assert!(scheduler.in_progress.contains("my-site"));
+ assert!(scheduler.in_progress.lock().unwrap().contains("my-site"));
}
#[test]
@@ -90,10 +103,10 @@ mod tests {
let scheduler = Arc::new(BuildScheduler::new());
{
let _guard = BuildGuard::try_acquire("my-site".to_owned(), &scheduler);
- assert!(scheduler.in_progress.contains("my-site"));
+ assert!(scheduler.in_progress.lock().unwrap().contains("my-site"));
}
// Guard dropped — lock released
- assert!(!scheduler.in_progress.contains("my-site"));
+ assert!(!scheduler.in_progress.lock().unwrap().contains("my-site"));
let again = BuildGuard::try_acquire("my-site".to_owned(), &scheduler);
assert!(again.is_some());
}
@@ -102,7 +115,7 @@ mod tests {
fn scheduler_try_queue_succeeds() {
let scheduler = BuildScheduler::new();
assert!(scheduler.try_queue("my-site"));
- assert!(scheduler.queued.contains("my-site"));
+ assert!(scheduler.queued.lock().unwrap().contains("my-site"));
}
#[test]
@@ -117,7 +130,7 @@ mod tests {
let scheduler = BuildScheduler::new();
scheduler.try_queue("my-site");
assert!(scheduler.take_queued("my-site"));
- assert!(!scheduler.queued.contains("my-site"));
+ assert!(!scheduler.queued.lock().unwrap().contains("my-site"));
}
#[test]
diff --git a/src/cleanup.rs b/src/cleanup.rs
index ced8320..b2b068b 100644
--- a/src/cleanup.rs
+++ b/src/cleanup.rs
@@ -1,6 +1,7 @@
+use crate::state;
use anyhow::{Context as _, Result};
+use log::{debug, info, warn};
use std::path::Path;
-use tracing::{debug, info, warn};
/// Result of a cleanup operation.
#[derive(Debug, Default)]
@@ -35,7 +36,7 @@ pub async fn cleanup_old_builds(
) -> Result<CleanupResult> {
// If max_to_keep is 0, keep all builds
if max_to_keep == 0 {
- debug!(%site_name, "max_builds_to_keep is 0, skipping cleanup");
+ debug!("[{site_name}] max_builds_to_keep is 0, skipping cleanup");
return Ok(CleanupResult::default());
}
@@ -44,7 +45,7 @@ pub async fn cleanup_old_builds(
// Check if builds directory exists
if !builds_dir.exists() {
- debug!(%site_name, "builds directory does not exist, skipping cleanup");
+ debug!("[{site_name}] builds directory does not exist, skipping cleanup");
return Ok(CleanupResult::default());
}
@@ -59,10 +60,14 @@ pub async fn cleanup_old_builds(
// Calculate how many to remove
let to_remove = build_timestamps.len().saturating_sub(max_to_keep as usize);
if to_remove == 0 {
- debug!(%site_name, count = build_timestamps.len(), max = max_to_keep, "no builds to remove");
+ debug!(
+ "[{site_name}] no builds to remove: count={} max={max_to_keep}",
+ build_timestamps.len()
+ );
}
// Remove oldest builds (they're at the end after reverse sort)
+ let mut removed_timestamps = Vec::new();
for timestamp in build_timestamps.iter().skip(max_to_keep as usize) {
let build_path = builds_dir.join(timestamp);
let log_path = site_log_dir.join(format!("{timestamp}.log"));
@@ -70,11 +75,15 @@ pub async fn cleanup_old_builds(
// Remove build directory
match tokio::fs::remove_dir_all(&build_path).await {
Ok(()) => {
- debug!(path = %build_path.display(), "removed old build");
+ debug!("removed old build: {}", build_path.display());
result.builds_removed += 1;
+ removed_timestamps.push(timestamp.clone());
}
Err(e) => {
- warn!(path = %build_path.display(), error = %e, "failed to remove old build");
+ warn!(
+ "failed to remove old build: path={} error={e}",
+ build_path.display()
+ );
}
}
@@ -82,11 +91,14 @@ pub async fn cleanup_old_builds(
if log_path.exists() {
match tokio::fs::remove_file(&log_path).await {
Ok(()) => {
- debug!(path = %log_path.display(), "removed old log");
+ debug!("removed old log: {}", log_path.display());
result.logs_removed += 1;
}
Err(e) => {
- warn!(path = %log_path.display(), error = %e, "failed to remove old log");
+ warn!(
+ "failed to remove old log: path={} error={e}",
+ log_path.display()
+ );
}
}
}
@@ -95,18 +107,24 @@ pub async fn cleanup_old_builds(
let hook_log_path = site_log_dir.join(format!("{timestamp}-hook.log"));
match tokio::fs::remove_file(&hook_log_path).await {
Ok(()) => {
- debug!(path = %hook_log_path.display(), "removed old hook log");
+ debug!("removed old hook log: {}", hook_log_path.display());
result.logs_removed += 1;
}
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
// Not every build has a hook — silently skip
}
Err(e) => {
- warn!(path = %hook_log_path.display(), error = %e, "failed to remove old hook log");
+ warn!(
+ "failed to remove old hook log: path={} error={e}",
+ hook_log_path.display()
+ );
}
}
}
+ // Prune removed builds from state.json
+ state::remove_builds(base_dir, site_name, &removed_timestamps).await;
+
// Remove orphaned temp files (crash recovery)
if site_log_dir.exists()
&& let Ok(mut entries) = tokio::fs::read_dir(&site_log_dir).await
@@ -117,10 +135,13 @@ pub async fn cleanup_old_builds(
let path = entry.path();
match tokio::fs::remove_file(&path).await {
Ok(()) => {
- debug!(path = %path.display(), "removed orphaned temp file");
+ debug!("removed orphaned temp file: {}", path.display());
}
Err(e) => {
- warn!(path = %path.display(), error = %e, "failed to remove orphaned temp file");
+ warn!(
+ "failed to remove orphaned temp file: path={} error={e}",
+ path.display()
+ );
}
}
}
@@ -129,10 +150,8 @@ pub async fn cleanup_old_builds(
if result.builds_removed > 0 || result.logs_removed > 0 {
info!(
- %site_name,
- builds_removed = result.builds_removed,
- logs_removed = result.logs_removed,
- "cleanup completed"
+ "[{site_name}] cleanup completed: builds_removed={} logs_removed={}",
+ result.builds_removed, result.logs_removed
);
}
@@ -142,7 +161,11 @@ pub async fn cleanup_old_builds(
/// List all build timestamps in a builds directory.
///
/// Returns directory names that look like timestamps, excluding 'current' symlink.
-async fn list_build_timestamps(builds_dir: &Path) -> Result<Vec<String>> {
+///
+/// # Errors
+///
+/// Returns an error if the builds directory cannot be read or entries cannot be inspected.
+pub async fn list_build_timestamps(builds_dir: &Path) -> Result<Vec<String>> {
let mut timestamps = Vec::new();
let mut entries = tokio::fs::read_dir(builds_dir)
@@ -176,7 +199,8 @@ async fn list_build_timestamps(builds_dir: &Path) -> Result<Vec<String>> {
/// Check if a string looks like a valid timestamp format.
///
/// Expected format: YYYYMMDD-HHMMSS-microseconds (e.g., 20260126-143000-123456)
-fn looks_like_timestamp(s: &str) -> bool {
+#[must_use]
+pub fn looks_like_timestamp(s: &str) -> bool {
let parts: Vec<&str> = s.split('-').collect();
let [date, time, micros, ..] = parts.as_slice() else {
return false;
@@ -410,6 +434,38 @@ mod tests {
}
#[tokio::test]
+ async fn cleanup_does_not_delete_state_json() {
+ let base_dir = temp_dir("cleanup-test").await;
+ let log_dir = base_dir.join("logs");
+ let site = "test-site";
+
+ // Create 3 builds (keep 1 → remove 2)
+ for ts in &[
+ "20260126-100000-000001",
+ "20260126-100000-000002",
+ "20260126-100000-000003",
+ ] {
+ create_build_and_log(&base_dir, &log_dir, site, ts).await;
+ }
+
+ // Write a state.json in the builds dir
+ let state_path = base_dir.join("builds").join(site).join("state.json");
+ fs::write(&state_path, r#"{"status":"success"}"#)
+ .await
+ .unwrap();
+
+ let result = cleanup_old_builds(&base_dir, &log_dir, site, 1).await;
+ assert!(result.is_ok());
+ let result = result.unwrap();
+ assert_eq!(result.builds_removed, 2);
+
+ // state.json must still exist
+ assert!(state_path.exists(), "state.json must not be deleted");
+
+ cleanup(&base_dir).await;
+ }
+
+ #[tokio::test]
async fn cleanup_removes_orphaned_tmp_files() {
let base_dir = temp_dir("cleanup-test").await;
let log_dir = base_dir.join("logs");
diff --git a/src/cli.rs b/src/cli.rs
index ab191a4..4d958ea 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -1,57 +1,114 @@
-use clap::{Parser, Subcommand};
+use argh::FromArgs;
use std::path::PathBuf;
-/// Witryna - minimalist Git-based static site deployment orchestrator
-#[derive(Debug, Parser)]
-#[command(
- name = "witryna",
- version,
- author,
- about = "Minimalist Git-based static site deployment orchestrator",
- long_about = "Minimalist Git-based static site deployment orchestrator.\n\n\
- Witryna listens for webhook HTTP requests, pulls the corresponding Git \
- repository (with automatic Git LFS fetch and submodule initialization), \
- runs a user-defined build command inside an ephemeral container and \
- publishes the resulting assets via atomic symlink switching.\n\n\
- A health-check endpoint is available at GET /health (returns 200 OK).\n\n\
- Witryna does not serve files, terminate TLS, or manage DNS. \
- It is designed to sit behind a reverse proxy (Nginx, Caddy, etc.).",
- subcommand_required = true,
- arg_required_else_help = true
-)]
+/// Minimalist Git-based static site deployment orchestrator
+#[derive(Debug, FromArgs)]
pub struct Cli {
- /// Path to the configuration file.
- /// If not specified, searches: ./witryna.toml, $XDG_CONFIG_HOME/witryna/witryna.toml, /etc/witryna/witryna.toml
- #[arg(long, global = true, value_name = "FILE")]
- pub config: Option<PathBuf>,
-
- #[command(subcommand)]
+ #[argh(subcommand)]
pub command: Command,
}
-#[derive(Debug, Subcommand)]
+#[derive(Debug, FromArgs)]
+#[argh(subcommand)]
pub enum Command {
- /// Start the deployment server (foreground)
- Serve,
- /// Validate configuration file and print summary
- Validate,
- /// Trigger a one-off build for a site (synchronous, no server)
- Run {
- /// Site name (as defined in witryna.toml)
- site: String,
- /// Stream full build output to stderr in real-time
- #[arg(long, short)]
- verbose: bool,
- },
- /// Show deployment status for configured sites
- Status {
- /// Show last 10 deployments for a single site
- #[arg(long, short)]
- site: Option<String>,
- /// Output in JSON format
- #[arg(long)]
- json: bool,
- },
+ Serve(ServeCmd),
+ Validate(ValidateCmd),
+ Run(RunCmd),
+ Status(StatusCmd),
+ Switch(SwitchCmd),
+ Cleanup(CleanupCmd),
+}
+
+impl Command {
+ #[must_use]
+ pub fn config(&self) -> Option<&std::path::Path> {
+ match self {
+ Self::Serve(c) => c.config.as_deref(),
+ Self::Validate(c) => c.config.as_deref(),
+ Self::Run(c) => c.config.as_deref(),
+ Self::Status(c) => c.config.as_deref(),
+ Self::Switch(c) => c.config.as_deref(),
+ Self::Cleanup(c) => c.config.as_deref(),
+ }
+ }
+}
+
+/// Start the deployment server (foreground)
+#[derive(Debug, FromArgs)]
+#[argh(subcommand, name = "serve")]
+pub struct ServeCmd {
+ /// path to configuration file
+ #[argh(option)]
+ pub config: Option<PathBuf>,
+}
+
+/// Validate configuration file and print summary
+#[derive(Debug, FromArgs)]
+#[argh(subcommand, name = "validate")]
+pub struct ValidateCmd {
+ /// path to configuration file
+ #[argh(option)]
+ pub config: Option<PathBuf>,
+}
+
+/// Trigger a one-off build for a site (synchronous, no server)
+#[derive(Debug, FromArgs)]
+#[argh(subcommand, name = "run")]
+pub struct RunCmd {
+ /// path to configuration file
+ #[argh(option)]
+ pub config: Option<PathBuf>,
+ /// site name (as defined in witryna.toml)
+ #[argh(positional)]
+ pub site: String,
+ /// stream full build output to stderr in real-time
+ #[argh(switch, short = 'v')]
+ pub verbose: bool,
+}
+
+/// Show deployment status for configured sites
+#[derive(Debug, FromArgs)]
+#[argh(subcommand, name = "status")]
+pub struct StatusCmd {
+ /// path to configuration file
+ #[argh(option)]
+ pub config: Option<PathBuf>,
+ /// site name (if omitted, shows all sites)
+ #[argh(positional)]
+ pub site: Option<String>,
+ /// output in JSON format
+ #[argh(switch)]
+ pub json: bool,
+}
+
+/// Switch the active build symlink (rollback)
+#[derive(Debug, FromArgs)]
+#[argh(subcommand, name = "switch")]
+pub struct SwitchCmd {
+ /// path to configuration file
+ #[argh(option)]
+ pub config: Option<PathBuf>,
+ /// site name (as defined in witryna.toml)
+ #[argh(positional)]
+ pub site: String,
+ /// build timestamp to switch to (from `witryna status <site>`)
+ #[argh(positional)]
+ pub build: String,
+}
+
+/// Remove old builds and logs
+#[derive(Debug, FromArgs)]
+#[argh(subcommand, name = "cleanup")]
+pub struct CleanupCmd {
+ /// path to configuration file
+ #[argh(option)]
+ pub config: Option<PathBuf>,
+ /// site name (if omitted, cleans all sites)
+ #[argh(positional)]
+ pub site: Option<String>,
+ /// number of builds to keep per site (overrides `max_builds_to_keep`)
+ #[argh(option)]
+ pub keep: Option<u32>,
}
#[cfg(test)]
@@ -61,74 +118,168 @@ mod tests {
#[test]
fn run_parses_site_name() {
- let cli = Cli::try_parse_from(["witryna", "run", "my-site"]).unwrap();
+ let cli = Cli::from_args(&["witryna"], &["run", "my-site"]).unwrap();
match cli.command {
- Command::Run { site, verbose } => {
- assert_eq!(site, "my-site");
- assert!(!verbose);
+ Command::Run(cmd) => {
+ assert_eq!(cmd.site, "my-site");
+ assert!(!cmd.verbose);
}
- _ => panic!("expected Run command"),
+ _ => unreachable!("expected Run command"),
}
}
#[test]
fn run_parses_verbose_flag() {
- let cli = Cli::try_parse_from(["witryna", "run", "my-site", "--verbose"]).unwrap();
+ let cli = Cli::from_args(&["witryna"], &["run", "my-site", "--verbose"]).unwrap();
match cli.command {
- Command::Run { site, verbose } => {
- assert_eq!(site, "my-site");
- assert!(verbose);
+ Command::Run(cmd) => {
+ assert_eq!(cmd.site, "my-site");
+ assert!(cmd.verbose);
}
- _ => panic!("expected Run command"),
+ _ => unreachable!("expected Run command"),
}
}
#[test]
fn status_parses_without_flags() {
- let cli = Cli::try_parse_from(["witryna", "status"]).unwrap();
+ let cli = Cli::from_args(&["witryna"], &["status"]).unwrap();
match cli.command {
- Command::Status { site, json } => {
- assert!(site.is_none());
- assert!(!json);
+ Command::Status(cmd) => {
+ assert!(cmd.site.is_none());
+ assert!(!cmd.json);
}
- _ => panic!("expected Status command"),
+ _ => unreachable!("expected Status command"),
}
}
#[test]
fn status_parses_site_filter() {
- let cli = Cli::try_parse_from(["witryna", "status", "--site", "my-site"]).unwrap();
+ let cli = Cli::from_args(&["witryna"], &["status", "my-site"]).unwrap();
match cli.command {
- Command::Status { site, json } => {
- assert_eq!(site.as_deref(), Some("my-site"));
- assert!(!json);
+ Command::Status(cmd) => {
+ assert_eq!(cmd.site.as_deref(), Some("my-site"));
+ assert!(!cmd.json);
}
- _ => panic!("expected Status command"),
+ _ => unreachable!("expected Status command"),
}
}
#[test]
fn status_parses_json_flag() {
- let cli = Cli::try_parse_from(["witryna", "status", "--json"]).unwrap();
+ let cli = Cli::from_args(&["witryna"], &["status", "--json"]).unwrap();
+ match cli.command {
+ Command::Status(cmd) => {
+ assert!(cmd.site.is_none());
+ assert!(cmd.json);
+ }
+ _ => unreachable!("expected Status command"),
+ }
+ }
+
+ #[test]
+ fn cleanup_parses_without_args() {
+ let cli = Cli::from_args(&["witryna"], &["cleanup"]).unwrap();
+ match cli.command {
+ Command::Cleanup(cmd) => {
+ assert!(cmd.site.is_none());
+ assert!(cmd.keep.is_none());
+ }
+ _ => unreachable!("expected Cleanup command"),
+ }
+ }
+
+ #[test]
+ fn cleanup_parses_site_name() {
+ let cli = Cli::from_args(&["witryna"], &["cleanup", "my-site"]).unwrap();
+ match cli.command {
+ Command::Cleanup(cmd) => {
+ assert_eq!(cmd.site.as_deref(), Some("my-site"));
+ assert!(cmd.keep.is_none());
+ }
+ _ => unreachable!("expected Cleanup command"),
+ }
+ }
+
+ #[test]
+ fn cleanup_parses_keep_flag() {
+ let cli = Cli::from_args(&["witryna"], &["cleanup", "--keep", "3"]).unwrap();
+ match cli.command {
+ Command::Cleanup(cmd) => {
+ assert!(cmd.site.is_none());
+ assert_eq!(cmd.keep, Some(3));
+ }
+ _ => unreachable!("expected Cleanup command"),
+ }
+ }
+
+ #[test]
+ fn cleanup_parses_site_and_keep() {
+ let cli = Cli::from_args(&["witryna"], &["cleanup", "my-site", "--keep", "3"]).unwrap();
+ match cli.command {
+ Command::Cleanup(cmd) => {
+ assert_eq!(cmd.site.as_deref(), Some("my-site"));
+ assert_eq!(cmd.keep, Some(3));
+ }
+ _ => unreachable!("expected Cleanup command"),
+ }
+ }
+
+ #[test]
+ fn switch_parses_site_and_build() {
+ let cli = Cli::from_args(
+ &["witryna"],
+ &["switch", "my-site", "20260126-143000-123456"],
+ )
+ .unwrap();
+ match cli.command {
+ Command::Switch(cmd) => {
+ assert_eq!(cmd.site, "my-site");
+ assert_eq!(cmd.build, "20260126-143000-123456");
+ assert!(cmd.config.is_none());
+ }
+ _ => unreachable!("expected Switch command"),
+ }
+ }
+
+ #[test]
+ fn switch_parses_config_flag() {
+ let cli = Cli::from_args(
+ &["witryna"],
+ &[
+ "switch",
+ "--config",
+ "/etc/witryna.toml",
+ "my-site",
+ "20260126-143000-123456",
+ ],
+ )
+ .unwrap();
match cli.command {
- Command::Status { site, json } => {
- assert!(site.is_none());
- assert!(json);
+ Command::Switch(cmd) => {
+ assert_eq!(cmd.site, "my-site");
+ assert_eq!(cmd.build, "20260126-143000-123456");
+ assert_eq!(
+ cmd.config,
+ Some(std::path::PathBuf::from("/etc/witryna.toml"))
+ );
}
- _ => panic!("expected Status command"),
+ _ => unreachable!("expected Switch command"),
}
}
#[test]
fn config_flag_is_optional() {
- let cli = Cli::try_parse_from(["witryna", "status"]).unwrap();
- assert!(cli.config.is_none());
+ let cli = Cli::from_args(&["witryna"], &["status"]).unwrap();
+ assert!(cli.command.config().is_none());
}
#[test]
fn config_flag_explicit_path() {
let cli =
- Cli::try_parse_from(["witryna", "--config", "/etc/witryna.toml", "status"]).unwrap();
- assert_eq!(cli.config, Some(PathBuf::from("/etc/witryna.toml")));
+ Cli::from_args(&["witryna"], &["status", "--config", "/etc/witryna.toml"]).unwrap();
+ assert_eq!(
+ cli.command.config(),
+ Some(std::path::Path::new("/etc/witryna.toml"))
+ );
}
}
diff --git a/src/config.rs b/src/config.rs
index 63f3447..d79e91c 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -1,20 +1,16 @@
use crate::repo_config;
use anyhow::{Context as _, Result, bail};
+use log::LevelFilter;
use serde::{Deserialize, Deserializer};
use std::collections::{HashMap, HashSet};
use std::net::SocketAddr;
use std::path::{Component, PathBuf};
use std::time::Duration;
-use tracing::level_filters::LevelFilter;
fn default_log_dir() -> PathBuf {
PathBuf::from("/var/log/witryna")
}
-const fn default_rate_limit() -> u32 {
- 10
-}
-
const fn default_max_builds_to_keep() -> u32 {
5
}
@@ -50,8 +46,6 @@ pub struct Config {
#[serde(default = "default_log_dir")]
pub log_dir: PathBuf,
pub log_level: String,
- #[serde(default = "default_rate_limit")]
- pub rate_limit_per_minute: u32,
#[serde(default = "default_max_builds_to_keep")]
pub max_builds_to_keep: u32,
/// Optional global git operation timeout (e.g., "2m", "5m").
@@ -109,7 +103,7 @@ pub struct SiteConfig {
#[serde(default)]
pub cache_dirs: Option<Vec<String>>,
/// Optional post-deploy hook command (array form, no shell).
- /// Runs after successful symlink switch. Non-fatal on failure.
+ /// Runs after every build (success or failure). Non-fatal on failure.
#[serde(default)]
pub post_deploy: Option<Vec<String>>,
/// Optional environment variables passed to container builds and post-deploy hooks.
@@ -265,17 +259,14 @@ impl Config {
)
})?;
} else if let Some(path) = &site.webhook_token_file {
- site.webhook_token = tokio::fs::read_to_string(path)
- .await
- .with_context(|| {
- format!(
- "site '{}': failed to read webhook_token_file '{}'",
- site.name,
- path.display()
- )
- })?
- .trim()
- .to_owned();
+ let token_content = tokio::fs::read_to_string(path).await.with_context(|| {
+ format!(
+ "site '{}': failed to read webhook_token_file '{}'",
+ site.name,
+ path.display()
+ )
+ })?;
+ token_content.trim().clone_into(&mut site.webhook_token);
}
}
Ok(())
@@ -284,7 +275,6 @@ impl Config {
fn validate(&self) -> Result<()> {
self.validate_listen_address()?;
self.validate_log_level()?;
- self.validate_rate_limit()?;
self.validate_git_timeout()?;
self.validate_container_runtime()?;
self.validate_sites()?;
@@ -295,13 +285,12 @@ impl Config {
if let Some(timeout) = self.git_timeout {
if timeout < MIN_GIT_TIMEOUT {
bail!(
- "git_timeout is too short ({:?}): minimum is {}s",
- timeout,
+ "git_timeout is too short ({timeout:?}): minimum is {}s",
MIN_GIT_TIMEOUT.as_secs()
);
}
if timeout > MAX_GIT_TIMEOUT {
- bail!("git_timeout is too long ({:?}): maximum is 1h", timeout,);
+ bail!("git_timeout is too long ({timeout:?}): maximum is 1h");
}
}
Ok(())
@@ -333,13 +322,6 @@ impl Config {
Ok(())
}
- fn validate_rate_limit(&self) -> Result<()> {
- if self.rate_limit_per_minute == 0 {
- bail!("rate_limit_per_minute must be greater than 0");
- }
- Ok(())
- }
-
fn validate_sites(&self) -> Result<()> {
let mut seen_names = HashSet::new();
@@ -369,12 +351,12 @@ impl Config {
#[must_use]
pub fn log_level_filter(&self) -> LevelFilter {
match self.log_level.to_lowercase().as_str() {
- "trace" => LevelFilter::TRACE,
- "debug" => LevelFilter::DEBUG,
- "warn" => LevelFilter::WARN,
- "error" => LevelFilter::ERROR,
+ "trace" => LevelFilter::Trace,
+ "debug" => LevelFilter::Debug,
+ "warn" => LevelFilter::Warn,
+ "error" => LevelFilter::Error,
// Catch-all: covers "info" and the unreachable default after validation.
- _ => LevelFilter::INFO,
+ _ => LevelFilter::Info,
}
}
@@ -921,27 +903,6 @@ sites = []
}
#[test]
- fn zero_rate_limit_rejected() {
- let toml = r#"
-listen_address = "127.0.0.1:8080"
-container_runtime = "podman"
-base_dir = "/var/lib/witryna"
-log_level = "info"
-rate_limit_per_minute = 0
-sites = []
-"#;
- let config: Config = toml::from_str(toml).unwrap();
- let result = config.validate();
- assert!(result.is_err());
- assert!(
- result
- .unwrap_err()
- .to_string()
- .contains("rate_limit_per_minute")
- );
- }
-
- #[test]
fn duplicate_site_names() {
let toml = r#"
listen_address = "127.0.0.1:8080"
diff --git a/src/git.rs b/src/git.rs
index 2193add..abbf701 100644
--- a/src/git.rs
+++ b/src/git.rs
@@ -1,8 +1,8 @@
use anyhow::{Context as _, Result, bail};
+use log::{debug, error, info, warn};
use std::path::Path;
use std::time::Duration;
use tokio::process::Command;
-use tracing::{debug, error, info, warn};
/// Default timeout for git operations (used when not configured).
pub const GIT_TIMEOUT_DEFAULT: Duration = Duration::from_secs(60);
@@ -104,10 +104,15 @@ pub async fn sync_repo(
pull(clone_dir, branch, timeout, depth).await?;
} else if let Err(e) = clone(repo_url, branch, clone_dir, timeout, depth).await {
if clone_dir.exists() {
- warn!(path = %clone_dir.display(), "cleaning up partial clone after failure");
+ warn!(
+ "cleaning up partial clone after failure: {}",
+ clone_dir.display()
+ );
if let Err(cleanup_err) = tokio::fs::remove_dir_all(clone_dir).await {
- error!(path = %clone_dir.display(), error = %cleanup_err,
- "failed to clean up partial clone");
+ error!(
+ "failed to clean up partial clone: path={} error={cleanup_err}",
+ clone_dir.display()
+ );
}
}
return Err(e);
@@ -142,12 +147,18 @@ pub async fn has_remote_changes(
) -> Result<bool> {
// If clone directory doesn't exist, treat as "needs update"
if !clone_dir.exists() {
- debug!(path = %clone_dir.display(), "clone directory does not exist, needs initial clone");
+ debug!(
+ "clone directory does not exist, needs initial clone: {}",
+ clone_dir.display()
+ );
return Ok(true);
}
// Fetch from remote (update refs only, no working tree changes)
- debug!(path = %clone_dir.display(), branch, "fetching remote refs");
+ debug!(
+ "fetching remote refs: path={} branch={branch}",
+ clone_dir.display()
+ );
let depth_str = depth.to_string();
let mut fetch_args = vec!["fetch"];
if depth > 0 {
@@ -165,10 +176,8 @@ pub async fn has_remote_changes(
let remote_head = get_commit_hash(clone_dir, &remote_ref).await?;
debug!(
- path = %clone_dir.display(),
- local = %local_head,
- remote = %remote_head,
- "comparing commits"
+ "comparing commits: path={} local={local_head} remote={remote_head}",
+ clone_dir.display()
);
Ok(local_head != remote_head)
@@ -198,7 +207,10 @@ async fn clone(
timeout: Duration,
depth: u32,
) -> Result<()> {
- info!(repo_url, branch, path = %clone_dir.display(), "cloning repository");
+ info!(
+ "cloning repository: repo={repo_url} branch={branch} path={}",
+ clone_dir.display()
+ );
// Create parent directory if needed
if let Some(parent) = clone_dir.parent() {
@@ -218,12 +230,15 @@ async fn clone(
args.push(clone_dir_str.as_str());
run_git(&args, None, timeout, "git clone").await?;
- debug!(path = %clone_dir.display(), "clone completed");
+ debug!("clone completed: {}", clone_dir.display());
Ok(())
}
async fn pull(clone_dir: &Path, branch: &str, timeout: Duration, depth: u32) -> Result<()> {
- info!(branch, path = %clone_dir.display(), "pulling latest changes");
+ info!(
+ "pulling latest changes: branch={branch} path={}",
+ clone_dir.display()
+ );
// Fetch from origin (shallow or full depending on depth)
let depth_str = depth.to_string();
@@ -245,7 +260,7 @@ async fn pull(clone_dir: &Path, branch: &str, timeout: Duration, depth: u32) ->
)
.await?;
- debug!(path = %clone_dir.display(), "pull completed");
+ debug!("pull completed: {}", clone_dir.display());
Ok(())
}
@@ -292,7 +307,7 @@ async fn has_lfs_pointers(clone_dir: &Path) -> Result<bool> {
continue;
};
if content.starts_with(LFS_POINTER_SIGNATURE) {
- debug!(file = %file_path, "found LFS pointer");
+ debug!("found LFS pointer: {file_path}");
return Ok(true);
}
}
@@ -310,7 +325,7 @@ async fn is_lfs_available() -> bool {
}
async fn lfs_pull(clone_dir: &Path) -> Result<()> {
- info!(path = %clone_dir.display(), "fetching LFS objects");
+ info!("fetching LFS objects: {}", clone_dir.display());
run_git(
&["lfs", "pull"],
@@ -320,7 +335,7 @@ async fn lfs_pull(clone_dir: &Path) -> Result<()> {
)
.await?;
- debug!(path = %clone_dir.display(), "LFS pull completed");
+ debug!("LFS pull completed: {}", clone_dir.display());
Ok(())
}
@@ -334,11 +349,14 @@ async fn lfs_pull(clone_dir: &Path) -> Result<()> {
async fn maybe_fetch_lfs(clone_dir: &Path) -> Result<()> {
// Step 1: Quick check for LFS configuration
if !has_lfs_configured(clone_dir).await {
- debug!(path = %clone_dir.display(), "no LFS configuration found");
+ debug!("no LFS configuration found: {}", clone_dir.display());
return Ok(());
}
- info!(path = %clone_dir.display(), "LFS configured, checking for pointers");
+ info!(
+ "LFS configured, checking for pointers: {}",
+ clone_dir.display()
+ );
// Step 2: Scan for actual pointer files
match has_lfs_pointers(clone_dir).await {
@@ -346,12 +364,12 @@ async fn maybe_fetch_lfs(clone_dir: &Path) -> Result<()> {
// Pointers found, need to fetch
}
Ok(false) => {
- debug!(path = %clone_dir.display(), "no LFS pointers found");
+ debug!("no LFS pointers found: {}", clone_dir.display());
return Ok(());
}
Err(e) => {
// If scan fails, try to fetch anyway (conservative approach)
- debug!(error = %e, "LFS pointer scan failed, attempting fetch");
+ debug!("LFS pointer scan failed, attempting fetch: {e}");
}
}
@@ -384,11 +402,11 @@ async fn maybe_init_submodules(
is_pull: bool,
) -> Result<()> {
if !has_submodules(clone_dir).await {
- debug!(path = %clone_dir.display(), "no submodules configured");
+ debug!("no submodules configured: {}", clone_dir.display());
return Ok(());
}
- info!(path = %clone_dir.display(), "submodules detected, initializing");
+ info!("submodules detected, initializing: {}", clone_dir.display());
// On pull, sync URLs first (handles upstream submodule URL changes)
if is_pull {
@@ -419,7 +437,10 @@ async fn maybe_init_submodules(
)
.await?;
- debug!(path = %clone_dir.display(), "submodule initialization completed");
+ debug!(
+ "submodule initialization completed: {}",
+ clone_dir.display()
+ );
Ok(())
}
@@ -1045,7 +1066,7 @@ mod tests {
}
/// Create a parent repo with a submodule wired up.
- /// Returns (parent_url, submodule_url).
+ /// Returns `(parent_url, submodule_url)`.
async fn create_repo_with_submodule(temp: &Path, branch: &str) -> (String, String) {
// 1. Create bare submodule repo with a file
let sub_bare = temp.join("sub.git");
diff --git a/src/hook.rs b/src/hook.rs
index 53e1e18..6cb3823 100644
--- a/src/hook.rs
+++ b/src/hook.rs
@@ -1,4 +1,5 @@
use crate::build::copy_with_tail;
+use log::debug;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::process::Stdio;
@@ -6,7 +7,6 @@ use std::time::{Duration, Instant};
use tokio::io::AsyncWriteExt as _;
use tokio::io::BufWriter;
use tokio::process::Command;
-use tracing::debug;
#[cfg(not(test))]
const HOOK_TIMEOUT: Duration = Duration::from_secs(30);
@@ -47,6 +47,7 @@ pub async fn run_post_deploy_hook(
build_dir: &Path,
public_dir: &Path,
timestamp: &str,
+ build_status: &str,
env: &HashMap<String, String>,
stdout_file: &Path,
stderr_file: &Path,
@@ -84,6 +85,7 @@ pub async fn run_post_deploy_hook(
.env("WITRYNA_BUILD_DIR", build_dir.as_os_str())
.env("WITRYNA_PUBLIC_DIR", public_dir.as_os_str())
.env("WITRYNA_BUILD_TIMESTAMP", timestamp)
+ .env("WITRYNA_BUILD_STATUS", build_status)
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn();
@@ -106,7 +108,7 @@ pub async fn run_post_deploy_hook(
}
};
- debug!(cmd = ?command, "hook process spawned");
+ debug!("hook process spawned: {command:?}");
let (last_stderr, exit_code, success) =
stream_hook_output(&mut child, stdout_file, stderr_file).await;
@@ -231,6 +233,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -255,6 +258,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -277,6 +281,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -305,6 +310,7 @@ mod tests {
tmp.path(),
&public_dir,
"20260202-120000-000000",
+ "success",
&env,
&stdout_tmp,
&stderr_tmp,
@@ -315,6 +321,7 @@ mod tests {
let stdout = fs::read_to_string(&stdout_tmp).await.unwrap();
assert!(stdout.contains("WITRYNA_SITE=my-site"));
assert!(stdout.contains("WITRYNA_BUILD_TIMESTAMP=20260202-120000-000000"));
+ assert!(stdout.contains("WITRYNA_BUILD_STATUS=success"));
assert!(stdout.contains("WITRYNA_BUILD_DIR="));
assert!(stdout.contains("WITRYNA_PUBLIC_DIR="));
assert!(stdout.contains("PATH="));
@@ -336,6 +343,7 @@ mod tests {
"WITRYNA_BUILD_DIR",
"WITRYNA_PUBLIC_DIR",
"WITRYNA_BUILD_TIMESTAMP",
+ "WITRYNA_BUILD_STATUS",
"MY_VAR",
"DEPLOY_TARGET",
]
@@ -357,6 +365,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -380,6 +389,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -404,6 +414,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -432,6 +443,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -460,6 +472,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&HashMap::new(),
&stdout_tmp,
&stderr_tmp,
@@ -472,6 +485,33 @@ mod tests {
}
#[tokio::test]
+ async fn hook_env_build_status_failed() {
+ let tmp = TempDir::new().unwrap();
+ let stdout_tmp = tmp.path().join("stdout.tmp");
+ let stderr_tmp = tmp.path().join("stderr.tmp");
+
+ let result = run_post_deploy_hook(
+ &cmd(&["env"]),
+ "test-site",
+ tmp.path(),
+ &tmp.path().join("current"),
+ "ts",
+ "failed",
+ &HashMap::new(),
+ &stdout_tmp,
+ &stderr_tmp,
+ )
+ .await;
+
+ assert!(result.success);
+ let stdout = fs::read_to_string(&stdout_tmp).await.unwrap();
+ assert!(
+ stdout.contains("WITRYNA_BUILD_STATUS=failed"),
+ "WITRYNA_BUILD_STATUS should be 'failed'"
+ );
+ }
+
+ #[tokio::test]
async fn hook_user_env_does_not_override_reserved() {
let tmp = TempDir::new().unwrap();
let stdout_tmp = tmp.path().join("stdout.tmp");
@@ -484,6 +524,7 @@ mod tests {
tmp.path(),
&tmp.path().join("current"),
"ts",
+ "success",
&env,
&stdout_tmp,
&stderr_tmp,
diff --git a/src/lib.rs b/src/lib.rs
index a80b591..73354d9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -3,19 +3,22 @@
//! This crate exposes modules for use by the binary and integration tests.
//! It is not intended for external consumption and has no stability guarantees.
-pub mod build;
+pub(crate) mod build;
pub mod build_guard;
pub mod cleanup;
pub mod cli;
pub mod config;
pub mod git;
-pub mod hook;
+pub(crate) mod hook;
+pub mod logger;
pub mod logs;
pub mod pipeline;
pub mod polling;
pub mod publish;
-pub mod repo_config;
+pub(crate) mod repo_config;
pub mod server;
+pub mod state;
+pub mod time;
#[cfg(any(test, feature = "integration"))]
pub mod test_support;
diff --git a/src/logger.rs b/src/logger.rs
new file mode 100644
index 0000000..8d9611b
--- /dev/null
+++ b/src/logger.rs
@@ -0,0 +1,121 @@
+use log::{LevelFilter, Log, Metadata, Record};
+use std::io::Write as _;
+
+pub struct Logger {
+ show_timestamp: bool,
+}
+
+impl Logger {
+ /// Initialize the global logger.
+ ///
+ /// `level` is the configured log level (from config or CLI).
+ /// `RUST_LOG` env var overrides it if set and valid.
+ /// Timestamps are omitted when `JOURNAL_STREAM` is set (systemd/journald).
+ ///
+ /// # Panics
+ ///
+ /// Panics if called more than once (the global logger can only be set once).
+ pub fn init(level: LevelFilter) {
+ let effective_level = std::env::var("RUST_LOG")
+ .ok()
+ .and_then(|s| parse_level(&s))
+ .unwrap_or(level);
+
+ let show_timestamp = std::env::var_os("JOURNAL_STREAM").is_none();
+
+ let logger = Self { show_timestamp };
+ log::set_boxed_logger(Box::new(logger)).expect("logger already initialized");
+ log::set_max_level(effective_level);
+ }
+}
+
+fn parse_level(s: &str) -> Option<LevelFilter> {
+ match s.trim().to_lowercase().as_str() {
+ "trace" => Some(LevelFilter::Trace),
+ "debug" => Some(LevelFilter::Debug),
+ "info" => Some(LevelFilter::Info),
+ "warn" => Some(LevelFilter::Warn),
+ "error" => Some(LevelFilter::Error),
+ "off" => Some(LevelFilter::Off),
+ _ => None,
+ }
+}
+
+impl Log for Logger {
+ fn enabled(&self, metadata: &Metadata) -> bool {
+ metadata.level() <= log::max_level()
+ }
+
+ fn log(&self, record: &Record) {
+ if !self.enabled(record.metadata()) {
+ return;
+ }
+ let mut stderr = std::io::stderr().lock();
+ if self.show_timestamp {
+ let now = crate::time::format_log_timestamp(std::time::SystemTime::now());
+ let _ = writeln!(stderr, "{now} {:>5} {}", record.level(), record.args());
+ } else {
+ let _ = writeln!(stderr, "{:>5} {}", record.level(), record.args());
+ }
+ }
+
+ fn flush(&self) {
+ let _ = std::io::stderr().flush();
+ }
+}
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn parse_level_valid_levels() {
+ assert_eq!(parse_level("trace"), Some(LevelFilter::Trace));
+ assert_eq!(parse_level("debug"), Some(LevelFilter::Debug));
+ assert_eq!(parse_level("info"), Some(LevelFilter::Info));
+ assert_eq!(parse_level("warn"), Some(LevelFilter::Warn));
+ assert_eq!(parse_level("error"), Some(LevelFilter::Error));
+ }
+
+ #[test]
+ fn parse_level_case_insensitive() {
+ assert_eq!(parse_level("DEBUG"), Some(LevelFilter::Debug));
+ assert_eq!(parse_level("Debug"), Some(LevelFilter::Debug));
+ assert_eq!(parse_level("dEbUg"), Some(LevelFilter::Debug));
+ }
+
+ #[test]
+ fn parse_level_invalid_returns_none() {
+ assert_eq!(parse_level("invalid"), None);
+ assert_eq!(parse_level(""), None);
+ assert_eq!(parse_level("verbose"), None);
+ }
+
+ #[test]
+ fn parse_level_off() {
+ assert_eq!(parse_level("off"), Some(LevelFilter::Off));
+ }
+
+ #[test]
+ fn parse_level_trimmed() {
+ assert_eq!(parse_level(" info "), Some(LevelFilter::Info));
+ assert_eq!(parse_level("\tdebug\n"), Some(LevelFilter::Debug));
+ }
+
+ #[test]
+ fn enabled_respects_max_level() {
+ let logger = Logger {
+ show_timestamp: true,
+ };
+
+ // Set max level to Warn for this test
+ log::set_max_level(LevelFilter::Warn);
+
+ let warn_meta = log::MetadataBuilder::new().level(log::Level::Warn).build();
+ let debug_meta = log::MetadataBuilder::new().level(log::Level::Debug).build();
+
+ assert!(logger.enabled(&warn_meta));
+ assert!(!logger.enabled(&debug_meta));
+ }
+}
diff --git a/src/logs.rs b/src/logs.rs
index bddcc9d..e25262a 100644
--- a/src/logs.rs
+++ b/src/logs.rs
@@ -1,9 +1,9 @@
use anyhow::{Context as _, Result};
+use log::debug;
use std::path::{Path, PathBuf};
use std::time::Duration;
use tokio::io::AsyncWriteExt as _;
use tokio::process::Command;
-use tracing::{debug, warn};
use crate::hook::HookResult;
@@ -107,9 +107,9 @@ pub async fn save_build_log(
let _ = tokio::fs::remove_file(stderr_file).await;
debug!(
- path = %log_file.display(),
- site = %meta.site_name,
- "build log saved"
+ "[{}] build log saved: {}",
+ meta.site_name,
+ log_file.display()
);
Ok(log_file)
@@ -239,11 +239,7 @@ pub async fn save_hook_log(
let _ = tokio::fs::remove_file(&hook_result.stdout_file).await;
let _ = tokio::fs::remove_file(&hook_result.stderr_file).await;
- debug!(
- path = %log_file.display(),
- site = %site_name,
- "hook log saved"
- );
+ debug!("[{site_name}] hook log saved: {}", log_file.display());
Ok(log_file)
}
@@ -271,17 +267,6 @@ fn format_hook_log_header(site_name: &str, timestamp: &str, result: &HookResult)
)
}
-/// Parsed header from a build log file.
-#[derive(Debug, Clone, serde::Serialize)]
-pub struct ParsedLogHeader {
- pub site_name: String,
- pub timestamp: String,
- pub git_commit: String,
- pub image: String,
- pub duration: String,
- pub status: String,
-}
-
/// Combined deployment status (build + optional hook).
#[derive(Debug, Clone, serde::Serialize)]
pub struct DeploymentStatus {
@@ -291,182 +276,7 @@ pub struct DeploymentStatus {
pub duration: String,
pub status: String,
pub log: String,
-}
-
-/// Parse the header section of a build log file.
-///
-/// Expects lines like:
-/// ```text
-/// === BUILD LOG ===
-/// Site: my-site
-/// Timestamp: 20260126-143000-123456
-/// Git Commit: abc123d
-/// Image: node:20-alpine
-/// Duration: 45s
-/// Status: success
-/// ```
-///
-/// Returns `None` if the header is malformed.
-#[must_use]
-pub fn parse_log_header(content: &str) -> Option<ParsedLogHeader> {
- let mut site_name = None;
- let mut timestamp = None;
- let mut git_commit = None;
- let mut image = None;
- let mut duration = None;
- let mut status = None;
-
- for line in content.lines().take(10) {
- if let Some(val) = line.strip_prefix("Site: ") {
- site_name = Some(val.to_owned());
- } else if let Some(val) = line.strip_prefix("Timestamp: ") {
- timestamp = Some(val.to_owned());
- } else if let Some(val) = line.strip_prefix("Git Commit: ") {
- git_commit = Some(val.to_owned());
- } else if let Some(val) = line.strip_prefix("Image: ") {
- image = Some(val.to_owned());
- } else if let Some(val) = line.strip_prefix("Duration: ") {
- duration = Some(val.to_owned());
- } else if let Some(val) = line.strip_prefix("Status: ") {
- status = Some(val.to_owned());
- }
- }
-
- Some(ParsedLogHeader {
- site_name: site_name?,
- timestamp: timestamp?,
- git_commit: git_commit.unwrap_or_else(|| "unknown".to_owned()),
- image: image.unwrap_or_else(|| "unknown".to_owned()),
- duration: duration?,
- status: status?,
- })
-}
-
-/// Parse the status line from a hook log.
-///
-/// Returns `Some(true)` for success, `Some(false)` for failure,
-/// `None` if the content cannot be parsed.
-#[must_use]
-pub fn parse_hook_status(content: &str) -> Option<bool> {
- for line in content.lines().take(10) {
- if let Some(val) = line.strip_prefix("Status: ") {
- return Some(val == "success");
- }
- }
- None
-}
-
-/// List build log files for a site, sorted newest-first.
-///
-/// Returns `(timestamp, path)` pairs. Excludes `*-hook.log` and `*.tmp` files.
-///
-/// # Errors
-///
-/// Returns an error if the directory cannot be read (except for not-found,
-/// which returns an empty list).
-pub async fn list_site_logs(log_dir: &Path, site_name: &str) -> Result<Vec<(String, PathBuf)>> {
- let site_log_dir = log_dir.join(site_name);
-
- if !site_log_dir.is_dir() {
- return Ok(Vec::new());
- }
-
- let mut entries = tokio::fs::read_dir(&site_log_dir)
- .await
- .with_context(|| format!("failed to read log directory: {}", site_log_dir.display()))?;
-
- let mut logs = Vec::new();
-
- while let Some(entry) = entries.next_entry().await? {
- let name = entry.file_name();
- let name_str = name.to_string_lossy();
-
- // Skip hook logs and temp files
- if name_str.ends_with("-hook.log") || name_str.ends_with(".tmp") {
- continue;
- }
-
- if let Some(timestamp) = name_str.strip_suffix(".log") {
- logs.push((timestamp.to_owned(), entry.path()));
- }
- }
-
- // Sort descending (newest first) — timestamps are lexicographically sortable
- logs.sort_by(|a, b| b.0.cmp(&a.0));
-
- Ok(logs)
-}
-
-/// Get the deployment status for a single build log.
-///
-/// Reads the build log header and checks for an accompanying hook log
-/// to determine overall deployment status.
-///
-/// # Errors
-///
-/// Returns an error if the build log cannot be read.
-pub async fn get_deployment_status(
- log_dir: &Path,
- site_name: &str,
- timestamp: &str,
- log_path: &Path,
-) -> Result<DeploymentStatus> {
- let content = tokio::fs::read_to_string(log_path)
- .await
- .with_context(|| format!("failed to read build log: {}", log_path.display()))?;
-
- let header = parse_log_header(&content);
-
- let (git_commit, duration, build_status) = match &header {
- Some(h) => (h.git_commit.clone(), h.duration.clone(), h.status.clone()),
- None => {
- warn!(path = %log_path.display(), "malformed build log header");
- (
- "unknown".to_owned(),
- "-".to_owned(),
- "(parse error)".to_owned(),
- )
- }
- };
-
- // Check for accompanying hook log
- let hook_log_path = log_dir
- .join(site_name)
- .join(format!("{timestamp}-hook.log"));
-
- let status = if hook_log_path.is_file() {
- match tokio::fs::read_to_string(&hook_log_path).await {
- Ok(hook_content) => match parse_hook_status(&hook_content) {
- Some(true) => {
- if build_status.starts_with("failed") {
- build_status
- } else {
- "success".to_owned()
- }
- }
- Some(false) => {
- if build_status.starts_with("failed") {
- build_status
- } else {
- "hook failed".to_owned()
- }
- }
- None => build_status,
- },
- Err(_) => build_status,
- }
- } else {
- build_status
- };
-
- Ok(DeploymentStatus {
- site_name: site_name.to_owned(),
- timestamp: timestamp.to_owned(),
- git_commit,
- duration,
- status,
- log: log_path.to_string_lossy().to_string(),
- })
+ pub current_build: String,
}
#[cfg(test)]
@@ -827,93 +637,4 @@ mod tests {
cleanup(&base_dir).await;
}
-
- // --- parse_log_header tests ---
-
- #[test]
- fn parse_log_header_success() {
- let content = "\
-=== BUILD LOG ===
-Site: my-site
-Timestamp: 20260126-143000-123456
-Git Commit: abc123d
-Image: node:20-alpine
-Duration: 45s
-Status: success
-
-=== STDOUT ===
-build output
-";
- let header = parse_log_header(content).unwrap();
- assert_eq!(header.site_name, "my-site");
- assert_eq!(header.timestamp, "20260126-143000-123456");
- assert_eq!(header.git_commit, "abc123d");
- assert_eq!(header.image, "node:20-alpine");
- assert_eq!(header.duration, "45s");
- assert_eq!(header.status, "success");
- }
-
- #[test]
- fn parse_log_header_failed_build() {
- let content = "\
-=== BUILD LOG ===
-Site: fail-site
-Timestamp: 20260126-160000-000000
-Git Commit: def456
-Image: node:18
-Duration: 2m 0s
-Status: failed (exit code: 42): build error
-";
- let header = parse_log_header(content).unwrap();
- assert_eq!(header.status, "failed (exit code: 42): build error");
- assert_eq!(header.duration, "2m 0s");
- }
-
- #[test]
- fn parse_log_header_unknown_commit() {
- let content = "\
-=== BUILD LOG ===
-Site: test-site
-Timestamp: 20260126-150000-000000
-Git Commit: unknown
-Image: alpine:latest
-Duration: 5s
-Status: success
-";
- let header = parse_log_header(content).unwrap();
- assert_eq!(header.git_commit, "unknown");
- }
-
- #[test]
- fn parse_log_header_malformed() {
- let content = "This is not a valid log file\nSome random text\n";
- let header = parse_log_header(content);
- assert!(header.is_none());
- }
-
- #[test]
- fn parse_hook_status_success() {
- let content = "\
-=== HOOK LOG ===
-Site: test-site
-Timestamp: 20260202-120000-000000
-Command: touch marker
-Duration: 1s
-Status: success
-";
- assert_eq!(parse_hook_status(content), Some(true));
- }
-
- #[test]
- fn parse_hook_status_failed() {
- let content = "\
-=== HOOK LOG ===
-Site: test-site
-Timestamp: 20260202-120000-000000
-Command: false
-Duration: 0s
-Status: failed (exit code 1)
-";
- assert_eq!(parse_hook_status(content), Some(false));
- }
}
diff --git a/src/main.rs b/src/main.rs
index b153297..ea0b033 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,65 +1,62 @@
use anyhow::{Context as _, Result, bail};
-use clap::Parser as _;
-use tracing::{info, warn};
-use tracing_subscriber::EnvFilter;
+use log::{info, warn};
use witryna::cli::{Cli, Command};
use witryna::config;
use witryna::logs::{self, DeploymentStatus};
-use witryna::{pipeline, server};
+use witryna::state::BuildEntry;
+use witryna::{cleanup, pipeline, publish, server, state};
#[tokio::main]
async fn main() -> Result<()> {
- let cli = Cli::parse();
- let config_path = config::discover_config(cli.config.as_deref())?;
+ let cli: Cli = argh::from_env();
+ let config_path = config::discover_config(cli.command.config())?;
match cli.command {
- Command::Serve => run_serve(config_path).await,
- Command::Validate => run_validate(config_path).await,
- Command::Run { site, verbose } => run_run(config_path, site, verbose).await,
- Command::Status { site, json } => run_status(config_path, site, json).await,
+ Command::Serve(_) => run_serve(config_path).await,
+ Command::Validate(_) => run_validate(config_path).await,
+ Command::Run(cmd) => Box::pin(run_run(config_path, cmd.site, cmd.verbose)).await,
+ Command::Status(cmd) => run_status(config_path, cmd.site, cmd.json).await,
+ Command::Switch(cmd) => run_switch(config_path, cmd.site, cmd.build).await,
+ Command::Cleanup(cmd) => run_cleanup(config_path, cmd.site, cmd.keep).await,
}
}
async fn run_serve(config_path: std::path::PathBuf) -> Result<()> {
let config = config::Config::load(&config_path).await?;
- // Initialize tracing with configured log level
+ // Initialize logger with configured log level
// RUST_LOG env var takes precedence if set
- let filter = EnvFilter::try_from_default_env()
- .unwrap_or_else(|_| EnvFilter::new(config.log_level_filter().to_string()));
- tracing_subscriber::fmt().with_env_filter(filter).init();
+ witryna::logger::Logger::init(config.log_level_filter());
info!(
- listen_address = %config.listen_address,
- container_runtime = %config.container_runtime,
- base_dir = %config.base_dir.display(),
- log_dir = %config.log_dir.display(),
- log_level = %config.log_level,
- sites_count = config.sites.len(),
- "loaded configuration"
+ "loaded configuration: listen={} runtime={} base_dir={} log_dir={} log_level={} sites={}",
+ config.listen_address,
+ config.container_runtime,
+ config.base_dir.display(),
+ config.log_dir.display(),
+ config.log_level,
+ config.sites.len(),
);
for site in &config.sites {
if site.webhook_token.is_empty() {
warn!(
- name = %site.name,
- "webhook authentication disabled (no token configured)"
+ "[{}] webhook authentication disabled (no token configured)",
+ site.name,
);
}
if let Some(interval) = site.poll_interval {
info!(
- name = %site.name,
- repo_url = %site.repo_url,
- branch = %site.branch,
- poll_interval_secs = interval.as_secs(),
- "configured site with polling"
+ "[{}] configured site with polling: repo={} branch={} poll_interval_secs={}",
+ site.name,
+ site.repo_url,
+ site.branch,
+ interval.as_secs(),
);
} else {
info!(
- name = %site.name,
- repo_url = %site.repo_url,
- branch = %site.branch,
- "configured site (webhook-only)"
+ "[{}] configured site (webhook-only): repo={} branch={}",
+ site.name, site.repo_url, site.branch,
);
}
}
@@ -89,13 +86,13 @@ async fn run_run(config_path: std::path::PathBuf, site_name: String, verbose: bo
})?
.clone();
- // Initialize tracing: compact stderr, DEBUG when verbose
- let level = if verbose { "debug" } else { "info" };
- let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(level));
- tracing_subscriber::fmt()
- .with_env_filter(filter)
- .with_writer(std::io::stderr)
- .init();
+ // Initialize logger: DEBUG when verbose
+ let level = if verbose {
+ log::LevelFilter::Debug
+ } else {
+ log::LevelFilter::Info
+ };
+ witryna::logger::Logger::init(level);
eprintln!(
"Building site: {} (repo: {}, branch: {})",
@@ -106,7 +103,7 @@ async fn run_run(config_path: std::path::PathBuf, site_name: String, verbose: bo
.git_timeout
.unwrap_or(witryna::git::GIT_TIMEOUT_DEFAULT);
- let result = pipeline::run_build(
+ let result = Box::pin(pipeline::run_build(
&site_name,
&site,
&config.base_dir,
@@ -115,7 +112,7 @@ async fn run_run(config_path: std::path::PathBuf, site_name: String, verbose: bo
config.max_builds_to_keep,
git_timeout,
verbose,
- )
+ ))
.await?;
eprintln!(
@@ -142,36 +139,38 @@ async fn run_status(
bail!("site '{}' not found in {}", name, config_path.display());
}
+ let sites: Vec<&str> = match &site_filter {
+ Some(name) => vec![name.as_str()],
+ None => config.sites.iter().map(|s| s.name.as_str()).collect(),
+ };
+
let mut statuses: Vec<DeploymentStatus> = Vec::new();
- match &site_filter {
- Some(name) => {
- // Show last 10 deployments for a single site
- let site_logs = logs::list_site_logs(&config.log_dir, name).await?;
- for (ts, path) in site_logs.into_iter().take(10) {
- let ds = logs::get_deployment_status(&config.log_dir, name, &ts, &path).await?;
- statuses.push(ds);
- }
+ for site_name in &sites {
+ let st = state::load_state(&config.base_dir, site_name).await;
+
+ if st.builds.is_empty() {
+ statuses.push(DeploymentStatus {
+ site_name: (*site_name).to_owned(),
+ timestamp: "-".to_owned(),
+ git_commit: "-".to_owned(),
+ duration: "-".to_owned(),
+ status: "-".to_owned(),
+ log: "(no builds)".to_owned(),
+ current_build: String::new(),
+ });
+ continue;
}
- None => {
- // Show latest deployment for each site
- for site in &config.sites {
- let site_logs = logs::list_site_logs(&config.log_dir, &site.name).await?;
- if let Some((ts, path)) = site_logs.into_iter().next() {
- let ds = logs::get_deployment_status(&config.log_dir, &site.name, &ts, &path)
- .await?;
- statuses.push(ds);
- } else {
- statuses.push(DeploymentStatus {
- site_name: site.name.clone(),
- timestamp: "-".to_owned(),
- git_commit: "-".to_owned(),
- duration: "-".to_owned(),
- status: "-".to_owned(),
- log: "(no builds)".to_owned(),
- });
- }
- }
+
+ // Single-site filter: show all builds. Overview: show only latest.
+ let builds = if site_filter.is_some() {
+ st.builds.iter().collect::<Vec<_>>()
+ } else {
+ st.builds.iter().take(1).collect::<Vec<_>>()
+ };
+
+ for entry in builds {
+ statuses.push(build_entry_to_status(site_name, entry, &st.current));
}
}
@@ -187,6 +186,153 @@ async fn run_status(
Ok(())
}
+#[allow(clippy::print_stderr)] // CLI output goes to stderr
+async fn run_switch(
+ config_path: std::path::PathBuf,
+ site_name: String,
+ build_timestamp: String,
+) -> Result<()> {
+ let config = config::Config::load(&config_path).await?;
+
+ if config.find_site(&site_name).is_none() {
+ bail!(
+ "site '{}' not found in {}",
+ site_name,
+ config_path.display()
+ );
+ }
+
+ let builds_dir = config.base_dir.join("builds").join(&site_name);
+
+ if !builds_dir.exists() {
+ bail!("no builds found for site '{site_name}'");
+ }
+
+ if !cleanup::looks_like_timestamp(&build_timestamp) {
+ bail!("'{build_timestamp}' is not a valid build timestamp");
+ }
+
+ let build_dir = builds_dir.join(&build_timestamp);
+ if !build_dir.is_dir() {
+ let available = cleanup::list_build_timestamps(&builds_dir).await?;
+ if available.is_empty() {
+ bail!("no builds found for site '{site_name}'");
+ }
+ let mut sorted = available;
+ sorted.sort_by(|a, b| b.cmp(a));
+ bail!(
+ "build '{}' not found for site '{}'\navailable builds:\n {}",
+ build_timestamp,
+ site_name,
+ sorted.join("\n ")
+ );
+ }
+
+ let current_link = builds_dir.join("current");
+ publish::atomic_symlink_update(&build_dir, &current_link).await?;
+ state::set_current(&config.base_dir, &site_name, &build_timestamp).await;
+
+ eprintln!("switched {site_name} to build {build_timestamp}");
+ Ok(())
+}
+
+#[allow(clippy::print_stderr)] // CLI output goes to stderr
+async fn run_cleanup(
+ config_path: std::path::PathBuf,
+ site_filter: Option<String>,
+ keep: Option<u32>,
+) -> Result<()> {
+ let config = config::Config::load(&config_path).await?;
+
+ if let Some(name) = &site_filter
+ && config.find_site(name).is_none()
+ {
+ bail!("site '{}' not found in {}", name, config_path.display());
+ }
+
+ if keep == Some(0) {
+ bail!("--keep 0 would delete all builds; refusing");
+ }
+
+ let max_to_keep = keep.unwrap_or(config.max_builds_to_keep);
+
+ if max_to_keep == 0 {
+ eprintln!("cleanup disabled (max_builds_to_keep is 0; use --keep N to override)");
+ return Ok(());
+ }
+
+ let sites: Vec<&str> = match &site_filter {
+ Some(name) => vec![name.as_str()],
+ None => config.sites.iter().map(|s| s.name.as_str()).collect(),
+ };
+
+ let mut total_builds: u32 = 0;
+ let mut total_logs: u32 = 0;
+
+ for site_name in &sites {
+ let result =
+ cleanup::cleanup_old_builds(&config.base_dir, &config.log_dir, site_name, max_to_keep)
+ .await
+ .with_context(|| format!("cleanup failed for site '{site_name}'"))?;
+
+ if result.builds_removed > 0 || result.logs_removed > 0 {
+ eprintln!(
+ "{site_name}: removed {} build(s), {} log(s)",
+ result.builds_removed, result.logs_removed
+ );
+ } else {
+ eprintln!("{site_name}: nothing to clean");
+ }
+
+ total_builds += result.builds_removed;
+ total_logs += result.logs_removed;
+ }
+
+ if sites.len() > 1 {
+ eprintln!("total: {total_builds} build(s), {total_logs} log(s) removed");
+ }
+
+ Ok(())
+}
+
+/// Convert a `BuildEntry` to a `DeploymentStatus` for display.
+///
+/// For "building" entries, computes elapsed time from `started_at`.
+fn build_entry_to_status(site_name: &str, entry: &BuildEntry, current: &str) -> DeploymentStatus {
+ let duration = if entry.status == "building" {
+ elapsed_since(&entry.started_at)
+ } else {
+ entry.duration.clone()
+ };
+
+ let git_commit = if entry.git_commit.is_empty() {
+ "-".to_owned()
+ } else {
+ entry.git_commit.clone()
+ };
+
+ DeploymentStatus {
+ site_name: site_name.to_owned(),
+ timestamp: entry.timestamp.clone(),
+ git_commit,
+ duration,
+ status: entry.status.clone(),
+ log: entry.log.clone(),
+ current_build: current.to_owned(),
+ }
+}
+
+/// Compute human-readable elapsed time from an ISO 8601 timestamp.
+fn elapsed_since(started_at: &str) -> String {
+ let Some(start) = witryna::time::parse_rfc3339(started_at) else {
+ return "-".to_owned();
+ };
+ let Ok(elapsed) = start.elapsed() else {
+ return "-".to_owned();
+ };
+ logs::format_duration(elapsed)
+}
+
fn format_status_table(statuses: &[DeploymentStatus]) -> String {
use std::fmt::Write as _;
@@ -200,14 +346,19 @@ fn format_status_table(statuses: &[DeploymentStatus]) -> String {
let mut out = String::new();
let _ = writeln!(
out,
- "{:<site_width$} {:<11} {:<7} {:<8} {:<24} LOG",
+ " {:<site_width$} {:<11} {:<7} {:<8} {:<24} LOG",
"SITE", "STATUS", "COMMIT", "DURATION", "TIMESTAMP"
);
for s in statuses {
+ let marker = if !s.current_build.is_empty() && s.timestamp == s.current_build {
+ "+"
+ } else {
+ " "
+ };
let _ = writeln!(
out,
- "{:<site_width$} {:<11} {:<7} {:<8} {:<24} {}",
+ "{marker} {:<site_width$} {:<11} {:<7} {:<8} {:<24} {}",
s.site_name, s.status, s.git_commit, s.duration, s.timestamp, s.log
);
}
@@ -247,7 +398,6 @@ mod tests {
base_dir: PathBuf::from("/var/lib/witryna"),
log_dir: PathBuf::from("/var/log/witryna"),
log_level: "info".to_owned(),
- rate_limit_per_minute: 10,
max_builds_to_keep: 5,
git_timeout: None,
sites,
@@ -342,6 +492,7 @@ mod tests {
duration: duration.to_owned(),
status: status.to_owned(),
log: log.to_owned(),
+ current_build: String::new(),
}
}
@@ -419,4 +570,81 @@ mod tests {
let output = format_status_table(&statuses);
assert!(output.contains("hook failed"));
}
+
+ #[test]
+ fn format_status_table_current_build_marker() {
+ let mut ds = test_deployment(
+ "my-site",
+ "success",
+ "abc123d",
+ "45s",
+ "20260126-143000-123456",
+ "/logs/my-site/20260126-143000-123456.log",
+ );
+ ds.current_build = "20260126-143000-123456".to_owned();
+ let output = format_status_table(&[ds]);
+
+ // The matching row should start with "+"
+ let data_line = output.lines().nth(1).unwrap();
+ assert!(
+ data_line.starts_with('+'),
+ "row should start with '+', got: {data_line}"
+ );
+ }
+
+ #[test]
+ fn format_status_table_no_marker_when_no_current() {
+ let ds = test_deployment(
+ "my-site",
+ "success",
+ "abc123d",
+ "45s",
+ "20260126-143000-123456",
+ "/logs/my-site/20260126-143000-123456.log",
+ );
+ let output = format_status_table(&[ds]);
+
+ let data_line = output.lines().nth(1).unwrap();
+ assert!(
+ data_line.starts_with(' '),
+ "row should start with space when no current_build, got: {data_line}"
+ );
+ }
+
+ #[test]
+ fn format_status_table_marker_only_on_matching_row() {
+ let mut ds1 = test_deployment(
+ "my-site",
+ "success",
+ "abc123d",
+ "45s",
+ "20260126-143000-123456",
+ "/logs/1.log",
+ );
+ ds1.current_build = "20260126-143000-123456".to_owned();
+
+ let mut ds2 = test_deployment(
+ "my-site",
+ "failed",
+ "def4567",
+ "30s",
+ "20260126-150000-000000",
+ "/logs/2.log",
+ );
+ ds2.current_build = "20260126-143000-123456".to_owned();
+
+ let output = format_status_table(&[ds1, ds2]);
+ let lines: Vec<&str> = output.lines().collect();
+
+ assert!(
+ lines[1].starts_with('+'),
+ "matching row should have +: {}",
+ lines[1]
+ );
+ assert!(
+ lines[2].starts_with(' '),
+ "non-matching row should have space: {}",
+ lines[2]
+ );
+ }
}
diff --git a/src/pipeline.rs b/src/pipeline.rs
index 5827ad7..21857a8 100644
--- a/src/pipeline.rs
+++ b/src/pipeline.rs
@@ -1,11 +1,12 @@
use crate::config::SiteConfig;
use crate::logs::{BuildExitStatus, BuildLogMeta};
-use crate::{build, cleanup, git, hook, logs, publish, repo_config};
+use crate::state::BuildEntry;
+use crate::{build, cleanup, git, hook, logs, publish, repo_config, state};
use anyhow::Result;
-use chrono::Utc;
+use log::{error, info, warn};
+use std::collections::HashMap;
use std::path::{Path, PathBuf};
-use std::time::{Duration, Instant};
-use tracing::{error, info, warn};
+use std::time::{Duration, Instant, SystemTime};
/// Result of a successful pipeline run.
pub struct PipelineResult {
@@ -37,13 +38,35 @@ pub async fn run_build(
git_timeout: Duration,
verbose: bool,
) -> Result<PipelineResult> {
- let timestamp = Utc::now().format("%Y%m%d-%H%M%S-%f").to_string();
+ let now = SystemTime::now();
+ let timestamp = crate::time::format_build_timestamp(now);
let start_time = Instant::now();
+ let started_at = crate::time::format_rfc3339(now);
+ let log_path_str = log_dir
+ .join(site_name)
+ .join(format!("{timestamp}.log"))
+ .to_string_lossy()
+ .to_string();
+
+ // Write "building" state
+ state::push_build(
+ base_dir,
+ site_name,
+ BuildEntry {
+ status: "building".to_owned(),
+ timestamp: timestamp.clone(),
+ started_at: started_at.clone(),
+ git_commit: String::new(),
+ duration: String::new(),
+ log: log_path_str.clone(),
+ },
+ )
+ .await;
let clone_dir = base_dir.join("clones").join(site_name);
// 1. Sync git repository
- info!(%site_name, "syncing repository");
+ info!("[{site_name}] syncing repository");
if let Err(e) = git::sync_repo(
&site.repo_url,
&site.branch,
@@ -53,7 +76,8 @@ pub async fn run_build(
)
.await
{
- error!(%site_name, error = %e, "git sync failed");
+ error!("[{site_name}] git sync failed: {e}");
+ let duration = start_time.elapsed();
save_build_log_for_error(
log_dir,
site_name,
@@ -64,6 +88,7 @@ pub async fn run_build(
&e.to_string(),
)
.await;
+ update_final_state(base_dir, site_name, "failed", "", duration).await;
return Err(e.context("git sync failed"));
}
@@ -80,17 +105,26 @@ pub async fn run_build(
{
Ok(config) => config,
Err(e) => {
- error!(%site_name, error = %e, "failed to load repo config");
+ error!("[{site_name}] failed to load repo config: {e}");
+ let duration = start_time.elapsed();
save_build_log_for_error(
log_dir,
site_name,
&timestamp,
start_time,
- git_commit,
+ git_commit.clone(),
"config-load",
&e.to_string(),
)
.await;
+ update_final_state(
+ base_dir,
+ site_name,
+ "failed",
+ git_commit.as_deref().unwrap_or(""),
+ duration,
+ )
+ .await;
return Err(e.context("failed to load repo config"));
}
};
@@ -103,7 +137,10 @@ pub async fn run_build(
let sanitized = crate::config::sanitize_cache_dir_name(dir);
let host_path = base_dir.join("cache").join(site_name).join(&sanitized);
if let Err(e) = tokio::fs::create_dir_all(&host_path).await {
- error!(%site_name, path = %host_path.display(), error = %e, "failed to create cache directory");
+ error!(
+ "[{site_name}] failed to create cache directory: path={} {e}",
+ host_path.display()
+ );
anyhow::bail!("failed to create cache directory: {e}");
}
volumes.push((dir.clone(), host_path));
@@ -112,7 +149,7 @@ pub async fn run_build(
.iter()
.map(|(c, h)| format!("{}:{}", h.display(), c))
.collect();
- info!(%site_name, mounts = ?mount_list, "mounting cache volumes");
+ info!("[{site_name}] mounting cache volumes: {mount_list:?}");
volumes
}
_ => Vec::new(),
@@ -121,7 +158,7 @@ pub async fn run_build(
// 4. Execute build — stream output to temp files
let site_log_dir = log_dir.join(site_name);
if let Err(e) = tokio::fs::create_dir_all(&site_log_dir).await {
- error!(%site_name, error = %e, "failed to create log directory");
+ error!("[{site_name}] failed to create log directory: {e}");
anyhow::bail!("failed to create log directory: {e}");
}
let stdout_tmp = site_log_dir.join(format!("{timestamp}-stdout.tmp"));
@@ -136,8 +173,11 @@ pub async fn run_build(
network: site.container_network.clone(),
workdir: site.container_workdir.clone(),
};
- info!(%site_name, image = %repo_config.image, "running container build");
- let build_result = build::execute(
+ info!(
+ "[{site_name}] running container build: image={}",
+ repo_config.image
+ );
+ let build_result = Box::pin(build::execute(
container_runtime,
&clone_dir,
&repo_config,
@@ -148,7 +188,7 @@ pub async fn run_build(
&stderr_tmp,
timeout,
verbose,
- )
+ ))
.await;
// Determine exit status and extract temp file paths
@@ -207,7 +247,7 @@ pub async fn run_build(
match logs::save_build_log(log_dir, &meta, &build_stdout_file, &build_stderr_file).await {
Ok(path) => path,
Err(e) => {
- error!(%site_name, error = %e, "failed to save build log");
+ error!("[{site_name}] failed to save build log: {e}");
let _ = tokio::fs::remove_file(&build_stdout_file).await;
let _ = tokio::fs::remove_file(&build_stderr_file).await;
// Non-fatal for log save — continue if build succeeded
@@ -215,14 +255,37 @@ pub async fn run_build(
}
};
- // If build failed, return error
+ // If build failed, run hook (if configured) then return error
if let Err(e) = build_result {
- error!(%site_name, "build failed");
+ error!("[{site_name}] build failed");
+ run_hook_if_configured(
+ site,
+ site_name,
+ &clone_dir,
+ base_dir,
+ log_dir,
+ &site_log_dir,
+ &timestamp,
+ "failed",
+ &env,
+ )
+ .await;
+ update_final_state(
+ base_dir,
+ site_name,
+ "failed",
+ git_commit.as_deref().unwrap_or(""),
+ start_time.elapsed(),
+ )
+ .await;
return Err(e);
}
// 5. Publish assets (with same timestamp as log)
- info!(%site_name, public = %repo_config.public, "publishing assets");
+ info!(
+ "[{site_name}] publishing assets: public={}",
+ repo_config.public
+ );
let publish_result = publish::publish(
base_dir,
site_name,
@@ -233,53 +296,50 @@ pub async fn run_build(
.await?;
info!(
- %site_name,
- build_dir = %publish_result.build_dir.display(),
- timestamp = %publish_result.timestamp,
- "deployment completed successfully"
+ "[{site_name}] deployment completed successfully: build_dir={} timestamp={}",
+ publish_result.build_dir.display(),
+ publish_result.timestamp
);
// 6. Run post-deploy hook (non-fatal)
- if let Some(hook_cmd) = &site.post_deploy {
- info!(%site_name, "running post-deploy hook");
- let hook_stdout_tmp = site_log_dir.join(format!("{timestamp}-hook-stdout.tmp"));
- let hook_stderr_tmp = site_log_dir.join(format!("{timestamp}-hook-stderr.tmp"));
- let public_dir = base_dir.join("builds").join(site_name).join("current");
-
- let hook_result = hook::run_post_deploy_hook(
- hook_cmd,
- site_name,
- &publish_result.build_dir,
- &public_dir,
- &timestamp,
- &env,
- &hook_stdout_tmp,
- &hook_stderr_tmp,
- )
- .await;
-
- if let Err(e) = logs::save_hook_log(log_dir, site_name, &timestamp, &hook_result).await {
- error!(%site_name, error = %e, "failed to save hook log");
- let _ = tokio::fs::remove_file(&hook_stdout_tmp).await;
- let _ = tokio::fs::remove_file(&hook_stderr_tmp).await;
- }
-
- if hook_result.success {
- info!(%site_name, "post-deploy hook completed");
+ let mut final_status = "success";
+ if let Some(hook_success) = run_hook_if_configured(
+ site,
+ site_name,
+ &publish_result.build_dir,
+ base_dir,
+ log_dir,
+ &site_log_dir,
+ &timestamp,
+ "success",
+ &env,
+ )
+ .await
+ {
+ if hook_success {
+ info!("[{site_name}] post-deploy hook completed");
} else {
- warn!(
- %site_name,
- exit_code = ?hook_result.exit_code,
- "post-deploy hook failed (non-fatal)"
- );
+ warn!("[{site_name}] post-deploy hook failed (non-fatal)");
+ final_status = "hook failed";
}
}
+ // Write final state + set current build
+ update_final_state(
+ base_dir,
+ site_name,
+ final_status,
+ git_commit.as_deref().unwrap_or(""),
+ start_time.elapsed(),
+ )
+ .await;
+ state::set_current(base_dir, site_name, &timestamp).await;
+
// 7. Cleanup old builds (non-fatal if it fails)
if let Err(e) =
cleanup::cleanup_old_builds(base_dir, log_dir, site_name, max_builds_to_keep).await
{
- warn!(%site_name, error = %e, "cleanup failed (non-fatal)");
+ warn!("[{site_name}] cleanup failed (non-fatal): {e}");
}
let duration = start_time.elapsed();
@@ -291,6 +351,68 @@ pub async fn run_build(
})
}
+/// Run the post-deploy hook if configured. Returns `Some(success)` if the hook
+/// ran, or `None` if no hook is configured.
+///
+/// `build_status` ("success" or "failed") is forwarded to the hook runner so a
+/// hook can also react to failed builds — presumably surfaced to the hook as
+/// an environment variable (see `hook::run_post_deploy_hook`); confirm there.
+/// Failure to persist the hook log is non-fatal: it is logged and the temp
+/// output files are removed best-effort.
+#[allow(clippy::too_many_arguments)]
+async fn run_hook_if_configured(
+    site: &SiteConfig,
+    site_name: &str,
+    build_dir: &Path,
+    base_dir: &Path,
+    log_dir: &Path,
+    site_log_dir: &Path,
+    timestamp: &str,
+    build_status: &str,
+    env: &HashMap<String, String>,
+) -> Option<bool> {
+    // `?` on the Option: return None immediately when no hook is configured.
+    let hook_cmd = site.post_deploy.as_ref()?;
+    info!("[{site_name}] running post-deploy hook (build_status={build_status})");
+
+    // Hook stdout/stderr are streamed to temp files next to the build logs.
+    let hook_stdout_tmp = site_log_dir.join(format!("{timestamp}-hook-stdout.tmp"));
+    let hook_stderr_tmp = site_log_dir.join(format!("{timestamp}-hook-stderr.tmp"));
+    let public_dir = base_dir.join("builds").join(site_name).join("current");
+
+    // Box::pin heap-allocates the future (cf. the clippy::large_futures
+    // allowances elsewhere in this patch).
+    let hook_result = Box::pin(hook::run_post_deploy_hook(
+        hook_cmd,
+        site_name,
+        build_dir,
+        &public_dir,
+        timestamp,
+        build_status,
+        env,
+        &hook_stdout_tmp,
+        &hook_stderr_tmp,
+    ))
+    .await;
+
+    // Saving the hook log is best-effort; on failure remove the temp files so
+    // they do not accumulate in the site log directory.
+    if let Err(e) = logs::save_hook_log(log_dir, site_name, timestamp, &hook_result).await {
+        error!("[{site_name}] failed to save hook log: {e}");
+        let _ = tokio::fs::remove_file(&hook_stdout_tmp).await;
+        let _ = tokio::fs::remove_file(&hook_stderr_tmp).await;
+    }
+
+    Some(hook_result.success)
+}
+
+/// Update the latest build entry with final status, commit, and duration. Best-effort.
+///
+/// The borrowed inputs are cloned into owned values up front so the closure
+/// passed to `state::update_latest_build` can move them. No error is returned
+/// — per the "best-effort" contract, failures are presumably handled inside
+/// `state::update_latest_build`; confirm against src/state.rs.
+async fn update_final_state(
+    base_dir: &Path,
+    site_name: &str,
+    status: &str,
+    git_commit: &str,
+    duration: Duration,
+) {
+    let s = status.to_owned();
+    let c = git_commit.to_owned();
+    // Duration is rendered once here so the closure only moves Strings.
+    let d = logs::format_duration(duration);
+    state::update_latest_build(base_dir, site_name, |e| {
+        e.status = s;
+        e.git_commit = c;
+        e.duration = d;
+    })
+    .await;
+}
+
/// Save a build log for errors that occur before the build starts.
async fn save_build_log_for_error(
log_dir: &Path,
@@ -321,7 +443,7 @@ async fn save_build_log_for_error(
let _ = tokio::fs::File::create(&stderr_tmp).await;
if let Err(e) = logs::save_build_log(log_dir, &meta, &stdout_tmp, &stderr_tmp).await {
- error!(site_name, error = %e, "failed to save build log");
+ error!("[{site_name}] failed to save build log: {e}");
let _ = tokio::fs::remove_file(&stdout_tmp).await;
let _ = tokio::fs::remove_file(&stderr_tmp).await;
}
diff --git a/src/polling.rs b/src/polling.rs
index 6c25326..c06cfad 100644
--- a/src/polling.rs
+++ b/src/polling.rs
@@ -7,18 +7,17 @@ use crate::build_guard::BuildGuard;
use crate::config::SiteConfig;
use crate::git;
use crate::server::AppState;
+use log::{debug, error, info};
use std::collections::HashMap;
use std::hash::{Hash as _, Hasher as _};
use std::sync::Arc;
use std::time::Duration;
-use tokio::sync::RwLock;
-use tokio_util::sync::CancellationToken;
-use tracing::{debug, error, info};
+use tokio::sync::{RwLock, watch};
/// Manages polling tasks for all sites.
pub struct PollingManager {
- /// Map of `site_name` -> cancellation token for active polling tasks
- tasks: Arc<RwLock<HashMap<String, CancellationToken>>>,
+ /// Map of `site_name` -> cancellation sender for active polling tasks
+ tasks: Arc<RwLock<HashMap<String, watch::Sender<()>>>>,
}
impl PollingManager {
@@ -31,10 +30,19 @@ impl PollingManager {
/// Start polling tasks for sites with `poll_interval` configured.
/// Call this on startup and after SIGHUP reload.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the config `RwLock` is poisoned.
pub async fn start_polling(&self, state: AppState) {
- let config = state.config.read().await;
-
- for site in &config.sites {
+ let sites: Vec<_> = state
+ .config
+ .read()
+ .expect("config lock poisoned")
+ .sites
+ .clone();
+
+ for site in &sites {
if let Some(interval) = site.poll_interval {
self.spawn_poll_task(state.clone(), site.clone(), interval)
.await;
@@ -47,38 +55,37 @@ impl PollingManager {
pub async fn stop_all(&self) {
let mut tasks = self.tasks.write().await;
- for (site_name, token) in tasks.drain() {
- info!(site = %site_name, "stopping polling task");
- token.cancel();
+ for (site_name, tx) in tasks.drain() {
+ info!("[{site_name}] stopping polling task");
+ let _ = tx.send(());
}
}
/// Spawn a single polling task for a site.
async fn spawn_poll_task(&self, state: AppState, site: SiteConfig, interval: Duration) {
let site_name = site.name.clone();
- let token = CancellationToken::new();
+ let (cancel_tx, cancel_rx) = watch::channel(());
- // Store the cancellation token
+ // Store the cancellation sender
{
let mut tasks = self.tasks.write().await;
- tasks.insert(site_name.clone(), token.clone());
+ tasks.insert(site_name.clone(), cancel_tx);
}
info!(
- site = %site_name,
- interval_secs = interval.as_secs(),
- "starting polling task"
+ "[{site_name}] starting polling task: interval_secs={}",
+ interval.as_secs()
);
// Spawn the polling loop
let tasks = Arc::clone(&self.tasks);
tokio::spawn(async move {
#[allow(clippy::large_futures)]
- poll_loop(state, site, interval, token.clone()).await;
+ poll_loop(state, site, interval, cancel_rx).await;
// Remove from active tasks when done
tasks.write().await.remove(&site_name);
- debug!(site = %site_name, "polling task ended");
+ debug!("[{site_name}] polling task ended");
});
}
}
@@ -94,29 +101,32 @@ async fn poll_loop(
state: AppState,
site: SiteConfig,
interval: Duration,
- cancel_token: CancellationToken,
+ mut cancel_rx: watch::Receiver<()>,
) {
let site_name = &site.name;
// Initial delay before first poll (avoid thundering herd on startup)
let initial_delay = calculate_initial_delay(site_name, interval);
- debug!(site = %site_name, delay_secs = initial_delay.as_secs(), "initial poll delay");
+ debug!(
+ "[{site_name}] initial poll delay: {} secs",
+ initial_delay.as_secs()
+ );
tokio::select! {
() = tokio::time::sleep(initial_delay) => {}
- () = cancel_token.cancelled() => return,
+ _ = cancel_rx.changed() => return,
}
loop {
- debug!(site = %site_name, "polling for changes");
+ debug!("[{site_name}] polling for changes");
// 1. Acquire build lock before any git operation
let Some(guard) = BuildGuard::try_acquire(site_name.clone(), &state.build_scheduler) else {
- debug!(site = %site_name, "build in progress, skipping poll cycle");
+ debug!("[{site_name}] build in progress, skipping poll cycle");
tokio::select! {
() = tokio::time::sleep(interval) => {}
- () = cancel_token.cancelled() => {
- info!(site = %site_name, "polling cancelled");
+ _ = cancel_rx.changed() => {
+ info!("[{site_name}] polling cancelled");
return;
}
}
@@ -125,7 +135,7 @@ async fn poll_loop(
// Get current config (might have changed via SIGHUP)
let (base_dir, git_timeout) = {
- let config = state.config.read().await;
+ let config = state.config.read().expect("config lock poisoned");
(
config.base_dir.clone(),
config.git_timeout.unwrap_or(git::GIT_TIMEOUT_DEFAULT),
@@ -144,14 +154,14 @@ async fn poll_loop(
{
Ok(changed) => changed,
Err(e) => {
- error!(site = %site_name, error = %e, "failed to check for changes");
+ error!("[{site_name}] failed to check for changes: {e}");
false
}
};
if has_changes {
// 3a. Keep guard alive — move into build pipeline
- info!(site = %site_name, "new commits detected, triggering build");
+ info!("[{site_name}] new commits detected, triggering build");
#[allow(clippy::large_futures)]
crate::server::run_build_pipeline(
state.clone(),
@@ -168,8 +178,8 @@ async fn poll_loop(
// 4. Sleep (lock is NOT held here in either branch)
tokio::select! {
() = tokio::time::sleep(interval) => {}
- () = cancel_token.cancelled() => {
- info!(site = %site_name, "polling cancelled");
+ _ = cancel_rx.changed() => {
+ info!("[{site_name}] polling cancelled");
return;
}
}
diff --git a/src/publish.rs b/src/publish.rs
index 338a136..f4862c4 100644
--- a/src/publish.rs
+++ b/src/publish.rs
@@ -1,6 +1,6 @@
use anyhow::{Context as _, Result, bail};
+use log::{debug, info, warn};
use std::path::{Path, PathBuf};
-use tracing::{debug, info};
/// Result of a successful publish operation.
#[derive(Debug)]
@@ -52,9 +52,9 @@ pub async fn publish(
let current_link = site_builds_dir.join("current");
info!(
- source = %source_dir.display(),
- destination = %build_dir.display(),
- "publishing assets"
+ "publishing assets: source={} destination={}",
+ source_dir.display(),
+ build_dir.display()
);
// 3. Create builds directory structure
@@ -76,9 +76,9 @@ pub async fn publish(
atomic_symlink_update(&build_dir, &current_link).await?;
debug!(
- build_dir = %build_dir.display(),
- symlink = %current_link.display(),
- "publish completed"
+ "publish completed: build_dir={} symlink={}",
+ build_dir.display(),
+ current_link.display()
);
Ok(PublishResult {
@@ -109,7 +109,7 @@ async fn copy_dir_contents(src: &Path, dst: &Path) -> Result<()> {
// SEC-002: reject symlinks in build output to prevent symlink attacks
let metadata = tokio::fs::symlink_metadata(&entry_path).await?;
if metadata.file_type().is_symlink() {
- tracing::warn!(path = %entry_path.display(), "skipping symlink in build output");
+ warn!("skipping symlink in build output: {}", entry_path.display());
continue;
}
@@ -144,7 +144,11 @@ async fn copy_dir_contents(src: &Path, dst: &Path) -> Result<()> {
/// 2. Rename temp to final: {`link_path}.tmp` -> {`link_path`}
///
/// The rename operation is atomic on POSIX filesystems.
-async fn atomic_symlink_update(target: &Path, link_path: &Path) -> Result<()> {
+///
+/// # Errors
+///
+/// Returns an error if the temporary symlink cannot be created or the atomic rename fails.
+pub async fn atomic_symlink_update(target: &Path, link_path: &Path) -> Result<()> {
let temp_link = link_path.with_extension("tmp");
// Remove any stale temp symlink from previous failed attempts
@@ -168,11 +172,10 @@ async fn atomic_symlink_update(target: &Path, link_path: &Path) -> Result<()> {
mod tests {
use super::*;
use crate::test_support::{cleanup, temp_dir};
- use chrono::Utc;
use tokio::fs;
fn test_timestamp() -> String {
- Utc::now().format("%Y%m%d-%H%M%S-%f").to_string()
+ crate::time::format_build_timestamp(std::time::SystemTime::now())
}
#[tokio::test]
diff --git a/src/server.rs b/src/server.rs
index e31a1e4..a2aef5c 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -2,24 +2,11 @@ use crate::build_guard::{BuildGuard, BuildScheduler};
use crate::config::{Config, SiteConfig};
use crate::polling::PollingManager;
use anyhow::Result;
-use axum::{
- Json, Router,
- extract::{DefaultBodyLimit, Path, State},
- http::{HeaderMap, StatusCode},
- response::IntoResponse,
- routing::{get, post},
-};
-use governor::clock::DefaultClock;
-use governor::state::keyed::DashMapStateStore;
-use governor::{Quota, RateLimiter};
-use std::num::NonZeroU32;
+use log::{error, info, warn};
use std::path::PathBuf;
-use std::sync::Arc;
-use subtle::ConstantTimeEq as _;
-use tokio::net::TcpListener;
+use std::sync::{Arc, RwLock};
+use tiny_http::{Header, Method, Request, Response, Server};
use tokio::signal::unix::{SignalKind, signal};
-use tokio::sync::RwLock;
-use tracing::{error, info, warn};
#[derive(serde::Serialize)]
struct ErrorResponse {
@@ -36,114 +23,118 @@ struct HealthResponse {
status: &'static str,
}
-fn error_response(status: StatusCode, error: &'static str) -> impl IntoResponse {
- (status, Json(ErrorResponse { error }))
+/// Build a `tiny_http` response with the given status code and JSON body.
+/// The `expect` cannot fire: "Content-Type"/"application/json" are valid
+/// ASCII header bytes, so `Header::from_bytes` succeeds for this fixed input.
+fn json_response(status: u16, body: &str) -> Response<std::io::Cursor<Vec<u8>>> {
+    let data = body.as_bytes().to_vec();
+    Response::from_data(data)
+        .with_status_code(status)
+        .with_header(Header::from_bytes("Content-Type", "application/json").expect("valid header"))
+}
-type TokenRateLimiter = RateLimiter<String, DashMapStateStore<String>, DefaultClock>;
+/// Build a body-less `tiny_http` response carrying only the status code.
+fn empty_response(status: u16) -> Response<std::io::Empty> {
+    Response::empty(status)
+}
#[derive(Clone)]
pub struct AppState {
pub config: Arc<RwLock<Config>>,
pub config_path: Arc<PathBuf>,
pub build_scheduler: Arc<BuildScheduler>,
- pub rate_limiter: Arc<TokenRateLimiter>,
pub polling_manager: Arc<PollingManager>,
}
-pub fn create_router(state: AppState) -> Router {
- Router::new()
- .route("/health", get(health_handler))
- .route("/{site_name}", post(deploy_handler))
- .layer(DefaultBodyLimit::max(1024 * 1024)) // 1MB limit
- .with_state(state)
-}
-
-async fn health_handler() -> impl IntoResponse {
- Json(HealthResponse { status: "ok" })
-}
-
-/// Extract Bearer token from Authorization header.
-fn extract_bearer_token(headers: &HeaderMap) -> Option<&str> {
+/// Extract Bearer token from `tiny_http` headers.
+fn extract_bearer_token(headers: &[Header]) -> Option<&str> {
headers
- .get("authorization")
- .and_then(|v| v.to_str().ok())
- .and_then(|v| v.strip_prefix("Bearer "))
+ .iter()
+ .find(|h| h.field.equiv("Authorization"))
+ .and_then(|h| h.value.as_str().strip_prefix("Bearer "))
}
fn validate_token(provided: &str, expected: &str) -> bool {
- let provided_bytes = provided.as_bytes();
- let expected_bytes = expected.as_bytes();
+ let a = provided.as_bytes();
+ let b = expected.as_bytes();
+
+ // Constant-time comparison — OWASP requirement.
+ // Length check is not constant-time, but token length is not secret
+ // (same early-return approach as subtle::ConstantTimeEq for slices).
+ if a.len() != b.len() {
+ return false;
+ }
- // Constant-time comparison - OWASP requirement
- provided_bytes.ct_eq(expected_bytes).into()
+ let mut acc: u8 = 0;
+ for (x, y) in a.iter().zip(b.iter()) {
+ acc |= x ^ y;
+ }
+ acc == 0
}
-async fn deploy_handler(
- State(state): State<AppState>,
- Path(site_name): Path<String>,
- headers: HeaderMap,
-) -> impl IntoResponse {
- info!(%site_name, "deployment request received");
-
- // Find the site first to avoid information leakage
- let site = {
- let config = state.config.read().await;
- if let Some(site) = config.find_site(&site_name) {
- site.clone()
- } else {
- info!(%site_name, "site not found");
- return error_response(StatusCode::NOT_FOUND, "not_found").into_response();
- }
- };
-
- // Validate Bearer token (skip if auth disabled for this site)
- if site.webhook_token.is_empty() {
- // Auth disabled — rate limit by site name instead
- if state.rate_limiter.check_key(&site_name).is_err() {
- info!(%site_name, "rate limit exceeded");
- return error_response(StatusCode::TOO_MANY_REQUESTS, "rate_limit_exceeded")
- .into_response();
- }
- } else {
- let Some(token) = extract_bearer_token(&headers) else {
- info!(%site_name, "missing or malformed authorization header");
- return error_response(StatusCode::UNAUTHORIZED, "unauthorized").into_response();
- };
+/// Check if path is a single segment (e.g., "/my-site").
+/// True only for "/" followed by a non-empty segment with no further '/';
+/// rejects "/" itself and nested paths like "/a/b". The `[1..]` slice is safe
+/// because the path is known to start with the single-byte ASCII char '/'.
+fn is_site_path(path: &str) -> bool {
+    path.starts_with('/') && path.len() > 1 && !path[1..].contains('/')
+}
- if !validate_token(token, &site.webhook_token) {
- info!(%site_name, "invalid token");
- return error_response(StatusCode::UNAUTHORIZED, "unauthorized").into_response();
- }
+/// Handle POST `/{site_name}`.
+fn handle_deploy(
+ request: Request,
+ site_name: &str,
+ state: &AppState,
+ handle: &tokio::runtime::Handle,
+) {
+ info!("[{site_name}] deployment request received");
+
+ // Find site
+ let site = state
+ .config
+ .read()
+ .expect("config lock poisoned")
+ .find_site(site_name)
+ .cloned();
+ let Some(site) = site else {
+ info!("[{site_name}] site not found");
+ let body = serde_json::to_string(&ErrorResponse { error: "not_found" })
+ .expect("static JSON serialization");
+ let _ = request.respond(json_response(404, &body));
+ return;
+ };
- // Rate limit check (per token)
- if state.rate_limiter.check_key(&token.to_owned()).is_err() {
- info!(%site_name, "rate limit exceeded");
- return error_response(StatusCode::TOO_MANY_REQUESTS, "rate_limit_exceeded")
- .into_response();
+ // Auth check (if configured)
+ if !site.webhook_token.is_empty() {
+ let token_valid = extract_bearer_token(request.headers())
+ .is_some_and(|token| validate_token(token, &site.webhook_token));
+
+ if !token_valid {
+ info!("[{site_name}] unauthorized request");
+ let body = serde_json::to_string(&ErrorResponse {
+ error: "unauthorized",
+ })
+ .expect("static JSON serialization");
+ let _ = request.respond(json_response(401, &body));
+ return;
}
}
// Try immediate build
- let Some(guard) = BuildGuard::try_acquire(site_name.clone(), &state.build_scheduler) else {
+ let Some(guard) = BuildGuard::try_acquire(site_name.to_owned(), &state.build_scheduler) else {
// Build in progress — try to queue
- if state.build_scheduler.try_queue(&site_name) {
- info!(%site_name, "build queued");
- return (
- StatusCode::ACCEPTED,
- Json(QueuedResponse { status: "queued" }),
- )
- .into_response();
+ if state.build_scheduler.try_queue(site_name) {
+ info!("[{site_name}] build queued");
+ let body = serde_json::to_string(&QueuedResponse { status: "queued" })
+ .expect("static JSON serialization");
+ let _ = request.respond(json_response(202, &body));
+ return;
}
// Already queued — collapse
- info!(%site_name, "build already queued, collapsing");
- return StatusCode::ACCEPTED.into_response();
+ info!("[{site_name}] build already queued, collapsing");
+ let _ = request.respond(empty_response(202));
+ return;
};
- info!(%site_name, "deployment accepted");
+ info!("[{site_name}] deployment accepted");
// Spawn async build pipeline with queue drain loop
- tokio::spawn(async move {
+ let state = state.clone();
+ let site_name = site_name.to_owned();
+ handle.spawn(async move {
let mut current_site = site;
let mut current_guard = guard;
loop {
@@ -160,9 +151,15 @@ async fn deploy_handler(
if !state.build_scheduler.take_queued(&site_name) {
break;
}
- info!(%site_name, "processing queued rebuild");
- let Some(new_site) = state.config.read().await.find_site(&site_name).cloned() else {
- warn!(%site_name, "site removed from config, skipping queued rebuild");
+ info!("[{site_name}] processing queued rebuild");
+ let Some(new_site) = state
+ .config
+ .read()
+ .expect("config lock poisoned")
+ .find_site(&site_name)
+ .cloned()
+ else {
+ warn!("[{site_name}] site removed from config, skipping queued rebuild");
break;
};
let Some(new_guard) =
@@ -175,7 +172,43 @@ async fn deploy_handler(
}
});
- StatusCode::ACCEPTED.into_response()
+ let _ = request.respond(empty_response(202));
+}
+
+/// Main request loop (runs on `std::thread`).
+///
+/// Blocks on `server.incoming_requests()` until the server is unblocked by
+/// the shutdown task. Routing:
+///   GET  /health      -> 200 {"status":"ok"}
+///   POST /{site_name} -> delegated to `handle_deploy`
+///   wrong method on a known path shape -> 405; anything else -> 404.
+/// `handle` is the Tokio runtime handle used to spawn async work from this
+/// non-async thread. Respond errors are deliberately ignored (`let _ =`) —
+/// presumably because the client may already have disconnected; confirm.
+#[allow(clippy::needless_pass_by_value)] // ownership required by std::thread::spawn callers
+pub(crate) fn handle_requests(
+    server: Arc<Server>,
+    state: AppState,
+    handle: tokio::runtime::Handle,
+) {
+    for request in server.incoming_requests() {
+        // Strip any query string before routing; `split('?')` always yields
+        // at least one piece, so `unwrap_or("")` is just a safe default.
+        let path = request.url().split('?').next().unwrap_or("").to_owned();
+        let method = request.method().clone();
+
+        match (method, path.as_str()) {
+            (Method::Get, "/health") => {
+                let body = serde_json::to_string(&HealthResponse { status: "ok" })
+                    .expect("static JSON serialization");
+                let _ = request.respond(json_response(200, &body));
+            }
+            (_, "/health") => {
+                let _ = request.respond(empty_response(405));
+            }
+            (Method::Post, _) if is_site_path(&path) => {
+                // Leading '/' stripped; is_site_path guarantees len > 1.
+                let site_name = &path[1..];
+                handle_deploy(request, site_name, &state, &handle);
+            }
+            (_, _) if is_site_path(&path) => {
+                let _ = request.respond(empty_response(405));
+            }
+            _ => {
+                let body = serde_json::to_string(&ErrorResponse { error: "not_found" })
+                    .expect("static JSON serialization");
+                let _ = request.respond(json_response(404, &body));
+            }
+        }
+    }
}
/// Run the complete build pipeline: git sync → build → publish.
@@ -187,7 +220,7 @@ pub(crate) async fn run_build_pipeline(
_guard: BuildGuard,
) {
let (base_dir, log_dir, container_runtime, max_builds_to_keep, git_timeout) = {
- let config = state.config.read().await;
+ let config = state.config.read().expect("config lock poisoned");
(
config.base_dir.clone(),
config.log_dir.clone(),
@@ -213,14 +246,13 @@ pub(crate) async fn run_build_pipeline(
{
Ok(result) => {
info!(
- %site_name,
- build_dir = %result.build_dir.display(),
- duration_secs = result.duration.as_secs(),
- "pipeline completed"
+ "[{site_name}] pipeline completed: build_dir={} duration_secs={}",
+ result.build_dir.display(),
+ result.duration.as_secs()
);
}
Err(e) => {
- error!(%site_name, error = %e, "pipeline failed");
+ error!("[{site_name}] pipeline failed: {e}");
}
}
}
@@ -239,38 +271,41 @@ pub(crate) fn setup_sighup_handler(state: AppState) {
let config_path = state.config_path.as_ref();
match Config::load(config_path).await {
Ok(new_config) => {
- let old_sites_count = state.config.read().await.sites.len();
+ let old_sites_count = state
+ .config
+ .read()
+ .expect("config lock poisoned")
+ .sites
+ .len();
let new_sites_count = new_config.sites.len();
// Check for non-reloadable changes and capture old values
let (old_listen, old_base, old_log_dir, old_log_level) = {
- let old_config = state.config.read().await;
+ let old_config = state.config.read().expect("config lock poisoned");
if old_config.listen_address != new_config.listen_address {
warn!(
- old = %old_config.listen_address,
- new = %new_config.listen_address,
- "listen_address changed but cannot be reloaded (restart required)"
+ "listen_address changed but cannot be reloaded (restart required): old={} new={}",
+ old_config.listen_address, new_config.listen_address
);
}
if old_config.base_dir != new_config.base_dir {
warn!(
- old = %old_config.base_dir.display(),
- new = %new_config.base_dir.display(),
- "base_dir changed but cannot be reloaded (restart required)"
+ "base_dir changed but cannot be reloaded (restart required): old={} new={}",
+ old_config.base_dir.display(),
+ new_config.base_dir.display()
);
}
if old_config.log_dir != new_config.log_dir {
warn!(
- old = %old_config.log_dir.display(),
- new = %new_config.log_dir.display(),
- "log_dir changed but cannot be reloaded (restart required)"
+ "log_dir changed but cannot be reloaded (restart required): old={} new={}",
+ old_config.log_dir.display(),
+ new_config.log_dir.display()
);
}
if old_config.log_level != new_config.log_level {
warn!(
- old = %old_config.log_level,
- new = %new_config.log_level,
- "log_level changed but cannot be reloaded (restart required)"
+ "log_level changed but cannot be reloaded (restart required): old={} new={}",
+ old_config.log_level, new_config.log_level
);
}
(
@@ -289,7 +324,7 @@ pub(crate) fn setup_sighup_handler(state: AppState) {
final_config.log_level = old_log_level;
// Apply the merged configuration
- *state.config.write().await = final_config;
+ *state.config.write().expect("config lock poisoned") = final_config;
// Restart polling tasks with new configuration
info!("restarting polling tasks");
@@ -297,12 +332,11 @@ pub(crate) fn setup_sighup_handler(state: AppState) {
state.polling_manager.start_polling(state.clone()).await;
info!(
- old_sites_count,
- new_sites_count, "configuration reloaded successfully"
+ "configuration reloaded successfully: old_sites_count={old_sites_count} new_sites_count={new_sites_count}"
);
}
Err(e) => {
- error!(error = %e, "failed to reload configuration, keeping current config");
+ error!("failed to reload configuration, keeping current config: {e}");
}
}
}
@@ -315,28 +349,14 @@ pub(crate) fn setup_sighup_handler(state: AppState) {
///
/// Returns an error if the TCP listener cannot bind or the server encounters
/// a fatal I/O error.
-///
-/// # Panics
-///
-/// Panics if `rate_limit_per_minute` is zero. This is unreachable after
-/// successful config validation.
pub async fn run(config: Config, config_path: PathBuf) -> Result<()> {
let addr = config.parsed_listen_address();
- #[allow(clippy::expect_used)] // validated by Config::validate_rate_limit()
- let quota = Quota::per_minute(
- NonZeroU32::new(config.rate_limit_per_minute)
- .expect("rate_limit_per_minute must be greater than 0"),
- );
- let rate_limiter = Arc::new(RateLimiter::dashmap(quota));
- let polling_manager = Arc::new(PollingManager::new());
-
let state = AppState {
config: Arc::new(RwLock::new(config)),
config_path: Arc::new(config_path),
build_scheduler: Arc::new(BuildScheduler::new()),
- rate_limiter,
- polling_manager,
+ polling_manager: Arc::new(PollingManager::new()),
};
// Setup SIGHUP handler for configuration hot-reload
@@ -345,37 +365,54 @@ pub async fn run(config: Config, config_path: PathBuf) -> Result<()> {
// Start polling tasks for sites with poll_interval configured
state.polling_manager.start_polling(state.clone()).await;
- let listener = TcpListener::bind(addr).await?;
- info!(%addr, "server listening");
+ let server = Arc::new(Server::http(addr).map_err(|e| anyhow::anyhow!("failed to bind: {e}"))?);
+ info!("server listening on {addr}");
- run_with_listener(state, listener, async {
+ // Shutdown handler: signal → unblock server
+ let shutdown_server = Arc::clone(&server);
+ tokio::spawn(async move {
let mut sigterm = signal(SignalKind::terminate()).expect("failed to setup SIGTERM handler");
let mut sigint = signal(SignalKind::interrupt()).expect("failed to setup SIGINT handler");
tokio::select! {
_ = sigterm.recv() => info!("received SIGTERM, shutting down"),
_ = sigint.recv() => info!("received SIGINT, shutting down"),
}
+ shutdown_server.unblock();
+ });
+
+ // Run HTTP loop on blocking thread
+ let handle = tokio::runtime::Handle::current();
+ tokio::task::spawn_blocking(move || {
+ handle_requests(server, state, handle);
})
- .await
+ .await?;
+
+ Ok(())
}
/// Run the server with a pre-built Server, shutting down when `shutdown_signal` resolves.
///
/// Used by integration tests via [`test_support::run_server`].
/// Returns a `std::thread::JoinHandle` for the request-handling thread.
#[cfg(any(test, feature = "integration"))]
pub(crate) fn run_with_server(
    state: AppState,
    server: Arc<Server>,
    shutdown_signal: impl std::future::Future<Output = ()> + Send + 'static,
) -> std::thread::JoinHandle<()> {
    // Capture the current Tokio runtime handle so the blocking request
    // thread can re-enter async context from `handle_requests`.
    let handle = tokio::runtime::Handle::current();

    // Shutdown: wait for signal, then unblock. `unblock()` makes the
    // blocking accept loop return so the thread below can exit cleanly.
    let shutdown_server = Arc::clone(&server);
    tokio::spawn(async move {
        shutdown_signal.await;
        shutdown_server.unblock();
    });

    // Spawn request handler on std::thread, return handle for joining
    std::thread::spawn(move || {
        handle_requests(server, state, handle);
    })
}
#[cfg(test)]
@@ -383,23 +420,13 @@ pub(crate) async fn run_with_listener(
mod tests {
use super::*;
use crate::config::{BuildOverrides, SiteConfig};
- use axum::body::Body;
- use axum::http::{Request, StatusCode};
- use axum::response::Response;
use std::path::PathBuf;
- use tower::ServiceExt as _;
    /// Build a minimal [`AppState`] around `config` for in-process server tests.
    ///
    /// Uses a dummy config path; no file I/O is performed.
    fn test_state(config: Config) -> AppState {
        AppState {
            config: Arc::new(RwLock::new(config)),
            config_path: Arc::new(PathBuf::from("witryna.toml")),
            build_scheduler: Arc::new(BuildScheduler::new()),
            polling_manager: Arc::new(PollingManager::new()),
        }
    }
@@ -411,7 +438,6 @@ mod tests {
base_dir: PathBuf::from("/var/lib/witryna"),
log_dir: PathBuf::from("/var/log/witryna"),
log_level: "info".to_owned(),
- rate_limit_per_minute: 10,
max_builds_to_keep: 5,
git_timeout: None,
sites: vec![],
@@ -445,221 +471,169 @@ mod tests {
}
}
    /// Start a test server on a random port, returning the server handle, state, and port.
    ///
    /// Port 0 lets the OS choose a free port; the actual port is read back
    /// from the bound address. The request-handling thread is detached —
    /// tests stop it by calling `server.unblock()`.
    fn test_server(config: Config) -> (Arc<Server>, AppState, u16) {
        let state = test_state(config);
        let server = Arc::new(Server::http("127.0.0.1:0").unwrap());
        let port = match server.server_addr() {
            tiny_http::ListenAddr::IP(a) => a.port(),
            // tiny_http also supports Unix sockets; we only ever bind TCP here.
            _ => unreachable!("expected IP address"),
        };
        let handle = tokio::runtime::Handle::current();
        let server_clone = server.clone();
        let state_clone = state.clone();
        std::thread::spawn(move || handle_requests(server_clone, state_clone, handle));
        (server, state, port)
    }
+
#[tokio::test]
async fn health_endpoint_returns_ok() {
- let state = test_state(test_config_with_sites());
- let router = create_router(state);
-
- let response: Response = router
- .oneshot(
- Request::builder()
- .uri("/health")
- .body(Body::empty())
- .unwrap(),
- )
+ let (server, _state, port) = test_server(test_config_with_sites());
+ let resp = reqwest::get(format!("http://127.0.0.1:{port}/health"))
.await
.unwrap();
-
- assert_eq!(response.status(), StatusCode::OK);
- let body = axum::body::to_bytes(response.into_body(), 1024)
- .await
- .unwrap();
- let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
+ assert_eq!(resp.status().as_u16(), 200);
+ let json: serde_json::Value = resp.json().await.unwrap();
assert_eq!(json["status"], "ok");
+ server.unblock();
}
    /// JSON responses carry an explicit `Content-Type: application/json` header.
    #[tokio::test]
    async fn json_responses_have_content_type_header() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let resp = reqwest::get(format!("http://127.0.0.1:{port}/health"))
            .await
            .unwrap();
        assert_eq!(
            resp.headers()
                .get("content-type")
                .unwrap()
                .to_str()
                .unwrap(),
            "application/json"
        );
        server.unblock();
    }
    /// POST to a path matching no configured site returns 404 with `{"error": "not_found"}`.
    #[tokio::test]
    async fn unknown_site_post_returns_not_found() {
        let (server, _state, port) = test_server(test_config());
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("http://127.0.0.1:{port}/nonexistent"))
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 404);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "not_found");
        server.unblock();
    }
#[tokio::test]
async fn deploy_known_site_with_valid_token_returns_accepted() {
- let state = test_state(test_config_with_sites());
- let router = create_router(state);
-
- let response: Response = router
- .oneshot(
- Request::builder()
- .method("POST")
- .uri("/my-site")
- .header("Authorization", "Bearer secret-token")
- .body(Body::empty())
- .unwrap(),
- )
- .await
- .unwrap();
-
- assert_eq!(response.status(), StatusCode::ACCEPTED);
- let body = axum::body::to_bytes(response.into_body(), 1024)
+ let (server, _state, port) = test_server(test_config_with_sites());
+ let client = reqwest::Client::new();
+ let resp = client
+ .post(format!("http://127.0.0.1:{port}/my-site"))
+ .header("Authorization", "Bearer secret-token")
+ .send()
.await
.unwrap();
- assert!(body.is_empty());
+ assert_eq!(resp.status().as_u16(), 202);
+ server.unblock();
}
    /// Missing Authorization header on a token-protected site yields 401 `unauthorized`.
    #[tokio::test]
    async fn deploy_missing_auth_header_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }
    /// A bearer token that does not match the site's configured token yields 401.
    #[tokio::test]
    async fn deploy_invalid_token_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer wrong-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }
    /// An Authorization header that is not a `Bearer` scheme is rejected with 401.
    #[tokio::test]
    async fn deploy_malformed_auth_header_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        // Test without "Bearer " prefix
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "secret-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }
    /// HTTP Basic credentials are not accepted — only `Bearer` tokens authenticate.
    #[tokio::test]
    async fn deploy_basic_auth_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        // Test Basic auth instead of Bearer
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Basic dXNlcjpwYXNz")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }
    /// GET on a deploy endpoint is rejected with 405 — only POST triggers builds.
    #[tokio::test]
    async fn deploy_get_method_not_allowed() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let resp = reqwest::get(format!("http://127.0.0.1:{port}/my-site"))
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 405);
        server.unblock();
    }
    /// Unknown site with any token still yields 404 — site lookup happens before auth.
    #[tokio::test]
    async fn deploy_unknown_site_with_token_returns_not_found() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("http://127.0.0.1:{port}/unknown-site"))
            .header("Authorization", "Bearer any-token")
            .send()
            .await
            .unwrap();
        // Returns 404 before checking token (site lookup first)
        assert_eq!(resp.status().as_u16(), 404);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "not_found");
        server.unblock();
    }
fn test_config_with_two_sites() -> Config {
@@ -669,7 +643,6 @@ mod tests {
base_dir: PathBuf::from("/var/lib/witryna"),
log_dir: PathBuf::from("/var/log/witryna"),
log_level: "info".to_owned(),
- rate_limit_per_minute: 10,
max_builds_to_keep: 5,
git_timeout: None,
sites: vec![
@@ -721,290 +694,92 @@ mod tests {
    /// While a build is in progress, the next deploy is queued and further ones collapsed.
    #[tokio::test]
    async fn deploy_concurrent_same_site_gets_queued() {
        let (server, state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();

        // Pre-mark site as building to simulate an in-progress build
        state
            .build_scheduler
            .in_progress
            .lock()
            .unwrap()
            .insert("my-site".to_owned());

        // First request to same site should be queued (202 with body)
        let resp1 = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer secret-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp1.status().as_u16(), 202);
        let json: serde_json::Value = resp1.json().await.unwrap();
        assert_eq!(json["status"], "queued");

        // Second request should be collapsed (202, no body)
        let resp2 = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer secret-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp2.status().as_u16(), 202);

        server.unblock();
    }
    /// Builds for distinct sites do not block each other: both deploys are accepted.
    #[tokio::test]
    async fn deploy_concurrent_different_sites_both_succeed() {
        let (server, _state, port) = test_server(test_config_with_two_sites());
        let client = reqwest::Client::new();

        // First site deployment
        let resp1 = client
            .post(format!("http://127.0.0.1:{port}/site-one"))
            .header("Authorization", "Bearer token-one")
            .send()
            .await
            .unwrap();
        assert_eq!(resp1.status().as_u16(), 202);

        // Second site deployment should also succeed
        let resp2 = client
            .post(format!("http://127.0.0.1:{port}/site-two"))
            .header("Authorization", "Bearer token-two")
            .send()
            .await
            .unwrap();
        assert_eq!(resp2.status().as_u16(), 202);

        server.unblock();
    }
    /// Auth is validated before the build-in-progress check: a wrong token
    /// gets 401 even when the site is mid-build.
    #[tokio::test]
    async fn deploy_site_in_progress_checked_after_auth() {
        let (server, state, port) = test_server(test_config_with_sites());

        // Pre-mark site as building
        state
            .build_scheduler
            .in_progress
            .lock()
            .unwrap()
            .insert("my-site".to_owned());

        let client = reqwest::Client::new();

        // Request with wrong token should return 401 (auth checked before build status)
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer wrong-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }
#[tokio::test]
@@ -1016,7 +791,6 @@ mod tests {
base_dir: PathBuf::from("/var/lib/witryna"),
log_dir: PathBuf::from("/var/log/witryna"),
log_level: "info".to_owned(),
- rate_limit_per_minute: 10,
max_builds_to_keep: 5,
git_timeout: None,
sites: vec![SiteConfig {
@@ -1052,7 +826,6 @@ mod tests {
base_dir: PathBuf::from("/tmp/new-base"),
log_dir: PathBuf::from("/tmp/new-logs"),
log_level: "debug".to_owned(),
- rate_limit_per_minute: 20,
max_builds_to_keep: 10,
git_timeout: None,
sites: vec![SiteConfig {
@@ -1080,7 +853,7 @@ mod tests {
// Apply the same merge logic used in setup_sighup_handler
let (old_listen, old_base, old_log_dir, old_log_level) = {
- let old_config = state.config.read().await;
+ let old_config = state.config.read().unwrap();
(
old_config.listen_address.clone(),
old_config.base_dir.clone(),
@@ -1095,21 +868,30 @@ mod tests {
final_config.log_dir = old_log_dir;
final_config.log_level = old_log_level;
- *state.config.write().await = final_config;
-
- // Verify non-reloadable fields are preserved
- let config = state.config.read().await;
- assert_eq!(config.listen_address, "127.0.0.1:8080");
- assert_eq!(config.base_dir, PathBuf::from("/var/lib/witryna"));
- assert_eq!(config.log_dir, PathBuf::from("/var/log/witryna"));
- assert_eq!(config.log_level, "info");
-
- // Verify reloadable fields are updated
- assert_eq!(config.container_runtime, "docker");
- assert_eq!(config.rate_limit_per_minute, 20);
- assert_eq!(config.max_builds_to_keep, 10);
- assert_eq!(config.sites.len(), 1);
- assert_eq!(config.sites[0].name, "new-site");
+ *state.config.write().unwrap() = final_config;
+
+ // Verify non-reloadable fields are preserved and reloadable fields are updated
+ let (listen, base, log_d, log_l, runtime, max_builds, sites_len, site_name) = {
+ let config = state.config.read().unwrap();
+ (
+ config.listen_address.clone(),
+ config.base_dir.clone(),
+ config.log_dir.clone(),
+ config.log_level.clone(),
+ config.container_runtime.clone(),
+ config.max_builds_to_keep,
+ config.sites.len(),
+ config.sites[0].name.clone(),
+ )
+ };
+ assert_eq!(listen, "127.0.0.1:8080");
+ assert_eq!(base, PathBuf::from("/var/lib/witryna"));
+ assert_eq!(log_d, PathBuf::from("/var/log/witryna"));
+ assert_eq!(log_l, "info");
+ assert_eq!(runtime, "docker");
+ assert_eq!(max_builds, 10);
+ assert_eq!(sites_len, 1);
+ assert_eq!(site_name, "new-site");
}
fn test_config_with_disabled_auth() -> Config {
@@ -1140,80 +922,34 @@ mod tests {
    /// Sites configured with auth disabled accept deploys with no Authorization header.
    #[tokio::test]
    async fn deploy_disabled_auth_returns_accepted() {
        let (server, _state, port) = test_server(test_config_with_disabled_auth());
        let client = reqwest::Client::new();

        // Request without Authorization header should succeed
        let resp = client
            .post(format!("http://127.0.0.1:{port}/open-site"))
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 202);

        server.unblock();
    }
    /// With auth disabled, a supplied bearer token is ignored rather than validated.
    #[tokio::test]
    async fn deploy_disabled_auth_ignores_token() {
        let (server, _state, port) = test_server(test_config_with_disabled_auth());
        let client = reqwest::Client::new();

        // Request WITH a Bearer token should also succeed (token ignored)
        let resp = client
            .post(format!("http://127.0.0.1:{port}/open-site"))
            .header("Authorization", "Bearer any-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 202);

        server.unblock();
    }
}
diff --git a/src/state.rs b/src/state.rs
new file mode 100644
index 0000000..be4e981
--- /dev/null
+++ b/src/state.rs
@@ -0,0 +1,311 @@
+use anyhow::Result;
+use log::warn;
+use std::path::Path;
+
/// A single build record within the site state.
///
/// Serialized verbatim into `state.json`; field names are part of the
/// on-disk format, so renames require a migration.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct BuildEntry {
    // NOTE(review): status is stringly-typed; an enum with serde renames
    // would catch typos at compile time — confirm state.json compatibility
    // requirements before changing.
    /// Build phase: "building", "success", "failed", "hook failed".
    pub status: String,
    /// Build timestamp (YYYYMMDD-HHMMSS-microseconds).
    pub timestamp: String,
    /// ISO 8601 UTC when the build started (for elapsed time calculation).
    pub started_at: String,
    /// Short git commit hash, or empty string if unknown.
    pub git_commit: String,
    /// Human-readable duration ("45s", "2m 30s"), empty while building.
    pub duration: String,
    /// Path to the build log file.
    pub log: String,
}
+
/// Persistent per-site build state, written to `{base_dir}/builds/{site}/state.json`.
///
/// Contains the full build history and the currently active build timestamp.
/// The CLI `status` command reads only this file — no log parsing needed.
// NOTE(review): the mutation helpers in this module do unsynchronized
// load-modify-save cycles; presumably only one writer (the daemon) touches a
// given site's state at a time — confirm, otherwise concurrent updates can be
// lost (last writer wins).
#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
pub struct SiteState {
    /// Timestamp of the currently active build (empty if none).
    pub current: String,
    /// All builds, newest first.
    pub builds: Vec<BuildEntry>,
}
+
+/// Atomically write site state to `{base_dir}/builds/{site_name}/state.json`.
+///
+/// Uses temp-file + rename for atomic writes. Creates parent directories
+/// if they don't exist. Errors are non-fatal — callers should log and continue.
+///
+/// # Errors
+///
+/// Returns an error if directory creation, JSON serialization, or the atomic write/rename fails.
+pub async fn save_state(base_dir: &Path, site_name: &str, state: &SiteState) -> Result<()> {
+ let builds_dir = base_dir.join("builds").join(site_name);
+ tokio::fs::create_dir_all(&builds_dir).await?;
+
+ let state_path = builds_dir.join("state.json");
+ let tmp_path = builds_dir.join("state.json.tmp");
+
+ let json = serde_json::to_string_pretty(state)?;
+ tokio::fs::write(&tmp_path, json.as_bytes()).await?;
+ tokio::fs::rename(&tmp_path, &state_path).await?;
+
+ Ok(())
+}
+
+/// Load site state from `{base_dir}/builds/{site_name}/state.json`.
+///
+/// Returns the default empty state if the file is missing or cannot be parsed.
+pub async fn load_state(base_dir: &Path, site_name: &str) -> SiteState {
+ let state_path = base_dir.join("builds").join(site_name).join("state.json");
+
+ let Ok(content) = tokio::fs::read_to_string(&state_path).await else {
+ return SiteState::default();
+ };
+
+ match serde_json::from_str(&content) {
+ Ok(state) => state,
+ Err(e) => {
+ warn!(
+ "[{site_name}] malformed state.json: {e} (path={})",
+ state_path.display()
+ );
+ SiteState::default()
+ }
+ }
+}
+
+/// Add a new build entry to the front of the builds list. Best-effort.
+pub async fn push_build(base_dir: &Path, site_name: &str, entry: BuildEntry) {
+ let mut state = load_state(base_dir, site_name).await;
+ state.builds.insert(0, entry);
+ if let Err(e) = save_state(base_dir, site_name, &state).await {
+ warn!("[{site_name}] failed to write state after push_build: {e}");
+ }
+}
+
+/// Update the most recent build entry in-place. Best-effort.
+///
+/// Does nothing if the builds list is empty.
+pub async fn update_latest_build(
+ base_dir: &Path,
+ site_name: &str,
+ updater: impl FnOnce(&mut BuildEntry),
+) {
+ let mut state = load_state(base_dir, site_name).await;
+ if let Some(entry) = state.builds.first_mut() {
+ updater(entry);
+ if let Err(e) = save_state(base_dir, site_name, &state).await {
+ warn!("[{site_name}] failed to write state after update_latest_build: {e}");
+ }
+ }
+}
+
+/// Set the currently active build timestamp. Best-effort.
+pub async fn set_current(base_dir: &Path, site_name: &str, timestamp: &str) {
+ let mut state = load_state(base_dir, site_name).await;
+ state.current = timestamp.to_owned();
+ if let Err(e) = save_state(base_dir, site_name, &state).await {
+ warn!("[{site_name}] failed to write state after set_current: {e}");
+ }
+}
+
+/// Remove build entries whose timestamps match any in `timestamps`. Best-effort.
+pub async fn remove_builds(base_dir: &Path, site_name: &str, timestamps: &[String]) {
+ if timestamps.is_empty() {
+ return;
+ }
+ let mut state = load_state(base_dir, site_name).await;
+ let before = state.builds.len();
+ state.builds.retain(|b| !timestamps.contains(&b.timestamp));
+ if state.builds.len() == before {
+ return; // nothing changed
+ }
+ if let Err(e) = save_state(base_dir, site_name, &state).await {
+ warn!("[{site_name}] failed to write state after remove_builds: {e}");
+ }
+}
+
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use crate::test_support::{cleanup, temp_dir};

    /// A representative in-progress ("building") entry shared by the tests.
    fn test_entry() -> BuildEntry {
        BuildEntry {
            status: "building".to_owned(),
            timestamp: "20260210-120000-000000".to_owned(),
            started_at: "2026-02-10T12:00:00Z".to_owned(),
            git_commit: "abc123d".to_owned(),
            duration: String::new(),
            log: "/var/log/witryna/my-site/20260210-120000-000000.log".to_owned(),
        }
    }

    /// A state with a single build and no active deployment.
    fn test_state() -> SiteState {
        SiteState {
            current: String::new(),
            builds: vec![test_entry()],
        }
    }

    /// Every field survives a save/load cycle unchanged.
    #[tokio::test]
    async fn save_and_load_roundtrip() {
        let base_dir = temp_dir("state-test").await;
        let state = test_state();

        save_state(&base_dir, "my-site", &state).await.unwrap();

        let loaded = load_state(&base_dir, "my-site").await;
        assert_eq!(loaded.builds.len(), 1);
        let b = &loaded.builds[0];
        assert_eq!(b.status, "building");
        assert_eq!(b.timestamp, "20260210-120000-000000");
        assert_eq!(b.started_at, "2026-02-10T12:00:00Z");
        assert_eq!(b.git_commit, "abc123d");
        assert_eq!(b.duration, "");
        assert!(b.log.contains("20260210-120000-000000.log"));
        assert_eq!(loaded.current, "");

        cleanup(&base_dir).await;
    }

    /// Missing state.json is not an error — defaults are returned.
    #[tokio::test]
    async fn load_state_missing_file_returns_default() {
        let base_dir = temp_dir("state-test").await;

        let loaded = load_state(&base_dir, "nonexistent").await;
        assert!(loaded.builds.is_empty());
        assert_eq!(loaded.current, "");

        cleanup(&base_dir).await;
    }

    /// Corrupted state.json degrades gracefully to the default state.
    #[tokio::test]
    async fn load_state_malformed_json_returns_default() {
        let base_dir = temp_dir("state-test").await;
        let state_dir = base_dir.join("builds").join("bad-site");
        tokio::fs::create_dir_all(&state_dir).await.unwrap();
        tokio::fs::write(state_dir.join("state.json"), "not valid json{{{")
            .await
            .unwrap();

        let loaded = load_state(&base_dir, "bad-site").await;
        assert!(loaded.builds.is_empty());

        cleanup(&base_dir).await;
    }

    /// The temp file used for the atomic write must not survive a save.
    #[tokio::test]
    async fn save_state_atomic_no_tmp_left() {
        let base_dir = temp_dir("state-test").await;
        let state = test_state();

        save_state(&base_dir, "my-site", &state).await.unwrap();

        let tmp_path = base_dir
            .join("builds")
            .join("my-site")
            .join("state.json.tmp");
        assert!(!tmp_path.exists(), "temp file should not remain");

        cleanup(&base_dir).await;
    }

    /// New entries go to the front: the list is kept newest-first.
    #[tokio::test]
    async fn push_build_prepends() {
        let base_dir = temp_dir("state-test").await;

        let entry1 = test_entry();
        push_build(&base_dir, "my-site", entry1).await;

        let mut entry2 = test_entry();
        entry2.timestamp = "20260210-130000-000000".to_owned();
        push_build(&base_dir, "my-site", entry2).await;

        let loaded = load_state(&base_dir, "my-site").await;
        assert_eq!(loaded.builds.len(), 2);
        assert_eq!(loaded.builds[0].timestamp, "20260210-130000-000000");
        assert_eq!(loaded.builds[1].timestamp, "20260210-120000-000000");

        cleanup(&base_dir).await;
    }

    /// The updater closure mutates only the newest (first) entry.
    #[tokio::test]
    async fn update_latest_build_modifies_first() {
        let base_dir = temp_dir("state-test").await;

        push_build(&base_dir, "my-site", test_entry()).await;

        update_latest_build(&base_dir, "my-site", |e| {
            e.status = "success".to_owned();
            e.duration = "30s".to_owned();
        })
        .await;

        let loaded = load_state(&base_dir, "my-site").await;
        assert_eq!(loaded.builds[0].status, "success");
        assert_eq!(loaded.builds[0].duration, "30s");

        cleanup(&base_dir).await;
    }

    /// `set_current` persists the active-build marker.
    #[tokio::test]
    async fn set_current_updates_field() {
        let base_dir = temp_dir("state-test").await;

        push_build(&base_dir, "my-site", test_entry()).await;
        set_current(&base_dir, "my-site", "20260210-120000-000000").await;

        let loaded = load_state(&base_dir, "my-site").await;
        assert_eq!(loaded.current, "20260210-120000-000000");

        cleanup(&base_dir).await;
    }

    /// Removing two of three timestamps leaves exactly the third.
    #[tokio::test]
    async fn remove_builds_prunes_entries() {
        let base_dir = temp_dir("state-test").await;

        let mut e1 = test_entry();
        e1.timestamp = "20260210-100000-000000".to_owned();
        let mut e2 = test_entry();
        e2.timestamp = "20260210-110000-000000".to_owned();
        let mut e3 = test_entry();
        e3.timestamp = "20260210-120000-000000".to_owned();

        push_build(&base_dir, "my-site", e3).await;
        push_build(&base_dir, "my-site", e2).await;
        push_build(&base_dir, "my-site", e1).await;

        remove_builds(
            &base_dir,
            "my-site",
            &[
                "20260210-100000-000000".to_owned(),
                "20260210-120000-000000".to_owned(),
            ],
        )
        .await;

        let loaded = load_state(&base_dir, "my-site").await;
        assert_eq!(loaded.builds.len(), 1);
        assert_eq!(loaded.builds[0].timestamp, "20260210-110000-000000");

        cleanup(&base_dir).await;
    }

    /// An empty timestamp list must leave the state untouched.
    #[tokio::test]
    async fn remove_builds_empty_list_is_noop() {
        let base_dir = temp_dir("state-test").await;

        push_build(&base_dir, "my-site", test_entry()).await;
        remove_builds(&base_dir, "my-site", &[]).await;

        let loaded = load_state(&base_dir, "my-site").await;
        assert_eq!(loaded.builds.len(), 1);

        cleanup(&base_dir).await;
    }
}
diff --git a/src/test_support.rs b/src/test_support.rs
index 8f2d2bf..d6a0a96 100644
--- a/src/test_support.rs
+++ b/src/test_support.rs
@@ -7,24 +7,21 @@
#![allow(clippy::unwrap_used, clippy::expect_used)]
-use crate::server::{AppState, run_with_listener};
-use anyhow::Result;
+use crate::server::{AppState, run_with_server};
use std::path::{Path, PathBuf};
-use tokio::net::TcpListener;
+use std::sync::Arc;
+use tiny_http::Server;
-/// Start the HTTP server on the given listener, shutting down when `shutdown` resolves.
+/// Start the HTTP server, returning a `JoinHandle` for the request-handling thread.
///
-/// The server behaves identically to production — same middleware, same handlers.
-///
-/// # Errors
-///
-/// Returns an error if the server encounters a fatal I/O error.
-pub async fn run_server(
+/// The server shuts down when `shutdown` resolves (calls `server.unblock()`).
+/// Callers should join the handle after triggering shutdown.
+pub fn run_server(
state: AppState,
- listener: TcpListener,
+ server: Arc<Server>,
shutdown: impl std::future::Future<Output = ()> + Send + 'static,
-) -> Result<()> {
- run_with_listener(state, listener, shutdown).await
+) -> std::thread::JoinHandle<()> {
+ run_with_server(state, server, shutdown)
}
/// Install the SIGHUP configuration-reload handler for `state`.
diff --git a/src/time.rs b/src/time.rs
new file mode 100644
index 0000000..2e084a8
--- /dev/null
+++ b/src/time.rs
@@ -0,0 +1,222 @@
+use std::time::{SystemTime, UNIX_EPOCH};
+
+/// Format as `YYYYMMDD-HHMMSS-ffffff` (build directories and log files).
+#[must_use]
+pub fn format_build_timestamp(t: SystemTime) -> String {
+ let dur = t.duration_since(UNIX_EPOCH).unwrap_or_default();
+ let (year, month, day, hour, min, sec) = epoch_to_civil(dur.as_secs());
+ let us = dur.subsec_micros();
+ format!("{year:04}{month:02}{day:02}-{hour:02}{min:02}{sec:02}-{us:06}")
+}
+
+/// Format as `YYYY-MM-DDTHH:MM:SSZ` (state.json `started_at`, second precision).
+#[must_use]
+pub fn format_rfc3339(t: SystemTime) -> String {
+ let dur = t.duration_since(UNIX_EPOCH).unwrap_or_default();
+ let (year, month, day, hour, min, sec) = epoch_to_civil(dur.as_secs());
+ format!("{year:04}-{month:02}-{day:02}T{hour:02}:{min:02}:{sec:02}Z")
+}
+
+/// Format as `YYYY-MM-DDTHH:MM:SS.mmmZ` (logger console output, millisecond precision, UTC).
+#[must_use]
+pub fn format_log_timestamp(t: SystemTime) -> String {
+ let dur = t.duration_since(UNIX_EPOCH).unwrap_or_default();
+ let (year, month, day, hour, min, sec) = epoch_to_civil(dur.as_secs());
+ let ms = dur.subsec_millis();
+ format!("{year:04}-{month:02}-{day:02}T{hour:02}:{min:02}:{sec:02}.{ms:03}Z")
+}
+
+/// Parse `YYYY-MM-DDTHH:MM:SSZ` back to `SystemTime`. Only handles `Z` suffix.
+#[must_use]
+pub fn parse_rfc3339(s: &str) -> Option<SystemTime> {
+ let s = s.strip_suffix('Z')?;
+ if s.len() != 19 {
+ return None;
+ }
+ let bytes = s.as_bytes();
+ if bytes.get(4) != Some(&b'-')
+ || bytes.get(7) != Some(&b'-')
+ || bytes.get(10) != Some(&b'T')
+ || bytes.get(13) != Some(&b':')
+ || bytes.get(16) != Some(&b':')
+ {
+ return None;
+ }
+ let year: u16 = s.get(0..4)?.parse().ok()?;
+ let month: u8 = s.get(5..7)?.parse().ok()?;
+ let day: u8 = s.get(8..10)?.parse().ok()?;
+ let hour: u8 = s.get(11..13)?.parse().ok()?;
+ let min: u8 = s.get(14..16)?.parse().ok()?;
+ let sec: u8 = s.get(17..19)?.parse().ok()?;
+ if !(1..=12).contains(&month) || !(1..=31).contains(&day) || hour > 23 || min > 59 || sec > 59 {
+ return None;
+ }
+ let epoch = civil_to_epoch(year, month, day, hour, min, sec);
+ Some(UNIX_EPOCH + std::time::Duration::from_secs(epoch))
+}
+
/// Convert Unix epoch seconds to (year, month, day, hour, minute, second).
/// Uses Howard Hinnant's `civil_from_days` algorithm.
///
/// # Safety (casts)
/// All `as` casts are bounded by the civil-date algorithm:
/// year fits u16 (0–9999), month/day/h/m/s fit u8, day-count fits i64.
#[expect(
    clippy::cast_possible_truncation,
    clippy::cast_possible_wrap,
    clippy::cast_sign_loss,
    reason = "Hinnant civil_from_days algorithm: values bounded by calendar math"
)]
const fn epoch_to_civil(secs: u64) -> (u16, u8, u8, u8, u8, u8) {
    // Seconds remaining inside the current day, and whole days shifted so
    // the 400-year Gregorian era starts at 0000-03-01.
    let secs_of_day = secs % 86400;
    let z = (secs / 86400) as i64 + 719_468;
    let era = z.div_euclid(146_097);
    let day_of_era = z.rem_euclid(146_097) as u64; // [0, 146096]
    let year_of_era =
        (day_of_era - day_of_era / 1460 + day_of_era / 36524 - day_of_era / 146_096) / 365;
    let mut year = (year_of_era as i64) + era * 400;
    let day_of_year = day_of_era - (365 * year_of_era + year_of_era / 4 - year_of_era / 100); // [0, 365]
    let mp = (5 * day_of_year + 2) / 153; // shifted month [0, 11], 0 = March
    let day = (day_of_year - (153 * mp + 2) / 5 + 1) as u8;
    let month = if mp < 10 { mp + 3 } else { mp - 9 } as u8;
    if month <= 2 {
        // Jan/Feb belong to the next civil year in the March-based calendar.
        year += 1;
    }
    (
        year as u16,
        month,
        day,
        (secs_of_day / 3600) as u8,
        ((secs_of_day % 3600) / 60) as u8,
        (secs_of_day % 60) as u8,
    )
}
+
/// Convert (year, month, day, hour, minute, second) to Unix epoch seconds.
///
/// # Safety (casts)
/// All `as` casts are bounded by the civil-date algorithm:
/// year fits i64, month/day/h/m/s fit u64, `doe` fits i64 (0–146096).
/// Final `as u64` is non-negative for all valid civil dates.
#[expect(
    clippy::cast_possible_wrap,
    clippy::cast_sign_loss,
    reason = "Hinnant civil_from_days algorithm: values bounded by calendar math"
)]
const fn civil_to_epoch(year: u16, month: u8, day: u8, hour: u8, min: u8, sec: u8) -> u64 {
    // Shift Jan/Feb to the tail of the previous year so Feb 29 is the
    // last day of the (shifted) year.
    let y = if month <= 2 {
        (year as i64) - 1
    } else {
        year as i64
    };
    let era = y.div_euclid(400);
    let year_of_era = y.rem_euclid(400) as u64; // [0, 399]
    let shifted_month = if month > 2 {
        (month as u64) - 3
    } else {
        (month as u64) + 9
    }; // [0, 11], 0 = March
    let day_of_year = (153 * shifted_month + 2) / 5 + (day as u64) - 1; // [0, 365]
    let day_of_era = year_of_era * 365 + year_of_era / 4 - year_of_era / 100 + day_of_year;
    let days_since_epoch = (era * 146_097 + day_of_era as i64 - 719_468) as u64;
    let time_of_day = (hour as u64) * 3600 + (min as u64) * 60 + (sec as u64);
    days_since_epoch * 86400 + time_of_day
}
+
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use std::time::Duration;

    /// The epoch formats to the all-zeros build timestamp.
    #[test]
    fn format_build_timestamp_unix_epoch() {
        assert_eq!(format_build_timestamp(UNIX_EPOCH), "19700101-000000-000000");
    }

    /// Shape check: 8-digit date, 6-digit time, 6-digit micros, all digits.
    #[test]
    fn format_build_timestamp_format() {
        let s = format_build_timestamp(SystemTime::now());
        let parts: Vec<&str> = s.split('-').collect();
        assert_eq!(parts.len(), 3, "expected 3 dash-separated parts, got: {s}");
        assert_eq!(parts[0].len(), 8, "date part should be 8 digits");
        assert_eq!(parts[1].len(), 6, "time part should be 6 digits");
        assert_eq!(parts[2].len(), 6, "micros part should be 6 digits");
        assert!(parts.iter().all(|p| p.chars().all(|c| c.is_ascii_digit())));
    }

    #[test]
    fn format_rfc3339_unix_epoch() {
        assert_eq!(format_rfc3339(UNIX_EPOCH), "1970-01-01T00:00:00Z");
    }

    #[test]
    fn format_log_timestamp_unix_epoch() {
        assert_eq!(format_log_timestamp(UNIX_EPOCH), "1970-01-01T00:00:00.000Z");
    }

    /// Leap-day formatting exercises the civil-date math end to end.
    #[test]
    fn format_rfc3339_known_date() {
        // 2024-02-29T12:30:45Z (leap year)
        let secs = civil_to_epoch(2024, 2, 29, 12, 30, 45);
        let t = UNIX_EPOCH + Duration::from_secs(secs);
        assert_eq!(format_rfc3339(t), "2024-02-29T12:30:45Z");
    }

    #[test]
    fn parse_rfc3339_roundtrip() {
        let now = SystemTime::now();
        let s = format_rfc3339(now);
        let parsed = parse_rfc3339(&s).unwrap();
        // Roundtrip loses sub-second precision, so compare formatted output
        assert_eq!(format_rfc3339(parsed), s);
    }

    #[test]
    fn parse_rfc3339_valid() {
        let t = parse_rfc3339("2026-02-13T14:30:00Z").unwrap();
        assert_eq!(format_rfc3339(t), "2026-02-13T14:30:00Z");
    }

    /// Only the `Z` suffix is supported — numeric offsets are rejected.
    #[test]
    fn parse_rfc3339_rejects_plus_offset() {
        assert!(parse_rfc3339("2026-02-13T14:30:00+00:00").is_none());
    }

    #[test]
    fn parse_rfc3339_rejects_garbage() {
        assert!(parse_rfc3339("not-a-date").is_none());
        assert!(parse_rfc3339("").is_none());
        assert!(parse_rfc3339("2026-13-01T00:00:00Z").is_none()); // month 13
        assert!(parse_rfc3339("2026-00-01T00:00:00Z").is_none()); // month 0
    }

    /// civil -> epoch -> civil is the identity on representative dates,
    /// including the leap day and year-end boundaries.
    #[test]
    fn epoch_to_civil_roundtrip() {
        let dates: &[(u16, u8, u8, u8, u8, u8)] = &[
            (1970, 1, 1, 0, 0, 0),
            (2000, 1, 1, 0, 0, 0),
            (2024, 2, 29, 23, 59, 59), // leap year
            (2024, 12, 31, 12, 0, 0),
            (2026, 2, 13, 14, 30, 0),
        ];
        for &(y, m, d, h, min, s) in dates {
            let epoch = civil_to_epoch(y, m, d, h, min, s);
            let (y2, m2, d2, h2, min2, s2) = epoch_to_civil(epoch);
            assert_eq!(
                (y, m, d, h, min, s),
                (y2, m2, d2, h2, min2, s2),
                "roundtrip failed for {y}-{m:02}-{d:02}T{h:02}:{min:02}:{s:02}Z"
            );
        }
    }

    #[test]
    fn format_log_timestamp_millisecond_precision() {
        let t = UNIX_EPOCH + Duration::from_millis(1_234_567_890_123);
        let s = format_log_timestamp(t);
        assert!(s.ends_with("Z"));
        assert!(s.contains('.'));
        // The milliseconds portion should be 3 digits
        let dot_pos = s.find('.').unwrap();
        assert_eq!(&s[dot_pos + 4..], "Z");
    }

    #[test]
    fn format_build_timestamp_microsecond_precision() {
        let t = UNIX_EPOCH + Duration::from_micros(1_234_567_890_123_456);
        let s = format_build_timestamp(t);
        // Last 6 chars before end should be microseconds
        let parts: Vec<&str> = s.split('-').collect();
        assert_eq!(parts.len(), 3);
        assert_eq!(parts[2].len(), 6);
    }
}
diff --git a/tests/integration/cache.rs b/tests/integration/cache.rs
index 42d2a15..cc20cdd 100644
--- a/tests/integration/cache.rs
+++ b/tests/integration/cache.rs
@@ -81,6 +81,8 @@ async fn cache_dir_persists_across_builds() {
.state
.build_scheduler
.in_progress
+ .lock()
+ .unwrap()
.contains("cache-site")
{
break;
diff --git a/tests/integration/cleanup.rs b/tests/integration/cleanup.rs
index e0cc902..a8bc84f 100644
--- a/tests/integration/cleanup.rs
+++ b/tests/integration/cleanup.rs
@@ -31,7 +31,6 @@ async fn old_builds_cleaned_up() {
base_dir: base_dir.clone(),
log_dir: base_dir.join("logs"),
log_level: "debug".to_owned(),
- rate_limit_per_minute: 100,
max_builds_to_keep: 2,
git_timeout: None,
sites: vec![site],
@@ -62,6 +61,8 @@ async fn old_builds_cleaned_up() {
.state
.build_scheduler
.in_progress
+ .lock()
+ .unwrap()
.contains("cleanup-site")
{
break;
diff --git a/tests/integration/cli_cleanup.rs b/tests/integration/cli_cleanup.rs
new file mode 100644
index 0000000..822c7bc
--- /dev/null
+++ b/tests/integration/cli_cleanup.rs
@@ -0,0 +1,341 @@
+use std::fmt::Write as _;
+use std::process::Stdio;
+use tempfile::TempDir;
+use tokio::process::Command;
+
+fn witryna_bin() -> std::path::PathBuf {
+ let mut path = std::path::PathBuf::from(env!("CARGO_BIN_EXE_witryna"));
+ if !path.exists() {
+ path = std::path::PathBuf::from("target/debug/witryna");
+ }
+ path
+}
+
+async fn write_cleanup_config(
+ dir: &std::path::Path,
+ sites: &[&str],
+ max_builds: u32,
+) -> (std::path::PathBuf, std::path::PathBuf, std::path::PathBuf) {
+ let base_dir = dir.join("data");
+ let log_dir = dir.join("logs");
+ tokio::fs::create_dir_all(&base_dir).await.unwrap();
+ tokio::fs::create_dir_all(&log_dir).await.unwrap();
+
+ let mut sites_toml = String::new();
+ for name in sites {
+ write!(
+ sites_toml,
+ r#"
+[[sites]]
+name = "{name}"
+repo_url = "https://example.com/{name}.git"
+branch = "main"
+"#
+ )
+ .unwrap();
+ }
+
+ let config_path = dir.join("witryna.toml");
+ let config = format!(
+ r#"listen_address = "127.0.0.1:0"
+container_runtime = "podman"
+base_dir = "{base_dir}"
+log_dir = "{log_dir}"
+log_level = "info"
+max_builds_to_keep = {max_builds}
+{sites_toml}"#,
+ base_dir = base_dir.display(),
+ log_dir = log_dir.display(),
+ );
+ tokio::fs::write(&config_path, config).await.unwrap();
+ (config_path, base_dir, log_dir)
+}
+
+async fn create_fake_builds(
+ base_dir: &std::path::Path,
+ log_dir: &std::path::Path,
+ site: &str,
+ timestamps: &[&str],
+) {
+ let builds_dir = base_dir.join("builds").join(site);
+ let site_log_dir = log_dir.join(site);
+ tokio::fs::create_dir_all(&builds_dir).await.unwrap();
+ tokio::fs::create_dir_all(&site_log_dir).await.unwrap();
+
+ for ts in timestamps {
+ tokio::fs::create_dir_all(builds_dir.join(ts))
+ .await
+ .unwrap();
+ tokio::fs::write(site_log_dir.join(format!("{ts}.log")), "build log")
+ .await
+ .unwrap();
+ }
+}
+
+// ---------------------------------------------------------------------------
+// Tier 1: no container runtime / git needed
+// ---------------------------------------------------------------------------
+
+#[tokio::test]
+async fn cli_cleanup_unknown_site() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, _, _) = write_cleanup_config(tempdir.path(), &["real-site"], 5).await;
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "cleanup",
+ "--config",
+ config_path.to_str().unwrap(),
+ "nonexistent",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(!output.status.success(), "should exit non-zero");
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("not found"),
+ "should mention 'not found', got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_cleanup_keep_zero_refused() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, _, _) = write_cleanup_config(tempdir.path(), &["my-site"], 5).await;
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "cleanup",
+ "--config",
+ config_path.to_str().unwrap(),
+ "--keep",
+ "0",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(!output.status.success(), "should exit non-zero");
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("--keep 0 would delete all builds"),
+ "should refuse --keep 0, got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_cleanup_disabled_when_max_zero() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, _, _) = write_cleanup_config(tempdir.path(), &["my-site"], 0).await;
+
+ let output = Command::new(witryna_bin())
+ .args(["cleanup", "--config", config_path.to_str().unwrap()])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(
+ output.status.success(),
+ "should exit 0, stderr: {}",
+ String::from_utf8_lossy(&output.stderr)
+ );
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("cleanup disabled"),
+ "should say 'cleanup disabled', got: {stderr}"
+ );
+}
+
/// End-to-end cleanup across two sites: with `--keep 2` and four fake
/// builds per site, the two oldest build dirs and log files are removed
/// for each site, and both the per-site and "total:" summary lines
/// appear on stderr.
#[tokio::test]
async fn cli_cleanup_removes_old_builds() {
    let tempdir = TempDir::new().unwrap();
    let (config_path, base_dir, log_dir) =
        write_cleanup_config(tempdir.path(), &["site-a", "site-b"], 5).await;

    // Lexicographically ordered, so -000001 is the oldest build.
    let timestamps = &[
        "20260126-100000-000001",
        "20260126-100000-000002",
        "20260126-100000-000003",
        "20260126-100000-000004",
    ];

    create_fake_builds(&base_dir, &log_dir, "site-a", timestamps).await;
    create_fake_builds(&base_dir, &log_dir, "site-b", timestamps).await;

    let output = Command::new(witryna_bin())
        .args([
            "cleanup",
            "--config",
            config_path.to_str().unwrap(),
            "--keep",
            "2",
        ])
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .output()
        .await
        .unwrap();

    assert!(
        output.status.success(),
        "should exit 0, stderr: {}",
        String::from_utf8_lossy(&output.stderr)
    );

    let stderr = String::from_utf8_lossy(&output.stderr);
    assert!(
        stderr.contains("site-a: removed"),
        "should report site-a removals, got: {stderr}"
    );
    assert!(
        stderr.contains("site-b: removed"),
        "should report site-b removals, got: {stderr}"
    );
    assert!(
        stderr.contains("total:"),
        "should print total summary for multi-site, got: {stderr}"
    );

    // Verify filesystem: oldest 2 gone, newest 2 remain for each site
    for site in &["site-a", "site-b"] {
        let builds = base_dir.join("builds").join(site);
        assert!(!builds.join("20260126-100000-000001").exists());
        assert!(!builds.join("20260126-100000-000002").exists());
        assert!(builds.join("20260126-100000-000003").exists());
        assert!(builds.join("20260126-100000-000004").exists());

        let logs = log_dir.join(site);
        assert!(!logs.join("20260126-100000-000001.log").exists());
        assert!(!logs.join("20260126-100000-000002.log").exists());
        assert!(logs.join("20260126-100000-000003.log").exists());
        assert!(logs.join("20260126-100000-000004.log").exists());
    }
}
+
/// Passing a site name restricts cleanup to that site: site-a is pruned,
/// site-b's files stay untouched, site-b never appears in the output, and
/// the "total:" summary line is omitted for a single-site run.
#[tokio::test]
async fn cli_cleanup_single_site_filter() {
    let tempdir = TempDir::new().unwrap();
    let (config_path, base_dir, log_dir) =
        write_cleanup_config(tempdir.path(), &["site-a", "site-b"], 5).await;

    let timestamps = &[
        "20260126-100000-000001",
        "20260126-100000-000002",
        "20260126-100000-000003",
        "20260126-100000-000004",
    ];

    create_fake_builds(&base_dir, &log_dir, "site-a", timestamps).await;
    create_fake_builds(&base_dir, &log_dir, "site-b", timestamps).await;

    let output = Command::new(witryna_bin())
        .args([
            "cleanup",
            "--config",
            config_path.to_str().unwrap(),
            "--keep",
            "2",
            "site-a",
        ])
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .output()
        .await
        .unwrap();

    assert!(
        output.status.success(),
        "should exit 0, stderr: {}",
        String::from_utf8_lossy(&output.stderr)
    );

    let stderr = String::from_utf8_lossy(&output.stderr);

    // site-a should be cleaned
    assert!(
        stderr.contains("site-a: removed"),
        "should report site-a removals, got: {stderr}"
    );

    // site-b should be untouched — not mentioned in output
    assert!(
        !stderr.contains("site-b"),
        "site-b should not appear in output, got: {stderr}"
    );

    // No total line for single-site cleanup
    assert!(
        !stderr.contains("total:"),
        "should not print total for single site, got: {stderr}"
    );

    // Verify site-b filesystem is untouched
    let site_b_builds = base_dir.join("builds").join("site-b");
    assert!(site_b_builds.join("20260126-100000-000001").exists());
    assert!(site_b_builds.join("20260126-100000-000004").exists());
}
+
/// The `--keep` flag takes precedence over the config file's
/// `max_builds_to_keep`: with config=1 but `--keep 3` and four builds,
/// only the single oldest build is removed.
#[tokio::test]
async fn cli_cleanup_keep_overrides_config() {
    let tempdir = TempDir::new().unwrap();
    // Config says max_builds_to_keep = 1
    let (config_path, base_dir, log_dir) =
        write_cleanup_config(tempdir.path(), &["my-site"], 1).await;

    let timestamps = &[
        "20260126-100000-000001",
        "20260126-100000-000002",
        "20260126-100000-000003",
        "20260126-100000-000004",
    ];

    create_fake_builds(&base_dir, &log_dir, "my-site", timestamps).await;

    // --keep 3 should override config's max_builds_to_keep=1
    let output = Command::new(witryna_bin())
        .args([
            "cleanup",
            "--config",
            config_path.to_str().unwrap(),
            "--keep",
            "3",
        ])
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .output()
        .await
        .unwrap();

    assert!(
        output.status.success(),
        "should exit 0, stderr: {}",
        String::from_utf8_lossy(&output.stderr)
    );

    // With --keep 3 and 4 builds: only 1 should be removed
    let builds = base_dir.join("builds").join("my-site");
    assert!(
        !builds.join("20260126-100000-000001").exists(),
        "oldest should be removed"
    );
    assert!(
        builds.join("20260126-100000-000002").exists(),
        "second should remain"
    );
    assert!(
        builds.join("20260126-100000-000003").exists(),
        "third should remain"
    );
    assert!(
        builds.join("20260126-100000-000004").exists(),
        "newest should remain"
    );
}
diff --git a/tests/integration/cli_run.rs b/tests/integration/cli_run.rs
index 0ea8d20..e12beb5 100644
--- a/tests/integration/cli_run.rs
+++ b/tests/integration/cli_run.rs
@@ -80,10 +80,10 @@ sites = []
let output = Command::new(witryna_bin())
.args([
- "--config",
- config_path.to_str().unwrap(),
"run",
"nonexistent",
+ "--config",
+ config_path.to_str().unwrap(),
])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
@@ -130,10 +130,10 @@ async fn cli_run_build_failure_exits_nonzero() {
let output = Command::new(witryna_bin())
.args([
- "--config",
- config_path.to_str().unwrap(),
"run",
"fail-site",
+ "--config",
+ config_path.to_str().unwrap(),
])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
@@ -193,10 +193,10 @@ async fn cli_run_builds_site_successfully() {
let output = Command::new(witryna_bin())
.args([
- "--config",
- config_path.to_str().unwrap(),
"run",
"test-site",
+ "--config",
+ config_path.to_str().unwrap(),
])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
@@ -254,11 +254,11 @@ async fn cli_run_verbose_shows_build_output() {
let output = Command::new(witryna_bin())
.args([
- "--config",
- config_path.to_str().unwrap(),
"run",
"verbose-site",
"--verbose",
+ "--config",
+ config_path.to_str().unwrap(),
])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
diff --git a/tests/integration/cli_status.rs b/tests/integration/cli_status.rs
index 25135fb..4eb50a4 100644
--- a/tests/integration/cli_status.rs
+++ b/tests/integration/cli_status.rs
@@ -1,3 +1,4 @@
+use std::fmt::Write as _;
use std::process::Stdio;
use tempfile::TempDir;
use tokio::process::Command;
@@ -12,17 +13,16 @@ fn witryna_bin() -> std::path::PathBuf {
}
/// Write a minimal witryna.toml config for status tests.
-async fn write_status_config(
- dir: &std::path::Path,
- sites: &[&str],
- log_dir: &std::path::Path,
-) -> std::path::PathBuf {
+async fn write_status_config(dir: &std::path::Path, sites: &[&str]) -> std::path::PathBuf {
let base_dir = dir.join("data");
+ let log_dir = dir.join("logs");
tokio::fs::create_dir_all(&base_dir).await.unwrap();
+ tokio::fs::create_dir_all(&log_dir).await.unwrap();
let mut sites_toml = String::new();
for name in sites {
- sites_toml.push_str(&format!(
+ write!(
+ sites_toml,
r#"
[[sites]]
name = "{name}"
@@ -30,7 +30,8 @@ repo_url = "https://example.com/{name}.git"
branch = "main"
webhook_token = "unused"
"#
- ));
+ )
+ .unwrap();
}
let config_path = dir.join("witryna.toml");
@@ -48,63 +49,43 @@ log_level = "info"
config_path
}
-/// Write a fake build log with a valid header.
-async fn write_test_build_log(
- log_dir: &std::path::Path,
+/// Write a state.json for a site with the new format.
+async fn write_state_json(
+ base_dir: &std::path::Path,
site_name: &str,
- timestamp: &str,
- status: &str,
- commit: &str,
- image: &str,
- duration: &str,
+ current: &str,
+ builds: &[serde_json::Value],
) {
- let site_log_dir = log_dir.join(site_name);
- tokio::fs::create_dir_all(&site_log_dir).await.unwrap();
-
- let content = format!(
- "=== BUILD LOG ===\n\
- Site: {site_name}\n\
- Timestamp: {timestamp}\n\
- Git Commit: {commit}\n\
- Image: {image}\n\
- Duration: {duration}\n\
- Status: {status}\n\
- \n\
- === STDOUT ===\n\
- build output\n\
- \n\
- === STDERR ===\n"
- );
+ let state_dir = base_dir.join("builds").join(site_name);
+ tokio::fs::create_dir_all(&state_dir).await.unwrap();
+
+ let content = serde_json::json!({
+ "current": current,
+ "builds": builds,
+ });
- let log_file = site_log_dir.join(format!("{timestamp}.log"));
- tokio::fs::write(&log_file, content).await.unwrap();
+ let state_path = state_dir.join("state.json");
+ tokio::fs::write(&state_path, content.to_string())
+ .await
+ .unwrap();
}
-/// Write a fake hook log with a valid header.
-async fn write_test_hook_log(
- log_dir: &std::path::Path,
- site_name: &str,
- timestamp: &str,
+/// Create a build entry JSON value.
+fn build_entry(
status: &str,
-) {
- let site_log_dir = log_dir.join(site_name);
- tokio::fs::create_dir_all(&site_log_dir).await.unwrap();
-
- let content = format!(
- "=== HOOK LOG ===\n\
- Site: {site_name}\n\
- Timestamp: {timestamp}\n\
- Command: hook-cmd\n\
- Duration: 1s\n\
- Status: {status}\n\
- \n\
- === STDOUT ===\n\
- \n\
- === STDERR ===\n"
- );
-
- let log_file = site_log_dir.join(format!("{timestamp}-hook.log"));
- tokio::fs::write(&log_file, content).await.unwrap();
+ timestamp: &str,
+ git_commit: &str,
+ duration: &str,
+ log: &str,
+) -> serde_json::Value {
+ serde_json::json!({
+ "status": status,
+ "timestamp": timestamp,
+ "started_at": "2026-02-10T12:00:00Z",
+ "git_commit": git_commit,
+ "duration": duration,
+ "log": log,
+ })
}
// ---------------------------------------------------------------------------
@@ -114,13 +95,10 @@ async fn write_test_hook_log(
#[tokio::test]
async fn cli_status_no_builds() {
let tempdir = TempDir::new().unwrap();
- let log_dir = tempdir.path().join("logs");
- tokio::fs::create_dir_all(&log_dir).await.unwrap();
-
- let config_path = write_status_config(tempdir.path(), &["empty-site"], &log_dir).await;
+ let config_path = write_status_config(tempdir.path(), &["empty-site"]).await;
let output = Command::new(witryna_bin())
- .args(["--config", config_path.to_str().unwrap(), "status"])
+ .args(["status", "--config", config_path.to_str().unwrap()])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
@@ -139,24 +117,25 @@ async fn cli_status_no_builds() {
#[tokio::test]
async fn cli_status_single_build() {
let tempdir = TempDir::new().unwrap();
- let log_dir = tempdir.path().join("logs");
- tokio::fs::create_dir_all(&log_dir).await.unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["my-site"]).await;
- write_test_build_log(
- &log_dir,
+ write_state_json(
+ &base_dir,
"my-site",
"20260126-143000-123456",
- "success",
- "abc123d",
- "node:20-alpine",
- "45s",
+ &[build_entry(
+ "success",
+ "20260126-143000-123456",
+ "abc123d",
+ "45s",
+ "/logs/my-site/20260126-143000-123456.log",
+ )],
)
.await;
- let config_path = write_status_config(tempdir.path(), &["my-site"], &log_dir).await;
-
let output = Command::new(witryna_bin())
- .args(["--config", config_path.to_str().unwrap(), "status"])
+ .args(["status", "--config", config_path.to_str().unwrap()])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
@@ -174,28 +153,29 @@ async fn cli_status_single_build() {
#[tokio::test]
async fn cli_status_json_output() {
let tempdir = TempDir::new().unwrap();
- let log_dir = tempdir.path().join("logs");
- tokio::fs::create_dir_all(&log_dir).await.unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["json-site"]).await;
- write_test_build_log(
- &log_dir,
+ write_state_json(
+ &base_dir,
"json-site",
"20260126-143000-123456",
- "success",
- "abc123d",
- "node:20-alpine",
- "45s",
+ &[build_entry(
+ "success",
+ "20260126-143000-123456",
+ "abc123d",
+ "45s",
+ "/logs/json-site/20260126-143000-123456.log",
+ )],
)
.await;
- let config_path = write_status_config(tempdir.path(), &["json-site"], &log_dir).await;
-
let output = Command::new(witryna_bin())
.args([
- "--config",
- config_path.to_str().unwrap(),
"status",
"--json",
+ "--config",
+ config_path.to_str().unwrap(),
])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
@@ -217,41 +197,43 @@ async fn cli_status_json_output() {
#[tokio::test]
async fn cli_status_site_filter() {
let tempdir = TempDir::new().unwrap();
- let log_dir = tempdir.path().join("logs");
- tokio::fs::create_dir_all(&log_dir).await.unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["site-a", "site-b"]).await;
- // Create logs for two sites
- write_test_build_log(
- &log_dir,
+ write_state_json(
+ &base_dir,
"site-a",
"20260126-143000-000000",
- "success",
- "aaa1111",
- "alpine:latest",
- "10s",
+ &[build_entry(
+ "success",
+ "20260126-143000-000000",
+ "aaa1111",
+ "10s",
+ "/logs/a.log",
+ )],
)
.await;
- write_test_build_log(
- &log_dir,
+ write_state_json(
+ &base_dir,
"site-b",
"20260126-150000-000000",
- "success",
- "bbb2222",
- "alpine:latest",
- "20s",
+ &[build_entry(
+ "success",
+ "20260126-150000-000000",
+ "bbb2222",
+ "20s",
+ "/logs/b.log",
+ )],
)
.await;
- let config_path = write_status_config(tempdir.path(), &["site-a", "site-b"], &log_dir).await;
-
let output = Command::new(witryna_bin())
.args([
- "--config",
- config_path.to_str().unwrap(),
"status",
- "--site",
"site-a",
+ "--config",
+ config_path.to_str().unwrap(),
])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
@@ -271,33 +253,340 @@ async fn cli_status_site_filter() {
#[tokio::test]
async fn cli_status_hook_failed() {
let tempdir = TempDir::new().unwrap();
- let log_dir = tempdir.path().join("logs");
- tokio::fs::create_dir_all(&log_dir).await.unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["hook-site"]).await;
- // Build succeeded, but hook failed
- write_test_build_log(
- &log_dir,
+ write_state_json(
+ &base_dir,
"hook-site",
"20260126-143000-123456",
- "success",
- "abc123d",
- "alpine:latest",
- "12s",
+ &[build_entry(
+ "hook failed",
+ "20260126-143000-123456",
+ "abc123d",
+ "12s",
+ "/logs/hook-site/20260126-143000-123456.log",
+ )],
)
.await;
- write_test_hook_log(
- &log_dir,
- "hook-site",
+ let output = Command::new(witryna_bin())
+ .args(["status", "--config", config_path.to_str().unwrap()])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ assert!(
+ stdout.contains("hook failed"),
+ "should show 'hook failed', got: {stdout}"
+ );
+}
+
+#[tokio::test]
+async fn cli_status_building_shows_in_progress() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["building-site"]).await;
+
+ let started_at = witryna::time::format_rfc3339(std::time::SystemTime::now());
+
+ let state_dir = base_dir.join("builds").join("building-site");
+ tokio::fs::create_dir_all(&state_dir).await.unwrap();
+ let content = serde_json::json!({
+ "current": "",
+ "builds": [{
+ "status": "building",
+ "timestamp": "20260210-120000-000000",
+ "started_at": started_at,
+ "git_commit": "",
+ "duration": "",
+ "log": "/logs/building-site/20260210-120000-000000.log",
+ }],
+ });
+ tokio::fs::write(state_dir.join("state.json"), content.to_string())
+ .await
+ .unwrap();
+
+ let output = Command::new(witryna_bin())
+ .args(["status", "--config", config_path.to_str().unwrap()])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ assert!(
+ stdout.contains("building"),
+ "should show 'building' status, got: {stdout}"
+ );
+}
+
+#[tokio::test]
+async fn cli_status_json_includes_building_state() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["json-building"]).await;
+
+ let started_at = witryna::time::format_rfc3339(std::time::SystemTime::now());
+
+ let state_dir = base_dir.join("builds").join("json-building");
+ tokio::fs::create_dir_all(&state_dir).await.unwrap();
+ let content = serde_json::json!({
+ "current": "",
+ "builds": [{
+ "status": "building",
+ "timestamp": "20260210-120000-000000",
+ "started_at": started_at,
+ "git_commit": "",
+ "duration": "",
+ "log": "/logs/json-building/20260210-120000-000000.log",
+ }],
+ });
+ tokio::fs::write(state_dir.join("state.json"), content.to_string())
+ .await
+ .unwrap();
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "status",
+ "--json",
+ "--config",
+ config_path.to_str().unwrap(),
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap();
+ let arr = parsed.as_array().unwrap();
+ assert_eq!(arr.len(), 1);
+ assert_eq!(arr[0]["site_name"], "json-building");
+ assert_eq!(arr[0]["status"], "building");
+}
+
+// ---------------------------------------------------------------------------
+// current_build / "+" marker tests
+// ---------------------------------------------------------------------------
+
+#[tokio::test]
+async fn cli_status_marks_current_build() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["my-site"]).await;
+
+ write_state_json(
+ &base_dir,
+ "my-site",
+ "20260126-143000-123456",
+ &[build_entry(
+ "success",
+ "20260126-143000-123456",
+ "abc123d",
+ "45s",
+ "/logs/my-site/20260126-143000-123456.log",
+ )],
+ )
+ .await;
+
+ let output = Command::new(witryna_bin())
+ .args(["status", "--config", config_path.to_str().unwrap()])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+ let stdout = String::from_utf8_lossy(&output.stdout);
+
+ let data_line = stdout
+ .lines()
+ .find(|l| l.contains("my-site"))
+ .expect("should have my-site row");
+ assert!(
+ data_line.starts_with('+'),
+ "current build row should start with '+', got: {data_line}"
+ );
+}
+
+#[tokio::test]
+async fn cli_status_no_current_no_marker() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["my-site"]).await;
+
+ write_state_json(
+ &base_dir,
+ "my-site",
+ "", // no current
+ &[build_entry(
+ "success",
+ "20260126-143000-123456",
+ "abc123d",
+ "45s",
+ "/logs/my-site/20260126-143000-123456.log",
+ )],
+ )
+ .await;
+
+ let output = Command::new(witryna_bin())
+ .args(["status", "--config", config_path.to_str().unwrap()])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+ let stdout = String::from_utf8_lossy(&output.stdout);
+
+ for line in stdout.lines().skip(1) {
+ assert!(
+ !line.starts_with('+'),
+ "no row should have '+' without current, got: {line}"
+ );
+ }
+}
+
+#[tokio::test]
+async fn cli_status_json_includes_current_build() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["json-site"]).await;
+
+ write_state_json(
+ &base_dir,
+ "json-site",
+ "20260126-143000-123456",
+ &[build_entry(
+ "success",
+ "20260126-143000-123456",
+ "abc123d",
+ "45s",
+ "/logs/json-site/20260126-143000-123456.log",
+ )],
+ )
+ .await;
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "status",
+ "--json",
+ "--config",
+ config_path.to_str().unwrap(),
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+ let stdout = String::from_utf8_lossy(&output.stdout);
+ let parsed: serde_json::Value = serde_json::from_str(&stdout).unwrap();
+ let arr = parsed.as_array().unwrap();
+ assert_eq!(arr.len(), 1);
+ assert_eq!(
+ arr[0]["current_build"], "20260126-143000-123456",
+ "JSON should include current_build field"
+ );
+}
+
+#[tokio::test]
+async fn cli_status_single_site_shows_all_builds() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["my-site"]).await;
+
+ write_state_json(
+ &base_dir,
+ "my-site",
"20260126-143000-123456",
- "failed (exit code 1)",
+ &[
+ build_entry(
+ "failed",
+ "20260126-150000-000000",
+ "def4567",
+ "30s",
+ "/logs/2.log",
+ ),
+ build_entry(
+ "success",
+ "20260126-143000-123456",
+ "abc123d",
+ "45s",
+ "/logs/1.log",
+ ),
+ ],
)
.await;
- let config_path = write_status_config(tempdir.path(), &["hook-site"], &log_dir).await;
+ // Single-site view
+ let output = Command::new(witryna_bin())
+ .args([
+ "status",
+ "my-site",
+ "--config",
+ config_path.to_str().unwrap(),
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+ let stdout = String::from_utf8_lossy(&output.stdout);
+
+ let data_lines: Vec<&str> = stdout.lines().skip(1).collect();
+ assert_eq!(data_lines.len(), 2, "should have 2 build rows");
+
+ // First row: failed (no marker — current points to a different timestamp)
+ assert!(
+ !data_lines[0].starts_with('+'),
+ "failed build should not have '+': {}",
+ data_lines[0]
+ );
+ // Second row: success (has marker — matches current)
+ assert!(
+ data_lines[1].starts_with('+'),
+ "current build should have '+': {}",
+ data_lines[1]
+ );
+}
+
+#[tokio::test]
+async fn cli_status_failed_build_shows_failed() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ let config_path = write_status_config(tempdir.path(), &["fail-site"]).await;
+
+ write_state_json(
+ &base_dir,
+ "fail-site",
+ "",
+ &[build_entry(
+ "failed",
+ "20260126-160000-000000",
+ "def4567",
+ "2m 0s",
+ "/logs/fail-site/20260126-160000-000000.log",
+ )],
+ )
+ .await;
let output = Command::new(witryna_bin())
- .args(["--config", config_path.to_str().unwrap(), "status"])
+ .args(["status", "--config", config_path.to_str().unwrap()])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
@@ -307,7 +596,16 @@ async fn cli_status_hook_failed() {
assert!(output.status.success(), "should exit 0");
let stdout = String::from_utf8_lossy(&output.stdout);
assert!(
- stdout.contains("hook failed"),
- "should show 'hook failed', got: {stdout}"
+ stdout.contains("failed"),
+ "should show 'failed' (not a long error), got: {stdout}"
+ );
+ // Verify that status column is clean (no long error string breaking the table)
+ let data_line = stdout
+ .lines()
+ .find(|l| l.contains("fail-site"))
+ .expect("should have fail-site row");
+ assert!(
+ data_line.contains("def4567"),
+ "should show commit in correct column, got: {data_line}"
);
}
diff --git a/tests/integration/cli_switch.rs b/tests/integration/cli_switch.rs
new file mode 100644
index 0000000..fcbced6
--- /dev/null
+++ b/tests/integration/cli_switch.rs
@@ -0,0 +1,330 @@
+use std::fmt::Write as _;
+use std::process::Stdio;
+use tempfile::TempDir;
+use tokio::process::Command;
+
+fn witryna_bin() -> std::path::PathBuf {
+ let mut path = std::path::PathBuf::from(env!("CARGO_BIN_EXE_witryna"));
+ if !path.exists() {
+ path = std::path::PathBuf::from("target/debug/witryna");
+ }
+ path
+}
+
+async fn write_switch_config(
+ dir: &std::path::Path,
+ sites: &[&str],
+) -> (std::path::PathBuf, std::path::PathBuf) {
+ let base_dir = dir.join("data");
+ tokio::fs::create_dir_all(&base_dir).await.unwrap();
+
+ let mut sites_toml = String::new();
+ for name in sites {
+ write!(
+ sites_toml,
+ r#"
+[[sites]]
+name = "{name}"
+repo_url = "https://example.com/{name}.git"
+branch = "main"
+webhook_token = "unused"
+"#
+ )
+ .unwrap();
+ }
+
+ let config_path = dir.join("witryna.toml");
+ let config = format!(
+ r#"listen_address = "127.0.0.1:0"
+container_runtime = "podman"
+base_dir = "{base_dir}"
+log_dir = "{log_dir}"
+log_level = "info"
+{sites_toml}"#,
+ base_dir = base_dir.display(),
+ log_dir = dir.join("logs").display(),
+ );
+ tokio::fs::write(&config_path, config).await.unwrap();
+ (config_path, base_dir)
+}
+
+// ---------------------------------------------------------------------------
+// Tier 1: no container runtime / git needed
+// ---------------------------------------------------------------------------
+
+#[tokio::test]
+async fn cli_switch_unknown_site_exits_nonzero() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, _) = write_switch_config(tempdir.path(), &["real-site"]).await;
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "switch",
+ "--config",
+ config_path.to_str().unwrap(),
+ "nonexistent",
+ "20260126-143000-123456",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(!output.status.success(), "should exit non-zero");
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("not found"),
+ "should mention 'not found', got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_switch_nonexistent_build_exits_nonzero() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, base_dir) = write_switch_config(tempdir.path(), &["my-site"]).await;
+
+ // Create builds dir with one existing build
+ let builds_dir = base_dir.join("builds").join("my-site");
+ tokio::fs::create_dir_all(builds_dir.join("20260126-100000-000001"))
+ .await
+ .unwrap();
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "switch",
+ "--config",
+ config_path.to_str().unwrap(),
+ "my-site",
+ "20260126-999999-999999",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(!output.status.success(), "should exit non-zero");
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("not found"),
+ "should mention 'not found', got: {stderr}"
+ );
+ assert!(
+ stderr.contains("20260126-100000-000001"),
+ "should list available builds, got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_switch_invalid_timestamp_format_exits_nonzero() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, base_dir) = write_switch_config(tempdir.path(), &["my-site"]).await;
+
+ // Create builds dir so the "no builds dir" check doesn't fire first
+ let builds_dir = base_dir.join("builds").join("my-site");
+ tokio::fs::create_dir_all(&builds_dir).await.unwrap();
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "switch",
+ "--config",
+ config_path.to_str().unwrap(),
+ "my-site",
+ "not-a-timestamp",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(!output.status.success(), "should exit non-zero");
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("not a valid build timestamp"),
+ "should mention invalid timestamp, got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_switch_updates_symlink() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, base_dir) = write_switch_config(tempdir.path(), &["my-site"]).await;
+
+ let builds_dir = base_dir.join("builds").join("my-site");
+ let build1 = builds_dir.join("20260126-100000-000001");
+ let build2 = builds_dir.join("20260126-100000-000002");
+ tokio::fs::create_dir_all(&build1).await.unwrap();
+ tokio::fs::create_dir_all(&build2).await.unwrap();
+
+ // Point current at build1
+ let current = builds_dir.join("current");
+ tokio::fs::symlink(&build1, &current).await.unwrap();
+
+ // Switch to build2
+ let output = Command::new(witryna_bin())
+ .args([
+ "switch",
+ "--config",
+ config_path.to_str().unwrap(),
+ "my-site",
+ "20260126-100000-000002",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(
+ output.status.success(),
+ "should exit 0, stderr: {}",
+ String::from_utf8_lossy(&output.stderr)
+ );
+
+ // Verify symlink now points to build2
+ let target = tokio::fs::read_link(&current).await.unwrap();
+ assert_eq!(target, build2, "symlink should point to build2");
+
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("switched my-site to build 20260126-100000-000002"),
+ "should confirm switch, got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_switch_preserves_builds() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, base_dir) = write_switch_config(tempdir.path(), &["my-site"]).await;
+
+ let builds_dir = base_dir.join("builds").join("my-site");
+ let build1 = builds_dir.join("20260126-100000-000001");
+ let build2 = builds_dir.join("20260126-100000-000002");
+ tokio::fs::create_dir_all(&build1).await.unwrap();
+ tokio::fs::create_dir_all(&build2).await.unwrap();
+
+ // Point current at build1
+ let current = builds_dir.join("current");
+ tokio::fs::symlink(&build1, &current).await.unwrap();
+
+ // Switch to build2
+ let output = Command::new(witryna_bin())
+ .args([
+ "switch",
+ "--config",
+ config_path.to_str().unwrap(),
+ "my-site",
+ "20260126-100000-000002",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+
+ // Both builds should still exist
+ assert!(build1.exists(), "build1 should still exist after switch");
+ assert!(build2.exists(), "build2 should still exist after switch");
+}
+
+#[tokio::test]
+async fn cli_switch_updates_state_json() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, base_dir) = write_switch_config(tempdir.path(), &["my-site"]).await;
+
+ let builds_dir = base_dir.join("builds").join("my-site");
+ let build1 = builds_dir.join("20260126-100000-000001");
+ let build2 = builds_dir.join("20260126-100000-000002");
+ tokio::fs::create_dir_all(&build1).await.unwrap();
+ tokio::fs::create_dir_all(&build2).await.unwrap();
+
+ // Write initial state.json with current pointing to build1
+ let state_json = serde_json::json!({
+ "current": "20260126-100000-000001",
+ "builds": [
+ {
+ "status": "success",
+ "timestamp": "20260126-100000-000002",
+ "started_at": "2026-01-26T10:00:00Z",
+ "git_commit": "bbb2222",
+ "duration": "20s",
+ "log": "/logs/2.log",
+ },
+ {
+ "status": "success",
+ "timestamp": "20260126-100000-000001",
+ "started_at": "2026-01-26T10:00:00Z",
+ "git_commit": "aaa1111",
+ "duration": "10s",
+ "log": "/logs/1.log",
+ }
+ ],
+ });
+ tokio::fs::write(builds_dir.join("state.json"), state_json.to_string())
+ .await
+ .unwrap();
+
+ // Create symlink
+ let current = builds_dir.join("current");
+ tokio::fs::symlink(&build1, &current).await.unwrap();
+
+ // Switch to build2
+ let output = Command::new(witryna_bin())
+ .args([
+ "switch",
+ "--config",
+ config_path.to_str().unwrap(),
+ "my-site",
+ "20260126-100000-000002",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(output.status.success(), "should exit 0");
+
+ // Verify state.json was updated
+ let state_content = tokio::fs::read_to_string(builds_dir.join("state.json"))
+ .await
+ .unwrap();
+ let state: serde_json::Value = serde_json::from_str(&state_content).unwrap();
+ assert_eq!(
+ state["current"], "20260126-100000-000002",
+ "state.json current should be updated after switch"
+ );
+}
+
+#[tokio::test]
+async fn cli_switch_no_builds_dir_exits_nonzero() {
+ let tempdir = TempDir::new().unwrap();
+ let (config_path, _) = write_switch_config(tempdir.path(), &["my-site"]).await;
+
+ // Don't create builds directory at all
+
+ let output = Command::new(witryna_bin())
+ .args([
+ "switch",
+ "--config",
+ config_path.to_str().unwrap(),
+ "my-site",
+ "20260126-100000-000001",
+ ])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(!output.status.success(), "should exit non-zero");
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("no builds found"),
+ "should mention no builds, got: {stderr}"
+ );
+}
diff --git a/tests/integration/cli_validate.rs b/tests/integration/cli_validate.rs
new file mode 100644
index 0000000..c8a62c8
--- /dev/null
+++ b/tests/integration/cli_validate.rs
@@ -0,0 +1,137 @@
+use std::process::Stdio;
+use tempfile::TempDir;
+use tokio::process::Command;
+
+fn witryna_bin() -> std::path::PathBuf {
+ let mut path = std::path::PathBuf::from(env!("CARGO_BIN_EXE_witryna"));
+ if !path.exists() {
+ path = std::path::PathBuf::from("target/debug/witryna");
+ }
+ path
+}
+
+async fn write_validate_config(dir: &std::path::Path, content: &str) -> std::path::PathBuf {
+ let config_path = dir.join("witryna.toml");
+ tokio::fs::write(&config_path, content).await.unwrap();
+ config_path
+}
+
+// ---------------------------------------------------------------------------
+// Tier 1: no container runtime / git needed
+// ---------------------------------------------------------------------------
+
+#[tokio::test]
+async fn cli_validate_valid_config() {
+ let tempdir = TempDir::new().unwrap();
+ let base_dir = tempdir.path().join("data");
+ tokio::fs::create_dir_all(&base_dir).await.unwrap();
+
+ let config = format!(
+ r#"listen_address = "127.0.0.1:8080"
+container_runtime = "podman"
+base_dir = "{base_dir}"
+log_level = "info"
+
+[[sites]]
+name = "site-a"
+repo_url = "https://example.com/a.git"
+branch = "main"
+
+[[sites]]
+name = "site-b"
+repo_url = "https://example.com/b.git"
+branch = "main"
+"#,
+ base_dir = base_dir.display(),
+ );
+ let config_path = write_validate_config(tempdir.path(), &config).await;
+
+ let output = Command::new(witryna_bin())
+ .args(["validate", "--config", config_path.to_str().unwrap()])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(
+ output.status.success(),
+ "should exit 0, stderr: {}",
+ String::from_utf8_lossy(&output.stderr)
+ );
+
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("Configuration valid:"),
+ "should say 'Configuration valid:', got: {stderr}"
+ );
+ assert!(
+ stderr.contains("127.0.0.1:8080"),
+ "should show listen address, got: {stderr}"
+ );
+ assert!(
+ stderr.contains("podman"),
+ "should show runtime, got: {stderr}"
+ );
+ assert!(
+ stderr.contains("Sites: 2"),
+ "should show site count, got: {stderr}"
+ );
+ assert!(
+ stderr.contains("site-a"),
+ "should list site-a, got: {stderr}"
+ );
+ assert!(
+ stderr.contains("site-b"),
+ "should list site-b, got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_validate_missing_config_file() {
+ let output = Command::new(witryna_bin())
+ .args(["validate", "--config", "/nonexistent/witryna.toml"])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(!output.status.success(), "should exit non-zero");
+ let stderr = String::from_utf8_lossy(&output.stderr);
+ assert!(
+ stderr.contains("config file not found"),
+ "should mention 'config file not found', got: {stderr}"
+ );
+}
+
+#[tokio::test]
+async fn cli_validate_invalid_config() {
+ let tempdir = TempDir::new().unwrap();
+
+ // listen_address = "" is invalid — config validation rejects it
+ let config = r#"listen_address = ""
+container_runtime = "podman"
+base_dir = "/tmp/witryna"
+log_level = "info"
+
+[[sites]]
+name = "test"
+repo_url = "https://example.com/test.git"
+branch = "main"
+"#;
+ let config_path = write_validate_config(tempdir.path(), config).await;
+
+ let output = Command::new(witryna_bin())
+ .args(["validate", "--config", config_path.to_str().unwrap()])
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped())
+ .output()
+ .await
+ .unwrap();
+
+ assert!(
+ !output.status.success(),
+ "should exit non-zero for invalid config"
+ );
+}
diff --git a/tests/integration/concurrent.rs b/tests/integration/concurrent.rs
index e7f2b64..da09ac9 100644
--- a/tests/integration/concurrent.rs
+++ b/tests/integration/concurrent.rs
@@ -11,6 +11,8 @@ async fn concurrent_build_gets_queued() {
.state
.build_scheduler
.in_progress
+ .lock()
+ .unwrap()
.insert("my-site".to_owned());
let resp = TestServer::client()
@@ -37,11 +39,15 @@ async fn concurrent_build_queue_collapse() {
.state
.build_scheduler
.in_progress
+ .lock()
+ .unwrap()
.insert("my-site".to_owned());
server
.state
.build_scheduler
.queued
+ .lock()
+ .unwrap()
.insert("my-site".to_owned());
// Third request should collapse (202, no body)
@@ -97,6 +103,8 @@ async fn build_in_progress_checked_after_auth() {
.state
.build_scheduler
.in_progress
+ .lock()
+ .unwrap()
.insert("my-site".to_owned());
// Request with wrong token should return 401 (auth checked before build status)
diff --git a/tests/integration/env_vars.rs b/tests/integration/env_vars.rs
index 44f74fa..5638149 100644
--- a/tests/integration/env_vars.rs
+++ b/tests/integration/env_vars.rs
@@ -159,4 +159,8 @@ async fn env_vars_passed_to_post_deploy_hook() {
content.contains("WITRYNA_BUILD_TIMESTAMP="),
"WITRYNA_BUILD_TIMESTAMP should be set"
);
+ assert!(
+ content.contains("WITRYNA_BUILD_STATUS=success"),
+ "WITRYNA_BUILD_STATUS should be set to success"
+ );
}
diff --git a/tests/integration/harness.rs b/tests/integration/harness.rs
index c015fa8..b985971 100644
--- a/tests/integration/harness.rs
+++ b/tests/integration/harness.rs
@@ -1,11 +1,8 @@
-use governor::{Quota, RateLimiter};
use std::collections::HashMap;
-use std::num::NonZeroU32;
use std::path::PathBuf;
-use std::sync::Arc;
+use std::sync::{Arc, RwLock};
use tempfile::TempDir;
-use tokio::net::TcpListener;
-use tokio::sync::{RwLock, oneshot};
+use tiny_http::Server;
use witryna::build_guard::BuildScheduler;
use witryna::config::{BuildOverrides, Config, SiteConfig};
use witryna::polling::PollingManager;
@@ -18,18 +15,14 @@ pub struct TestServer {
/// Kept alive for RAII cleanup of the config file written during startup.
#[allow(dead_code)]
pub tempdir: TempDir,
- shutdown_tx: Option<oneshot::Sender<()>>,
+ server: Arc<Server>,
+ server_thread: Option<std::thread::JoinHandle<()>>,
}
impl TestServer {
/// Start a new test server with the given config.
/// Binds to `127.0.0.1:0` (OS-assigned port).
- pub async fn start(config: Config) -> Self {
- Self::start_with_rate_limit(config, 1000).await
- }
-
- /// Start a new test server with a specific rate limit.
- pub async fn start_with_rate_limit(mut config: Config, rate_limit: u32) -> Self {
+ pub async fn start(mut config: Config) -> Self {
let tempdir = TempDir::new().expect("failed to create temp dir");
let config_path = tempdir.path().join("witryna.toml");
@@ -44,38 +37,50 @@ impl TestServer {
.await
.expect("failed to resolve secrets");
- let quota = Quota::per_minute(NonZeroU32::new(rate_limit).expect("rate limit must be > 0"));
-
let state = AppState {
config: Arc::new(RwLock::new(config)),
config_path: Arc::new(config_path),
build_scheduler: Arc::new(BuildScheduler::new()),
- rate_limiter: Arc::new(RateLimiter::dashmap(quota)),
polling_manager: Arc::new(PollingManager::new()),
};
- let listener = TcpListener::bind("127.0.0.1:0")
- .await
- .expect("failed to bind to random port");
- let port = listener.local_addr().unwrap().port();
+ let server = Arc::new(Server::http("127.0.0.1:0").expect("failed to bind"));
+ let port = match server.server_addr() {
+ tiny_http::ListenAddr::IP(addr) => addr.port(),
+ _ => unreachable!("expected IP address"),
+ };
let base_url = format!("http://127.0.0.1:{port}");
- let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>();
-
- let server_state = state.clone();
- tokio::spawn(async move {
- witryna::test_support::run_server(server_state, listener, async {
- let _ = shutdown_rx.await;
- })
- .await
- .expect("server failed");
- });
+        // Shutdown path: TestServer::shutdown() calls server.unblock() to break
+        // tiny_http's blocking accept loop, so no oneshot channel is needed; the
+        // future passed below is a placeholder that never resolves.
+ let server_thread = witryna::test_support::run_server(
+ state.clone(),
+ server.clone(),
+ std::future::pending(),
+ );
+
+    // Readiness probe: poll /health for up to ~500 ms (50 × 10 ms); if the server never comes up we proceed anyway and let the test's own requests fail loudly.
+ let client = reqwest::Client::new();
+ for _ in 0..50 {
+ if client
+ .get(format!("{base_url}/health"))
+ .send()
+ .await
+ .map(|r| r.status().as_u16() == 200)
+ .unwrap_or(false)
+ {
+ break;
+ }
+ tokio::time::sleep(std::time::Duration::from_millis(10)).await;
+ }
Self {
base_url,
state,
tempdir,
- shutdown_tx: Some(shutdown_tx),
+ server,
+ server_thread: Some(server_thread),
}
}
@@ -91,8 +96,11 @@ impl TestServer {
/// Shut down the server gracefully.
pub fn shutdown(&mut self) {
- if let Some(tx) = self.shutdown_tx.take() {
- let _ = tx.send(());
+ // Unblock the HTTP request loop directly — no async channel needed
+ self.server.unblock();
+ // Join the HTTP thread to ensure clean teardown
+ if let Some(handle) = self.server_thread.take() {
+ let _ = handle.join();
}
}
}
@@ -112,7 +120,6 @@ pub fn test_config(base_dir: PathBuf) -> Config {
base_dir,
log_dir,
log_level: "debug".to_owned(),
- rate_limit_per_minute: 10,
max_builds_to_keep: 5,
git_timeout: None,
sites: vec![],
@@ -128,7 +135,6 @@ pub fn test_config_with_site(base_dir: PathBuf, site: SiteConfig) -> Config {
base_dir,
log_dir,
log_level: "debug".to_owned(),
- rate_limit_per_minute: 10,
max_builds_to_keep: 5,
git_timeout: None,
sites: vec![site],
@@ -144,7 +150,6 @@ pub fn test_config_with_sites(base_dir: PathBuf, sites: Vec<SiteConfig>) -> Conf
base_dir,
log_dir,
log_level: "debug".to_owned(),
- rate_limit_per_minute: 10,
max_builds_to_keep: 5,
git_timeout: None,
sites,
@@ -286,7 +291,6 @@ fn build_config_toml(config: &Config) -> String {
{}base_dir = "{}"
log_dir = "{}"
log_level = "{}"
-rate_limit_per_minute = {}
max_builds_to_keep = {}
"#,
config.listen_address,
@@ -294,7 +298,6 @@ max_builds_to_keep = {}
config.base_dir.display(),
config.log_dir.display(),
config.log_level,
- config.rate_limit_per_minute,
config.max_builds_to_keep,
);
diff --git a/tests/integration/hooks.rs b/tests/integration/hooks.rs
index 86684cc..d8b4fa3 100644
--- a/tests/integration/hooks.rs
+++ b/tests/integration/hooks.rs
@@ -1,6 +1,7 @@
use crate::git_helpers::create_local_repo;
use crate::harness::{SiteBuilder, TestServer, test_config_with_site};
use crate::runtime::{skip_without_git, skip_without_runtime};
+use std::path::Path;
use std::time::Duration;
// ---------------------------------------------------------------------------
@@ -135,3 +136,150 @@ async fn post_deploy_hook_failure_nonfatal() {
}
assert!(found_hook_log, "hook log should exist for failed hook");
}
+
+#[tokio::test]
+async fn post_deploy_hook_runs_on_build_failure() {
+ skip_without_git!();
+ skip_without_runtime!();
+
+ let tempdir = tempfile::tempdir().unwrap();
+ let base_dir = tempdir.path().to_path_buf();
+
+ let repo_dir = tempdir.path().join("repos");
+ tokio::fs::create_dir_all(&repo_dir).await.unwrap();
+ let repo_url = create_local_repo(&repo_dir, "main").await;
+
+ // Build command fails (exit 1); hook writes WITRYNA_BUILD_STATUS to a file in clone dir
+ let site = SiteBuilder::new("hook-on-fail", &repo_url, "test-token")
+ .overrides("alpine:latest", "exit 1", "out")
+ .post_deploy(vec![
+ "sh".to_owned(),
+ "-c".to_owned(),
+ "echo \"$WITRYNA_BUILD_STATUS\" > hook-status.txt".to_owned(),
+ ])
+ .build();
+
+ let server = TestServer::start(test_config_with_site(base_dir.clone(), site)).await;
+
+ let resp = TestServer::client()
+ .post(server.url("/hook-on-fail"))
+ .header("Authorization", "Bearer test-token")
+ .send()
+ .await
+ .unwrap();
+ assert_eq!(resp.status().as_u16(), 202);
+
+ // Wait for state.json to show "failed" (no current symlink on build failure)
+ let state_path = base_dir.join("builds/hook-on-fail/state.json");
+ let max_wait = Duration::from_secs(120);
+ let start = std::time::Instant::now();
+
+ loop {
+ assert!(start.elapsed() <= max_wait, "build timed out");
+ if state_path.exists() {
+ let content = tokio::fs::read_to_string(&state_path)
+ .await
+ .unwrap_or_default();
+ if content.contains("\"failed\"") {
+ // Give the hook a moment to finish writing
+ tokio::time::sleep(Duration::from_secs(2)).await;
+ break;
+ }
+ }
+ tokio::time::sleep(Duration::from_millis(500)).await;
+ }
+
+ // Verify hook ran and received build_status=failed
+ let clone_dir = base_dir.join("clones/hook-on-fail");
+ let hook_status_path = clone_dir.join("hook-status.txt");
+ assert!(
+ hook_status_path.exists(),
+ "hook should have created hook-status.txt in clone dir"
+ );
+ let status = tokio::fs::read_to_string(&hook_status_path).await.unwrap();
+ assert_eq!(
+ status.trim(),
+ "failed",
+ "hook should receive build_status=failed"
+ );
+
+ // Verify state.json says "failed" (not "hook failed")
+ let state_content = tokio::fs::read_to_string(&state_path).await.unwrap();
+ assert!(
+ state_content.contains("\"failed\""),
+ "state.json should show failed status"
+ );
+
+ // No current symlink should exist (build failed)
+ assert!(
+ !Path::new(&base_dir.join("builds/hook-on-fail/current")).is_symlink(),
+ "current symlink should not exist on build failure"
+ );
+}
+
+#[tokio::test]
+async fn post_deploy_hook_receives_success_status() {
+ skip_without_git!();
+ skip_without_runtime!();
+
+ let tempdir = tempfile::tempdir().unwrap();
+ let base_dir = tempdir.path().to_path_buf();
+
+ let repo_dir = tempdir.path().join("repos");
+ tokio::fs::create_dir_all(&repo_dir).await.unwrap();
+ let repo_url = create_local_repo(&repo_dir, "main").await;
+
+ // Successful build; hook writes WITRYNA_BUILD_STATUS to build dir
+ let site = SiteBuilder::new("hook-success-status", &repo_url, "test-token")
+ .overrides(
+ "alpine:latest",
+ "mkdir -p out && echo test > out/index.html",
+ "out",
+ )
+ .post_deploy(vec![
+ "sh".to_owned(),
+ "-c".to_owned(),
+ "echo \"$WITRYNA_BUILD_STATUS\" > \"$WITRYNA_BUILD_DIR/build-status.txt\"".to_owned(),
+ ])
+ .build();
+
+ let server = TestServer::start(test_config_with_site(base_dir.clone(), site)).await;
+
+ let resp = TestServer::client()
+ .post(server.url("/hook-success-status"))
+ .header("Authorization", "Bearer test-token")
+ .send()
+ .await
+ .unwrap();
+ assert_eq!(resp.status().as_u16(), 202);
+
+ // Wait for current symlink
+ let builds_dir = base_dir.join("builds/hook-success-status");
+ let max_wait = Duration::from_secs(120);
+ let start = std::time::Instant::now();
+
+ loop {
+ assert!(start.elapsed() <= max_wait, "build timed out");
+ if builds_dir.join("current").is_symlink() {
+ tokio::time::sleep(Duration::from_secs(3)).await;
+ break;
+ }
+ tokio::time::sleep(Duration::from_millis(500)).await;
+ }
+
+ // Read build-status.txt from build dir
+ let current_target = tokio::fs::read_link(builds_dir.join("current"))
+ .await
+ .expect("current symlink should exist");
+ let status_path = current_target.join("build-status.txt");
+ assert!(
+ status_path.exists(),
+ "hook should have created build-status.txt"
+ );
+ let status = tokio::fs::read_to_string(&status_path).await.unwrap();
+ assert_eq!(
+ status.trim(),
+ "success",
+ "hook should receive build_status=success"
+ );
+}
diff --git a/tests/integration/main.rs b/tests/integration/main.rs
index 7ee422e..be0d316 100644
--- a/tests/integration/main.rs
+++ b/tests/integration/main.rs
@@ -13,8 +13,11 @@ mod runtime;
mod auth;
mod cache;
mod cleanup;
+mod cli_cleanup;
mod cli_run;
mod cli_status;
+mod cli_switch;
+mod cli_validate;
mod concurrent;
mod deploy;
mod edge_cases;
@@ -26,6 +29,5 @@ mod not_found;
mod overrides;
mod packaging;
mod polling;
-mod rate_limit;
mod secrets;
mod sighup;
diff --git a/tests/integration/polling.rs b/tests/integration/polling.rs
index a4447cc..4bf8a05 100644
--- a/tests/integration/polling.rs
+++ b/tests/integration/polling.rs
@@ -53,7 +53,6 @@ async fn polling_triggers_build_on_new_commits() {
base_dir: base_dir.clone(),
log_dir: base_dir.join("logs"),
log_level: "debug".to_owned(),
- rate_limit_per_minute: 100,
max_builds_to_keep: 5,
git_timeout: None,
sites: vec![site],
diff --git a/tests/integration/rate_limit.rs b/tests/integration/rate_limit.rs
deleted file mode 100644
index 81378a2..0000000
--- a/tests/integration/rate_limit.rs
+++ /dev/null
@@ -1,114 +0,0 @@
-use crate::harness::{SiteBuilder, TestServer, test_config_with_site, test_config_with_sites};
-
-#[tokio::test]
-async fn rate_limit_exceeded_returns_429() {
- let dir = tempfile::tempdir().unwrap().keep();
- let site = SiteBuilder::new("my-site", "https://example.com/repo.git", "secret-token").build();
- let config = test_config_with_site(dir, site);
-
- // Rate limit of 2 per minute
- let server = TestServer::start_with_rate_limit(config, 2).await;
-
- // First request — accepted (or 202)
- let resp1 = TestServer::client()
- .post(server.url("/my-site"))
- .header("Authorization", "Bearer secret-token")
- .send()
- .await
- .unwrap();
- let status1 = resp1.status().as_u16();
- assert!(
- status1 == 202 || status1 == 409,
- "expected 202 or 409, got {status1}"
- );
-
- // Second request
- let resp2 = TestServer::client()
- .post(server.url("/my-site"))
- .header("Authorization", "Bearer secret-token")
- .send()
- .await
- .unwrap();
- let status2 = resp2.status().as_u16();
- assert!(
- status2 == 202 || status2 == 409,
- "expected 202 or 409, got {status2}"
- );
-
- // Third request should hit rate limit
- let resp3 = TestServer::client()
- .post(server.url("/my-site"))
- .header("Authorization", "Bearer secret-token")
- .send()
- .await
- .unwrap();
- assert_eq!(resp3.status().as_u16(), 429);
- let body = resp3.text().await.unwrap();
- let json: serde_json::Value = serde_json::from_str(&body).unwrap();
- assert_eq!(json["error"], "rate_limit_exceeded");
-}
-
-#[tokio::test]
-async fn rate_limit_different_tokens_independent() {
- let dir = tempfile::tempdir().unwrap().keep();
- let sites = vec![
- SiteBuilder::new("site-one", "https://example.com/one.git", "token-one").build(),
- SiteBuilder::new("site-two", "https://example.com/two.git", "token-two").build(),
- ];
- let config = test_config_with_sites(dir, sites);
-
- // Rate limit of 1 per minute
- let server = TestServer::start_with_rate_limit(config, 1).await;
-
- // token-one: first request succeeds
- let resp1 = TestServer::client()
- .post(server.url("/site-one"))
- .header("Authorization", "Bearer token-one")
- .send()
- .await
- .unwrap();
- assert_eq!(resp1.status().as_u16(), 202);
-
- // token-one: second request hits rate limit
- let resp2 = TestServer::client()
- .post(server.url("/site-one"))
- .header("Authorization", "Bearer token-one")
- .send()
- .await
- .unwrap();
- assert_eq!(resp2.status().as_u16(), 429);
-
- // token-two: still has its own budget
- let resp3 = TestServer::client()
- .post(server.url("/site-two"))
- .header("Authorization", "Bearer token-two")
- .send()
- .await
- .unwrap();
- assert_eq!(resp3.status().as_u16(), 202);
-}
-
-#[tokio::test]
-async fn rate_limit_checked_after_auth() {
- let dir = tempfile::tempdir().unwrap().keep();
- let site = SiteBuilder::new("my-site", "https://example.com/repo.git", "secret-token").build();
- let config = test_config_with_site(dir, site);
- let server = TestServer::start_with_rate_limit(config, 1).await;
-
- // Exhaust rate limit
- let _ = TestServer::client()
- .post(server.url("/my-site"))
- .header("Authorization", "Bearer secret-token")
- .send()
- .await
- .unwrap();
-
- // Wrong token should get 401, not 429
- let resp = TestServer::client()
- .post(server.url("/my-site"))
- .header("Authorization", "Bearer wrong-token")
- .send()
- .await
- .unwrap();
- assert_eq!(resp.status().as_u16(), 401);
-}
diff --git a/tests/integration/sighup.rs b/tests/integration/sighup.rs
index 23c0dfd..0474f1d 100644
--- a/tests/integration/sighup.rs
+++ b/tests/integration/sighup.rs
@@ -116,8 +116,20 @@ repo_url = "https://example.com/new.git"
branch = "main"
webhook_token = "new-token"
"#,
- server.state.config.read().await.base_dir.display(),
- server.state.config.read().await.log_dir.display(),
+ server
+ .state
+ .config
+ .read()
+ .expect("config lock poisoned")
+ .base_dir
+ .display(),
+ server
+ .state
+ .config
+ .read()
+ .expect("config lock poisoned")
+ .log_dir
+ .display(),
);
tokio::fs::write(config_path, &new_toml).await.unwrap();
@@ -134,16 +146,20 @@ webhook_token = "new-token"
assert_eq!(resp.status().as_u16(), 200);
// Verify the reloadable field (sites) was updated
- let config = server.state.config.read().await;
- assert_eq!(config.sites.len(), 2, "sites should have been reloaded");
- assert!(
- config.find_site("new-site").is_some(),
- "new-site should exist after reload"
- );
+ let (sites_len, has_new_site, listen_addr) = {
+ let config = server.state.config.read().expect("config lock poisoned");
+ (
+ config.sites.len(),
+ config.find_site("new-site").is_some(),
+ config.listen_address.clone(),
+ )
+ };
+ assert_eq!(sites_len, 2, "sites should have been reloaded");
+ assert!(has_new_site, "new-site should exist after reload");
// Verify non-reloadable field was preserved (not overwritten with "127.0.0.1:19999")
assert_ne!(
- config.listen_address, "127.0.0.1:19999",
+ listen_addr, "127.0.0.1:19999",
"listen_address should be preserved from original config"
);
}