use crate::build_guard::{BuildGuard, BuildScheduler}; use crate::config::{Config, SiteConfig}; use crate::polling::PollingManager; use anyhow::Result; use log::{error, info, warn}; use std::path::PathBuf; use std::sync::{Arc, RwLock}; use tiny_http::{Header, Method, Request, Response, Server}; use tokio::signal::unix::{SignalKind, signal}; #[derive(serde::Serialize)] struct ErrorResponse { error: &'static str, } #[derive(serde::Serialize)] struct QueuedResponse { status: &'static str, } #[derive(serde::Serialize)] struct HealthResponse { status: &'static str, } fn json_response(status: u16, body: &str) -> Response>> { let data = body.as_bytes().to_vec(); Response::from_data(data) .with_status_code(status) .with_header(Header::from_bytes("Content-Type", "application/json").expect("valid header")) } fn empty_response(status: u16) -> Response { Response::empty(status) } #[derive(Clone)] pub struct AppState { pub config: Arc>, pub config_path: Arc, pub build_scheduler: Arc, pub polling_manager: Arc, } /// Extract Bearer token from `tiny_http` headers. fn extract_bearer_token(headers: &[Header]) -> Option<&str> { headers .iter() .find(|h| h.field.equiv("Authorization")) .and_then(|h| h.value.as_str().strip_prefix("Bearer ")) } fn validate_token(provided: &str, expected: &str) -> bool { let a = provided.as_bytes(); let b = expected.as_bytes(); // Constant-time comparison — OWASP requirement. // Length check is not constant-time, but token length is not secret // (same early-return approach as subtle::ConstantTimeEq for slices). if a.len() != b.len() { return false; } let mut acc: u8 = 0; for (x, y) in a.iter().zip(b.iter()) { acc |= x ^ y; } acc == 0 } /// Check if path is a single segment (e.g., "/my-site"). fn is_site_path(path: &str) -> bool { path.starts_with('/') && path.len() > 1 && !path[1..].contains('/') } /// Handle POST `/{site_name}`. 
fn handle_deploy( request: Request, site_name: &str, state: &AppState, handle: &tokio::runtime::Handle, ) { info!("[{site_name}] deployment request received"); // Find site let site = state .config .read() .expect("config lock poisoned") .find_site(site_name) .cloned(); let Some(site) = site else { info!("[{site_name}] site not found"); let body = serde_json::to_string(&ErrorResponse { error: "not_found" }) .expect("static JSON serialization"); let _ = request.respond(json_response(404, &body)); return; }; // Auth check (if configured) if !site.webhook_token.is_empty() { let token_valid = extract_bearer_token(request.headers()) .is_some_and(|token| validate_token(token, &site.webhook_token)); if !token_valid { info!("[{site_name}] unauthorized request"); let body = serde_json::to_string(&ErrorResponse { error: "unauthorized", }) .expect("static JSON serialization"); let _ = request.respond(json_response(401, &body)); return; } } // Try immediate build let Some(guard) = BuildGuard::try_acquire(site_name.to_owned(), &state.build_scheduler) else { // Build in progress — try to queue if state.build_scheduler.try_queue(site_name) { info!("[{site_name}] build queued"); let body = serde_json::to_string(&QueuedResponse { status: "queued" }) .expect("static JSON serialization"); let _ = request.respond(json_response(202, &body)); return; } // Already queued — collapse info!("[{site_name}] build already queued, collapsing"); let _ = request.respond(empty_response(202)); return; }; info!("[{site_name}] deployment accepted"); // Spawn async build pipeline with queue drain loop let state = state.clone(); let site_name = site_name.to_owned(); handle.spawn(async move { let mut current_site = site; let mut current_guard = guard; loop { #[allow(clippy::large_futures)] run_build_pipeline( state.clone(), site_name.clone(), current_site.clone(), current_guard, ) .await; // Guard dropped here — build lock released if !state.build_scheduler.take_queued(&site_name) { break; } 
info!("[{site_name}] processing queued rebuild"); let Some(new_site) = state .config .read() .expect("config lock poisoned") .find_site(&site_name) .cloned() else { warn!("[{site_name}] site removed from config, skipping queued rebuild"); break; }; let Some(new_guard) = BuildGuard::try_acquire(site_name.clone(), &state.build_scheduler) else { break; // someone else grabbed it }; current_site = new_site; current_guard = new_guard; } }); let _ = request.respond(empty_response(202)); } /// Main request loop (runs on `std::thread`). #[allow(clippy::needless_pass_by_value)] // ownership required by std::thread::spawn callers pub(crate) fn handle_requests( server: Arc, state: AppState, handle: tokio::runtime::Handle, ) { for request in server.incoming_requests() { let path = request.url().split('?').next().unwrap_or("").to_owned(); let method = request.method().clone(); match (method, path.as_str()) { (Method::Get, "/health") => { let body = serde_json::to_string(&HealthResponse { status: "ok" }) .expect("static JSON serialization"); let _ = request.respond(json_response(200, &body)); } (_, "/health") => { let _ = request.respond(empty_response(405)); } (Method::Post, _) if is_site_path(&path) => { let site_name = &path[1..]; handle_deploy(request, site_name, &state, &handle); } (_, _) if is_site_path(&path) => { let _ = request.respond(empty_response(405)); } _ => { let body = serde_json::to_string(&ErrorResponse { error: "not_found" }) .expect("static JSON serialization"); let _ = request.respond(json_response(404, &body)); } } } } /// Run the complete build pipeline: git sync → build → publish. 
/// Snapshots the needed config values under the read lock, then runs the
/// pipeline without holding any lock across the long `.await`.
#[allow(clippy::large_futures)]
pub(crate) async fn run_build_pipeline(
    state: AppState,
    site_name: String,
    site: SiteConfig,
    // Held for the duration of the build; dropping it releases the build slot.
    _guard: BuildGuard,
) {
    // Copy config values out, then drop the read lock before awaiting.
    let (base_dir, log_dir, container_runtime, max_builds_to_keep, git_timeout) = {
        let config = state.config.read().expect("config lock poisoned");
        (
            config.base_dir.clone(),
            config.log_dir.clone(),
            config.container_runtime.clone(),
            config.max_builds_to_keep,
            config
                .git_timeout
                .unwrap_or(crate::git::GIT_TIMEOUT_DEFAULT),
        )
    };
    match crate::pipeline::run_build(
        &site_name,
        &site,
        &base_dir,
        &log_dir,
        &container_runtime,
        max_builds_to_keep,
        git_timeout,
        false,
    )
    .await
    {
        Ok(result) => {
            info!(
                "[{site_name}] pipeline completed: build_dir={} duration_secs={}",
                result.build_dir.display(),
                result.duration.as_secs()
            );
        }
        Err(e) => {
            // Failure is logged, not propagated: the caller's drain loop continues.
            error!("[{site_name}] pipeline failed: {e}");
        }
    }
}

/// Setup SIGHUP signal handler for configuration hot-reload.
///
/// Reloadable fields are swapped in; fields that require a restart
/// (listen_address, base_dir, log_dir, log_level) are preserved from the
/// running config, with a warning when the file differs.
pub(crate) fn setup_sighup_handler(state: AppState) {
    tokio::spawn(async move {
        #[allow(clippy::expect_used)] // fatal: cannot proceed without signal handler
        let mut sighup =
            signal(SignalKind::hangup()).expect("failed to setup SIGHUP signal handler");
        loop {
            sighup.recv().await;
            info!("SIGHUP received, reloading configuration");
            let config_path = state.config_path.as_ref();
            match Config::load(config_path).await {
                Ok(new_config) => {
                    let old_sites_count = state
                        .config
                        .read()
                        .expect("config lock poisoned")
                        .sites
                        .len();
                    let new_sites_count = new_config.sites.len();
                    // Check for non-reloadable changes and capture old values
                    let (old_listen, old_base, old_log_dir, old_log_level) = {
                        let old_config = state.config.read().expect("config lock poisoned");
                        if old_config.listen_address != new_config.listen_address {
                            warn!(
                                "listen_address changed but cannot be reloaded (restart required): old={} new={}",
                                old_config.listen_address, new_config.listen_address
                            );
                        }
                        if old_config.base_dir != new_config.base_dir {
                            warn!(
                                "base_dir changed but cannot be reloaded (restart required): old={} new={}",
                                old_config.base_dir.display(),
                                new_config.base_dir.display()
                            );
                        }
                        if old_config.log_dir != new_config.log_dir {
                            warn!(
                                "log_dir changed but cannot be reloaded (restart required): old={} new={}",
                                old_config.log_dir.display(),
                                new_config.log_dir.display()
                            );
                        }
                        if old_config.log_level != new_config.log_level {
                            warn!(
                                "log_level changed but cannot be reloaded (restart required): old={} new={}",
                                old_config.log_level, new_config.log_level
                            );
                        }
                        // Read lock is dropped at the end of this block, before
                        // the write lock below — no lock-upgrade deadlock.
                        (
                            old_config.listen_address.clone(),
                            old_config.base_dir.clone(),
                            old_config.log_dir.clone(),
                            old_config.log_level.clone(),
                        )
                    };
                    // Preserve non-reloadable fields from the running config
                    let mut final_config = new_config;
                    final_config.listen_address = old_listen;
                    final_config.base_dir = old_base;
                    final_config.log_dir = old_log_dir;
                    final_config.log_level = old_log_level;
                    // Apply the merged configuration
                    *state.config.write().expect("config lock poisoned") = final_config;
                    // Restart polling tasks with new configuration
                    info!("restarting polling tasks");
                    state.polling_manager.stop_all().await;
                    state.polling_manager.start_polling(state.clone()).await;
                    info!(
                        "configuration reloaded successfully: old_sites_count={old_sites_count} new_sites_count={new_sites_count}"
                    );
                }
                Err(e) => {
                    // Keep serving with the last good config on parse/load failure.
                    error!("failed to reload configuration, keeping current config: {e}");
                }
            }
        }
    });
}

/// Start the server in production mode.
///
/// # Errors
///
/// Returns an error if the TCP listener cannot bind or the server encounters
/// a fatal I/O error.
pub async fn run(config: Config, config_path: PathBuf) -> Result<()> {
    let addr = config.parsed_listen_address();
    let state = AppState {
        config: Arc::new(RwLock::new(config)),
        config_path: Arc::new(config_path),
        build_scheduler: Arc::new(BuildScheduler::new()),
        polling_manager: Arc::new(PollingManager::new()),
    };

    // Setup SIGHUP handler for configuration hot-reload
    setup_sighup_handler(state.clone());

    // Start polling tasks for sites with poll_interval configured
    state.polling_manager.start_polling(state.clone()).await;

    let server = Arc::new(Server::http(addr).map_err(|e| anyhow::anyhow!("failed to bind: {e}"))?);
    info!("server listening on {addr}");

    // Shutdown handler: signal → unblock server
    // `unblock()` makes the blocking `incoming_requests()` loop below return,
    // which lets spawn_blocking finish and `run` resolve.
    let shutdown_server = Arc::clone(&server);
    tokio::spawn(async move {
        let mut sigterm = signal(SignalKind::terminate()).expect("failed to setup SIGTERM handler");
        let mut sigint = signal(SignalKind::interrupt()).expect("failed to setup SIGINT handler");
        tokio::select! {
            _ = sigterm.recv() => info!("received SIGTERM, shutting down"),
            _ = sigint.recv() => info!("received SIGINT, shutting down"),
        }
        shutdown_server.unblock();
    });

    // Run HTTP loop on blocking thread (tiny_http's accept loop is synchronous)
    let handle = tokio::runtime::Handle::current();
    tokio::task::spawn_blocking(move || {
        handle_requests(server, state, handle);
    })
    .await?;
    Ok(())
}

/// Run the server with a pre-built Server, shutting down when `shutdown_signal` resolves.
///
/// Used by integration tests via [`test_support::run_server`].
/// Returns a `std::thread::JoinHandle` for the request-handling thread.
#[cfg(any(test, feature = "integration"))]
pub(crate) fn run_with_server(
    state: AppState,
    // NOTE(review): `Arc<Server>` restored — the type parameter was stripped
    // in the source; `handle_requests` and `Arc::clone` below require it.
    server: Arc<Server>,
    shutdown_signal: impl std::future::Future + Send + 'static,
) -> std::thread::JoinHandle<()> {
    let handle = tokio::runtime::Handle::current();

    // Shutdown: wait for signal, then unblock the blocking accept loop
    let shutdown_server = Arc::clone(&server);
    tokio::spawn(async move {
        shutdown_signal.await;
        shutdown_server.unblock();
    });

    // Spawn request handler on std::thread, return handle for joining
    std::thread::spawn(move || {
        handle_requests(server, state, handle);
    })
}

#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::indexing_slicing, clippy::expect_used)]
mod tests {
    use super::*;
    use crate::config::{BuildOverrides, SiteConfig};
    use std::path::PathBuf;

    /// Wrap a config in a fresh `AppState` (new scheduler and poller).
    fn test_state(config: Config) -> AppState {
        AppState {
            config: Arc::new(RwLock::new(config)),
            config_path: Arc::new(PathBuf::from("witryna.toml")),
            build_scheduler: Arc::new(BuildScheduler::new()),
            polling_manager: Arc::new(PollingManager::new()),
        }
    }

    /// Minimal valid config with no sites.
    fn test_config() -> Config {
        Config {
            listen_address: "127.0.0.1:8080".to_owned(),
            container_runtime: "podman".to_owned(),
            base_dir: PathBuf::from("/var/lib/witryna"),
            log_dir: PathBuf::from("/var/log/witryna"),
            log_level: "info".to_owned(),
            max_builds_to_keep: 5,
            git_timeout: None,
            sites: vec![],
        }
    }

    /// Config with one token-protected site ("my-site" / "secret-token").
    fn test_config_with_sites() -> Config {
        Config {
            sites: vec![SiteConfig {
                name: "my-site".to_owned(),
                repo_url: "https://github.com/user/my-site.git".to_owned(),
                branch: "main".to_owned(),
                webhook_token: "secret-token".to_owned(),
                webhook_token_file: None,
                build_overrides: BuildOverrides::default(),
                poll_interval: None,
                build_timeout: None,
                cache_dirs: None,
                post_deploy: None,
                env: None,
                container_memory: None,
                container_cpus: None,
                container_pids_limit: None,
                container_network: "none".to_owned(),
                git_depth: None,
                container_workdir: None,
                config_file: None,
            }],
            ..test_config()
        }
    }

    /// Start a test server on a random port, returning the server handle, state, and port.
fn test_server(config: Config) -> (Arc, AppState, u16) { let state = test_state(config); let server = Arc::new(Server::http("127.0.0.1:0").unwrap()); let port = match server.server_addr() { tiny_http::ListenAddr::IP(a) => a.port(), _ => unreachable!("expected IP address"), }; let handle = tokio::runtime::Handle::current(); let server_clone = server.clone(); let state_clone = state.clone(); std::thread::spawn(move || handle_requests(server_clone, state_clone, handle)); (server, state, port) } #[tokio::test] async fn health_endpoint_returns_ok() { let (server, _state, port) = test_server(test_config_with_sites()); let resp = reqwest::get(format!("http://127.0.0.1:{port}/health")) .await .unwrap(); assert_eq!(resp.status().as_u16(), 200); let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["status"], "ok"); server.unblock(); } #[tokio::test] async fn json_responses_have_content_type_header() { let (server, _state, port) = test_server(test_config_with_sites()); let resp = reqwest::get(format!("http://127.0.0.1:{port}/health")) .await .unwrap(); assert_eq!( resp.headers() .get("content-type") .unwrap() .to_str() .unwrap(), "application/json" ); server.unblock(); } #[tokio::test] async fn unknown_site_post_returns_not_found() { let (server, _state, port) = test_server(test_config()); let client = reqwest::Client::new(); let resp = client .post(format!("http://127.0.0.1:{port}/nonexistent")) .send() .await .unwrap(); assert_eq!(resp.status().as_u16(), 404); let json: serde_json::Value = resp.json().await.unwrap(); assert_eq!(json["error"], "not_found"); server.unblock(); } #[tokio::test] async fn deploy_known_site_with_valid_token_returns_accepted() { let (server, _state, port) = test_server(test_config_with_sites()); let client = reqwest::Client::new(); let resp = client .post(format!("http://127.0.0.1:{port}/my-site")) .header("Authorization", "Bearer secret-token") .send() .await .unwrap(); assert_eq!(resp.status().as_u16(), 202); 
        server.unblock();
    }

    // No Authorization header at all on a token-protected site -> 401.
    #[tokio::test]
    async fn deploy_missing_auth_header_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_invalid_token_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer wrong-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_malformed_auth_header_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        // Test without "Bearer " prefix
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "secret-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_basic_auth_returns_unauthorized() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        // Test Basic auth instead of Bearer
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Basic dXNlcjpwYXNz")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_get_method_not_allowed() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let resp = reqwest::get(format!("http://127.0.0.1:{port}/my-site"))
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 405);
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_unknown_site_with_token_returns_not_found() {
        let (server, _state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        let resp = client
            .post(format!("http://127.0.0.1:{port}/unknown-site"))
            .header("Authorization", "Bearer any-token")
            .send()
            .await
            .unwrap();
        // Returns 404 before checking token (site lookup first)
        assert_eq!(resp.status().as_u16(), 404);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "not_found");
        server.unblock();
    }

    // Two distinct sites with distinct tokens, for cross-site concurrency tests.
    fn test_config_with_two_sites() -> Config {
        Config {
            listen_address: "127.0.0.1:8080".to_owned(),
            container_runtime: "podman".to_owned(),
            base_dir: PathBuf::from("/var/lib/witryna"),
            log_dir: PathBuf::from("/var/log/witryna"),
            log_level: "info".to_owned(),
            max_builds_to_keep: 5,
            git_timeout: None,
            sites: vec![
                SiteConfig {
                    name: "site-one".to_owned(),
                    repo_url: "https://github.com/user/site-one.git".to_owned(),
                    branch: "main".to_owned(),
                    webhook_token: "token-one".to_owned(),
                    webhook_token_file: None,
                    build_overrides: BuildOverrides::default(),
                    poll_interval: None,
                    build_timeout: None,
                    cache_dirs: None,
                    post_deploy: None,
                    env: None,
                    container_memory: None,
                    container_cpus: None,
                    container_pids_limit: None,
                    container_network: "none".to_owned(),
                    git_depth: None,
                    container_workdir: None,
                    config_file: None,
                },
                SiteConfig {
                    name: "site-two".to_owned(),
                    repo_url: "https://github.com/user/site-two.git".to_owned(),
                    branch: "main".to_owned(),
                    webhook_token: "token-two".to_owned(),
                    webhook_token_file: None,
                    build_overrides: BuildOverrides::default(),
                    poll_interval: None,
                    build_timeout: None,
                    cache_dirs: None,
                    post_deploy: None,
                    env: None,
                    container_memory: None,
                    container_cpus: None,
                    container_pids_limit: None,
                    container_network: "none".to_owned(),
                    git_depth: None,
                    container_workdir: None,
                    config_file: None,
                },
            ],
        }
    }

    #[tokio::test]
    async fn deploy_concurrent_same_site_gets_queued() {
        let (server, state, port) = test_server(test_config_with_sites());
        let client = reqwest::Client::new();
        // Pre-mark site as building to simulate an in-progress build
        state
            .build_scheduler
            .in_progress
            .lock()
            .unwrap()
            .insert("my-site".to_owned());
        // First request to same site should be queued (202 with body)
        let resp1 = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer secret-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp1.status().as_u16(), 202);
        let json: serde_json::Value = resp1.json().await.unwrap();
        assert_eq!(json["status"], "queued");
        // Second request should be collapsed (202, no body)
        let resp2 = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer secret-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp2.status().as_u16(), 202);
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_concurrent_different_sites_both_succeed() {
        let (server, _state, port) = test_server(test_config_with_two_sites());
        let client = reqwest::Client::new();
        // First site deployment
        let resp1 = client
            .post(format!("http://127.0.0.1:{port}/site-one"))
            .header("Authorization", "Bearer token-one")
            .send()
            .await
            .unwrap();
        assert_eq!(resp1.status().as_u16(), 202);
        // Second site deployment should also succeed
        let resp2 = client
            .post(format!("http://127.0.0.1:{port}/site-two"))
            .header("Authorization", "Bearer token-two")
            .send()
            .await
            .unwrap();
        assert_eq!(resp2.status().as_u16(), 202);
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_site_in_progress_checked_after_auth() {
        let (server, state, port) = test_server(test_config_with_sites());
        // Pre-mark site as building
        state
            .build_scheduler
            .in_progress
            .lock()
            .unwrap()
            .insert("my-site".to_owned());
        let client = reqwest::Client::new();
        // Request with wrong token should return 401 (auth checked before build status)
        let resp = client
            .post(format!("http://127.0.0.1:{port}/my-site"))
            .header("Authorization", "Bearer wrong-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 401);
        let json: serde_json::Value = resp.json().await.unwrap();
        assert_eq!(json["error"], "unauthorized");
        server.unblock();
    }

    #[tokio::test]
    async fn sighup_preserves_non_reloadable_fields() {
        // Original config with specific non-reloadable values
        let original = Config {
            listen_address: "127.0.0.1:8080".to_owned(),
            container_runtime: "podman".to_owned(),
            base_dir: PathBuf::from("/var/lib/witryna"),
            log_dir: PathBuf::from("/var/log/witryna"),
            log_level: "info".to_owned(),
            max_builds_to_keep: 5,
            git_timeout: None,
            sites: vec![SiteConfig {
                name: "old-site".to_owned(),
                repo_url: "https://example.com/old.git".to_owned(),
                branch: "main".to_owned(),
                webhook_token: "old-token".to_owned(),
                webhook_token_file: None,
                build_overrides: BuildOverrides::default(),
                poll_interval: None,
                build_timeout: None,
                cache_dirs: None,
                post_deploy: None,
                env: None,
                container_memory: None,
                container_cpus: None,
                container_pids_limit: None,
                container_network: "none".to_owned(),
                git_depth: None,
                container_workdir: None,
                config_file: None,
            }],
        };
        let state = test_state(original);
        // Simulate a new config loaded from disk with changed non-reloadable
        // AND reloadable fields
        let new_config = Config {
            listen_address: "0.0.0.0:9999".to_owned(),
            container_runtime: "docker".to_owned(),
            base_dir: PathBuf::from("/tmp/new-base"),
            log_dir: PathBuf::from("/tmp/new-logs"),
            log_level: "debug".to_owned(),
            max_builds_to_keep: 10,
            git_timeout: None,
            sites: vec![SiteConfig {
                name: "new-site".to_owned(),
                repo_url: "https://example.com/new.git".to_owned(),
                branch: "develop".to_owned(),
                webhook_token: "new-token".to_owned(),
                webhook_token_file: None,
                build_overrides: BuildOverrides::default(),
                poll_interval: None,
                build_timeout: None,
                cache_dirs: None,
                post_deploy: None,
                env: None,
                container_memory: None,
                container_cpus: None,
                container_pids_limit: None,
                container_network: "none".to_owned(),
                git_depth: None,
                container_workdir: None,
                config_file: None,
            }],
        };
        // Apply the same merge logic used in setup_sighup_handler
        let (old_listen, old_base, old_log_dir, old_log_level) = {
            let old_config = state.config.read().unwrap();
            (
                old_config.listen_address.clone(),
                old_config.base_dir.clone(),
                old_config.log_dir.clone(),
                old_config.log_level.clone(),
            )
        };
        let mut final_config = new_config;
        final_config.listen_address = old_listen;
        final_config.base_dir = old_base;
        final_config.log_dir = old_log_dir;
        final_config.log_level = old_log_level;
        *state.config.write().unwrap() = final_config;
        // Verify non-reloadable fields are preserved and reloadable fields are updated
        let (listen, base, log_d, log_l, runtime, max_builds, sites_len, site_name) = {
            let config = state.config.read().unwrap();
            (
                config.listen_address.clone(),
                config.base_dir.clone(),
                config.log_dir.clone(),
                config.log_level.clone(),
                config.container_runtime.clone(),
                config.max_builds_to_keep,
                config.sites.len(),
                config.sites[0].name.clone(),
            )
        };
        assert_eq!(listen, "127.0.0.1:8080");
        assert_eq!(base, PathBuf::from("/var/lib/witryna"));
        assert_eq!(log_d, PathBuf::from("/var/log/witryna"));
        assert_eq!(log_l, "info");
        assert_eq!(runtime, "docker");
        assert_eq!(max_builds, 10);
        assert_eq!(sites_len, 1);
        assert_eq!(site_name, "new-site");
    }

    // Site with an empty webhook_token: auth is disabled entirely.
    fn test_config_with_disabled_auth() -> Config {
        Config {
            sites: vec![SiteConfig {
                name: "open-site".to_owned(),
                repo_url: "https://github.com/user/open-site.git".to_owned(),
                branch: "main".to_owned(),
                webhook_token: String::new(),
                webhook_token_file: None,
                build_overrides: BuildOverrides::default(),
                poll_interval: None,
                build_timeout: None,
                cache_dirs: None,
                post_deploy: None,
                env: None,
                container_memory: None,
                container_cpus: None,
                container_pids_limit: None,
                container_network: "none".to_owned(),
                git_depth: None,
                container_workdir: None,
                config_file: None,
            }],
            ..test_config()
        }
    }

    #[tokio::test]
    async fn deploy_disabled_auth_returns_accepted() {
        let (server, _state, port) = test_server(test_config_with_disabled_auth());
        let client = reqwest::Client::new();
        // Request without Authorization header should succeed
        let resp = client
            .post(format!("http://127.0.0.1:{port}/open-site"))
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 202);
        server.unblock();
    }

    #[tokio::test]
    async fn deploy_disabled_auth_ignores_token() {
        let (server, _state, port) = test_server(test_config_with_disabled_auth());
        let client = reqwest::Client::new();
        // Request WITH a Bearer token should also succeed (token ignored)
        let resp = client
            .post(format!("http://127.0.0.1:{port}/open-site"))
            .header("Authorization", "Bearer any-token")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status().as_u16(), 202);
        server.unblock();
    }
}