diff --git a/README.md b/README.md index 6187960..1d57e34 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Server Manager - Next-Gen Media Server Orchestrator 🚀 -![Server Manager Banner](https://img.shields.io/badge/Status-Tested-brightgreen) ![Version](https://img.shields.io/badge/Version-1.0.7-blue) ![Rust](https://img.shields.io/badge/Built%20With-Rust-orange) ![Docker](https://img.shields.io/badge/Powered%20By-Docker-blue) +![Server Manager Banner](https://img.shields.io/badge/Status-Tested-brightgreen) ![Version](https://img.shields.io/badge/Version-1.0.8-blue) ![Rust](https://img.shields.io/badge/Built%20With-Rust-orange) ![Docker](https://img.shields.io/badge/Powered%20By-Docker-blue) **Server Manager** is a powerful and intelligent tool written in Rust to deploy, manage, and optimize a complete personal media and cloud server stack. It detects your hardware and automatically configures 28 Docker services for optimal performance. diff --git a/server_manager/Cargo.lock b/server_manager/Cargo.lock index 9d73bac..88fc67f 100644 --- a/server_manager/Cargo.lock +++ b/server_manager/Cargo.lock @@ -1288,7 +1288,7 @@ dependencies = [ [[package]] name = "server_manager" -version = "1.0.7" +version = "1.0.8" dependencies = [ "anyhow", "async-trait", diff --git a/server_manager/Cargo.toml b/server_manager/Cargo.toml index 475f80e..59a6547 100644 --- a/server_manager/Cargo.toml +++ b/server_manager/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "server_manager" -version = "1.0.7" +version = "1.0.8" edition = "2021" [dependencies] diff --git a/server_manager/src/core/config.rs b/server_manager/src/core/config.rs index c57c2a7..ed23fd1 100644 --- a/server_manager/src/core/config.rs +++ b/server_manager/src/core/config.rs @@ -1,17 +1,18 @@ use anyhow::{Context, Result}; -use log::info; +use log::{info, warn}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::fs; use std::path::Path; use std::sync::OnceLock; -use std::time::SystemTime; +use 
std::time::{Duration, Instant, SystemTime}; use tokio::sync::RwLock; #[derive(Debug, Clone)] struct CachedConfig { config: Config, last_mtime: Option, + last_check: Option, } static CONFIG_CACHE: OnceLock> = OnceLock::new(); @@ -27,7 +28,6 @@ impl Config { let path = Path::new("config.yaml"); if path.exists() { let content = fs::read_to_string(path).context("Failed to read config.yaml")?; - // If empty file, return default if content.trim().is_empty() { return Ok(Config::default()); } @@ -42,65 +42,126 @@ impl Config { RwLock::new(CachedConfig { config: Config::default(), last_mtime: None, + last_check: None, }) }); - // Fast path: Optimistic read { let guard = cache.read().await; - if let Some(cached_mtime) = guard.last_mtime { - // Check if file still matches - if let Ok(metadata) = tokio::fs::metadata("config.yaml").await { - if let Ok(modified) = metadata.modified() { - if modified == cached_mtime { - return Ok(guard.config.clone()); - } - } + if let Some(last_check) = guard.last_check { + if last_check.elapsed() < Duration::from_millis(500) { + return Ok(guard.config.clone()); } } } - // Slow path: Update cache let mut guard = cache.write().await; + if let Some(last_check) = guard.last_check { + if last_check.elapsed() < Duration::from_millis(500) { + return Ok(guard.config.clone()); + } + } - // Check metadata again (double-checked locking pattern) - let metadata_res = tokio::fs::metadata("config.yaml").await; + Self::reload_guard(&mut guard).await?; - match metadata_res { - Ok(metadata) => { - let modified = metadata.modified().unwrap_or(SystemTime::now()); + guard.last_check = Some(Instant::now()); + Ok(guard.config.clone()) + } - if let Some(cached_mtime) = guard.last_mtime { - if modified == cached_mtime { - return Ok(guard.config.clone()); - } - } + async fn reload_guard(guard: &mut tokio::sync::RwLockWriteGuard<'_, CachedConfig>) -> Result<()> { + let last_mtime = guard.last_mtime; - // Load file - match tokio::fs::read_to_string("config.yaml").await { 
- Ok(content) => { - let config = if content.trim().is_empty() { - Config::default() - } else { - serde_yaml_ng::from_str(&content) - .context("Failed to parse config.yaml")? - }; - - guard.config = config.clone(); - guard.last_mtime = Some(modified); - Ok(config) - } - Err(e) => Err(anyhow::Error::new(e).context("Failed to read config.yaml")), - } + // Use blocking IO inside spawn_blocking to be consistent with load() and robust + let res = tokio::task::spawn_blocking(move || -> Result<Option<(Config, Option<SystemTime>)>> { + let path = Path::new("config.yaml"); + let mtime = match std::fs::metadata(path) { + Ok(m) => m.modified().ok(), + Err(e) if e.kind() == std::io::ErrorKind::NotFound => None, + Err(e) => return Err(anyhow::Error::new(e).context("Failed to read config metadata")), + }; + + if mtime == last_mtime && last_mtime.is_some() { + return Ok(None); + } - Err(e) if e.kind() == std::io::ErrorKind::NotFound => { - // File not found -> Default - guard.config = Config::default(); - guard.last_mtime = None; - Ok(guard.config.clone()) + + // Reload + // load() handles file reading/parsing + match Self::load() { + Ok(cfg) => Ok(Some((cfg, mtime))), + Err(e) => { + // load() returns Err on a parse failure, but the previous + // load_async implementation deliberately swallowed reload errors + // and kept serving the cached config. Preserve that policy: + // log a warning and return Ok(None) so the caller keeps the + // existing cached value instead of surfacing a transient + // parse/read error to every config reader. + warn!("Failed to reload config: {}.
Preserving cache.", e); + Ok(None) + } } - Err(e) => Err(anyhow::Error::new(e).context("Failed to read config metadata")), + }).await.map_err(|e| anyhow::anyhow!("Task join error: {}", e))??; + + if let Some((cfg, mtime)) = res { + guard.config = cfg; + guard.last_mtime = mtime; } + Ok(()) + } + + pub async fn enable_service_async(service_name: String) -> Result<()> { + let cache = CONFIG_CACHE.get_or_init(|| { + RwLock::new(CachedConfig { + config: Config::default(), + last_mtime: None, + last_check: None, + }) + }); + + let mut guard = cache.write().await; + Self::reload_guard(&mut guard).await?; + + let mut config = guard.config.clone(); + + let (new_config, new_mtime) = tokio::task::spawn_blocking(move || -> Result<(Config, Option)> { + config.enable_service(&service_name); + config.save()?; + let mtime = std::fs::metadata("config.yaml").ok().and_then(|m| m.modified().ok()); + Ok((config, mtime)) + }).await.map_err(|e| anyhow::anyhow!("Task join error: {}", e))??; + + guard.config = new_config; + guard.last_mtime = new_mtime; + guard.last_check = Some(Instant::now()); + + Ok(()) + } + + pub async fn disable_service_async(service_name: String) -> Result<()> { + let cache = CONFIG_CACHE.get_or_init(|| { + RwLock::new(CachedConfig { + config: Config::default(), + last_mtime: None, + last_check: None, + }) + }); + + let mut guard = cache.write().await; + Self::reload_guard(&mut guard).await?; + + let mut config = guard.config.clone(); + + let (new_config, new_mtime) = tokio::task::spawn_blocking(move || -> Result<(Config, Option)> { + config.disable_service(&service_name); + config.save()?; + let mtime = std::fs::metadata("config.yaml").ok().and_then(|m| m.modified().ok()); + Ok((config, mtime)) + }).await.map_err(|e| anyhow::anyhow!("Task join error: {}", e))??; + + guard.config = new_config; + guard.last_mtime = new_mtime; + guard.last_check = Some(Instant::now()); + + Ok(()) } pub fn save(&self) -> Result<()> { diff --git a/server_manager/src/core/hardware.rs 
b/server_manager/src/core/hardware.rs index 09f3ff2..b148750 100644 --- a/server_manager/src/core/hardware.rs +++ b/server_manager/src/core/hardware.rs @@ -1,5 +1,5 @@ use log::{info, warn}; -use nix::unistd::User; +use nix::unistd::{Uid, User}; use std::path::Path; use sysinfo::{DiskExt, System, SystemExt}; use which::which; @@ -93,6 +93,15 @@ impl HardwareInfo { } } + // Try fallback with only SUDO_UID if present (e.g. if SUDO_GID missing) + if let Ok(uid_str) = std::env::var("SUDO_UID") { + if let Ok(uid) = uid_str.parse::() { + if let Ok(Some(user)) = User::from_uid(Uid::from_raw(uid)) { + return (user.uid.to_string(), user.gid.to_string()); + } + } + } + warn!("SUDO_USER not found or lookup failed. Defaulting to UID/GID 1000."); ("1000".to_string(), "1000".to_string()) } diff --git a/server_manager/src/core/users.rs b/server_manager/src/core/users.rs index 41870d0..0749138 100644 --- a/server_manager/src/core/users.rs +++ b/server_manager/src/core/users.rs @@ -6,7 +6,20 @@ use nix::unistd::Uid; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs; -use std::path::Path; +use std::path::{Path, PathBuf}; +use std::sync::OnceLock; +use std::time::{Duration, Instant, SystemTime}; +use tokio::sync::RwLock; + +#[derive(Debug)] +struct CachedUsers { + manager: UserManager, + last_mtime: Option, + last_check: Option, + loaded_path: Option, +} + +static USERS_CACHE: OnceLock> = OnceLock::new(); #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum Role { @@ -30,16 +43,144 @@ pub struct UserManager { impl UserManager { pub async fn load_async() -> Result { - tokio::task::spawn_blocking(Self::load).await? 
+ let cache = USERS_CACHE.get_or_init(|| { + RwLock::new(CachedUsers { + manager: UserManager::default(), + last_mtime: None, + last_check: None, + loaded_path: None, + }) + }); + + { + let guard = cache.read().await; + if let Some(last_check) = guard.last_check { + if last_check.elapsed() < Duration::from_millis(500) { + return Ok(guard.manager.clone()); + } + } + } + + let mut guard = cache.write().await; + if let Some(last_check) = guard.last_check { + if last_check.elapsed() < Duration::from_millis(500) { + return Ok(guard.manager.clone()); + } + } + + Self::reload_guard(&mut guard).await?; + + guard.last_check = Some(Instant::now()); + Ok(guard.manager.clone()) + } + + async fn reload_guard(guard: &mut tokio::sync::RwLockWriteGuard<'_, CachedUsers>) -> Result<()> { + let last_mtime = guard.last_mtime; + let last_path = guard.loaded_path.clone(); + + let res = tokio::task::spawn_blocking(move || -> Result, PathBuf)>> { + let path = Path::new("users.yaml"); + let fallback_path = Path::new("/opt/server_manager/users.yaml"); + let target = if fallback_path.exists() { fallback_path } else { path }; + + let mtime = std::fs::metadata(target).ok().and_then(|m| m.modified().ok()); + + if Some(target.to_path_buf()) == last_path && mtime == last_mtime && last_mtime.is_some() { + return Ok(None); + } + + let mgr = Self::load()?; + Ok(Some((mgr, mtime, target.to_path_buf()))) + }).await.map_err(|e| anyhow!("Task join error: {}", e))??; + + if let Some((mgr, mtime, path)) = res { + guard.manager = mgr; + guard.last_mtime = mtime; + guard.loaded_path = Some(path); + } + Ok(()) + } + + pub async fn add_user_async( + username: String, + password: String, + role: Role, + quota_gb: Option, + ) -> Result<()> { + let cache = USERS_CACHE.get_or_init(|| { + RwLock::new(CachedUsers { + manager: UserManager::default(), + last_mtime: None, + last_check: None, + loaded_path: None, + }) + }); + + let mut guard = cache.write().await; + + // Ensure latest state before modifying + 
Self::reload_guard(&mut guard).await?; + + let mut manager = guard.manager.clone(); + + let (new_manager, new_mtime) = tokio::task::spawn_blocking(move || -> Result<(UserManager, Option)> { + manager.add_user(&username, &password, role, quota_gb)?; + + // Get new mtime to update cache + let target = if Path::new("/opt/server_manager").exists() { + Path::new("/opt/server_manager/users.yaml") + } else { + Path::new("users.yaml") + }; + let mtime = std::fs::metadata(target).ok().and_then(|m| m.modified().ok()); + Ok((manager, mtime)) + }).await.map_err(|e| anyhow!("Task join error: {}", e))??; + + guard.manager = new_manager; + guard.last_mtime = new_mtime; + guard.last_check = Some(Instant::now()); + + Ok(()) + } + + pub async fn delete_user_async(username: String) -> Result<()> { + let cache = USERS_CACHE.get_or_init(|| { + RwLock::new(CachedUsers { + manager: UserManager::default(), + last_mtime: None, + last_check: None, + loaded_path: None, + }) + }); + + let mut guard = cache.write().await; + Self::reload_guard(&mut guard).await?; + + let mut manager = guard.manager.clone(); + + let (new_manager, new_mtime) = tokio::task::spawn_blocking(move || -> Result<(UserManager, Option)> { + manager.delete_user(&username)?; + + let target = if Path::new("/opt/server_manager").exists() { + Path::new("/opt/server_manager/users.yaml") + } else { + Path::new("users.yaml") + }; + let mtime = std::fs::metadata(target).ok().and_then(|m| m.modified().ok()); + Ok((manager, mtime)) + }).await.map_err(|e| anyhow!("Task join error: {}", e))??; + + guard.manager = new_manager; + guard.last_mtime = new_mtime; + guard.last_check = Some(Instant::now()); + + Ok(()) } pub fn load() -> Result { - // Try CWD or /opt/server_manager let path = Path::new("users.yaml"); let fallback_path = Path::new("/opt/server_manager/users.yaml"); - // Priority: /opt/server_manager/users.yaml > ./users.yaml - // This aligns with save() behavior which prefers /opt if available. 
let load_path = if fallback_path.exists() { Some(fallback_path) } else if path.exists() { @@ -59,16 +200,8 @@ impl UserManager { UserManager::default() }; - // Ensure default admin exists if no users if manager.users.is_empty() { info!("No users found. Creating default 'admin' user."); - // We use a generated secret for the initial password if secrets exist, - // otherwise generate one. - // Better: use 'admin' / 'admin' but WARN, or generate random. - // Let's generate a random one and print it, safer. - // Re-using secrets generation logic if possible, or just simple random. - // For simplicity in this context, let's look for a stored password or default to 'admin' and log a warning. - let pass = "admin"; let hash = hash(pass, DEFAULT_COST)?; manager.users.insert( @@ -88,7 +221,6 @@ impl UserManager { } pub fn save(&self) -> Result<()> { - // Prefer saving to /opt/server_manager if it exists/is writable, else CWD let target = if Path::new("/opt/server_manager").exists() { Path::new("/opt/server_manager/users.yaml") } else { @@ -111,7 +243,6 @@ impl UserManager { return Err(anyhow!("User already exists")); } - // System User Integration if Uid::effective().is_root() { system::create_system_user(username, password)?; if let Some(gb) = quota_gb { @@ -145,7 +276,6 @@ impl UserManager { return Err(anyhow!("Cannot delete the last admin user")); } - // System User Deletion if Uid::effective().is_root() { system::delete_system_user(username)?; } else { @@ -161,7 +291,6 @@ impl UserManager { pub fn update_password(&mut self, username: &str, new_password: &str) -> Result<()> { if let Some(user) = self.users.get_mut(username) { - // System Password Update if Uid::effective().is_root() { system::set_system_user_password(username, new_password)?; } else { diff --git a/server_manager/src/interface/web.rs b/server_manager/src/interface/web.rs index 1f677c3..76ea43e 100644 --- a/server_manager/src/interface/web.rs +++ b/server_manager/src/interface/web.rs @@ -17,7 +17,6 @@ use 
std::time::SystemTime; use sysinfo::{CpuExt, DiskExt, System, SystemExt}; use time::Duration; use tokio::process::Command; -use tokio::sync::RwLock; use tower_sessions::{Expiry, MemoryStore, Session, SessionManagerLayer}; #[derive(Serialize, Deserialize, Clone)] @@ -28,101 +27,13 @@ struct SessionUser { const SESSION_KEY: &str = "user"; -struct CachedConfig { - config: Config, - last_modified: Option, -} - -struct CachedUsers { - manager: UserManager, - last_modified: Option, -} - struct AppState { system: Mutex, last_system_refresh: Mutex, - config_cache: RwLock, - users_cache: RwLock, } type SharedState = Arc; -impl AppState { - async fn get_config(&self) -> Config { - // Fast path: check metadata - let current_mtime = tokio::fs::metadata("config.yaml") - .await - .and_then(|m| m.modified()) - .ok(); - - { - let cache = self.config_cache.read().await; - if cache.last_modified == current_mtime { - return cache.config.clone(); - } - } - - // Slow path: reload - let mut cache = self.config_cache.write().await; - - // Re-check mtime under write lock to avoid race - let current_mtime_2 = tokio::fs::metadata("config.yaml") - .await - .and_then(|m| m.modified()) - .ok(); - - if cache.last_modified == current_mtime_2 { - return cache.config.clone(); - } - - if let Ok(cfg) = Config::load_async().await { - cache.config = cfg; - cache.last_modified = current_mtime_2; - } - - cache.config.clone() - } - - async fn get_users(&self) -> UserManager { - // Determine path logic (matches UserManager::load) - let path = std::path::Path::new("users.yaml"); - let fallback_path = std::path::Path::new("/opt/server_manager/users.yaml"); - let file_path = if path.exists() { path } else { fallback_path }; - - // Fast path: check metadata - let current_mtime = tokio::fs::metadata(file_path).await - .and_then(|m| m.modified()) - .ok(); - - { - let cache = self.users_cache.read().await; - // If mtime matches (or both None), return cached - if cache.last_modified == current_mtime { - return 
cache.manager.clone(); - } - } - - // Slow path: reload - let mut cache = self.users_cache.write().await; - - // Re-check mtime under write lock - let current_mtime_2 = tokio::fs::metadata(file_path).await - .and_then(|m| m.modified()) - .ok(); - - if cache.last_modified == current_mtime_2 { - return cache.manager.clone(); - } - - if let Ok(mgr) = UserManager::load_async().await { - cache.manager = mgr; - cache.last_modified = current_mtime_2; - } - - cache.manager.clone() - } -} - pub async fn start_server(port: u16) -> anyhow::Result<()> { // Session setup let session_store = MemoryStore::default(); @@ -134,32 +45,9 @@ pub async fn start_server(port: u16) -> anyhow::Result<()> { let mut sys = System::new_all(); sys.refresh_all(); - let initial_config = Config::load().unwrap_or_default(); - let initial_config_mtime = std::fs::metadata("config.yaml") - .ok() - .and_then(|m| m.modified().ok()); - - let initial_users = UserManager::load().unwrap_or_default(); - let initial_users_mtime = std::fs::metadata("users.yaml") - .ok() - .and_then(|m| m.modified().ok()) - .or_else(|| { - std::fs::metadata("/opt/server_manager/users.yaml") - .ok() - .and_then(|m| m.modified().ok()) - }); - let app_state = Arc::new(AppState { system: Mutex::new(sys), last_system_refresh: Mutex::new(SystemTime::now()), - config_cache: RwLock::new(CachedConfig { - config: initial_config, - last_modified: initial_config_mtime, - }), - users_cache: RwLock::new(CachedUsers { - manager: initial_users, - last_modified: initial_users_mtime, - }), }); let app = Router::new() @@ -226,9 +114,19 @@ struct LoginPayload { password: String, } -async fn login_handler(State(state): State, session: Session, Form(payload): Form) -> impl IntoResponse { +async fn login_handler(session: Session, Form(payload): Form) -> impl IntoResponse { // Reload users on login attempt to get fresh data - let user_manager = state.get_users().await; + let user_manager = match UserManager::load_async().await { + Ok(m) => m, + Err(e) 
=> { + error!("Failed to load users during login: {}", e); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed to load user database", + ) + .into_response(); + } + }; if let Some(user) = user_manager.verify_async(&payload.username, &payload.password).await { let session_user = SessionUser { @@ -328,7 +226,7 @@ async fn dashboard(State(state): State, session: Session) -> impl I let is_admin = matches!(user.role, Role::Admin); let services = services::get_all_services(); - let config = state.get_config().await; + let config = Config::load_async().await.unwrap_or_default(); // System Stats let mut sys = state.system.lock().unwrap(); @@ -495,7 +393,7 @@ async fn dashboard(State(state): State, session: Session) -> impl I } // User Management Page -async fn users_page(State(state): State, session: Session) -> impl IntoResponse { +async fn users_page(session: Session) -> impl IntoResponse { let user: SessionUser = match session.get(SESSION_KEY).await { Ok(Some(u)) => u, _ => return Redirect::to("/login").into_response(), @@ -505,7 +403,18 @@ async fn users_page(State(state): State, session: Session) -> impl return Redirect::to("/").into_response(); } - let user_manager = state.get_users().await; + let user_manager = match UserManager::load_async().await { + Ok(m) => m, + Err(e) => { + error!("Failed to load users: {}", e); + return ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed to load user database", + ) + .into_response(); + } + }; + let mut html = String::with_capacity(4096); write_html_head(&mut html, "User Management - Server Manager"); @@ -605,7 +514,7 @@ struct AddUserPayload { quota: Option, } -async fn add_user_handler(State(state): State, session: Session, Form(payload): Form) -> impl IntoResponse { +async fn add_user_handler(session: Session, Form(payload): Form) -> impl IntoResponse { let session_user: SessionUser = match session.get(SESSION_KEY).await { Ok(Some(u)) => u, _ => return Redirect::to("/login").into_response(), @@ -626,29 +535,16 @@ async 
fn add_user_handler(State(state): State, session: Session, Fo None => None, }; - let mut cache = state.users_cache.write().await; - let res = tokio::task::block_in_place(|| { - cache.manager.add_user(&payload.username, &payload.password, role_enum, quota_val) - }); - - if let Err(e) = res { + if let Err(e) = UserManager::add_user_async(payload.username.clone(), payload.password.clone(), role_enum, quota_val).await { error!("Failed to add user: {}", e); - // In a real app we'd flash a message. Here just redirect. } else { info!("User {} added via Web UI by {}", payload.username, session_user.username); - // Update mtime to prevent unnecessary reload - let path = std::path::Path::new("users.yaml"); - let fallback_path = std::path::Path::new("/opt/server_manager/users.yaml"); - let file_path = if path.exists() { path } else { fallback_path }; - if let Ok(m) = std::fs::metadata(file_path) { - cache.last_modified = m.modified().ok(); - } } Redirect::to("/users").into_response() } -async fn delete_user_handler(State(state): State, session: Session, Path(username): Path) -> impl IntoResponse { +async fn delete_user_handler(session: Session, Path(username): Path) -> impl IntoResponse { let session_user: SessionUser = match session.get(SESSION_KEY).await { Ok(Some(u)) => u, _ => return Redirect::to("/login").into_response(), @@ -658,22 +554,10 @@ async fn delete_user_handler(State(state): State, session: Session, return (StatusCode::FORBIDDEN, "Access Denied").into_response(); } - let mut cache = state.users_cache.write().await; - let res = tokio::task::block_in_place(|| { - cache.manager.delete_user(&username) - }); - - if let Err(e) = res { + if let Err(e) = UserManager::delete_user_async(username.clone()).await { error!("Failed to delete user: {}", e); } else { info!("User {} deleted via Web UI by {}", username, session_user.username); - // Update mtime to prevent unnecessary reload - let path = std::path::Path::new("users.yaml"); - let fallback_path = 
std::path::Path::new("/opt/server_manager/users.yaml"); - let file_path = if path.exists() { path } else { fallback_path }; - if let Ok(m) = std::fs::metadata(file_path) { - cache.last_modified = m.modified().ok(); - } } Redirect::to("/users").into_response() @@ -697,6 +581,17 @@ async fn check_admin_role(session: Session, name: &str, enable: bool) -> impl In return (StatusCode::FORBIDDEN, "Access Denied: Admin role required").into_response(); } + // Update config first for immediate UI feedback + let res = if enable { + Config::enable_service_async(name.to_string()).await + } else { + Config::disable_service_async(name.to_string()).await + }; + + if let Err(e) = res { + error!("Failed to update config for service {}: {}", name, e); + } + run_cli_toggle(name, enable); Redirect::to("/").into_response() }