added backend unit tests

This commit is contained in:
Maya
2025-10-06 12:43:29 -04:00
parent e50bcb958e
commit e10e8fe47b
16 changed files with 1604 additions and 85 deletions
+329 -6
View File
@@ -390,6 +390,50 @@ dependencies = [
"piper",
]
[[package]]
name = "bollard"
version = "0.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d41711ad46fda47cd701f6908e59d1bd6b9a2b7464c0d0aeab95c6d37096ff8a"
dependencies = [
"base64 0.22.1",
"bollard-stubs",
"bytes",
"futures-core",
"futures-util",
"hex",
"http 1.3.1",
"http-body-util",
"hyper 1.6.0",
"hyper-named-pipe",
"hyper-util",
"hyperlocal",
"log",
"pin-project-lite",
"serde",
"serde_derive",
"serde_json",
"serde_repr",
"serde_urlencoded",
"thiserror 1.0.69",
"tokio",
"tokio-util",
"tower-service",
"url",
"winapi",
]
[[package]]
name = "bollard-stubs"
version = "1.45.0-rc.26.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d7c5415e3a6bc6d3e99eff6268e488fd4ee25e7b28c10f08fa6760bd9de16e4"
dependencies = [
"serde",
"serde_repr",
"serde_with",
]
[[package]]
name = "bumpalo"
version = "3.19.0"
@@ -727,6 +771,16 @@ dependencies = [
"zeroize",
]
[[package]]
name = "deranged"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d630bccd429a5bb5a64b5e94f693bfc48c9f8566418fda4c494cc94f911f87cc"
dependencies = [
"powerfmt",
"serde",
]
[[package]]
name = "dhcproto"
version = "0.13.0"
@@ -941,6 +995,18 @@ dependencies = [
"version_check",
]
[[package]]
name = "filetime"
version = "0.2.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed"
dependencies = [
"cfg-if",
"libc",
"libredox",
"windows-sys 0.60.2",
]
[[package]]
name = "fixedbitset"
version = "0.5.7"
@@ -1174,13 +1240,19 @@ dependencies = [
"futures-sink",
"futures-util",
"http 0.2.12",
"indexmap",
"indexmap 2.10.0",
"slab",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hashbrown"
version = "0.14.5"
@@ -1407,6 +1479,22 @@ dependencies = [
"pin-project-lite",
"smallvec",
"tokio",
"want",
]
[[package]]
name = "hyper-named-pipe"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278"
dependencies = [
"hex",
"hyper 1.6.0",
"hyper-util",
"pin-project-lite",
"tokio",
"tower-service",
"winapi",
]
[[package]]
@@ -1430,10 +1518,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
dependencies = [
"bytes",
"futures-channel",
"futures-core",
"futures-util",
"http 1.3.1",
"http-body 1.0.1",
"hyper 1.6.0",
"libc",
"pin-project-lite",
"socket2 0.6.0",
"tokio",
"tower-service",
"tracing",
]
[[package]]
name = "hyperlocal"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7"
dependencies = [
"hex",
"http-body-util",
"hyper 1.6.0",
"hyper-util",
"pin-project-lite",
"tokio",
"tower-service",
@@ -1596,6 +1704,17 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "indexmap"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
dependencies = [
"autocfg",
"hashbrown 0.12.3",
"serde",
]
[[package]]
name = "indexmap"
version = "2.10.0"
@@ -1604,6 +1723,7 @@ checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
dependencies = [
"equivalent",
"hashbrown 0.15.4",
"serde",
]
[[package]]
@@ -1728,6 +1848,7 @@ checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3"
dependencies = [
"bitflags 2.9.1",
"libc",
"redox_syscall",
]
[[package]]
@@ -1753,6 +1874,12 @@ version = "0.4.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
[[package]]
name = "linux-raw-sys"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039"
[[package]]
name = "litemap"
version = "0.8.0"
@@ -1953,6 +2080,7 @@ dependencies = [
"axum",
"base64ct",
"blob-uuid",
"bollard",
"chrono",
"cidr",
"clap",
@@ -1991,6 +2119,8 @@ dependencies = [
"sqlx",
"strum",
"strum_macros",
"tar",
"tempfile",
"thiserror 1.0.69",
"tokio",
"tokio-util",
@@ -2002,6 +2132,7 @@ dependencies = [
"url",
"uuid",
"validator",
"walkdir",
"webpki-roots 0.25.4",
"windows",
]
@@ -2062,6 +2193,12 @@ dependencies = [
"zeroize",
]
[[package]]
name = "num-conv"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
[[package]]
name = "num-integer"
version = "0.1.46"
@@ -2306,7 +2443,7 @@ checksum = "54acf3a685220b533e437e264e4d932cfbdc4cc7ec0cd232ed73c08d03b8a7ca"
dependencies = [
"fixedbitset",
"hashbrown 0.15.4",
"indexmap",
"indexmap 2.10.0",
"serde",
"serde_derive",
]
@@ -2467,6 +2604,12 @@ dependencies = [
"zerovec",
]
[[package]]
name = "powerfmt"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "ppv-lite86"
version = "0.2.21"
@@ -2532,7 +2675,7 @@ dependencies = [
"hex",
"lazy_static",
"procfs-core",
"rustix",
"rustix 0.38.44",
]
[[package]]
@@ -2646,6 +2789,26 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "ref-cast"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d"
dependencies = [
"ref-cast-impl",
]
[[package]]
name = "ref-cast-impl"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "regex"
version = "1.11.3"
@@ -2823,10 +2986,23 @@ dependencies = [
"bitflags 2.9.1",
"errno",
"libc",
"linux-raw-sys",
"linux-raw-sys 0.4.15",
"windows-sys 0.59.0",
]
[[package]]
name = "rustix"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e"
dependencies = [
"bitflags 2.9.1",
"errno",
"libc",
"linux-raw-sys 0.11.0",
"windows-sys 0.60.2",
]
[[package]]
name = "rustls"
version = "0.21.12"
@@ -2904,6 +3080,39 @@ version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "schemars"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f"
dependencies = [
"dyn-clone",
"ref-cast",
"serde",
"serde_json",
]
[[package]]
name = "schemars"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0"
dependencies = [
"dyn-clone",
"ref-cast",
"serde",
"serde_json",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
@@ -2962,6 +3171,17 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_repr"
version = "0.1.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "serde_spanned"
version = "0.6.9"
@@ -2983,6 +3203,25 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_with"
version = "3.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e"
dependencies = [
"base64 0.22.1",
"chrono",
"hex",
"indexmap 1.9.3",
"indexmap 2.10.0",
"schemars 0.9.0",
"schemars 1.0.4",
"serde",
"serde_derive",
"serde_json",
"time",
]
[[package]]
name = "sha1"
version = "0.10.6"
@@ -3135,7 +3374,7 @@ dependencies = [
"futures-util",
"hashbrown 0.15.4",
"hashlink 0.10.0",
"indexmap",
"indexmap 2.10.0",
"log",
"memchr",
"once_cell",
@@ -3414,6 +3653,30 @@ dependencies = [
"libc",
]
[[package]]
name = "tar"
version = "0.4.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a"
dependencies = [
"filetime",
"libc",
"xattr",
]
[[package]]
name = "tempfile"
version = "3.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16"
dependencies = [
"fastrand",
"getrandom 0.3.3",
"once_cell",
"rustix 1.1.2",
"windows-sys 0.60.2",
]
[[package]]
name = "thiserror"
version = "1.0.69"
@@ -3463,6 +3726,37 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "time"
version = "0.3.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d"
dependencies = [
"deranged",
"itoa",
"num-conv",
"powerfmt",
"serde",
"time-core",
"time-macros",
]
[[package]]
name = "time-core"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b"
[[package]]
name = "time-macros"
version = "0.2.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3"
dependencies = [
"num-conv",
"time-core",
]
[[package]]
name = "tiny-keccak"
version = "2.0.2"
@@ -3588,7 +3882,7 @@ version = "0.22.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
dependencies = [
"indexmap",
"indexmap 2.10.0",
"serde",
"serde_spanned",
"toml_datetime",
@@ -3935,6 +4229,16 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "want"
version = "0.3.1"
@@ -4109,6 +4413,15 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
"windows-sys 0.60.2",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
@@ -4449,6 +4762,16 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
[[package]]
name = "xattr"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156"
dependencies = [
"libc",
"rustix 1.1.2",
]
[[package]]
name = "yaml-rust2"
version = "0.8.1"
+7 -1
View File
@@ -118,6 +118,7 @@ dyn-eq = "0.1.3"
ipgen = "1.0.2"
validator = { version = "0.20", features = ["derive"] }
regex = "1.11.3"
tempfile = "3.23.0"
# === Platform-specific Dependencies ===
[target.'cfg(target_os = "linux")'.dependencies]
@@ -129,4 +130,9 @@ windows = { version = "0.52", features = [
"Win32_Foundation"
]}
openssl = { version = "0.10.73", features = ["vendored"] }
openssl = { version = "0.10.73", features = ["vendored"] }
[dev-dependencies]
bollard = "0.17" # Docker API client
walkdir = "2.5" # For directory traversal
tar = "0.4" # For creating build context
+7 -2
View File
@@ -115,10 +115,15 @@ async fn main() -> anyhow::Result<()> {
// Create self as host, register with server, and save daemon ID
discovery_service.run_self_report_discovery().await?;
let host_id = runtime_service.config_store.get_host_id().await?
let host_id = runtime_service
.config_store
.get_host_id()
.await?
.ok_or_else(|| anyhow::anyhow!("Host ID not set after self-report"))?;
runtime_service.register_with_server(host_id, daemon_id).await?;
runtime_service
.register_with_server(host_id, daemon_id)
.await?;
};
tracing::info!("✅ Daemon ID: {}", daemon_id);
@@ -422,11 +422,11 @@ impl DaemonDiscoveryService {
if let (Some(service), mut matched_ports) =
Service::from_discovery(ServiceFromDiscoveryParams {
service_definition,
ip: host_ip,
ip: &host_ip,
open_ports: &unclaimed_ports,
endpoint_responses: &endpoint_responses,
subnet: &subnet,
mac_address: mac,
mac_address: &mac,
host_id: &host.id,
interface_bindings: &interface_bindings,
matched_service_definitions: &matched_service_definitions,
+43
View File
@@ -250,3 +250,46 @@ impl ConfigStore {
config.clone()
}
}
#[cfg(test)]
mod tests {
use std::path::Path;
use crate::{daemon::shared::storage::AppConfig, tests::DAEMON_CONFIG_FIXTURE};
#[test]
fn test_daemon_config_backward_compatibility() {
// Try to load config from fixture (from latest release)
let config_path = Path::new(DAEMON_CONFIG_FIXTURE);
if config_path.exists() {
println!("Testing backward compatibility with fixture from latest release");
let config_json =
std::fs::read_to_string(config_path).expect("Failed to read daemon config fixture");
let loaded: Result<AppConfig, _> = serde_json::from_str(&config_json);
assert!(
loaded.is_ok(),
"Failed to load daemon config from latest release: {:?}",
loaded.err()
);
let config = loaded.unwrap();
// Verify required fields exist
assert!(!config.name.is_empty(), "Config name is empty");
assert!(config.port > 0, "Config port is invalid");
println!("✅ Successfully loaded daemon config from latest release");
} else {
println!(
"⚠️ No daemon config fixture found at {}",
DAEMON_CONFIG_FIXTURE
);
println!(" Run release workflow to generate fixtures");
assert!(false, "Failed to load config fixture");
}
}
}
+3
View File
@@ -1,2 +1,5 @@
pub mod daemon;
pub mod server;
#[cfg(test)]
pub mod tests;
+4 -27
View File
@@ -32,35 +32,12 @@ async fn create_host(
Json(request): Json<HostWithServicesRequest>,
) -> ApiResult<Json<ApiResponse<Host>>> {
let host_service = &state.services.host_service;
let service_service = &state.services.service_service;
let request_host = request.host.clone();
let (host, _) = host_service
.create_host_with_services(request.host, request.services)
.await?;
// Create host first (handles duplicates via upsert_host)
let mut created_host = host_service.create_host(request.host.base).await?;
// Create services, handling case where created_service was upserted from host in request instead of created anew and interfaces/ports were overwritten
let service_futures = request.services.into_iter().map(|mut service| {
service = service_service.transfer_service_to_new_host(
&mut service,
&request_host,
&created_host,
);
service_service.create_service(service)
});
let services = try_join_all(service_futures).await?;
// Add all successfully created/found services to the host
for service in &services {
if !created_host.base.services.contains(&service.id) {
created_host.base.services.push(service.id);
}
}
let host_with_final_services = host_service.update_host(created_host).await?;
Ok(Json(ApiResponse::success(host_with_final_services)))
Ok(Json(ApiResponse::success(host)))
}
async fn get_all_hosts(
+234 -5
View File
@@ -39,9 +39,42 @@ impl HostService {
self.storage.get_all().await
}
pub async fn create_host_with_services(
&self,
host: Host,
services: Vec<Service>,
) -> Result<(Host, Vec<Service>)> {
// Create host first (handles duplicates via upsert_host)
let mut created_host = self.create_host(&host.base).await?;
// Create services, handling case where created_host was upserted instead of created anew, which means that host ID + interfaces/port IDs
// are different from what's mapped to the service and they need to be updated
let service_futures = services.into_iter().map(|mut service| {
service = self.service_service.transfer_service_to_new_host(
&mut service,
&host,
&created_host,
);
self.service_service.create_service(service)
});
let services = try_join_all(service_futures).await?;
// Add all successfully created/found services to the host
for service in &services {
if !created_host.base.services.contains(&service.id) {
created_host.base.services.push(service.id);
}
}
let host_with_final_services = self.update_host(created_host).await?;
Ok((host_with_final_services, services))
}
/// Create a new host
pub async fn create_host(&self, host_base: HostBase) -> Result<Host> {
let host = Host::new(host_base);
async fn create_host(&self, host_base: &HostBase) -> Result<Host> {
let host = Host::new(host_base.clone());
let all_hosts = self.storage.get_all().await?;
@@ -77,7 +110,7 @@ impl HostService {
.await?
.ok_or_else(|| anyhow!("Host '{}' not found", host.id))?;
self.update_services(&current_host, &host).await?;
self.update_host_services(&current_host, &host).await?;
self.update_subnet_host_relationships(&current_host, true)
.await?;
@@ -91,6 +124,10 @@ impl HostService {
/// Merge new discovery data with existing host
async fn upsert_host(&self, mut existing_host: Host, new_host: Host) -> Result<Host> {
if existing_host.id == new_host.id {
return Err(anyhow!("Can't upsert a host with itself"));
}
let mut interface_updates = 0;
let mut port_updates = 0;
let mut hostname_update = false;
@@ -168,6 +205,10 @@ impl HostService {
destination_host: Host,
other_host: Host,
) -> Result<Host> {
if destination_host.id == other_host.id {
return Err(anyhow!("Can't consolidate a host with itself"));
}
let other_host_services = self
.service_service
.get_services_for_host(&other_host.id)
@@ -175,7 +216,7 @@ impl HostService {
let (other_host_name, other_host_id) = (&other_host.base.name, &other_host.id);
let updated_host = self
.upsert_host(destination_host, other_host.clone())
.upsert_host(destination_host.clone(), other_host.clone())
.await?;
let service_update_futures = other_host_services.into_iter().map(|mut s| {
@@ -184,6 +225,7 @@ impl HostService {
&other_host,
&updated_host,
);
self.service_service.update_service(s)
});
@@ -201,7 +243,11 @@ impl HostService {
Ok(updated_host)
}
pub async fn update_services(&self, current_host: &Host, updates: &Host) -> Result<(), Error> {
pub async fn update_host_services(
&self,
current_host: &Host,
updates: &Host,
) -> Result<(), Error> {
let services = self
.service_service
.get_services_for_host(&current_host.id)
@@ -300,3 +346,186 @@ impl HostService {
Ok(())
}
}
#[cfg(test)]
mod tests {
use crate::tests::*;
#[tokio::test]
async fn test_host_deduplication_on_create() {
let (storage, services) = test_services().await;
let start_host_count = storage.hosts.get_all().await.unwrap().len();
// Create first host
let host1 = host();
let (created1, _) = services
.host_service
.create_host_with_services(host1.clone(), vec![])
.await
.unwrap();
// Try to create duplicate (same interfaces)
let host2 = host();
let (created2, _) = services
.host_service
.create_host_with_services(host2.clone(), vec![])
.await
.unwrap();
// Should return same host (upserted)
assert_eq!(created1.id, created2.id);
// Verify only one host in DB
let end_host_count = storage.hosts.get_all().await.unwrap().len();
assert_eq!(start_host_count + 1, end_host_count);
}
#[tokio::test]
async fn test_host_upsert_merges_new_data() {
let (_, services) = test_services().await;
// Create host with one interface
let mut host1 = host();
let subnet1 = subnet();
services
.subnet_service
.create_subnet(subnet1.clone())
.await
.unwrap();
host1.base.interfaces = vec![interface(&subnet1.id)];
let (created, _) = services
.host_service
.create_host_with_services(host1.clone(), vec![])
.await
.unwrap();
// Create "duplicate" with additional interface
let mut host2 = host();
let subnet2 = subnet();
services
.subnet_service
.create_subnet(subnet2.clone())
.await
.unwrap();
host2.base.interfaces = vec![interface(&subnet1.id), interface(&subnet2.id)];
let (upserted, _) = services
.host_service
.create_host_with_services(host2.clone(), vec![])
.await
.unwrap();
// Should have merged interfaces
assert_eq!(upserted.id, created.id);
assert_eq!(upserted.base.interfaces.len(), 2);
}
#[tokio::test]
async fn test_host_consolidation() {
let (_, services) = test_services().await;
let subnet_obj = subnet();
services
.subnet_service
.create_subnet(subnet_obj.clone())
.await
.unwrap();
let mut host1 = host();
host1.base.interfaces = Vec::new();
let (created1, _) = services
.host_service
.create_host_with_services(host1.clone(), vec![])
.await
.unwrap();
let mut host2 = host();
host2.base.interfaces = vec![interface(&subnet_obj.id)];
let mut svc = service(&host2.id);
svc.base.port_bindings = vec![host2.base.ports[0].id];
svc.base.interface_bindings = vec![host2.base.interfaces[0].id];
let (created2, created_svcs) = services
.host_service
.create_host_with_services(host2.clone(), vec![svc])
.await
.unwrap();
let created_svc = &created_svcs[0];
// Consolidate host2 into host1
let consolidated = services
.host_service
.consolidate_hosts(created1.clone(), created2.clone())
.await
.unwrap();
// Host1 should have host2's service
assert!(consolidated.base.services.contains(&created_svc.id));
// Host2 should be deleted
let host2_after = services.host_service.get_host(&created2.id).await.unwrap();
assert!(host2_after.is_none());
// Service should now belong to host1
let svc_after = services
.service_service
.get_service(&created_svc.id)
.await
.unwrap()
.unwrap();
assert_eq!(svc_after.base.host_id, consolidated.id);
}
#[tokio::test]
async fn test_host_deletion_removes_subnet_relationships() {
let (_, services) = test_services().await;
let subnet_obj = subnet();
let created_subnet = services
.subnet_service
.create_subnet(subnet_obj.clone())
.await
.unwrap();
// Create host with interface on subnet
let mut host_obj = host();
host_obj.base.interfaces = vec![interface(&created_subnet.id)];
let (created_host, _) = services
.host_service
.create_host_with_services(host_obj.clone(), vec![])
.await
.unwrap();
// Subnet should have host relationship
let subnet_after_create = services
.subnet_service
.get_subnet(&created_subnet.id)
.await
.unwrap()
.unwrap();
assert!(subnet_after_create.base.hosts.contains(&created_host.id));
// Delete host (with services)
services
.host_service
.delete_host(&created_host.id, true)
.await
.unwrap();
// Subnet should no longer have host relationship
let subnet_after_delete = services
.subnet_service
.get_subnet(&created_subnet.id)
.await
.unwrap()
.unwrap();
assert!(!subnet_after_delete.base.hosts.contains(&created_host.id));
}
}
+158 -17
View File
@@ -37,28 +37,27 @@ impl ServiceService {
}
pub async fn create_service(&self, service: Service) -> Result<Service> {
let host_service = self
.host_service
.get()
.ok_or_else(|| anyhow::anyhow!("Host service not initialized"))?;
let all_hosts = host_service.get_all_hosts().await?;
let existing_services = self.get_services_for_host(&service.base.host_id).await?;
let service_from_storage = match existing_services.into_iter().find(|existing: &Service| {
if let (Some(existing_service_host), Some(new_service_host)) = (
all_hosts.iter().find(|h| h.id == existing.base.host_id),
all_hosts.iter().find(|h| h.id == service.base.host_id),
) {
let port_match = new_service_host
// Duplicate if being created for same host, has same definition, and same ports
let host_match = existing.base.host_id == service.base.host_id;
let definition_match =
service.base.service_definition == existing.base.service_definition;
let port_match = service
.base
.port_bindings
.iter()
.all(|p| existing.base.port_bindings.contains(p))
&& existing
.base
.ports
.port_bindings
.iter()
.any(|p| existing_service_host.base.ports.contains(p));
let definition_match =
service.base.service_definition == existing.base.service_definition;
return port_match && definition_match;
}
false
.all(|p| service.base.port_bindings.contains(p));
host_match && definition_match && port_match
}) {
Some(existing_service) => {
tracing::warn!(
@@ -352,3 +351,145 @@ impl ServiceService {
Ok(())
}
}
#[cfg(test)]
mod tests {
use crate::tests::*;
#[tokio::test]
async fn test_service_deduplication_on_create() {
let (_, services) = test_services().await;
let subnet_obj = subnet();
services
.subnet_service
.create_subnet(subnet_obj.clone())
.await
.unwrap();
// Create first service + host
let mut host_obj = host();
host_obj.base.interfaces = vec![interface(&subnet_obj.id)];
let svc1 = service(&host_obj.id);
let (created_host, created1) = services
.host_service
.create_host_with_services(host_obj.clone(), vec![svc1])
.await
.unwrap();
// Try to create duplicate (same definition + matching ports)
let svc2 = service(&created_host.id);
let created2 = services
.service_service
.create_service(svc2.clone())
.await
.unwrap();
// Should return same service (upserted)
assert_eq!(created1[0].id, created2.id);
// Verify only one service in DB
let all_services = services
.service_service
.get_services_for_host(&created_host.id)
.await
.unwrap();
assert_eq!(all_services.len(), 1);
}
#[tokio::test]
async fn test_service_update_maintains_subnet_relationships() {
let (_, services) = test_services().await;
let subnet_obj = subnet();
let created_subnet = services
.subnet_service
.create_subnet(subnet_obj.clone())
.await
.unwrap();
let mut host_obj = host();
host_obj.base.interfaces = vec![interface(&created_subnet.id)];
// Create router service (will add to subnet gateways)
let router_def =
crate::server::services::definitions::ServiceDefinitionRegistry::find_by_id("Router")
.unwrap();
let mut svc = service(&host_obj.id);
svc.base.service_definition = router_def;
svc.base.interface_bindings = vec![host_obj.base.interfaces[0].id];
svc.base.port_bindings = vec![host_obj.base.ports[0].id];
let (_, created_svcs) = services
.host_service
.create_host_with_services(host_obj.clone(), vec![svc])
.await
.unwrap();
// Subnet should have gateway relationship
let subnet_after = services
.subnet_service
.get_subnet(&created_subnet.id)
.await
.unwrap()
.unwrap();
assert!(!subnet_after.base.gateways.is_empty());
assert_eq!(subnet_after.base.gateways[0], created_svcs[0].id);
}
#[tokio::test]
async fn test_service_deletion_cleans_up_relationships() {
let (_, services) = test_services().await;
let subnet_obj = subnet();
let created_subnet = services
.subnet_service
.create_subnet(subnet_obj.clone())
.await
.unwrap();
let mut host_obj = host();
host_obj.base.interfaces = vec![interface(&created_subnet.id)];
// Create service in a group
let mut svc = service(&host_obj.id);
svc.base.port_bindings = vec![host_obj.base.ports[0].id];
svc.base.interface_bindings = vec![host_obj.base.interfaces[0].id];
let (created_host, created_svcs) = services
.host_service
.create_host_with_services(host_obj.clone(), vec![svc])
.await
.unwrap();
let created_svc = &created_svcs[0];
let mut group_obj = group();
group_obj.base.service_bindings =
vec![crate::server::hosts::types::targets::ServiceBinding {
service_id: created_svc.id,
port_id: created_host.base.ports[0].id,
interface_id: created_host.base.interfaces[0].id,
}];
let created_group = services
.group_service
.create_group(group_obj)
.await
.unwrap();
// Delete service
services
.service_service
.delete_service(&created_svc.id)
.await
.unwrap();
// Group should no longer have service binding
let group_after = services
.group_service
.get_group(&created_group.id)
.await
.unwrap()
.unwrap();
assert!(group_after.base.service_bindings.is_empty());
}
}
+4 -4
View File
@@ -47,11 +47,11 @@ pub struct Service {
pub struct ServiceFromDiscoveryParams<'a> {
pub service_definition: Box<dyn ServiceDefinition>,
pub ip: IpAddr,
pub ip: &'a IpAddr,
pub open_ports: &'a [PortBase],
pub endpoint_responses: &'a [EndpointResponse],
pub subnet: &'a Subnet,
pub mac_address: Option<MacAddress>,
pub mac_address: &'a Option<MacAddress>,
pub host_id: &'a Uuid,
pub interface_bindings: &'a [Uuid],
pub matched_service_definitions: &'a Vec<Box<dyn ServiceDefinition>>,
@@ -128,8 +128,8 @@ impl Service {
} = params;
if let Ok(result) = service_definition.discovery_pattern().matches(
open_ports.to_owned(),
endpoint_responses.to_owned(),
open_ports,
endpoint_responses,
ip,
subnet,
mac_address,
@@ -13,10 +13,10 @@ use std::hash::Hash;
// Main trait used in service definition implementation
pub trait ServiceDefinition: HasId + DynClone + DynHash + DynEq + Send + Sync {
/// Service name, will also be used as unique identifier. < 15 characters.
/// Service name, will also be used as unique identifier. < 25 characters.
fn name(&self) -> &'static str;
/// Service description. < 60 characters.
/// Service description. < 100 characters.
fn description(&self) -> &'static str;
/// Category from ServiceCategory enum
@@ -229,3 +229,131 @@ impl ServiceDefinition for DefaultServiceDefinition {
Pattern::None
}
}
#[cfg(test)]
mod tests {
use crate::server::services::{
definitions::ServiceDefinitionRegistry,
types::{definitions::ServiceDefinition, patterns::Pattern},
};
use std::collections::HashSet;
#[test]
fn test_all_service_definitions_register() {
// Get all registered services using inventory
let registry = ServiceDefinitionRegistry::all_service_definitions();
// Verify at least some services are registered
assert!(
!registry.is_empty(),
"No service definitions registered! Check inventory setup."
);
// Verify no duplicate names
let names: HashSet<_> = registry.iter().map(|s| s.name()).collect();
assert_eq!(
names.len(),
registry.len(),
"Duplicate service definition names found!"
);
// Print registered services for debugging
println!("Registered {} services:", registry.len());
for service in &registry {
println!(" - {}", service.name());
}
}
#[test]
fn test_service_definition_has_required_fields() {
let registry = ServiceDefinitionRegistry::all_service_definitions();
for service in registry {
// Every service must have non-empty name
assert!(!service.name().is_empty(), "Service has empty name");
// Name should be reasonable length (< 25 chars)
assert!(
service.name().len() < 25,
"Service name '{}' is too long; must be < 25 characters",
service.name()
);
// Every service must have description
assert!(
!service.description().is_empty(),
"Service '{}' has empty description",
service.name()
);
// Description should be reasonable length
assert!(
service.description().len() < 100,
"Service '{}' description is too long; must be < 100 characters",
service.name()
);
// Every service must have a category
let category = service.category();
// Just verify it doesn't panic
let _ = format!("{:?}", category);
// Discovery pattern must exist
let pattern = service.discovery_pattern();
// Verify pattern is one of the valid types
match pattern {
Pattern::Port(_)
| Pattern::AnyPort(_)
| Pattern::Endpoint(_)
| Pattern::AnyOf(_)
| Pattern::AllOf(_)
| Pattern::AllPort { .. }
| Pattern::IsGatewayIp
| Pattern::WebService(..)
| Pattern::None
| Pattern::MacVendor(..)
| Pattern::NotGatewayIp
| Pattern::SubnetIsType(_)
| Pattern::SubnetIsNotType(_)
| Pattern::IsVpnSubnetGateway
| Pattern::IsDockerHost
| Pattern::HasAnyMatchedService
| Pattern::AnyMatchedService(_)
| Pattern::AllMatchedService(_) => {
// Valid pattern
}
}
}
}
#[test]
fn test_service_definition_serialization() {
    let registry = ServiceDefinitionRegistry::all_service_definitions();
    // Round-trip a handful of definitions through JSON to verify serde
    // support stays intact (first 5 only, to keep the test fast).
    for service in registry.iter().take(5) {
        // `unwrap_or_else` + `panic!` avoids allocating the failure message
        // on the success path (clippy::expect_fun_call) and also surfaces
        // the underlying serde error, which `.expect(&format!(..))` dropped.
        let json = serde_json::to_string(&service)
            .unwrap_or_else(|e| panic!("Failed to serialize {}: {}", service.name(), e));
        let deserialized: Box<dyn ServiceDefinition> = serde_json::from_str(&json)
            .unwrap_or_else(|e| panic!("Failed to deserialize {}: {}", service.name(), e));
        // Identity fields must survive the round trip.
        assert_eq!(
            service.name(),
            deserialized.name(),
            "Name mismatch after serialization"
        );
        assert_eq!(
            service.description(),
            deserialized.description(),
            "Description mismatch after serialization"
        );
    }
}
}
+201 -19
View File
@@ -50,8 +50,8 @@ pub enum Pattern {
fn web_service_endpoint_responses(
ip: Option<IpAddr>,
path: &&str,
resp: &&str,
path: &str,
resp: &str,
) -> Vec<EndpointResponse> {
vec![
EndpointResponse {
@@ -86,11 +86,11 @@ impl Vendor {
impl Pattern {
pub fn matches(
&self,
open_ports: Vec<PortBase>,
responses: Vec<EndpointResponse>,
ip: IpAddr,
open_ports: &[PortBase],
responses: &[EndpointResponse],
ip: &IpAddr,
subnet: &Subnet,
mac_address: Option<MacAddress>,
mac_address: &Option<MacAddress>,
matched_service_definitions: &Vec<Box<dyn ServiceDefinition>>,
) -> Result<Vec<Option<Port>>, Error> {
// Return ports that matched if any
@@ -154,8 +154,8 @@ impl Pattern {
.iter()
.filter_map(|p| {
match p.matches(
open_ports.clone(),
responses.clone(),
open_ports,
responses,
ip,
subnet,
mac_address,
@@ -184,8 +184,8 @@ impl Pattern {
.iter()
.filter_map(|p| {
match p.matches(
open_ports.clone(),
responses.clone(),
open_ports,
responses,
ip,
subnet,
mac_address,
@@ -210,9 +210,9 @@ impl Pattern {
Pattern::AnyPort(port_bases) => {
let matched_ports: Vec<Option<Port>> = open_ports
.into_iter()
.iter()
.filter(|p| port_bases.contains(p))
.map(|p| Some(Port::new(p)))
.map(|p| Some(Port::new(p.clone())))
.collect();
if matched_ports.is_empty() {
@@ -224,9 +224,9 @@ impl Pattern {
Pattern::AllPort(port_bases) => {
let matched_ports: Vec<Option<Port>> = open_ports
.into_iter()
.iter()
.filter(|p| port_bases.contains(p))
.map(|p| Some(Port::new(p)))
.map(|p| Some(Port::new(p.clone())))
.collect();
if matched_ports.len() == port_bases.len() {
@@ -237,7 +237,7 @@ impl Pattern {
}
Pattern::WebService(path, resp) => {
let endpoints = web_service_endpoint_responses(Some(ip), path, resp)
let endpoints = web_service_endpoint_responses(Some(*ip), path, resp)
.into_iter()
.map(Pattern::Endpoint)
.collect();
@@ -304,8 +304,8 @@ impl Pattern {
Pattern::IsVpnSubnetGateway => {
let gateway_result = Pattern::IsGatewayIp.matches(
open_ports.clone(),
responses.clone(),
open_ports,
responses,
ip,
subnet,
mac_address,
@@ -321,8 +321,8 @@ impl Pattern {
Pattern::IsDockerHost => {
let gateway_result = Pattern::IsGatewayIp.matches(
open_ports.clone(),
responses.clone(),
open_ports,
responses,
ip,
subnet,
mac_address,
@@ -407,3 +407,185 @@ impl Pattern {
}
}
}
#[cfg(test)]
mod tests {
    use std::net::{IpAddr, Ipv4Addr};

    use crate::{
        server::{
            hosts::types::ports::PortBase,
            services::{
                definitions::ServiceDefinitionRegistry,
                types::{
                    endpoints::{Endpoint, EndpointResponse},
                    patterns::Pattern,
                },
            },
        },
        tests::subnet,
    };

    /// Pi-hole discovery requires DNS ports plus the /admin web endpoint;
    /// a host with only port 80 open must not match.
    // Plain #[test]: nothing here is awaited, so the tokio runtime
    // previously spun up by #[tokio::test] was unnecessary.
    #[test]
    fn test_pattern_port_matching() {
        let subnet = subnet();
        let pi =
            ServiceDefinitionRegistry::find_by_id("Pi-Hole").expect("Pi-hole service not found");
        let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 35));
        let open_ports = vec![PortBase::DnsUdp, PortBase::DnsTcp];
        let responses = vec![EndpointResponse {
            endpoint: Endpoint::http(Some(ip), "/admin"),
            response: "Pi-hole".to_string(),
        }];
        let result =
            pi.discovery_pattern()
                .matches(&open_ports, &responses, &ip, &subnet, &None, &vec![]);
        assert!(
            result.is_ok(),
            "Pi-hole pattern should match port 53 and admin endpoint"
        );
        // With only port 80 open the DNS requirement is not satisfied.
        let open_ports = vec![PortBase::new_tcp(80)];
        let result = pi
            .discovery_pattern()
            .matches(&open_ports, &responses, &ip, &subnet, &None, &vec![]);
        // Fixed assertion message: it previously said "SSH pattern" but
        // this test exercises the Pi-hole pattern.
        assert!(result.is_err(), "Pi-hole pattern should not match port 80");
    }

    /// AllOf requires every sub-pattern to match (logical AND).
    #[test]
    fn test_pattern_and_logic() {
        let subnet = subnet();
        let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 35));
        // AND pattern requiring both port 80 and 443.
        let pattern = Pattern::AllOf(vec![
            Pattern::Port(PortBase::new_tcp(80)),
            Pattern::Port(PortBase::new_tcp(443)),
        ]);
        // Both ports open - should match.
        let open_ports = vec![PortBase::new_tcp(80), PortBase::new_tcp(443)];
        let result = pattern.matches(&open_ports, &vec![], &ip, &subnet, &None, &vec![]);
        assert!(
            result.is_ok(),
            "AND pattern should match when both conditions met"
        );
        // Only one port open - should not match.
        let open_ports = vec![PortBase::new_tcp(80)];
        let result = pattern.matches(&open_ports, &vec![], &ip, &subnet, &None, &vec![]);
        assert!(
            result.is_err(),
            "AND pattern should not match when only one condition met"
        );
        // Neither port open - should not match.
        let open_ports = vec![PortBase::new_tcp(22)];
        let result = pattern.matches(&open_ports, &vec![], &ip, &subnet, &None, &vec![]);
        assert!(
            result.is_err(),
            "AND pattern should not match when no conditions met"
        );
    }

    /// AnyOf matches when at least one sub-pattern matches (logical OR).
    #[test]
    fn test_pattern_or_logic() {
        let subnet = subnet();
        let ip = IpAddr::V4(Ipv4Addr::new(192, 168, 4, 35));
        // OR pattern for database ports (MySQL or PostgreSQL).
        let pattern = Pattern::AnyOf(vec![
            Pattern::Port(PortBase::new_tcp(3306)), // MySQL
            Pattern::Port(PortBase::new_tcp(5432)), // PostgreSQL
        ]);
        // MySQL port only - should match.
        let open_ports = vec![PortBase::new_tcp(3306)];
        let result = pattern.matches(&open_ports, &vec![], &ip, &subnet, &None, &vec![]);
        assert!(result.is_ok(), "OR pattern should match MySQL port");
        // PostgreSQL port only - should match.
        let open_ports = vec![PortBase::new_tcp(5432)];
        let result = pattern.matches(&open_ports, &vec![], &ip, &subnet, &None, &vec![]);
        assert!(result.is_ok(), "OR pattern should match PostgreSQL port");
        // Both ports - should match.
        let open_ports = vec![PortBase::new_tcp(3306), PortBase::new_tcp(5432)];
        let result = pattern.matches(&open_ports, &vec![], &ip, &subnet, &None, &vec![]);
        assert!(result.is_ok(), "OR pattern should match with both ports");
        // Neither port - should not match.
        let open_ports = vec![PortBase::new_tcp(22)];
        let result = pattern.matches(&open_ports, &vec![], &ip, &subnet, &None, &vec![]);
        assert!(
            result.is_err(),
            "OR pattern should not match when no conditions met"
        );
    }
}
+12
View File
@@ -0,0 +1,12 @@
{
"server_target": "127.0.0.1",
"server_port": 60072,
"port": 60073,
"name": "netvisor-daemon",
"log_level": "info",
"heartbeat_interval": 30,
"bind_address": "0.0.0.0",
"id": "62af6b18-02cd-4aa5-87f4-0fad0e56a156",
"last_heartbeat": "2025-10-04T01:15:28.244836Z",
"host_id": "2ac6bc92-cd1d-4dff-80ca-cdcfacf325b2"
}
+70
View File
@@ -0,0 +1,70 @@
use crate::server::shared::storage::DatabaseMigrations;
use sqlx::SqlitePool;
use std::path::Path;
use crate::tests::SERVER_DB_FIXTURE;
#[tokio::test]
async fn test_database_schema_backward_compatibility() {
    let db_path = Path::new(SERVER_DB_FIXTURE);
    // The fixture is produced by the release workflow; without it there is
    // nothing to verify, so fail loudly. `panic!` replaces the former
    // `assert!(false, ..)` (clippy::assertions_on_constants).
    if !db_path.exists() {
        println!("⚠️ No database fixture found at {}", SERVER_DB_FIXTURE);
        println!("   Run release workflow to generate fixtures");
        panic!("Failed to load database fixture");
    }
    println!("Testing backward compatibility with database from latest release");
    // Connect to the fixture database from the previous release.
    let db_url = format!("sqlite:{}", db_path.display());
    let pool = SqlitePool::connect(&db_url)
        .await
        .expect("Failed to connect to fixture database");
    // Current code must be able to read every table written by the previous
    // release. One loop replaces five copy-pasted query/assert stanzas.
    for table in ["hosts", "services", "subnets", "groups", "daemons"] {
        let result = sqlx::query(&format!("SELECT * FROM {}", table))
            .fetch_all(&pool)
            .await;
        assert!(
            result.is_ok(),
            "Failed to read {} table: {:?}",
            table,
            result.err()
        );
    }
    println!("✅ Successfully read all tables from latest release database");
    // Applying the current schema/migrations to the old database must succeed.
    DatabaseMigrations::initialize(&pool)
        .await
        .expect("Failed to apply current schema to old database");
    println!("✅ Successfully applied current schema to old database");
}
+132
View File
@@ -0,0 +1,132 @@
use crate::server::{
config::{AppState, ServerConfig},
daemons::types::base::{Daemon, DaemonBase},
discovery::manager::DiscoverySessionManager,
groups::types::{Group, GroupBase, GroupType},
hosts::types::{
base::{Host, HostBase},
interfaces::{Interface, InterfaceBase},
ports::{Port, PortBase},
targets::HostTarget,
},
services::{
definitions::ServiceDefinitionRegistry,
types::base::{Service, ServiceBase},
},
shared::{services::ServiceFactory, types::storage::StorageFactory},
subnets::types::base::{Subnet, SubnetBase, SubnetSource, SubnetType},
utils::base::{NetworkUtils, ServerNetworkUtils},
};
use axum::Router;
use cidr::IpCidr;
use cidr::Ipv4Cidr;
use mac_address::MacAddress;
use sqlx::SqlitePool;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::sync::Arc;
use uuid::Uuid;
#[cfg(test)]
pub mod database;
pub const DAEMON_CONFIG_FIXTURE: &str = "src/tests/daemon_config.json";
pub const SERVER_DB_FIXTURE: &str = "src/tests/netvisor.db";
/// Build a fixture `Host` named "Test Host" with one interface on a fresh
/// subnet id and TCP/22 open.
pub fn host() -> Host {
    let base = HostBase {
        name: "Test Host".to_string(),
        hostname: Some("test.local".to_string()),
        description: None,
        target: HostTarget::Hostname,
        interfaces: vec![interface(&Uuid::new_v4())],
        services: vec![],
        ports: vec![Port::new(PortBase::new_tcp(22))],
    };
    Host::new(base)
}
/// Build a fixture `Interface` ("eth0", 192.168.1.100, fixed MAC) attached
/// to the given subnet.
pub fn interface(subnet_id: &Uuid) -> Interface {
    let ip_address = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100));
    let mac_address = MacAddress::new([1, 2, 3, 4, 5, 6]);
    Interface::new(InterfaceBase {
        subnet_id: *subnet_id,
        ip_address,
        mac_address: Some(mac_address),
        name: Some("eth0".to_string()),
    })
}
/// Build a fixture LAN `Subnet` covering 192.168.1.0/24 with no attached
/// hosts, resolvers, gateways, or reverse proxies.
pub fn subnet() -> Subnet {
    let cidr = IpCidr::V4(Ipv4Cidr::new(Ipv4Addr::new(192, 168, 1, 0), 24).unwrap());
    Subnet::new(SubnetBase {
        name: "Test Subnet".to_string(),
        description: None,
        cidr,
        subnet_type: SubnetType::Lan,
        source: SubnetSource::System,
        hosts: vec![],
        dns_resolvers: vec![],
        gateways: vec![],
        reverse_proxies: vec![],
    })
}
/// Build a fixture `Service` for the given host. Prefers the "Dns Server"
/// definition and falls back to the first registered definition, so the
/// fixture never panics when the registry changes.
pub fn service(host_id: &Uuid) -> Service {
    let service_definition = match ServiceDefinitionRegistry::find_by_id("Dns Server") {
        Some(definition) => definition,
        None => ServiceDefinitionRegistry::all_service_definitions()[0].clone(),
    };
    Service::new(ServiceBase {
        name: "Test Service".to_string(),
        host_id: *host_id,
        port_bindings: vec![],
        interface_bindings: vec![],
        service_definition,
    })
}
/// Build an empty fixture `Group` of type `NetworkPath`.
pub fn group() -> Group {
    let base = GroupBase {
        name: "Test Group".to_string(),
        description: None,
        group_type: GroupType::NetworkPath,
        service_bindings: vec![],
    };
    Group::new(base)
}
/// Build a fixture `Daemon` with a fresh id, bound to the given host at
/// 192.168.1.50:60073.
pub fn daemon(host_id: &Uuid) -> Daemon {
    let base = DaemonBase {
        host_id: *host_id,
        ip: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 50)),
        port: 60073,
    };
    Daemon::new(Uuid::new_v4(), base)
}
/// Create an in-memory SQLite pool with all migrations applied.
// NOTE(review): the bare ":memory:" connect string appears to work here,
// but sqlx documents the URL form "sqlite::memory:" — confirm intentional.
pub async fn setup_test_db() -> SqlitePool {
    use crate::server::shared::storage::DatabaseMigrations;
    let pool = SqlitePool::connect(":memory:").await.unwrap();
    DatabaseMigrations::initialize(&pool).await.unwrap();
    pool
}
/// Create a `StorageFactory` backed by an in-memory SQLite database.
pub async fn test_storage() -> StorageFactory {
    let factory = StorageFactory::new_sqlite(":memory:").await;
    factory.unwrap()
}
/// Create a storage factory plus the service layer built on top of it,
/// returning both so callers can exercise either level.
pub async fn test_services() -> (StorageFactory, ServiceFactory) {
    let storage = test_storage().await;
    let services = ServiceFactory::new(&storage)
        .await
        .unwrap();
    (storage, services)
}
/// Build the full axum router wired to a fresh `AppState` (default config,
/// new discovery manager, new network utils) for handler-level tests.
pub async fn setup_test_app() -> Router<Arc<AppState>> {
    let state = AppState::new(
        ServerConfig::default(),
        DiscoverySessionManager::new(),
        ServerNetworkUtils::new(),
    )
    .await
    .unwrap();
    crate::server::shared::handlers::create_router().with_state(state)
}
+268
View File
@@ -0,0 +1,268 @@
// #[tokio::test]
// #[ignore] // Run with --ignored flag: cargo test --ignored
// async fn test_container_daemon_server_integration() {
// use bollard::Docker;
// use bollard::container::{Config, CreateContainerOptions, StartContainerOptions};
// use bollard::network::CreateNetworkOptions;
// use bollard::image::BuildImageOptions;
// use bollard::models::HostConfig;
// let docker = Docker::connect_with_local_defaults()
// .expect("Failed to connect to Docker - is Docker running?");
// let network_name = "netvisor-test-network";
// docker.create_network(CreateNetworkOptions {
// name: network_name,
// driver: "bridge",
// ..Default::default()
// }).await.ok();
// println!("Building server image from local files...");
// let server_context = create_build_context("./backend").await;
// let build_server_options = BuildImageOptions {
// dockerfile: "Dockerfile",
// t: "netvisor-server-test",
// rm: true,
// ..Default::default()
// };
// let mut build_stream = docker.build_image(build_server_options, None, Some(server_context.into()));
// while let Some(build_info) = build_stream.next().await {
// match build_info {
// Ok(info) => {
// if let Some(stream) = info.stream {
// print!("{}", stream);
// }
// if let Some(error) = info.error {
// panic!("Server build failed: {}", error);
// }
// }
// Err(e) => panic!("Server build error: {}", e),
// }
// }
// println!("Building daemon image...");
// let daemon_context = create_build_context("./backend").await;
// let build_daemon_options = BuildImageOptions {
// dockerfile: "Dockerfile",
// t: "netvisor-daemon-test",
// rm: true,
// ..Default::default()
// };
// let mut build_stream = docker.build_image(build_daemon_options, None, Some(daemon_context.into()));
// while let Some(build_info) = build_stream.next().await {
// match build_info {
// Ok(info) => {
// if let Some(stream) = info.stream {
// print!("{}", stream);
// }
// if let Some(error) = info.error {
// panic!("Daemon build failed: {}", error);
// }
// }
// Err(e) => panic!("Daemon build error: {}", e),
// }
// }
// println!("Starting server container...");
// let server_config = Config {
// image: Some("netvisor-server-test:latest"),
// env: Some(vec![
// "NETVISOR_PORT=60072",
// "NETVISOR_LOG_LEVEL=debug",
// "NETVISOR_DATABASE_PATH=/data/netvisor.db",
// ]),
// host_config: Some(HostConfig {
// network_mode: Some(network_name.to_string()),
// binds: Some(vec!["netvisor-test-data:/data".to_string()]),
// ..Default::default()
// }),
// ..Default::default()
// };
// let server_container = docker
// .create_container(
// Some(CreateContainerOptions {
// name: "netvisor-test-server",
// ..Default::default()
// }),
// server_config,
// )
// .await
// .expect("Failed to create server container");
// docker
// .start_container(&server_container.id, None::<StartContainerOptions<String>>)
// .await
// .expect("Failed to start server container");
// let server_inspect = docker
// .inspect_container(&server_container.id, None)
// .await
// .expect("Failed to inspect server");
// let server_ip = server_inspect
// .network_settings
// .and_then(|ns| ns.networks)
// .and_then(|networks| networks.get(network_name).cloned())
// .and_then(|network| network.ip_address)
// .expect("Failed to get server IP");
// println!("Server IP: {}", server_ip);
// println!("Waiting for server to start...");
// tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
// println!("Starting daemon container...");
// let daemon_config = Config {
// image: Some("netvisor-daemon-test:latest"),
// cmd: Some(vec![
// "/app/daemon",
// "--server-target", &server_ip,
// "--server-port", "60072",
// ]),
// env: Some(vec!["RUST_LOG=debug"]),
// host_config: Some(HostConfig {
// network_mode: Some(network_name.to_string()),
// binds: Some(vec!["netvisor-test-daemon-config:/root/.config/netvisor/daemon".to_string()]),
// ..Default::default()
// }),
// ..Default::default()
// };
// let daemon_container = docker
// .create_container(
// Some(CreateContainerOptions {
// name: "netvisor-test-daemon",
// ..Default::default()
// }),
// daemon_config,
// )
// .await
// .expect("Failed to create daemon container");
// docker
// .start_container(&daemon_container.id, None::<StartContainerOptions<String>>)
// .await
// .expect("Failed to start daemon container");
// println!("Waiting for daemon to register...");
// tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
// // Check logs
// println!("Checking server logs...");
// let mut log_stream = docker.logs::<String>(
// &server_container.id,
// Some(bollard::container::LogsOptions {
// stdout: true,
// stderr: true,
// follow: false,
// tail: "all",
// ..Default::default()
// }),
// );
// let mut server_logs = String::new();
// while let Some(log) = log_stream.next().await {
// if let Ok(log_output) = log {
// let log_str = log_output.to_string();
// server_logs.push_str(&log_str);
// print!("{}", log_str);
// }
// }
// println!("\nChecking daemon logs...");
// let mut log_stream = docker.logs::<String>(
// &daemon_container.id,
// Some(bollard::container::LogsOptions {
// stdout: true,
// stderr: true,
// follow: false,
// tail: "all",
// ..Default::default()
// }),
// );
// let mut daemon_logs = String::new();
// while let Some(log) = log_stream.next().await {
// if let Ok(log_output) = log {
// let log_str = log_output.to_string();
// daemon_logs.push_str(&log_str);
// print!("{}", log_str);
// }
// }
// // Cleanup
// println!("\nCleaning up...");
// docker.stop_container(&daemon_container.id, None).await.ok();
// docker.stop_container(&server_container.id, None).await.ok();
// docker.remove_container(&daemon_container.id, None).await.ok();
// docker.remove_container(&server_container.id, None).await.ok();
// docker.remove_network(network_name).await.ok();
// // Verify registration happened
// let found_registration =
// server_logs.contains("register") ||
// server_logs.contains("Register") ||
// server_logs.contains("daemon") ||
// daemon_logs.contains("registered") ||
// daemon_logs.contains("Registered") ||
// daemon_logs.contains("Server at");
// assert!(
// found_registration,
// "Daemon did not successfully register with server. Check logs above."
// );
// println!("✅ Container integration test passed!");
// }
// // Helper function to create tar archive for Docker build context
// async fn create_build_context(path: &str) -> Vec<u8> {
// use std::fs;
// use walkdir::WalkDir;
// use std::io::Read;
// let mut tar_data = Vec::new();
// let mut tar_builder = Builder::new(&mut tar_data);
// let base_path = PathBuf::from(path);
// // Walk the directory and add files to tar
// for entry in WalkDir::new(&base_path)
// .into_iter()
// .filter_map(|e| e.ok())
// .filter(|e| e.file_type().is_file())
// {
// let file_path = entry.path();
// // Skip target directory and other build artifacts
// if file_path.to_str().unwrap().contains("/target/") ||
// file_path.to_str().unwrap().contains("/.git/") ||
// file_path.to_str().unwrap().contains("/node_modules/") {
// continue;
// }
// let relative_path = file_path.strip_prefix(&base_path).unwrap();
// let mut file = fs::File::open(file_path).unwrap();
// let metadata = file.metadata().unwrap();
// let mut header = Header::new_gnu();
// header.set_path(relative_path).unwrap();
// header.set_size(metadata.len());
// header.set_mode(0o644);
// header.set_cksum();
// tar_builder.append(&header, &mut file).unwrap();
// }
// tar_builder.finish().unwrap();
// drop(tar_builder);
// tar_data
// }