chore: fix integration test, improve logging throughout app

This commit is contained in:
Maya
2025-11-16 18:37:43 -05:00
parent ef12ec4509
commit 843e6c822b
19 changed files with 703 additions and 306 deletions
+1 -1
View File
@@ -65,7 +65,7 @@ dev-container-rebuild-clean:
docker compose -f docker-compose.dev.yml up
dev-down:
docker compose -f docker-compose.dev.yml down --volumes --rmi all
docker compose -f docker-compose.dev.yml down --volumes --rmi local
build:
@echo "Building Server + UI Docker image..."
+37 -12
View File
@@ -177,7 +177,7 @@ pub trait RunsDiscovery: AsRef<DaemonDiscoveryService> + Send + Sync {
);
}
tracing::debug!(
tracing::trace!(
"Discovery update reported for session {}",
session.info.session_id
);
@@ -252,6 +252,15 @@ pub trait DiscoversNetworkedEntities:
})
.await?;
let session = self.as_ref().get_session().await?;
tracing::info!(
session_id = %session.info.session_id,
discovery_type = ?self.discovery_type(),
total_to_process = %session.info.total_to_process,
"Discovery session started"
);
Ok(())
}
@@ -269,7 +278,11 @@ pub trait DiscoversNetworkedEntities:
match &discovery_result {
Ok(_) => {
tracing::info!("Discovery session {} completed successfully", session_id);
tracing::info!(
session_id = %session_id,
processed = %final_processed_count,
"Discovery session completed successfully"
);
self.report_discovery_update(DiscoverySessionUpdate {
phase: DiscoveryPhase::Complete,
processed: final_processed_count,
@@ -279,7 +292,11 @@ pub trait DiscoversNetworkedEntities:
.await?;
}
Err(_) if cancel.is_cancelled() => {
tracing::warn!("Discovery session {} was cancelled", session_id);
tracing::warn!(
session_id = %session_id,
processed = %final_processed_count,
"Discovery session cancelled"
);
self.report_discovery_update(DiscoverySessionUpdate {
phase: DiscoveryPhase::Cancelled,
processed: final_processed_count,
@@ -289,7 +306,12 @@ pub trait DiscoversNetworkedEntities:
.await?;
}
Err(e) => {
tracing::error!("Discovery session {} failed: {}", session_id, e);
tracing::error!(
session_id = %session_id,
processed = %final_processed_count,
error = %e,
"Discovery session failed"
);
let error = DiscoveryCriticalError::from_error_string(e.to_string())
.map(|e| e.to_string())
@@ -310,11 +332,9 @@ pub trait DiscoversNetworkedEntities:
*current_session = None;
if cancel.is_cancelled() {
tracing::info!("Discovery session {} was cancelled", session_id);
return Ok(());
}
tracing::info!("Discovery session {} finished", session_id,);
Ok(())
}
@@ -387,7 +407,13 @@ pub trait DiscoversNetworkedEntities:
host.base.name = interface.base.ip_address.to_string()
}
tracing::info!("Processed host for ip {}", interface.base.ip_address);
tracing::info!(
ip = %interface.base.ip_address,
host_name = %host.base.name,
service_count = %services.len(),
"Processed host for ip {}",
interface.base.ip_address
);
Ok(Some((host, services)))
}
@@ -544,11 +570,10 @@ pub trait DiscoversNetworkedEntities:
// Always report when complete
{
tracing::debug!(
"Progress update: {}/{} (threshold: {}, total: {})",
current_processed,
total_to_process,
threshold,
total_to_process
processed = %current_processed,
total = %total_to_process,
percentage = format!("{:.1}%", current_processed as f32 / total_to_process as f32),
"Discovery progress update"
);
self.report_discovery_update(DiscoverySessionUpdate::scanning(current_processed))
+15 -9
View File
@@ -132,6 +132,14 @@ impl RunsDiscovery for DiscoveryRunner<DockerScanDiscovery> {
)
.await;
if let Ok(ref container_data) = discovered_hosts_services {
tracing::info!(
total_containers = %container_list.len(),
discovered = %container_data.len(),
"🐳 Docker scan complete"
);
}
let discovery_result = if discovered_hosts_services.is_ok() {
Ok(())
} else {
@@ -329,7 +337,13 @@ impl DiscoveryRunner<DockerScanDiscovery> {
match result {
Ok(Some((host, services))) => all_container_data.push((host, services)),
Ok(None) => {}
Err(e) => tracing::warn!("Error processing container: {}", e),
Err(e) => {
tracing::warn!(
error = %e,
phase = "container_processing",
"Container processing error"
);
}
}
last_reported_processed_count = self
@@ -397,14 +411,6 @@ impl DiscoveryRunner<DockerScanDiscovery> {
..
} = params;
tracing::info!(
"Processing host mode container {}",
container
.name
.as_ref()
.unwrap_or(&"Unknown Container Name".to_string())
);
let host_ip = self.as_ref().utils.get_own_ip_address()?;
if let Some(Some(p)) = container.config.as_ref().map(|c| c.exposed_ports.as_ref()) {
+25 -10
View File
@@ -200,7 +200,12 @@ impl DiscoveryRunner<NetworkScanDiscovery> {
Ok(None)
}
Err(e) => {
tracing::debug!("Host {} - scan error: {}", ip, e);
tracing::warn!(
ip = %ip,
error = %e,
phase = "port_endpoint_scan",
"Scan error"
);
Err(e)
}
Ok(Some((all_ports, endpoint_responses))) => {
@@ -253,7 +258,10 @@ impl DiscoveryRunner<NetworkScanDiscovery> {
tracing::warn!("✗ Host {} - failed to create in database", ip);
}
} else {
tracing::debug!("Host {} - process_host returned None", ip);
tracing::debug!(
ip = %ip,
"Host processing returned None - no services matched or error occurred"
);
}
Ok(None)
}
@@ -282,7 +290,11 @@ impl DiscoveryRunner<NetworkScanDiscovery> {
if DiscoveryCriticalError::is_critical_error(e.to_string()) {
return Err(e);
} else {
tracing::warn!("Error during scanning/processing: {}", e);
tracing::warn!(
error = %e,
phase = "scan_and_process",
"Host scan/processing error"
);
}
}
}
@@ -292,12 +304,11 @@ impl DiscoveryRunner<NetworkScanDiscovery> {
.await?;
}
tracing::info!("📊 Scan complete:");
tracing::info!(" - Total IPs: {}", total_ips);
tracing::info!(" - Scanned: {}", scanned);
tracing::info!(
" - Successfully created: {} hosts",
successful_discoveries.len()
tracing::warn!(
total_ips = %total_ips,
scanned = %scanned,
discovered = %successful_discoveries.len(),
"📊 Scan complete"
);
Ok(successful_discoveries)
@@ -359,7 +370,11 @@ impl DiscoveryRunner<NetworkScanDiscovery> {
}
}
Err(e) => {
tracing::debug!("Error scanning host {}: {}", ip, e);
tracing::warn!(
ip = %ip,
error = %e,
"Host scan failed"
);
if DiscoveryCriticalError::is_critical_error(e.to_string()) {
Err(e)
+23 -5
View File
@@ -67,7 +67,11 @@ impl DaemonRuntimeService {
}
}
} else {
tracing::warn!("network_id not set, skipping work request");
let daemon_id = self.config_store.get_id().await?;
tracing::warn!(
daemon_id = %daemon_id,
"Work request skipped - network_id not configured"
);
}
}
}
@@ -100,7 +104,10 @@ impl DaemonRuntimeService {
.send()
.await?;
tracing::info!("💓 Heartbeat sent");
tracing::info!(
daemon_id = %daemon_id,
"💓 Heartbeat sent"
);
if !response.status().is_success() {
let api_response: ApiResponse<()> = response.json().await?;
@@ -109,7 +116,10 @@ impl DaemonRuntimeService {
let error_msg = api_response
.error
.unwrap_or_else(|| "Unknown error".to_string());
tracing::warn!(" ❤️‍🩹 Heartbeat failed: {}", error_msg);
tracing::error!(
error = %error_msg,
"❤️‍🩹 Heartbeat failed - check network connectivity"
);
}
}
@@ -117,7 +127,10 @@ impl DaemonRuntimeService {
tracing::warn!("Failed to update heartbeat timestamp: {}", e);
}
} else {
tracing::warn!("network_id not set, skipping heartbeat");
tracing::warn!(
daemon_id = %daemon_id,
"Heartbeat skipped - network_id not configured"
);
}
}
}
@@ -142,7 +155,12 @@ impl DaemonRuntimeService {
self.register_with_server(daemon_id, network_id, has_docker_client)
.await?;
tracing::info!("Daemon fully initialized!");
tracing::info!(
daemon_id = %daemon_id,
network_id = %network_id,
has_docker = %has_docker_client,
"Daemon fully initialized"
);
Ok(())
}
+33 -130
View File
@@ -160,11 +160,11 @@ pub async fn scan_ports_and_endpoints(
open_ports.sort_by_key(|p| (p.number(), p.protocol()));
open_ports.dedup();
tracing::debug!(
"Scan results for {}: found {} open ports, {} endpoint responses",
ip,
open_ports.len(),
endpoint_responses.len()
tracing::info!(
ip = %ip,
open_ports = %open_ports.len(),
endpoint_responses = %endpoint_responses.len(),
"Host scan complete"
);
Ok((open_ports, endpoint_responses))
@@ -187,15 +187,6 @@ pub async fn scan_tcp_ports(
})
.collect();
let total_ports = ports.len();
tracing::debug!(
"Scanning {} TCP ports on {} with batch size {}",
total_ports,
ip,
batch_size
);
let open_ports = batch_scan(ports, batch_size, cancel, move |port| async move {
let socket = SocketAddr::new(ip, port.number());
@@ -218,19 +209,19 @@ pub async fn scan_tcp_ports(
let use_https = match peek_result {
Ok(Ok(0)) => {
tracing::trace!("Port open - HTTPS (immediate close)");
// Port open - HTTPS (immediate close)
true
}
Ok(Ok(n)) => {
tracing::trace!("Port open - got {} bytes", n);
Ok(Ok(_)) => {
// Port open - got bytes
false
}
Ok(Err(_)) => {
tracing::trace!("Port open - peek error");
// Port open - peek error
false
}
Err(_) => {
tracing::trace!("Port open - no immediate response");
// Port open - no immediate response
false
}
};
@@ -279,13 +270,6 @@ pub async fn scan_tcp_ports(
})
.await;
tracing::debug!(
"Completed TCP scan of {} on {} ports: {} open",
ip,
total_ports,
open_ports.len()
);
Ok(open_ports)
}
@@ -371,13 +355,6 @@ pub async fn scan_endpoints(
})
.collect();
tracing::debug!(
"Endpoint scan for {}: filtering to {} endpoints based on {} open ports",
ip,
all_endpoints.len(),
filter_ports.as_ref().map(|p| p.len()).unwrap_or(0)
);
// Group endpoints by (port, path) to avoid duplicate requests
let mut unique_endpoints: HashMap<(u16, String), Endpoint> = HashMap::new();
for endpoint in all_endpoints {
@@ -390,13 +367,6 @@ pub async fn scan_endpoints(
let endpoint_batch_size = std::cmp::min(batch_size / 2, 50);
tracing::debug!(
"Scanning {} unique endpoints on {} with batch size {}",
total_endpoints,
ip,
endpoint_batch_size
);
let use_https_ports_is_none = use_https_ports.is_none();
let https_ports = use_https_ports.unwrap_or_default();
@@ -487,10 +457,10 @@ pub async fn scan_endpoints(
.await;
tracing::info!(
"Completed endpoint scan of {}: {} responses from {} endpoints",
ip,
responses.len(),
total_endpoints
ip = %ip,
endpoints_scanned = %total_endpoints,
responses = %responses.len(),
"Endpoint scan complete"
);
Ok(responses)
@@ -510,14 +480,8 @@ pub async fn test_dns_service(ip: IpAddr) -> Result<Option<u16>, Error> {
)
.await
{
Ok(Ok(_)) => {
tracing::trace!("DNS server responding at {}:53", ip);
Ok(Some(53))
}
_ => {
tracing::trace!("DNS server not responding at {}:53", ip);
Ok(None)
}
Ok(Ok(_)) => Ok(Some(53)),
_ => Ok(None),
}
}
@@ -535,30 +499,16 @@ pub async fn test_ntp_service(ip: IpAddr) -> Result<Option<u16>, Error> {
// Validate that we got a meaningful time response
if let Ok(datetime) = result.datetime().unix_timestamp() {
if datetime > Duration::from_secs(0) {
// Sanity check for valid timestamp
tracing::trace!(
"NTP server responding at {}:123 with time {}",
ip,
datetime.as_millis()
);
Ok(Some(123))
} else {
tracing::trace!("Invalid NTP response from {}:123", ip);
Ok(None)
}
} else {
tracing::trace!("Invalid NTP response from {}:123", ip);
Ok(None)
}
}
Ok(Err(e)) => {
tracing::trace!("NTP error from {}:123 - {}", ip, e);
Ok(None)
}
Err(_) => {
tracing::trace!("NTP timeout from {}:123", ip);
Ok(None)
}
Ok(Err(_)) => Ok(None),
Err(_) => Ok(None),
}
}
@@ -574,27 +524,16 @@ pub async fn test_snmp_service(ip: IpAddr) -> Result<Option<u16>, Error> {
match timeout(Duration::from_millis(2000), session.get(&sys_descr_oid)).await {
Ok(Ok(mut response)) => {
if let Some(_varbind) = response.varbinds.next() {
tracing::trace!("SNMP server responding at {}:161", ip);
Ok(Some(161))
} else {
tracing::trace!("Empty SNMP response from {}:161", ip);
Ok(None)
}
}
Ok(Err(e)) => {
tracing::trace!("SNMP error from {}:161 - {}", ip, e);
Ok(None)
}
Err(_) => {
tracing::trace!("SNMP timeout from {}:161", ip);
Ok(None)
}
Ok(Err(_)) => Ok(None),
Err(_) => Ok(None),
}
}
Err(e) => {
tracing::trace!("SNMP session creation failed for {}:161 - {}", ip, e);
Ok(None)
}
Err(_) => Ok(None),
}
}
@@ -606,16 +545,14 @@ pub async fn test_dhcp_service(ip: IpAddr, subnet_cidr: &IpCidr) -> Result<Optio
// If port 68 is busy (another DHCP client), try random port
match UdpSocket::bind("0.0.0.0:0").await {
Ok(s) => s,
Err(e) => {
tracing::debug!("Failed to bind UDP socket for DHCP test: {}", e);
Err(_) => {
return Ok(None);
}
}
}
};
if let Err(e) = socket.set_broadcast(true) {
tracing::debug!("Failed to enable broadcast for DHCP test: {}", e);
if socket.set_broadcast(true).is_err() {
return Ok(None);
}
@@ -626,7 +563,6 @@ pub async fn test_dhcp_service(ip: IpAddr, subnet_cidr: &IpCidr) -> Result<Optio
SocketAddr::new(IpAddr::V4(broadcast_ip), 67)
}
IpCidr::V6(_) => {
tracing::trace!("Skipping DHCP test for IPv6 address");
return Ok(None);
}
};
@@ -661,56 +597,23 @@ pub async fn test_dhcp_service(ip: IpAddr, subnet_cidr: &IpCidr) -> Result<Optio
let mut encoder = Encoder::new(&mut buf);
msg.encode(&mut encoder)?;
// Try broadcast first
tracing::trace!(
"Sending DHCP DISCOVER broadcast to {} for testing {} (xid: {:#x}, {} bytes)",
broadcast_addr,
ip,
transaction_id,
buf.len()
);
match socket.send_to(&buf, broadcast_addr).await {
Ok(sent) => {
tracing::trace!("Sent {} bytes via broadcast", sent);
// Try to receive multiple responses - might get responses from multiple servers
if let Some(port) =
wait_for_dhcp_responses(&socket, ip, transaction_id, "broadcast", 3).await?
{
return Ok(Some(port));
}
}
Err(e) => {
tracing::trace!("Broadcast DHCP DISCOVER failed: {}", e);
}
if socket.send_to(&buf, broadcast_addr).await.is_ok()
&& let Some(port) =
wait_for_dhcp_responses(&socket, ip, transaction_id, "broadcast", 3).await?
{
return Ok(Some(port));
}
// Fall back to unicast
let unicast_addr = SocketAddr::new(ip, 67);
tracing::trace!(
"Trying unicast DHCP DISCOVER to {} (xid: {:#x})",
unicast_addr,
transaction_id
);
match socket.send_to(&buf, unicast_addr).await {
Ok(sent) => {
tracing::trace!("Sent {} bytes via unicast", sent);
if let Some(port) =
wait_for_dhcp_responses(&socket, ip, transaction_id, "unicast", 3).await?
{
return Ok(Some(port));
}
}
Err(e) => {
tracing::trace!("Unicast DHCP DISCOVER failed: {}", e);
}
if socket.send_to(&buf, unicast_addr).await.is_ok()
&& let Some(port) =
wait_for_dhcp_responses(&socket, ip, transaction_id, "unicast", 3).await?
{
return Ok(Some(port));
}
tracing::trace!(
"No DHCP response from {} after broadcast and unicast attempts",
ip
);
Ok(None)
}
+86 -11
View File
@@ -28,11 +28,32 @@ pub fn create_router() -> Router<Arc<AppState>> {
pub async fn create_handler(
State(state): State<Arc<AppState>>,
RequireMember(_user): RequireMember,
RequireMember(user): RequireMember,
Json(api_key): Json<ApiKey>,
) -> ApiResult<Json<ApiResponse<ApiKeyResponse>>> {
tracing::debug!(
api_key_name = %api_key.base.name,
network_id = %api_key.base.network_id,
user_id = %user.user_id,
"API key create request received"
);
let service = ApiKey::get_service(&state);
let api_key = service.create(api_key).await?;
let api_key = service.create(api_key).await.map_err(|e| {
tracing::error!(
error = %e,
user_id = %user.user_id,
"Failed to create API key"
);
ApiError::internal_error(&e.to_string())
})?;
tracing::info!(
api_key_id = %api_key.id,
api_key_name = %api_key.base.name,
user_id = %user.user_id,
"API key created via API (key shown to user)"
);
Ok(Json(ApiResponse::success(ApiKeyResponse {
key: api_key.base.key.clone(),
@@ -42,36 +63,90 @@ pub async fn create_handler(
pub async fn rotate_key_handler(
State(state): State<Arc<AppState>>,
RequireMember(_user): RequireMember,
RequireMember(user): RequireMember,
Path(api_key_id): Path<Uuid>,
) -> ApiResult<Json<ApiResponse<String>>> {
tracing::info!(
api_key_id = %api_key_id,
user_id = %user.user_id,
"API key rotation request received"
);
let service = ApiKey::get_service(&state);
let key = service.rotate_key(api_key_id).await?;
let key = service.rotate_key(api_key_id).await.map_err(|e| {
tracing::error!(
api_key_id = %api_key_id,
user_id = %user.user_id,
error = %e,
"Failed to rotate API key"
);
ApiError::internal_error(&e.to_string())
})?;
tracing::info!(
api_key_id = %api_key_id,
user_id = %user.user_id,
"API key rotated via API (new key shown to user)"
);
Ok(Json(ApiResponse::success(key)))
}
pub async fn update_handler(
State(state): State<Arc<AppState>>,
RequireMember(_user): RequireMember,
RequireMember(user): RequireMember,
Path(id): Path<Uuid>,
Json(mut request): Json<ApiKey>,
) -> ApiResult<Json<ApiResponse<ApiKey>>> {
tracing::debug!(
api_key_id = %id,
user_id = %user.user_id,
"API key update request received"
);
let service = ApiKey::get_service(&state);
// Verify entity exists
let existing = service
.get_by_id(&id)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?
.ok_or_else(|| ApiError::not_found(format!("Api Key '{}' not found", id)))?;
.map_err(|e| {
tracing::error!(
api_key_id = %id,
user_id = %user.user_id,
error = %e,
"Failed to fetch API key for update"
);
ApiError::internal_error(&e.to_string())
})?
.ok_or_else(|| {
tracing::warn!(
api_key_id = %id,
user_id = %user.user_id,
"API key not found for update"
);
ApiError::not_found(format!("Api Key '{}' not found", id))
})?;
// Preserve the key - don't allow it to be changed via update
request.base.key = existing.base.key;
let updated = service
.update(&mut request)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?;
let updated = service.update(&mut request).await.map_err(|e| {
tracing::error!(
api_key_id = %id,
user_id = %user.user_id,
error = %e,
"Failed to update API key"
);
ApiError::internal_error(&e.to_string())
})?;
tracing::info!(
api_key_id = %id,
api_key_name = %updated.base.name,
user_id = %user.user_id,
"API key updated via API"
);
Ok(Json(ApiResponse::success(updated)))
}
+31 -1
View File
@@ -37,6 +37,12 @@ impl ApiKeyService {
pub async fn create(&self, api_key: ApiKey) -> Result<ApiKey> {
let key = self.generate_api_key();
tracing::debug!(
api_key_name = %api_key.base.name,
network_id = %api_key.base.network_id,
"Creating API key"
);
let api_key = ApiKey::new(ApiKeyBase {
key: key.clone(),
name: api_key.base.name,
@@ -46,10 +52,24 @@ impl ApiKeyService {
is_enabled: true,
});
self.storage.create(&api_key).await
let created = self.storage.create(&api_key).await?;
tracing::info!(
api_key_id = %created.id,
api_key_name = %created.base.name,
network_id = %created.base.network_id,
"API key created"
);
Ok(created)
}
pub async fn rotate_key(&self, api_key_id: Uuid) -> Result<String> {
tracing::info!(
api_key_id = %api_key_id,
"Rotating API key"
);
if let Some(mut api_key) = self.get_by_id(&api_key_id).await? {
let new_key = self.generate_api_key();
@@ -57,8 +77,18 @@ impl ApiKeyService {
self.update(&mut api_key).await?;
tracing::info!(
api_key_id = %api_key_id,
api_key_name = %api_key.base.name,
"API key rotated successfully"
);
Ok(new_key)
} else {
tracing::warn!(
api_key_id = %api_key_id,
"API key not found for rotation"
);
Err(anyhow!(
"Could not find api key {}. Unable to update API key.",
api_key_id
+7
View File
@@ -43,6 +43,13 @@ impl AuthenticatedEntity {
}
}
pub fn entity_id(&self) -> String {
match self {
AuthenticatedEntity::User { user_id, .. } => user_id.to_string(),
AuthenticatedEntity::Daemon(network_id) => format!("Daemon for network {}", network_id),
}
}
/// Get network_ids that daemon / user have access to
pub fn network_ids(&self) -> Vec<Uuid> {
match self {
+5 -1
View File
@@ -43,7 +43,11 @@ async fn create_host(
let host_service = &state.services.host_service;
if let Err(e) = request.host.base.validate() {
tracing::error!("Host validation failed: {:?}", e);
tracing::warn!(
error = %e,
host_name = %request.host.base.name,
"Host validation failed"
);
return Err(ApiError::bad_request(&format!(
"Host validation failed: {}",
e
+40 -19
View File
@@ -89,6 +89,13 @@ impl HostService {
// Since bindings were already reassigned above, we just update the host record
let host_with_final_services = self.storage.update(&mut created_host).await?;
tracing::info!(
host_id = %created_host.id,
host_name = %created_host.base.name,
service_count = %created_services.len(),
"Created host with services"
);
Ok((host_with_final_services, created_services))
}
@@ -130,7 +137,7 @@ impl HostService {
_ => {
self.storage.create(&host).await?;
tracing::info!("Created host {}: {}", host.base.name, host.id);
tracing::debug!("Result: {:?}", host);
tracing::trace!("Result: {:?}", host);
host
}
};
@@ -154,7 +161,7 @@ impl HostService {
self.storage.update(&mut host).await?;
tracing::info!("Updated host {:?}: {:?}", host.base.name, host.id);
tracing::debug!("Result: {:?}", host);
tracing::trace!("Result: {:?}", host);
Ok(host)
}
@@ -256,18 +263,17 @@ impl HostService {
if !data.is_empty() {
tracing::info!(
"Upserted new discovery data: {} to host {}: {}",
existing_host.base.name,
existing_host.id,
data.join(", ")
host_id = %existing_host.id,
host_name = %existing_host.base.name,
updates = %data.join(", "),
"Upserted discovery data to host"
);
tracing::trace!("Result: {:?}", existing_host);
} else {
tracing::info!(
"No new information to upsert from host {} to host {}: {}",
tracing::debug!(
"No new data to upsert from host {} to {}",
new_host_data.base.name,
existing_host.base.name,
existing_host.id
existing_host.base.name
);
}
@@ -318,6 +324,7 @@ impl HostService {
// Update host_id, network_id, and interface/port binding IDs to what's available on new host
// bindings IDs from old host may no longer exist if new host already had the port / interface
let service_transfer_futures: Vec<_> = other_host_services
.clone()
.into_iter()
.map(|s| {
self.service_service.reassign_service_interface_bindings(
@@ -357,8 +364,15 @@ impl HostService {
// Delete host, ignore services because they are just being moved to other host
self.delete_host(&other_host.id, false).await?;
tracing::info!("Consolidated host {} into {}", other_host, updated_host);
tracing::debug!("Result: {:?}", updated_host);
tracing::info!(
source_host_id = %other_host.id,
source_host_name = %other_host.base.name,
dest_host_id = %updated_host.id,
dest_host_name = %updated_host.base.name,
transferred_services = %other_host_services.len(),
"Hosts consolidated"
);
tracing::trace!("Consolidation result: {:?}", updated_host);
Ok(updated_host)
}
@@ -398,9 +412,15 @@ impl HostService {
let updated_services = try_join_all(update_service_futures).await?;
tracing::info!("Updated host {} services", updates);
tracing::debug!(
"Result - host: {:?}, updated services: {:?}, deleted services: {:?}",
tracing::info!(
host_id = %current_host.id,
host_name = %current_host.base.name,
updated_services = %updated_services.len(),
deleted_services = %delete_services.len(),
"Host services updated"
);
tracing::trace!(
"Full update - host: {:?}, updated: {:?}, deleted: {:?}",
updates,
updated_services,
delete_services
@@ -433,10 +453,11 @@ impl HostService {
self.storage.delete(id).await?;
tracing::info!(
"Deleted host {}: {}; deleted service + associated subnet/group bindings: {}",
host.base.name,
host.id,
!delete_services
host_id = %host.id,
host_name = %host.base.name,
service_count = %host.base.services.len(),
deleted_services = %delete_services,
"Host deleted"
);
Ok(())
}
@@ -19,12 +19,7 @@ impl ServiceDefinition for HomeAssistant {
}
fn discovery_pattern(&self) -> Pattern<'_> {
Pattern::Endpoint(
PortBase::new_tcp(8123),
"/auth/authorize",
"home assistant",
None,
)
Pattern::Endpoint(PortBase::new_tcp(8123), "/", "home assistant", None)
}
fn logo_url(&self) -> &'static str {
+30 -20
View File
@@ -95,11 +95,13 @@ impl ServiceService {
_ => {
self.storage.create(&service).await?;
tracing::info!(
"Created service {} for host {}",
service,
service.base.host_id
service_id = %service.id,
service_name = %service.base.name,
host_id = %service.base.host_id,
binding_count = %service.base.bindings.len(),
"Service created"
);
tracing::debug!("Result: {:?}", service);
tracing::trace!("Result: {:?}", service);
service
}
};
@@ -117,7 +119,7 @@ impl ServiceService {
let lock = self.get_service_lock(&existing_service.id).await;
let _guard = lock.lock().await;
tracing::debug!(
tracing::trace!(
"Upserting new service data {:?} into {:?}",
new_service_data,
existing_service
@@ -199,16 +201,16 @@ impl ServiceService {
if !data.is_empty() {
tracing::info!(
"Upserted service {} with new data: {}",
existing_service,
data.join(", ")
service_id = %existing_service.id,
service_name = %existing_service.base.name,
updates = %data.join(", "),
"Upserted service with new data"
);
tracing::debug!("Result {:?}", existing_service);
} else {
tracing::info!(
"No new information to upsert from service {} to service {}",
new_service_data,
existing_service,
tracing::debug!(
"Service upsert - no changes needed for {}",
existing_service
);
}
@@ -219,7 +221,7 @@ impl ServiceService {
let lock = self.get_service_lock(&service.id).await;
let _guard = lock.lock().await;
tracing::debug!("Updating service: {:?}", service);
tracing::trace!("Updating service: {:?}", service);
let current_service = self
.get_by_id(&service.id)
@@ -231,11 +233,12 @@ impl ServiceService {
self.storage.update(&mut service).await?;
tracing::info!(
"Updated service {} for host {}",
service,
service.base.host_id
service_id = %service.id,
service_name = %service.base.name,
host_id = %service.base.host_id,
"Service updated"
);
tracing::debug!("Result: {:?}", service);
tracing::trace!("Result: {:?}", service);
Ok(service)
}
@@ -244,7 +247,7 @@ impl ServiceService {
current_service: &Service,
updates: Option<&Service>,
) -> Result<(), Error> {
tracing::debug!(
tracing::trace!(
"Updating group bindings referencing {:?}, with changes {:?}",
current_service,
updates
@@ -314,7 +317,7 @@ impl ServiceService {
let lock = self.get_service_lock(&service.id).await;
let _guard = lock.lock().await;
tracing::debug!(
tracing::trace!(
"Preparing service {:?} for transfer from host {:?} to host {:?}",
service,
original_host,
@@ -393,7 +396,14 @@ impl ServiceService {
mutable_service.base.network_id = updated_host.base.network_id;
tracing::debug!(
tracing::info!(
"Reassigned service {} bindings for from host {} to host {}",
mutable_service,
original_host,
updated_host
);
tracing::trace!(
"Reassigned service {:?} bindings for from host {:?} to host {:?}",
mutable_service,
original_host,
+165 -27
View File
@@ -54,13 +54,19 @@ where
pub async fn create_handler<T>(
State(state): State<Arc<AppState>>,
RequireMember(_user): RequireMember,
RequireMember(user): RequireMember,
Json(request): Json<T>,
) -> ApiResult<Json<ApiResponse<T>>>
where
T: CrudHandlers + 'static,
{
if let Err(err) = request.validate() {
tracing::warn!(
entity_type = T::table_name(),
user_id = %user.user_id,
error = %err,
"Entity validation failed"
);
return Err(ApiError::bad_request(&format!(
"{} validation failed: {}",
T::entity_name(),
@@ -68,11 +74,29 @@ where
)));
}
tracing::debug!(
entity_type = T::table_name(),
user_id = %user.user_id,
"Create request received"
);
let service = T::get_service(&state);
let created = service
.create(request)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?;
let created = service.create(request).await.map_err(|e| {
tracing::error!(
entity_type = T::table_name(),
user_id = %user.user_id,
error = %e,
"Failed to create entity"
);
ApiError::internal_error(&e.to_string())
})?;
tracing::info!(
entity_type = T::table_name(),
entity_id = %created.id(),
user_id = %user.user_id,
"Entity created via API"
);
Ok(Json(ApiResponse::success(created)))
}
@@ -84,57 +108,144 @@ pub async fn get_all_handler<T>(
where
T: CrudHandlers + 'static,
{
tracing::debug!(
entity_type = T::table_name(),
user_id = %user.user_id,
network_count = %user.network_ids.len(),
"Get all request received"
);
let network_filter = EntityFilter::unfiltered().network_ids(&user.network_ids);
let service = T::get_service(&state);
let entities = service
.get_all(network_filter)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?;
let entities = service.get_all(network_filter).await.map_err(|e| {
tracing::error!(
entity_type = T::table_name(),
user_id = %user.user_id,
error = %e,
"Failed to fetch entities"
);
ApiError::internal_error(&e.to_string())
})?;
tracing::debug!(
entity_type = T::table_name(),
user_id = %user.user_id,
count = %entities.len(),
"Entities fetched successfully"
);
Ok(Json(ApiResponse::success(entities)))
}
pub async fn get_by_id_handler<T>(
State(state): State<Arc<AppState>>,
RequireMember(_user): RequireMember,
RequireMember(user): RequireMember,
Path(id): Path<Uuid>,
) -> ApiResult<Json<ApiResponse<T>>>
where
T: CrudHandlers + 'static,
{
tracing::debug!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
"Get by ID request received"
);
let service = T::get_service(&state);
let entity = service
.get_by_id(&id)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?
.ok_or_else(|| ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id)))?;
.map_err(|e| {
tracing::error!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
error = %e,
"Failed to fetch entity by ID"
);
ApiError::internal_error(&e.to_string())
})?
.ok_or_else(|| {
tracing::warn!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
"Entity not found"
);
ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id))
})?;
tracing::debug!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
"Entity fetched successfully"
);
Ok(Json(ApiResponse::success(entity)))
}
pub async fn update_handler<T>(
State(state): State<Arc<AppState>>,
RequireMember(_user): RequireMember,
RequireMember(user): RequireMember,
Path(id): Path<Uuid>,
Json(mut request): Json<T>,
) -> ApiResult<Json<ApiResponse<T>>>
where
T: CrudHandlers + 'static,
{
tracing::debug!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
"Update request received"
);
let service = T::get_service(&state);
// Verify entity exists
service
.get_by_id(&id)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?
.ok_or_else(|| ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id)))?;
.map_err(|e| {
tracing::error!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
error = %e,
"Failed to fetch entity for update"
);
ApiError::internal_error(&e.to_string())
})?
.ok_or_else(|| {
tracing::warn!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
"Entity not found for update"
);
ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id))
})?;
let updated = service
.update(&mut request)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?;
let updated = service.update(&mut request).await.map_err(|e| {
tracing::error!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
error = %e,
"Failed to update entity"
);
ApiError::internal_error(&e.to_string())
})?;
tracing::info!(
entity_type = T::table_name(),
entity_id = %id,
user_id = %user.user_id,
"Entity updated via API"
);
Ok(Json(ApiResponse::success(updated)))
}
@@ -149,17 +260,44 @@ where
{
let service = T::get_service(&state);
// Verify entity exists
service
// Verify entity exists and log the deletion attempt
let entity = service
.get_by_id(&id)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?
.ok_or_else(|| ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id)))?;
.map_err(|e| {
tracing::error!(
entity_type = T::table_name(),
entity_id = %id,
error = %e,
"Failed to fetch entity for deletion"
);
ApiError::internal_error(&e.to_string())
})?
.ok_or_else(|| {
tracing::warn!(
entity_type = T::table_name(),
entity_id = %id,
"Entity not found for deletion"
);
ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id))
})?;
service
.delete(&id)
.await
.map_err(|e| ApiError::internal_error(&e.to_string()))?;
tracing::info!(
entity_type = T::table_name(),
entity_id = %id,
entity_name = %entity,
"Delete request received"
);
service.delete(&id).await.map_err(|e| {
tracing::error!(
entity_type = T::table_name(),
entity_id = %id,
error = %e,
"Failed to delete entity"
);
ApiError::internal_error(&e.to_string())
})?;
Ok(Json(ApiResponse::success(())))
}
+71 -18
View File
@@ -18,18 +18,6 @@ where
/// Get reference to the storage
fn storage(&self) -> &Arc<GenericPostgresStorage<T>>;
/// Create entity
///
/// Entities submitted by users arrive with `Uuid::nil()` as their id and are
/// rebuilt through `T::new` from their base data (which presumably assigns a
/// fresh id — confirm against `T::new`); entities that already carry a
/// concrete id are persisted unchanged.
async fn create(&self, entity: T) -> Result<T, anyhow::Error> {
    // User-created have uuid nil
    let entity = match entity.id() {
        id if id == Uuid::nil() => T::new(entity.get_base()),
        _ => entity,
    };
    self.storage().create(&entity).await
}
/// Get entity by ID
async fn get_by_id(&self, id: &Uuid) -> Result<Option<T>, anyhow::Error> {
self.storage().get_by_id(id).await
@@ -45,13 +33,78 @@ where
self.storage().get_one(filter).await
}
/// Update entity
async fn update(&self, entity: &mut T) -> Result<T, anyhow::Error> {
self.storage().update(entity).await
}
/// Delete entity by ID
///
/// Looks the entity up first so the log line can carry its `Display` name and
/// so a missing id becomes an explicit error instead of a silent no-op.
/// Returns `Err` when no entity with `id` exists.
async fn delete(&self, id: &Uuid) -> Result<(), anyhow::Error> {
    // Fetch-before-delete: needed for the entity_name log field. NOTE(review):
    // this is a check-then-act pair, not atomic — a concurrent delete between
    // the two calls would surface from the storage layer, not from this guard.
    if let Some(entity) = self.get_by_id(id).await? {
        tracing::info!(
            entity_type = T::table_name(),
            entity_id = %id,
            entity_name = %entity,
            "Deleting entity"
        );
        self.storage().delete(id).await?;
        tracing::debug!(
            entity_type = T::table_name(),
            entity_id = %id,
            "Entity deleted successfully"
        );
        Ok(())
    } else {
        Err(anyhow::anyhow!(
            "{} with id {} not found",
            T::table_name(),
            id
        ))
    }
}
/// Create entity
///
/// User-submitted entities carry a nil UUID and are rebuilt via `T::new` from
/// their base data (assumed to mint a real id — confirm against `T::new`);
/// entities with a concrete id pass through unchanged. Emits a debug log
/// before the storage call and an info log once the row exists.
async fn create(&self, entity: T) -> Result<T, anyhow::Error> {
    let entity = match entity.id() {
        id if id == Uuid::nil() => T::new(entity.get_base()),
        _ => entity,
    };
    tracing::debug!(
        entity_type = T::table_name(),
        entity_id = %entity.id(),
        entity_name = %entity,
        "Creating entity"
    );
    self.storage().create(&entity).await.map(|persisted| {
        tracing::info!(
            entity_type = T::table_name(),
            entity_id = %persisted.id(),
            entity_name = %persisted,
            "Entity created"
        );
        persisted
    })
}
/// Update entity
///
/// Logs at debug before handing the entity to storage and at info once the
/// update succeeds; the value returned by the storage layer is what the
/// caller receives.
async fn update(&self, entity: &mut T) -> Result<T, anyhow::Error> {
    tracing::debug!(
        entity_type = T::table_name(),
        entity_id = %entity.id(),
        entity_name = %entity,
        "Updating entity"
    );
    self.storage().update(entity).await.map(|persisted| {
        tracing::info!(
            entity_type = T::table_name(),
            entity_id = %persisted.id(),
            entity_name = %persisted,
            "Entity updated"
        );
        persisted
    })
}
}
+51 -7
View File
@@ -27,21 +27,47 @@ pub fn create_router() -> Router<Arc<AppState>> {
/// Create a subnet via the API.
///
/// Validates the request body first (rejecting with a 400 on failure), then
/// delegates to the subnet service. Every outcome — validation failure,
/// service error, success — is logged with the authenticated entity's id.
pub async fn create_handler(
    State(state): State<Arc<AppState>>,
    MemberOrDaemon { entity, .. }: MemberOrDaemon,
    Json(request): Json<Subnet>,
) -> ApiResult<Json<ApiResponse<Subnet>>> {
    if let Err(err) = request.validate() {
        tracing::warn!(
            subnet_name = %request.base.name,
            subnet_cidr = %request.base.cidr,
            entity_id = %entity.entity_id(),
            error = %err,
            "Subnet validation failed"
        );
        return Err(ApiError::bad_request(&format!(
            "Subnet validation failed: {}",
            err
        )));
    }
    tracing::debug!(
        subnet_name = %request.base.name,
        subnet_cidr = %request.base.cidr,
        network_id = %request.base.network_id,
        entity_id = %entity.entity_id(),
        "Subnet create request received"
    );
    let service = Subnet::get_service(&state);
    // Service-layer failures are surfaced as 500s; the underlying error text
    // is preserved in both the log and the API error body.
    let created = service.create(request).await.map_err(|e| {
        tracing::error!(
            error = %e,
            entity_id = %entity.entity_id(),
            "Failed to create subnet"
        );
        ApiError::internal_error(&e.to_string())
    })?;
    tracing::info!(
        subnet_id = %created.id,
        subnet_name = %created.base.name,
        entity_id = %entity.entity_id(),
        "Subnet created via API"
    );
    Ok(Json(ApiResponse::success(created)))
}
@@ -50,11 +76,29 @@ async fn get_all_subnets(
State(state): State<Arc<AppState>>,
entity: AuthenticatedEntity,
) -> ApiResult<Json<ApiResponse<Vec<Subnet>>>> {
let service = &state.services.subnet_service;
tracing::debug!(
entity_id = %entity.entity_id(),
network_count = %entity.network_ids().len(),
"Get all subnets request received"
);
let service = &state.services.subnet_service;
let filter = EntityFilter::unfiltered().network_ids(&entity.network_ids());
let subnets = service.get_all(filter).await?;
let subnets = service.get_all(filter).await.map_err(|e| {
tracing::error!(
error = %e,
entity_id = %entity.entity_id(),
"Failed to fetch subnets"
);
ApiError::internal_error(&e.to_string())
})?;
tracing::debug!(
entity_id = %entity.entity_id(),
subnet_count = %subnets.len(),
"Subnets fetched successfully"
);
Ok(Json(ApiResponse::success(subnets)))
}
+40 -9
View File
@@ -39,7 +39,13 @@ impl CrudService<Subnet> for SubnetService {
subnet
};
tracing::debug!("Creating subnet {:?}", subnet);
tracing::debug!(
subnet_id = %subnet.id,
subnet_name = %subnet.base.name,
subnet_cidr = %subnet.base.cidr,
network_id = %subnet.base.network_id,
"Creating subnet"
);
let subnet_from_storage = match all_subnets.iter().find(|s| subnet.eq(s)) {
// Docker will default to the same subnet range for bridge networks, so we need a way to distinguish docker bridge subnets
@@ -85,18 +91,25 @@ impl CrudService<Subnet> for SubnetService {
} =>
{
tracing::warn!(
"Duplicate subnet for {}: {} found, returning existing {}: {}",
subnet.base.name,
subnet.id,
existing_subnet.base.name,
existing_subnet.id
existing_subnet_id = %existing_subnet.id,
existing_subnet_name = %existing_subnet.base.name,
new_subnet_id = %subnet.id,
new_subnet_name = %subnet.base.name,
subnet_cidr = %subnet.base.cidr,
"Duplicate subnet found, returning existing"
);
existing_subnet.clone()
}
// If there's no existing subnet, create a new one
_ => {
self.storage.create(&subnet).await?;
tracing::info!("Created subnet {}: {}", subnet.base.name, subnet.id);
tracing::info!(
subnet_id = %subnet.id,
subnet_name = %subnet.base.name,
subnet_cidr = %subnet.base.cidr,
network_id = %subnet.base.network_id,
"Subnet created"
);
subnet
}
};
@@ -109,6 +122,13 @@ impl CrudService<Subnet> for SubnetService {
.await?
.ok_or_else(|| anyhow::anyhow!("Subnet not found"))?;
tracing::info!(
subnet_id = %subnet.id,
subnet_name = %subnet.base.name,
subnet_cidr = %subnet.base.cidr,
"Deleting subnet"
);
let filter = EntityFilter::unfiltered().network_ids(&[subnet.base.network_id]);
let hosts = self.host_service.get_all(filter).await?;
@@ -127,10 +147,21 @@ impl CrudService<Subnet> for SubnetService {
None
});
try_join_all(update_futures).await?;
let updated_hosts = try_join_all(update_futures).await?;
tracing::debug!(
subnet_id = %subnet.id,
affected_hosts = %updated_hosts.len(),
"Cleaned up host interfaces referencing subnet"
);
self.storage.delete(id).await?;
tracing::info!("Deleted subnet {}: {}", subnet.base.name, subnet.id);
tracing::info!(
subnet_id = %subnet.id,
subnet_name = %subnet.base.name,
affected_hosts = %updated_hosts.len(),
"Subnet deleted"
);
Ok(())
}
}
+32 -16
View File
@@ -2,6 +2,7 @@ use email_address::EmailAddress;
use netvisor::server::auth::r#impl::api::{LoginRequest, RegisterRequest};
use netvisor::server::daemons::r#impl::api::DiscoveryUpdatePayload;
use netvisor::server::daemons::r#impl::base::Daemon;
use netvisor::server::discovery::r#impl::types::DiscoveryType;
use netvisor::server::networks::r#impl::Network;
use netvisor::server::organizations::r#impl::base::Organization;
#[cfg(feature = "generate-fixtures")]
@@ -13,7 +14,6 @@ use netvisor::server::shared::types::api::ApiResponse;
use netvisor::server::shared::types::metadata::HasId;
use netvisor::server::users::r#impl::base::User;
use std::process::{Child, Command};
use uuid::Uuid;
const BASE_URL: &str = "http://localhost:60072";
const TEST_PASSWORD: &str = "TestPassword123!";
@@ -39,6 +39,7 @@ impl ContainerManager {
"docker-compose.dev.yml",
"up",
"--build",
"--force-recreate",
"--wait",
])
.current_dir("..")
@@ -66,8 +67,18 @@ impl ContainerManager {
.current_dir("..")
.output();
// Stop and remove containers/volumes, but keep third-party images
let _ = Command::new("docker")
.args(["compose", "down", "-v", "--remove-orphans"])
.args([
"compose",
"-f",
"docker-compose.dev.yml",
"down",
"-v",
"--rmi",
"local", // Only remove locally built images (netvisor-*), not pulled images
"--remove-orphans",
])
.current_dir("..")
.output();
@@ -284,11 +295,9 @@ async fn wait_for_network(client: &TestClient) -> Result<Network, String> {
.await
}
async fn wait_for_daemon(client: &TestClient, network_id: Uuid) -> Result<Daemon, String> {
async fn wait_for_daemon(client: &TestClient) -> Result<Daemon, String> {
retry("wait for daemon registration", 15, 2, || async {
let daemons: Vec<Daemon> = client
.get(&format!("/api/daemons?network_id={}", network_id))
.await?;
let daemons: Vec<Daemon> = client.get(&format!("/api/daemons")).await?;
if daemons.is_empty() {
return Err("No daemons registered yet".to_string());
@@ -331,6 +340,11 @@ async fn run_discovery(client: &TestClient) -> Result<(), String> {
if let Some(data) = line.strip_prefix("data: ") {
if let Ok(update) = serde_json::from_str::<DiscoveryUpdatePayload>(data) {
// Only care about Network discovery, not SelfReport
if !matches!(update.discovery_type, DiscoveryType::Network { .. }) {
continue;
}
println!(
"📊 Discovery: {} - {}/{} processed",
update.phase,
@@ -358,22 +372,24 @@ async fn run_discovery(client: &TestClient) -> Result<(), String> {
}
}
async fn verify_home_assistant_discovered(
client: &TestClient,
network_id: Uuid,
) -> Result<Service, String> {
async fn verify_home_assistant_discovered(client: &TestClient) -> Result<Service, String> {
println!("\n=== Verifying Home Assistant Discovery ===");
retry("find Home Assistant service", 10, 2, || async {
let services: Vec<Service> = client
.get(&format!("/api/services?network_id={}", network_id))
.await?;
let services: Vec<Service> = client.get(&format!("/api/services")).await?;
if services.is_empty() {
return Err("No services found yet".to_string());
}
println!("✅ Found {} service(s)", services.len());
println!("✅ Found {} service(s):", services.len());
for service in &services {
println!(
" - {} ({})",
service.base.name,
service.base.service_definition.id()
);
}
services
.into_iter()
@@ -528,7 +544,7 @@ async fn test_full_integration() {
// Wait for daemon
println!("\n=== Waiting for Daemon ===");
let daemon = wait_for_daemon(&client, network.id)
let daemon = wait_for_daemon(&client)
.await
.expect("Failed to find daemon");
println!("✅ Daemon registered: {}", daemon.id);
@@ -537,7 +553,7 @@ async fn test_full_integration() {
run_discovery(&client).await.expect("Discovery failed");
// Verify service discovered
let _service = verify_home_assistant_discovered(&client, network.id)
let _service = verify_home_assistant_discovered(&client)
.await
.expect("Failed to find Home Assistant");
+10 -4
View File
@@ -12,7 +12,7 @@ services:
netvisor-dev:
ipv4_address: 172.25.0.2
postgres:
postgres-dev:
image: postgres:17-alpine
environment:
POSTGRES_DB: netvisor
@@ -21,7 +21,7 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
ports:
- "5432:5432"
- "5435:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
@@ -40,7 +40,7 @@ services:
- "60072:60072"
environment:
- RUST_LOG=debug,sqlx=off,tower_http=off
- NETVISOR_DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD:-password}@postgres:5432/netvisor
- NETVISOR_DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD:-password}@postgres-dev:5432/netvisor
- NETVISOR_WEB_EXTERNAL_PATH=/app/static
- NETVISOR_LOG_LEVEL=${NETVISOR_LOG_LEVEL:-debug}
- NETVISOR_INTEGRATED_DAEMON_URL=${NETVISOR_INTEGRATED_DAEMON_URL:-http://daemon:60073}
@@ -52,7 +52,7 @@ services:
- cargo-git:/usr/local/cargo/git
- server-target:/app/target
depends_on:
postgres:
postgres-dev:
condition: service_healthy
daemon:
condition: service_healthy
@@ -109,6 +109,12 @@ services:
ports:
- 8123:8123
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:8123 || exit 1"]
interval: 5s
timeout: 3s
retries: 30
start_period: 60s
networks:
netvisor-dev:
ipv4_address: 172.25.0.5