Added grouping for Docker bridge subnets in visualization

This commit is contained in:
Maya
2025-10-11 12:00:02 -04:00
parent 00e6e38d5c
commit df44832f86
15 changed files with 382 additions and 210 deletions
+93 -93
View File
@@ -93,10 +93,15 @@ impl DiscoversNetworkedEntities for DiscoveryHandler<DockerScanDiscovery> {
let subnets = self.discover_create_subnets().await?;
let (mut host_interfaces, _) = self.as_ref().utils.scan_interfaces(daemon_id).await?;
let containers = self.get_containers_and_summaries().await?;
let containers_interfaces_and_subnets = self.get_container_interfaces(&containers, &subnets, &mut host_interfaces);
let containers_interfaces_and_subnets =
self.get_container_interfaces(&containers, &subnets, &mut host_interfaces);
let discovery_result = self
.scan_and_process_containers(cancel.clone(), containers, &containers_interfaces_and_subnets)
.scan_and_process_containers(
cancel.clone(),
containers,
&containers_interfaces_and_subnets,
)
.await
.map(|_| ());
@@ -142,9 +147,7 @@ impl DiscoversNetworkedEntities for DiscoveryHandler<DockerScanDiscovery> {
let (_, host_subnets) = self.as_ref().utils.scan_interfaces(daemon_id).await?;
let docker_subnets = self
.get_subnets_from_docker_networks(daemon_id)
.await?;
let docker_subnets = self.get_subnets_from_docker_networks(daemon_id).await?;
let subnets = [host_subnets, docker_subnets].concat();
@@ -172,7 +175,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
&self,
cancel: CancellationToken,
containers: Vec<(ContainerInspectResponse, ContainerSummary)>,
containers_interfaces_and_subnets: &HashMap<String, Vec<(Interface, Subnet)>>
containers_interfaces_and_subnets: &HashMap<String, Vec<(Interface, Subnet)>>,
) -> Result<Vec<Host>> {
let session = self.as_ref().get_session().await?;
let scanned_count = session.scanned_count.clone();
@@ -190,7 +193,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
async move {
self.process_single_container(
&containers_interfaces_and_subnets,
containers_interfaces_and_subnets,
container,
container_summary,
scanned,
@@ -259,16 +262,17 @@ impl DiscoveryHandler<DockerScanDiscovery> {
let empty_vec_ref: &Vec<_> = &Vec::new();
let container_interfaces_and_subnets = if let Some(id) = container.id {
containers_interfaces_and_subnets.get(&id).unwrap_or(empty_vec_ref)
containers_interfaces_and_subnets
.get(&id)
.unwrap_or(empty_vec_ref)
} else {
empty_vec_ref
};
let (host_ip_to_host_ports, container_ips_to_container_ports) =
self.get_ports_from_container(container_summary, &container_interfaces_and_subnets);
for (interface,subnet) in container_interfaces_and_subnets {
self.get_ports_from_container(container_summary, container_interfaces_and_subnets);
for (interface, subnet) in container_interfaces_and_subnets {
if cancel.is_cancelled() {
return Err(Error::msg("Discovery was cancelled"));
}
@@ -302,14 +306,16 @@ impl DiscoveryHandler<DockerScanDiscovery> {
}
let empty_vec_ref: &Vec<_> = &Vec::new();
let container_ports_on_interface = container_ips_to_container_ports.get(&interface.base.ip_address).unwrap_or(empty_vec_ref);
let container_ports_on_interface = container_ips_to_container_ports
.get(&interface.base.ip_address)
.unwrap_or(empty_vec_ref);
if let Ok(Some((mut host, mut services))) = self
.process_host(
ServiceDiscoveryBaselineParams {
subnet: &subnet,
interface: &interface,
open_ports: &container_ports_on_interface,
subnet,
interface,
open_ports: container_ports_on_interface,
endpoint_responses: &endpoint_responses,
host_has_docker_client: &false,
docker_container_name: &container_name,
@@ -320,7 +326,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
{
host.id = self.discovery_type.host_id;
container_interfaces_and_subnets.iter().for_each(|(i,_)| {
container_interfaces_and_subnets.iter().for_each(|(i, _)| {
if !host.base.interfaces.contains(i) {
host.base.interfaces.push(i.clone())
}
@@ -329,8 +335,9 @@ impl DiscoveryHandler<DockerScanDiscovery> {
services.iter_mut().for_each(|s| {
// Add all host port + IPs and any container ports which weren't matched
// We know they are open on this host even if no services matched them
container_ports_on_interface.iter().for_each(
|container_port| {
container_ports_on_interface
.iter()
.for_each(|container_port| {
// Add bindings for container ports which weren't matched
match host.base.ports.iter().find(|p| p.base == *container_port) {
Some(unmatched_container_port)
@@ -339,9 +346,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
.bindings
.iter()
.filter_map(|b| b.port_id())
.any(|port_id| {
port_id == unmatched_container_port.id
}) =>
.any(|port_id| port_id == unmatched_container_port.id) =>
{
s.base.bindings.push(Binding::new_l4(
unmatched_container_port.id,
@@ -350,8 +355,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
}
_ => (),
}
},
);
});
// Add bindings for all host ports, provided there's an interface
host_ip_to_host_ports.iter().for_each(|(ip, pbs)| {
@@ -365,9 +369,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
.bindings
.iter()
.filter_map(|b| match b.port_id() {
Some(port_id)
if port_id == existing_port.id =>
{
Some(port_id) if port_id == existing_port.id => {
Some(b.id())
}
_ => None,
@@ -382,7 +384,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
.interfaces
.iter()
.find(|i| i.base.ip_address == *ip);
match interface {
Some(interface) => {
s.base
@@ -403,7 +405,6 @@ impl DiscoveryHandler<DockerScanDiscovery> {
}
_ => {}
}
});
});
});
@@ -504,13 +505,15 @@ impl DiscoveryHandler<DockerScanDiscovery> {
fn get_ports_from_container(
&self,
container_summary: ContainerSummary,
container_interfaces_and_subnets: &Vec<(Interface, Subnet)>
container_interfaces_and_subnets: &[(Interface, Subnet)],
) -> (IpPortHashMap, IpPortHashMap) {
let mut host_ip_to_host_ports: IpPortHashMap = HashMap::new();
let mut container_ips_to_container_ports: IpPortHashMap = HashMap::new();
let container_ips: Vec<IpAddr> = container_interfaces_and_subnets.iter().map(|(i,_)| i.base.ip_address).collect();
let container_ips: Vec<IpAddr> = container_interfaces_and_subnets
.iter()
.map(|(i, _)| i.base.ip_address)
.collect();
if let Some(ports) = &container_summary.ports {
ports.iter().for_each(|p| {
@@ -547,25 +550,18 @@ impl DiscoveryHandler<DockerScanDiscovery> {
}
});
return (
host_ip_to_host_ports,
container_ips_to_container_ports,
);
return (host_ip_to_host_ports, container_ips_to_container_ports);
};
(
host_ip_to_host_ports,
container_ips_to_container_ports,
)
(host_ip_to_host_ports, container_ips_to_container_ports)
}
fn get_container_interfaces(
&self,
containers: &Vec<(ContainerInspectResponse, ContainerSummary)>,
containers: &[(ContainerInspectResponse, ContainerSummary)],
subnets: &[Subnet],
host_interfaces: &mut [Interface]
host_interfaces: &mut [Interface],
) -> HashMap<String, Vec<(Interface, Subnet)>> {
// Created subnets may differ from discovered if there are existing subnets with the same CIDR, so we need to update interface subnet_id references
let host_interfaces_and_subnets = host_interfaces
.iter_mut()
@@ -576,7 +572,7 @@ impl DiscoveryHandler<DockerScanDiscovery> {
{
i.base.subnet_id = subnet.id;
return Some((i.clone(), subnet.clone()))
return Some((i.clone(), subnet.clone()));
}
None
@@ -584,65 +580,69 @@ impl DiscoveryHandler<DockerScanDiscovery> {
.collect::<Vec<(Interface, Subnet)>>();
// Collect interfaces from container
containers.iter().filter_map(|(container, _)| {
let mut interfaces_and_subnets: Vec<(Interface, Subnet)> = if let Some(network_settings) = &container.network_settings {
if let Some(networks) = &network_settings.networks {
networks
.iter()
.filter_map(|(network_name, endpoint)| {
// Parse interface if IP
if let Some(ip_string) = &endpoint.ip_address {
let ip_address = ip_string.parse::<IpAddr>().ok();
containers
.iter()
.filter_map(|(container, _)| {
let mut interfaces_and_subnets: Vec<(Interface, Subnet)> =
if let Some(network_settings) = &container.network_settings {
if let Some(networks) = &network_settings.networks {
networks
.iter()
.filter_map(|(network_name, endpoint)| {
// Parse interface if IP
if let Some(ip_string) = &endpoint.ip_address {
let ip_address = ip_string.parse::<IpAddr>().ok();
if let Some(ip_address) = ip_address {
if let Some(subnet) =
subnets.iter().find(|s| s.base.cidr.contains(&ip_address))
{
// Parse MAC address
let mac_address =
if let Some(mac_string) = &endpoint.mac_address {
mac_string.parse::<MacAddress>().ok()
} else {
None
};
if let Some(ip_address) = ip_address {
if let Some(subnet) = subnets
.iter()
.find(|s| s.base.cidr.contains(&ip_address))
{
// Parse MAC address
let mac_address = if let Some(mac_string) =
&endpoint.mac_address
{
mac_string.parse::<MacAddress>().ok()
} else {
None
};
return Some((
Interface::new(InterfaceBase {
subnet_id: subnet.id,
ip_address,
mac_address,
name: Some(network_name.to_owned()),
}),
subnet.clone(),
));
return Some((
Interface::new(InterfaceBase {
subnet_id: subnet.id,
ip_address,
mac_address,
name: Some(network_name.to_owned()),
}),
subnet.clone(),
));
}
}
}
}
}
tracing::warn!(
tracing::warn!(
"No matching subnet found for container {:?} on network '{}'",
container.name,
network_name
);
None
})
.collect::<Vec<(Interface, Subnet)>>()
} else {
Vec::new()
}
} else {
Vec::new()
};
None
})
.collect::<Vec<(Interface, Subnet)>>()
} else {
Vec::new()
}
} else {
Vec::new()
};
// Merge in host interfaces
interfaces_and_subnets.extend(host_interfaces_and_subnets.clone());
// Merge in host interfaces
interfaces_and_subnets.extend(host_interfaces_and_subnets.clone());
match &container.id {
Some(id) => return Some((id.clone(), interfaces_and_subnets)),
None => return None
}
})
.collect()
container
.id
.as_ref()
.map(|id| (id.clone(), interfaces_and_subnets))
})
.collect()
}
}
@@ -92,7 +92,8 @@ impl DiscoveryHandler<SelfReportDiscovery> {
}
});
let daemon_bound_subnet_ids: Vec<Uuid> = if binding_address == ALL_INTERFACES_IP.to_string() {
let daemon_bound_subnet_ids: Vec<Uuid> = if binding_address == ALL_INTERFACES_IP.to_string()
{
created_subnets.iter().map(|s| s.id).collect()
} else {
created_subnets
+1 -1
View File
@@ -97,7 +97,7 @@ pub trait DaemonUtils: NetworkUtils {
mac_address,
}));
}
}
}
let subnets: Vec<Subnet> = subnet_map.into_values().collect();
+1 -2
View File
@@ -72,7 +72,6 @@ impl HostService {
// Create services, handling case where created_host was upserted instead of created anew (ie during discovery), which means that host ID + interfaces/port IDs
// are different from what's mapped to the service and they need to be updated
let service_futures = services.into_iter().map(|mut service| {
service = self.service_service.transfer_service_to_new_host(
&mut service,
&host,
@@ -398,7 +397,7 @@ impl HostService {
.await?
.ok_or_else(|| anyhow::anyhow!("Host {} not found", id))?;
let lock = self.get_host_lock(&id).await;
let lock = self.get_host_lock(id).await;
let _guard = lock.lock().await;
if delete_services {
+3 -2
View File
@@ -46,8 +46,9 @@ impl Hash for Interface {
impl PartialEq for Interface {
fn eq(&self, other: &Self) -> bool {
(self.base.ip_address == other.base.ip_address && self.base.subnet_id == other.base.subnet_id) ||
(self.id == other.id)
(self.base.ip_address == other.base.ip_address
&& self.base.subnet_id == other.base.subnet_id)
|| (self.id == other.id)
}
}
+1 -5
View File
@@ -80,11 +80,7 @@ impl ServiceService {
);
for new_service_binding in &new_service_data.base.bindings {
if !existing_service
.base
.bindings
.contains(&new_service_binding)
{
if !existing_service.base.bindings.contains(new_service_binding) {
binding_updates += 1;
existing_service
.base
+10 -1
View File
@@ -34,7 +34,7 @@ impl Default for ServiceBase {
}
}
#[derive(Debug, Clone, Validate, Serialize, Deserialize, Hash)]
#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct Service {
pub id: Uuid,
pub created_at: DateTime<Utc>,
@@ -81,6 +81,15 @@ impl PartialEq for Service {
}
}
impl Hash for Service {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.id.hash(state);
self.base.service_definition.hash(state);
self.base.name.hash(state);
self.base.host_id.hash(state);
}
}
impl Display for Service {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}: {:?}", self.base.name, self.id)
@@ -108,7 +108,7 @@ impl Vendor {
pub const UBIQUITI: &'static str = "Ubiquiti Networks Inc";
}
impl<'a> Pattern<'a> {
impl Pattern<'_> {
pub fn matches(&self, params: &ServiceDiscoveryParams) -> Result<Vec<Option<Port>>, Error> {
// Return ports that matched if any
@@ -46,7 +46,7 @@ impl AnchorAnalyzer {
}
let (primary_handle, should_relocate) =
Self::calculate_optimal_handle(&handle_counts, is_infra);
Self::calculate_optimal_handle(&handle_counts, is_infra, interface_id, edges, ctx);
(primary_handle, total_edges, should_relocate)
}
@@ -61,10 +61,13 @@ impl AnchorAnalyzer {
/// Calculate the optimal handle placement based on edge distribution
/// Returns (handle, should_relocate_handles)
/// should_relocate_handles is true when vertical edges should move to the side
/// should_relocate_handles is true when edges should be relocated to avoid crossing nodes
fn calculate_optimal_handle(
handle_counts: &HashMap<EdgeHandle, usize>,
is_infra: bool,
interface_id: Uuid,
edges: &[Edge],
ctx: &TopologyContext,
) -> (Option<EdgeHandle>, bool) {
if handle_counts.is_empty() {
return (None, false);
@@ -77,17 +80,6 @@ impl AnchorAnalyzer {
EdgeHandle::Right
};
// Convert the handle counts directly to a placement decision
Self::handle_counts_to_placement(handle_counts, &forbidden_handle)
}
/// Determine placement based on handle counts
/// If node has Top handle, place it at Top so edge is short
/// Returns (handle, should_relocate_handles)
fn handle_counts_to_placement(
handle_counts: &HashMap<EdgeHandle, usize>,
forbidden: &EdgeHandle,
) -> (Option<EdgeHandle>, bool) {
// Check for opposing vertical edges (Top + Bottom)
let has_top = handle_counts.get(&EdgeHandle::Top).unwrap_or(&0) > &0;
let has_bottom = handle_counts.get(&EdgeHandle::Bottom).unwrap_or(&0) > &0;
@@ -98,20 +90,29 @@ impl AnchorAnalyzer {
let has_right = handle_counts.get(&EdgeHandle::Right).unwrap_or(&0) > &0;
let has_opposing_horizontal = has_left && has_right;
// Special case: If node has edges on both top and bottom,
// Check if edges would actually cross nodes before relocating
let would_cross = Self::would_edges_cross_nodes(
interface_id,
edges,
ctx,
has_opposing_vertical,
has_opposing_horizontal,
);
// Special case: If node has edges on both top and bottom AND they would cross nodes,
// place it on the side to avoid vertical edges traversing the subnet
if has_opposing_vertical {
let preferred_side = if *forbidden == EdgeHandle::Left {
EdgeHandle::Left
} else {
if has_opposing_vertical && would_cross {
let preferred_side = if forbidden_handle == EdgeHandle::Left {
EdgeHandle::Right
} else {
EdgeHandle::Left
};
return (Some(preferred_side), true); // true = relocate handles
}
// Special case: If node has edges on both left and right,
// Special case: If node has edges on both left and right AND they would cross nodes,
// place it on top or bottom based on which has more edges
if has_opposing_horizontal {
if has_opposing_horizontal && would_cross {
let top_count = handle_counts.get(&EdgeHandle::Top).unwrap_or(&0);
let bottom_count = handle_counts.get(&EdgeHandle::Bottom).unwrap_or(&0);
@@ -147,10 +148,10 @@ impl AnchorAnalyzer {
if bottom_count == max_count {
return (Some(EdgeHandle::Bottom), false);
}
if right_count == max_count && *forbidden != EdgeHandle::Right {
if right_count == max_count && forbidden_handle != EdgeHandle::Right {
return (Some(EdgeHandle::Right), false);
}
if left_count == max_count && *forbidden != EdgeHandle::Left {
if left_count == max_count && forbidden_handle != EdgeHandle::Left {
return (Some(EdgeHandle::Left), false);
}
@@ -174,4 +175,87 @@ impl AnchorAnalyzer {
Some(EdgeHandle::Top)
}
}
/// Check if edges would cross over any nodes in the subnet
/// Returns true if relocation would help avoid crossings
fn would_edges_cross_nodes(
interface_id: Uuid,
edges: &[Edge],
ctx: &TopologyContext,
has_opposing_vertical: bool,
has_opposing_horizontal: bool,
) -> bool {
// Get the subnet this interface belongs to
let subnet = match ctx.get_subnet_from_interface_id(interface_id) {
Some(s) => s,
None => return false,
};
// Find all interfaces in the same subnet
let subnet_interfaces: Vec<Uuid> = ctx
.hosts
.iter()
.flat_map(|h| &h.base.interfaces)
.filter(|i| i.base.subnet_id == subnet.id)
.map(|i| i.id)
.collect();
// If there are fewer than 3 nodes in the subnet, edges likely won't cross nodes
if subnet_interfaces.len() < 3 {
return false;
}
// Count inter-subnet edges that would traverse the subnet
let mut vertical_edge_count = 0;
let mut horizontal_edge_count = 0;
for edge in edges {
if edge.source != interface_id && edge.target != interface_id {
continue;
}
// Get the other end of the edge
let other_interface = if edge.source == interface_id {
edge.target
} else {
edge.source
};
// Check if the other interface is in a different subnet
let other_subnet = ctx.get_subnet_from_interface_id(other_interface);
if other_subnet.map(|s| s.id) == Some(subnet.id) {
continue; // Intra-subnet edge, skip
}
// Determine if this is a vertical or horizontal edge based on handle
let relevant_handle = if edge.source == interface_id {
&edge.source_handle
} else {
&edge.target_handle
};
match relevant_handle {
EdgeHandle::Top | EdgeHandle::Bottom => vertical_edge_count += 1,
EdgeHandle::Left | EdgeHandle::Right => horizontal_edge_count += 1,
}
}
// If we have opposing vertical edges and multiple vertical edges exist, likely to cross
if has_opposing_vertical && vertical_edge_count >= 2 {
return true;
}
// If we have opposing horizontal edges and multiple horizontal edges exist, likely to cross
if has_opposing_horizontal && horizontal_edge_count >= 2 {
return true;
}
// Additional heuristic: if subnet has many nodes (5+) and opposing edges exist,
// it's very likely edges will cross nodes
if subnet_interfaces.len() >= 5 && (has_opposing_vertical || has_opposing_horizontal) {
return true;
}
false
}
}
@@ -3,6 +3,7 @@ use std::collections::{BTreeMap, HashMap};
use uuid::Uuid;
use crate::server::{
shared::types::metadata::TypeMetadataProvider,
subnets::types::base::SubnetType,
topology::{
service::{
@@ -19,10 +20,12 @@ use crate::server::{
const SUBNET_PADDING: Uxy = Uxy { x: 75, y: 75 };
const NODE_PADDING: Uxy = Uxy { x: 50, y: 50 };
const GROUP_DOCKER_BRIDGES_BY_HOST: bool = true;
pub struct SubnetLayoutPlanner {
no_subnet_id: Uuid,
handle_relocation_map: HashMap<Uuid, EdgeHandle>,
consolidated_docker_subnets: HashMap<Uuid, Vec<Uuid>>,
}
impl Default for SubnetLayoutPlanner {
@@ -36,6 +39,7 @@ impl SubnetLayoutPlanner {
Self {
no_subnet_id: Uuid::new_v4(),
handle_relocation_map: HashMap::new(),
consolidated_docker_subnets: HashMap::new(),
}
}
@@ -47,9 +51,13 @@ impl SubnetLayoutPlanner {
&self.handle_relocation_map
}
pub fn get_consolidated_docker_subnets(&self) -> &HashMap<Uuid, Vec<Uuid>> {
&self.consolidated_docker_subnets
}
/// Main entry point: calculate subnet layouts and create all child nodes
pub fn create_subnet_child_nodes(
&mut self, // ← TO THIS
&mut self,
ctx: &TopologyContext,
all_edges: &[Edge],
) -> (HashMap<Uuid, SubnetLayout>, Vec<Node>) {
@@ -68,90 +76,132 @@ impl SubnetLayoutPlanner {
(subnet_sizes, child_nodes)
}
/// Group host interfaces by subnet, analyzing edges to determine anchor placement
/// Group host interfaces by subnet, with optional special handling for DockerBridge
/// If GROUP_DOCKER_BRIDGES_BY_HOST is true, all DockerBridge interfaces for a given host
/// are consolidated into one subnet
fn group_children_by_subnet(
&self,
&mut self,
ctx: &TopologyContext,
all_edges: &[Edge],
) -> HashMap<Uuid, Vec<SubnetChild>> {
ctx.hosts
.iter()
.flat_map(|host| {
if !host.base.interfaces.is_empty() {
host.base
.interfaces
.iter()
.filter_map(|interface| {
let subnet_type = ctx
.get_subnet_by_id(interface.base.subnet_id)
.map(|s| s.base.subnet_type.clone())
.unwrap_or_default();
let mut children_by_subnet: HashMap<Uuid, Vec<SubnetChild>> = HashMap::new();
let interface_bound_services: Vec<Uuid> = ctx
.services
.iter()
.filter_map(|s| {
let has_relevant_binding =
s.base.bindings.iter().any(|b| match b.interface_id() {
Some(binding_interface_id)
if binding_interface_id == interface.id =>
{
true
}
None => !subnet_type.is_internal(),
_ => false,
});
// Track DockerBridge interfaces by host (only used if grouping is enabled)
// Map: host_id -> (primary_subnet_id, Vec<(subnet_id, SubnetChild)>)
let mut docker_by_host: HashMap<Uuid, (Uuid, Vec<(Uuid, SubnetChild)>)> = HashMap::new();
if has_relevant_binding {
Some(s.id)
} else {
None
}
})
.collect();
for host in ctx.hosts {
if host.base.interfaces.is_empty() {
// No interfaces - add to no_subnet
children_by_subnet
.entry(self.no_subnet_id)
.or_default()
.push(SubnetChild {
id: host.id,
host_id: host.id,
interface_id: None,
size: SubnetChildNodeSize::Small,
primary_handle: None,
anchor_count: 0,
should_relocate_handles: false,
});
continue;
}
let (primary_handle, anchor_count, should_relocate) =
AnchorAnalyzer::analyze_child_anchors(interface.id, all_edges, ctx);
for interface in &host.base.interfaces {
let subnet = ctx.get_subnet_by_id(interface.base.subnet_id);
let subnet_type = subnet
.map(|s| s.base.subnet_type.clone())
.unwrap_or_default();
if !interface_bound_services.is_empty() {
Some((
interface.base.subnet_id,
SubnetChild {
id: interface.id,
host_id: host.id,
interface_id: Some(interface.id),
size: SubnetChildNodeSize::from_service_count(
interface_bound_services.len(),
),
primary_handle,
anchor_count,
should_relocate_handles: should_relocate,
},
))
} else {
None
}
})
.collect::<Vec<_>>()
} else {
vec![(
self.no_subnet_id,
SubnetChild {
id: host.id,
host_id: host.id,
interface_id: None,
size: SubnetChildNodeSize::Small,
primary_handle: None,
anchor_count: 0,
should_relocate_handles: false,
},
)]
let interface_bound_services: Vec<Uuid> = ctx
.services
.iter()
.filter_map(|s| {
let has_relevant_binding =
s.base.bindings.iter().any(|b| match b.interface_id() {
Some(binding_interface_id)
if binding_interface_id == interface.id =>
{
true
}
None => !subnet_type.is_internal(),
_ => false,
});
if has_relevant_binding {
Some(s.id)
} else {
None
}
})
.collect();
if interface_bound_services.is_empty() {
continue;
}
})
.fold(HashMap::new(), |mut acc, (subnet_id, child)| {
acc.entry(subnet_id).or_default().push(child);
acc
})
let (primary_handle, anchor_count, should_relocate) =
AnchorAnalyzer::analyze_child_anchors(interface.id, all_edges, ctx);
let child = SubnetChild {
id: interface.id,
host_id: host.id,
interface_id: Some(interface.id),
size: SubnetChildNodeSize::from_service_count(interface_bound_services.len()),
primary_handle,
anchor_count,
should_relocate_handles: should_relocate,
};
// Special handling for DockerBridge (only if grouping is enabled)
if GROUP_DOCKER_BRIDGES_BY_HOST && matches!(subnet_type, SubnetType::DockerBridge) {
let entry = docker_by_host.entry(host.id).or_insert_with(|| {
// Use the first DockerBridge subnet we encounter for this host
(interface.base.subnet_id, Vec::new())
});
entry.1.push((interface.base.subnet_id, child));
} else {
children_by_subnet
.entry(interface.base.subnet_id)
.or_default()
.push(child);
}
}
}
// Consolidate all DockerBridge children into their primary subnet (only if grouping is enabled)
if GROUP_DOCKER_BRIDGES_BY_HOST {
for (_host_id, (primary_subnet_id, docker_children_with_subnets)) in docker_by_host {
if !docker_children_with_subnets.is_empty() {
// Track which subnets were consolidated
let mut consolidated_subnet_ids: Vec<Uuid> = docker_children_with_subnets
.iter()
.map(|(subnet_id, _)| *subnet_id)
.collect();
// Remove duplicates and sort for consistency
consolidated_subnet_ids.sort();
consolidated_subnet_ids.dedup();
// Store the consolidation mapping
self.consolidated_docker_subnets
.insert(primary_subnet_id, consolidated_subnet_ids);
// Add all children to the primary subnet
children_by_subnet
.entry(primary_subnet_id)
.or_default()
.extend(
docker_children_with_subnets
.into_iter()
.map(|(_, child)| child),
);
}
}
}
children_by_subnet
}
/// Calculate the size and layout of a subnet, creating child nodes
@@ -308,6 +358,32 @@ impl SubnetLayoutPlanner {
node_type: NodeType::SubnetNode {
infra_width: layout.infra_width,
subnet_type: SubnetType::None,
label_override: None,
},
position: position.clone(),
size: layout.size.clone(),
});
}
if let Some(consolidated_subnet_ids) =
self.consolidated_docker_subnets.get(subnet_id)
{
let label_override = SubnetType::DockerBridge.name().to_owned()
+ ": ("
+ &ctx
.subnets
.iter()
.filter(|s| consolidated_subnet_ids.contains(&s.id))
.map(|s| s.base.cidr.to_string())
.join(", ")
+ ")";
return Some(Node {
id: *subnet_id,
node_type: NodeType::SubnetNode {
infra_width: layout.infra_width,
subnet_type: SubnetType::DockerBridge,
label_override: Some(label_override),
},
position: position.clone(),
size: layout.size.clone(),
@@ -321,6 +397,7 @@ impl SubnetLayoutPlanner {
node_type: NodeType::SubnetNode {
infra_width: layout.infra_width,
subnet_type: subnet.base.subnet_type.clone(),
label_override: None,
},
position: position.clone(),
size: layout.size.clone(),
@@ -27,6 +27,7 @@ pub enum NodeType {
SubnetNode {
infra_width: usize,
subnet_type: SubnetType,
label_override: Option<String>,
},
HostNode {
subnet_id: Uuid,