Adds extended logging, DNS name lookup, primary IP assignment

ESXi interfaces are also represented properly now.
IPv6 and VM primary IP assignment are still in progress.
Ricardo Bartels
2020-11-03 17:50:26 +01:00
parent 546c0ab743
commit e3ad3dbd56
9 changed files with 766 additions and 166 deletions

View File

@@ -7,18 +7,28 @@ from module.common.misc import do_error_exit
# define DEBUG2 and DEBUG3 log levels
DEBUG2 = 6 # extended messages
DEBUG3 = 3 # extra extended messages
# define valid log levels
valid_log_levels = [ "DEBUG2", "DEBUG", "INFO", "WARNING", "ERROR"]
valid_log_levels = [ "DEBUG3", "DEBUG2", "DEBUG", "INFO", "WARNING", "ERROR"]
# add log level DEBUG2
logging.addLevelName(DEBUG2, "DEBUG2")
# add log level DEBUG3
logging.addLevelName(DEBUG3, "DEBUG3")
def debug2(self, message, *args, **kws):
if self.isEnabledFor(DEBUG2):
# Yes, logger takes its '*args' as 'args'.
self._log(DEBUG2, message, args, **kws)
logging.Logger.debug2 = debug2
def debug3(self, message, *args, **kws):
if self.isEnabledFor(DEBUG3):
# Yes, logger takes its '*args' as 'args'.
self._log(DEBUG3, message, args, **kws)
logging.Logger.debug3 = debug3
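# Illustrative usage (not part of this diff): once the two custom levels are
# registered, the patched-on methods behave like the built-in ones:
#
#   log = get_logger()
#   log.debug2("extended message")        # emitted at numeric level 6
#   log.debug3("extra extended message")  # emitted at numeric level 3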
def get_logger():
@@ -36,6 +46,8 @@ def setup_logging(log_level=None, log_file=None):
"""
log_format = '%(asctime)s - %(levelname)s: %(message)s'
if log_level is None or log_level == "":
do_error_exit("ERROR: log level undefined or empty. Check config please.")
@@ -46,10 +58,13 @@ def setup_logging(log_level=None, log_file=None):
# check the provided log level
if log_level == "DEBUG2":
numeric_log_level = DEBUG2
elif log_level == "DEBUG3":
numeric_log_level = DEBUG3
logging.basicConfig(level=logging.DEBUG, format=log_format)
else:
numeric_log_level = getattr(logging, log_level.upper(), None)
log_format = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')
log_format = logging.Formatter(log_format)
# create logger instance
logger = get_logger()
@@ -57,9 +72,10 @@ def setup_logging(log_level=None, log_file=None):
logger.setLevel(numeric_log_level)
# setup stream handler
log_stream = logging.StreamHandler()
log_stream.setFormatter(log_format)
logger.addHandler(log_stream)
if log_level != "DEBUG3":
log_stream = logging.StreamHandler()
log_stream.setFormatter(log_format)
logger.addHandler(log_stream)
# setup log file handler
if log_file is not None:

View File

@@ -1,7 +1,11 @@
from ipaddress import ip_interface
import aiodns
import logging
import asyncio
from module.common.logging import get_logger
log = get_logger()
def format_ip(ip_addr):
@@ -23,4 +27,52 @@ def normalize_mac_address(mac_address=None):
return mac_address
def perform_ptr_lookups(ips, dns_servers=None):
loop = asyncio.get_event_loop()
resolver = aiodns.DNSResolver(loop=loop)
if dns_servers is not None:
if isinstance(dns_servers, list):
log.debug2("using provided DNS servers to perform lookup: %s" % ", ".join(dns_servers))
resolver.nameservers = dns_servers
else:
log.error(f"List of provided DNS servers invalid: {dns_servers}")
queue = asyncio.gather(*(reverse_lookup(resolver, ip) for ip in ips))
results = loop.run_until_complete(queue)
# return dictionary instead of a list of dictionaries
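# (illustrative: [{"10.0.0.1": "host-a"}, {"10.0.0.2": None}]
#  flattens to {"10.0.0.1": "host-a", "10.0.0.2": None})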
return {k:v for x in results for k,v in x.items()}
async def reverse_lookup(resolver, ip):
valid_hostname_characters = "abcdefghijklmnopqrstuvwxyz0123456789-."
resolved_name = None
response = None
log.debug2(f"Requesting PTR record: {ip}")
try:
response = await resolver.gethostbyaddr(ip)
except aiodns.error.DNSError as err:
log.debug("Unable to find a PTR record for %s: %s", ip, err.args[1])
if response is not None and response.name is not None:
# validate record to check if this is a valid host name
if all([bool(c.lower() in valid_hostname_characters) for c in response.name]):
resolved_name = response.name.lower()
log.debug(f"PTR record for {ip}: {resolved_name}")
else:
log.debug(f"PTR record contains invalid characters: {response.name}")
return {ip: resolved_name}
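# Illustrative call (hypothetical addresses and names): all lookups run
# concurrently and come back as one flat dict:
#
#   perform_ptr_lookups(["10.0.0.1", "10.0.0.2"], dns_servers=["192.168.1.11"])
#   # -> {"10.0.0.1": "host-a.example.com", "10.0.0.2": None}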
# EOF

View File

@@ -1,9 +1,8 @@
import requests
import json
import logging
from datetime import datetime
import requests
from http.client import HTTPConnection
import pickle
from packaging import version
@@ -13,14 +12,12 @@ import pprint
from module import plural
from module.common.misc import grab, do_error_exit, dump
from module.netbox.object_classes import *
from module.common.logging import get_logger
from module.common.logging import get_logger, DEBUG3
log = get_logger()
# ToDo:
# * DNS lookup
# * primary ip
# * get vrf for IP
class NetBoxHandler:
"""
@@ -54,9 +51,9 @@ class NetBoxHandler:
instance_virtual_interfaces = {}
# testing option
use_netbox_caching_for_testing = False
use_netbox_caching_for_testing = True
def __init__(self, cli_args=None, settings=None, inventory=None):
def __init__(self, settings=None, inventory=None):
self.settings = settings
self.inventory = inventory
@@ -66,6 +63,12 @@ class NetBoxHandler:
self.parse_config_settings(settings)
# flood the console
if log.level == DEBUG3:
log.warning("Log level is set to DEBUG3, Request logs will only be printed to console")
HTTPConnection.debuglevel = 1
proto = "https"
if bool(self.disable_tls) is True:
proto = "http"
@@ -97,7 +100,8 @@ class NetBoxHandler:
validation_failed = True
if validation_failed is True:
do_error_exit("Config validation failed. Exit!")
log.error("Config validation failed. Exit!")
exit(1)
for setting in self.settings.keys():
setattr(self, setting, config_settings.get(setting))
@@ -109,7 +113,10 @@ class NetBoxHandler:
:return: `True` if session created else `False`
:rtype: bool
"""
header = {"Authorization": f"Token {self.api_token}"}
header = {
"Authorization": f"Token {self.api_token}",
"User-Agent": "netbox-sync/0.0.1"
}
session = requests.Session()
session.headers.update(header)
@@ -137,7 +144,7 @@ class NetBoxHandler:
result = str(response.headers["API-Version"])
log.info(f"Successfully connected to NetBox '{self.host_fqdn}'")
log.debug(f"Detected NetBox API v{result}.")
log.debug(f"Detected NetBox API version: {result}")
return result
@@ -174,7 +181,6 @@ class NetBoxHandler:
if response.status_code == 200:
# retrieve paginated results
#""" pagination disabled
if this_request.method == "GET" and result is not None:
while response.json().get("next") is not None:
this_request.url = response.json().get("next")
@@ -182,7 +188,7 @@ class NetBoxHandler:
response = self.single_request(this_request)
result["results"].extend(response.json().get("results"))
#"""
elif response.status_code in [201, 204]:
action = "created" if response.status_code == 201 else "deleted"
@@ -211,7 +217,10 @@ class NetBoxHandler:
def single_request(self, this_request):
req = None
response = None
if log.level == DEBUG3:
pprint.pprint(vars(this_request))
for _ in range(self.max_retry_attempts):
@@ -223,7 +232,7 @@ class NetBoxHandler:
log.debug2(log_message)
try:
req = self.session.send(this_request,
response = self.session.send(this_request,
timeout=self.timeout, verify=self.validate_tls_certs)
except (ConnectionError, requests.exceptions.ConnectionError,
@@ -235,9 +244,17 @@ class NetBoxHandler:
else:
do_error_exit(f"Giving up after {self.max_retry_attempts} retries.")
log.debug2("Received HTTP Status %s.", req.status_code)
log.debug2("Received HTTP Status %s.", response.status_code)
return req
# print debugging information
if log.level == DEBUG3:
log.debug("Response Body:")
try:
pprint.pprint(response.json())
except json.decoder.JSONDecodeError as e:
log.error(e)
return response
def query_current_data(self, netbox_objects_to_query=None):
@@ -281,25 +298,29 @@ class NetBoxHandler:
return
def inizialize_basic_data(self):
"""
Adds the two basic tags used to keep track of objects and to see which
objects no longer exist in the source, so they can be removed automatically
"""
log.debug("Checking/Adding NetBox Sync dependencies")
prune_text = f"Pruning is enabled and Objects will be automatically removed after {self.prune_delay_in_days} days"
if self.prune_enabled is False:
prune_text = f"Objects would be automatically removed after {self.prune_delay_in_days} days but pruning is currently disabled."
self.inventory.add_update_object(NBTags, data = {
"name": self.orphaned_tag,
"color": "607d8b",
"description": "The source which has previously "
"provided the object no longer "
"states it exists.{}".format(
" An object with the 'Orphaned' tag will "
"remain in this state until it ages out "
"and is automatically removed."
) if bool(self.settings.get("prune_enabled", False)) else ""
"description": "A source which has previously provided this object no "
f"longer states it exists. {prune_text}"
})
self.inventory.add_update_object(NBTags, data = {
"name": self.primary_tag,
"description": "Created and used by NetBox Sync Script "
"to keep track of created items."
"description": "Created and used by NetBox Sync Script to keep track of created items. "
"DO NOT change this tag, otherwise syncing can't keep track of deleted objects."
})
def update_object(self, nb_object_sub_class):
@@ -359,6 +380,7 @@ class NetBoxHandler:
if returned_object_data is not None:
object.update(data = returned_object_data, read_from_netbox=True)
object.resolve_relations()
elif issued_request is True:
log.error(f"Request Failed for {nb_object_sub_class.name}. Used data: {data_to_patch}")
@@ -395,24 +417,106 @@ class NetBoxHandler:
if self.orphaned_tag not in object.get_tags():
continue
log.debug2(f"Object '{object.get_display_name()}' is Orphaned")
date_last_update = grab(object, "data.last_updated")
if date_last_update is None:
continue
# only need the date including seconds
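# (e.g. "2020-11-03T17:50:26.000000+01:00" becomes "2020-11-03T17:50:26",
#  matching the "%Y-%m-%dT%H:%M:%S" strptime format used below)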
date_last_update = date_last_update[0:19]
log.debug2(f"Object '{object.get_display_name()}' is Orphaned. Last time changed: {date_last_update}")
# check prune delay.
last_updated = None
try:
last_updated = datetime.strptime(grab(object, "data.last_updated"),"%Y-%m-%dT%H:%M:%S")
last_updated = datetime.strptime(date_last_update,"%Y-%m-%dT%H:%M:%S")
except Exception:
continue
days_since_last_update = (now - last_updated).days
days_since_last_update = (today - last_updated).days
# it seems we need to delete this object
if last_updated is not None and days_since_last_update >= self.prune_delay_in_days:
log.info(f"{nb_object_sub_class.name.capitalize()} '{object.get_display_name()}' is orphaned for {days_since_last_update} and will be deleted.")
log.info(f"{nb_object_sub_class.name.capitalize()} '{object.get_display_name()}' is orphaned for {days_since_last_update} days and will be deleted.")
# Todo:
# * Needs testing
#self.request(nb_object_sub_class, req_type="DELETE", nb_id=object.nb_id)
return
def just_delete_all_the_things(self):
"""
Using a brute force approach. Try to delete everything 10 times.
This way we don't need to care about dependencies.
"""
log.info("Querying necessary objects from Netbox. This might take a while.")
self.query_current_data(NetBoxObject.__subclasses__())
log.info("Finished querying necessary objects from Netbox")
self.inventory.resolve_relations()
log.warning(f"Starting purge now. All objects with the tag '{self.primary_tag}' will be deleted!!!")
for iteration in range(10):
log.debug("Iteration %d trying to deleted all the objects." % (iteration + 1))
found_objects_to_delete = False
for nb_object_sub_class in reversed(NetBoxObject.__subclasses__()):
# tags need to be deleted at the end
if nb_object_sub_class == NBTags:
continue
# object has no tags so we can't be sure it was created with this tool
if NBTags not in nb_object_sub_class.data_model.values():
continue
for object in self.inventory.get_all_items(nb_object_sub_class):
# already deleted
if getattr(object, "deleted", False) is True:
continue
found_objects_to_delete = True
if self.primary_tag in object.get_tags():
log.info(f"{nb_object_sub_class.name} '{object.get_display_name()}' will be deleted now")
"""
# Todo:
# * Needs testing
result = self.request(nb_object_sub_class, req_type="DELETE", nb_id=object.nb_id)
if result is not None:
object.deleted = True
"""
if found_objects_to_delete is False:
# get tag objects
primary_tag = self.inventory.add_update_object(NBTags, data = {"name": self.primary_tag})
orphaned_tag = self.inventory.get_by_data(NBTags, data = {"name": self.orphaned_tag})
# try to delete them
log.info(f"{NBTags.name} '{primary_tag.get_display_name()}' will be deleted now")
#self.request(NBTags, req_type="DELETE", nb_id=primary_tag.nb_id)
log.info(f"{NBTags.name} '{orpahned_tag.get_display_name()}' will be deleted now")
#self.request(NBTags, req_type="DELETE", nb_id=orpahned_tag.nb_id)
log.info("Successfully deleted all objects which were sync by this program.")
break
else:
log.warning("Unfortunately we were not able to delete all objects. Sorry")
return
# EOF

View File

@@ -8,6 +8,7 @@ from ipaddress import ip_address, ip_network, ip_interface, IPv6Network, IPv4Net
from module.netbox.object_classes import *
from module.common.logging import get_logger
from module.common.support import perform_ptr_lookups
log = get_logger()
@@ -171,7 +172,7 @@ class NetBoxInventory:
log.debug("Start resolving relations")
for object_type in NetBoxObject.__subclasses__():
for object in self.base_structure.get(object_type.name, list()):
for object in self.get_all_items(object_type):
object.resolve_relations()
@@ -186,7 +187,7 @@ class NetBoxInventory:
return self.base_structure.get(object_type.name, list())
def tag_all_the_things(self, sources, netbox_handler):
def tag_all_the_things(self, netbox_handler):
# ToDo:
# * DONE: add main tag to all objects retrieved from a source
@@ -195,14 +196,10 @@ class NetBoxInventory:
# * DONE: objects tagged by a source but not present in source anymore (add)
# * DONE: objects tagged as orphaned but are present again (remove)
source_tags = [x.source_tag for x in sources]
for object_type in NetBoxObject.__subclasses__():
if self.base_structure[object_type.name] is None:
continue
for object in self.base_structure[object_type.name]:
for object in self.get_all_items(object_type):
# if object was found in source
if object.source is not None:
@@ -212,12 +209,11 @@ class NetBoxInventory:
if netbox_handler.orphaned_tag in object.get_tags():
object.remove_tags(netbox_handler.orphaned_tag)
# if object was tagged by a source in previous runs but is not present
# if object was tagged by this program in previous runs but is not present
# anymore then add the orphaned tag
else:
for source_tag in source_tags:
if source_tag in object.get_tags():
object.add_tags(netbox_handler.orphaned_tag)
if netbox_handler.primary_tag in object.get_tags():
object.add_tags(netbox_handler.orphaned_tag)
def update_all_ip_addresses(self):
@@ -339,6 +335,7 @@ class NetBoxInventory:
data["vrf"] = vrf
# only overwrite tenant if not already defined
# ToDo: document behavior
if tenant is not None and grab(ip, "data.tenant.id") is None and str(tenant) != str(grab(ip, "data.tenant.id")):
data["tenant"] = tenant
@@ -347,10 +344,114 @@ class NetBoxInventory:
break
# perform DNS name lookup
ip_lookup_dict = dict()
log.debug("Starting to look up PTR records for IP addresses")
for ip in all_addresses:
if ip.source is None:
continue
ip_dns_name_lookup = grab(ip, "source.dns_name_lookup", fallback=False)
if ip_lookup_dict.get(ip.source) is None:
ip_lookup_dict[ip.source] = {
"ips": list(),
"servers": grab(ip, "source.custom_dns_servers")
}
if ip_dns_name_lookup is True:
ip_lookup_dict[ip.source].get("ips").append(grab(ip, "data.address", fallback="").split("/")[0])
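# resulting structure (illustrative values):
#   { <source>: {"ips": ["10.0.0.1", ...], "servers": ["192.168.1.11", ...]} }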
for source, data in ip_lookup_dict.items():
if len(data.get("ips")) == 0:
continue
# get DNS names for IP addresses:
records = perform_ptr_lookups(data.get("ips"), data.get("servers"))
for ip in all_addresses:
if ip.source != source:
continue
ip_a = grab(ip, "data.address", fallback="").split("/")[0]
dns_name = records.get(ip_a)
if dns_name is not None:
ip.update(data = {"dns_name": dns_name})
def set_primary_ips(self):
pass
for nb_object_class in [NBDevices, NBVMs]:
for object in self.get_all_items(nb_object_class):
if object.source is None:
continue
if nb_object_class == NBDevices:
log.debug2(f"Trying to find ESXi Management Interface for '{object.get_display_name()}'")
management_interface = None
for interface in self.get_all_items(NBInterfaces):
if grab(interface, "data.device") == object:
if "management" in grab(interface, "data.description", fallback="").lower():
management_interface = interface
break
if management_interface is None:
continue
log.debug2(f"Found Management interface '{management_interface.get_display_name()}'")
ipv4_assigned = False
ipv6_assigned = False
for ip in self.get_all_items(NBIPAddresses):
#if grab(ip, "data.address") == "10.100.5.28/24":
# print(ip)
# print(management_interface)
if grab(ip, "data.assigned_object_id") == management_interface:
log.debug2(f"Found Management IP '{ip.get_display_name()}'")
ip_version = None
try:
ip_version = ip_address(grab(ip, "data.address", fallback="").split("/")[0]).version
except ValueError:
pass
if ip_version == 4 and ipv4_assigned == False:
log.debug2(f"Assigning IP '{ip.get_display_name()}' as primary IP v4 address to '{object.get_display_name()}'")
if grab(object, "data.primary_ip4.address") != grab(ip, "data.address"):
object.update(data = {"primary_ip4": ip.nb_id})
ipv4_assigned = True
else:
log.debug2("primary IP v4 did not change")
if ip_version == 6 and ipv6_assigned == False:
log.debug2(f"Assigning IP '{ip.get_display_name()}' as primary IP v6 address to '{object.get_display_name()}'")
if grab(object, "data.primary_ip6.address") != grab(ip, "data.address"):
object.update(data = {"primary_ip6": ip.nb_id})
ipv6_assigned = True
else:
log.debug2("primary IP v6 did not change")
#if nb_object_class == NBDevices:
def to_dict(self):

View File

@@ -575,6 +575,8 @@ class NBDevices(NetBoxObject):
"status": [ "offline", "active", "planned", "staged", "failed", "inventory", "decommissioning" ],
"cluster": NBClusters,
"asset_tag": 50,
"primary_ip4": int,
"primary_ip6": int,
"tags": NBTags
}
@@ -594,6 +596,8 @@ class NBVMs(NetBoxObject):
"memory": int,
"disk": int,
"comments": str,
"primary_ip4": int,
"primary_ip6": int,
"tags": NBTags
}
@@ -603,6 +607,7 @@ class NBVMInterfaces(NetBoxObject):
primary_key = "name"
secondary_key = "virtual_machine"
enforce_secondary_key = True
is_primary = False
data_model = {
"name": 64,
"virtual_machine": NBVMs,
@@ -618,6 +623,7 @@ class NBInterfaces(NetBoxObject):
primary_key = "name"
secondary_key = "device"
enforce_secondary_key = True
is_primary = False
data_model = {
"name": 64,
"device": NBDevices,
@@ -642,9 +648,10 @@ class NBIPAddresses(NetBoxObject):
"assigned_object_type": ["dcim.interface", "virtualization.vminterface"],
"assigned_object_id": [ NBInterfaces, NBVMInterfaces ],
"description": 200,
"dns_name": 255,
"tags": NBTags,
"vrf": int,
"tenant": int
"tenant": int,
"vrf": int
}
# add relation between two attributes
data_model_relation = {
@@ -695,6 +702,13 @@ class NBIPAddresses(NetBoxObject):
if "assigned_object_id" in self.updated_items:
self.updated_items.append("assigned_object_type")
def get_dependencies(self):
"""
This is hard-coded here. Update it if the data_model attribute changes!
"""
return [ NBInterfaces, NBVMInterfaces, NBTags ]
class NBPrefixes(NetBoxObject):
name = "IP prefix"

View File

@@ -8,26 +8,40 @@ valid_sources = [ VMWareHandler ]
###############
from module.common.configuration import get_config
from module.common.logging import get_logger
from module.netbox.inventory import NetBoxInventory
# ToDo:
# * add post initialization validation
def validate_source(source_class=None):
def validate_source(source_class_object=None, state="pre"):
necessary_attributes = [
"dependend_netbox_objects",
"init_successfull",
"inventory",
"name",
"settings",
"source_tag",
"source_type",
]
necessary_attributes = {
"dependend_netbox_objects": list,
"init_successfull": bool,
"inventory": NetBoxInventory,
"name": str,
"settings": dict,
"source_tag": str,
"source_type": str,
}
for attr in necessary_attributes:
for attr in necessary_attributes.keys():
# raise exception if attribute not present
getattr(source_class, attr)
getattr(source_class_object, attr)
if state == "pre":
return
# post initialization validation
for attr, value_type in necessary_attributes.items():
value = getattr(source_class_object, attr)
if not isinstance(value, value_type):
raise ValueError(f"Value for attribute '{attr}' needs to be {value_type}")
if value_type in [list,str] and len(value) == 0:
raise ValueError(f"Value for attribute '{attr}' can't be empty.")
def instanciate_sources(config_handler=None, inventory=None):
@@ -82,6 +96,8 @@ def instanciate_sources(config_handler=None, inventory=None):
inventory=inventory,
settings=source_config)
validate_source(source_handler, "post")
# add to list of source handlers
if source_handler.init_successfull is True:
sources.append(source_handler)

View File

@@ -13,7 +13,7 @@ from module.netbox.object_classes import *
from module.common.misc import grab, do_error_exit, dump, get_string_or_none
from module.common.support import normalize_mac_address, format_ip
from module import plural
from module.common.logging import get_logger
from module.common.logging import get_logger, DEBUG3
log = get_logger()
@@ -51,7 +51,9 @@ class VMWareHandler():
"netbox_vm_device_role": "Server",
"permitted_subnets": None,
"collect_hardware_asset_tag": True,
"cluster_site_relation": None
"cluster_site_relation": None,
"dns_name_lookup": False,
"custom_dns_servers": None
}
init_successfull = False
@@ -66,11 +68,14 @@ class VMWareHandler():
site_name = None
networks = dict()
dvs_mtu = dict()
standalone_hosts = list()
processed_host_names = list()
processed_vm_names = list()
processed_vm_uuid = list()
parsing_vms_the_first_time = True
def __init__(self, name=None, settings=None, inventory=None):
@@ -137,15 +142,30 @@ class VMWareHandler():
site_name = relation.split("=")[1].strip()
if len(cluster_name) == 0 or len(site_name) == 0:
log.error("Config option 'cluster_site_relation' malformed got '{cluster_name}' for cluster_name and '{site_name}' for site name.")
log.error(f"Config option 'cluster_site_relation' malformed got '{cluster_name}' for cluster_name and '{site_name}' for site name.")
validation_failed = True
relation_data[cluster_name] = site_name
config_settings["cluster_site_relation"] = relation_data
if config_settings.get("dns_name_lookup") is True and config_settings.get("custom_dns_servers") is not None:
custom_dns_servers = [x.strip() for x in config_settings.get("custom_dns_servers").split(",") if x.strip() != ""]
tested_custom_dns_servers = list()
for custom_dns_server in custom_dns_servers:
try:
tested_custom_dns_servers.append(str(ip_address(custom_dns_server)))
except ValueError:
log.error(f"Config option 'custom_dns_servers' value '{custom_dns_server}' does not appear to be an IP address.")
validation_failed = True
config_settings["custom_dns_servers"] = tested_custom_dns_servers
if validation_failed is True:
do_error_exit("Config validation failed. Exit!")
log.error("Config validation failed. Exit!")
exit(1)
for setting in self.settings.keys():
setattr(self, setting, config_settings.get(setting))
@@ -186,7 +206,21 @@ class VMWareHandler():
log.info(f"Query data from vCenter: '{self.host_fqdn}'")
# Mapping of object type keywords to view types and handlers
"""
Mapping of object type keywords to view types and handlers.

We iterate over all VMs twice. To handle VMs with the same name
in a cluster, we first iterate over all VMs, look only at the
active ones, and sync these first. Then we iterate a second time
to catch the rest.

This has been implemented to support migration scenarios where
the same machines are recreated with a different setup, such as
a newer version. This way NetBox is updated primarily with the
data of the currently active VMs.
"""
object_mapping = {
"datacenter": {
"view_type": vim.Datacenter,
@@ -207,6 +241,10 @@ class VMWareHandler():
"virtual machine": {
"view_type": vim.VirtualMachine,
"view_handler": self.add_virtual_machine
},
"offline virtual machine": {
"view_type": vim.VirtualMachine,
"view_handler": self.add_virtual_machine
}
}
@@ -234,14 +272,39 @@ class VMWareHandler():
log.error(f"Creating vCenter view for '{view_name}s' failed!")
continue
log.debug("vCenter returned '%d' %s%s" % (len(view_objects), view_name, plural(len(view_objects))))
if view_name != "offline virtual machine":
log.debug("vCenter returned '%d' %s%s" % (len(view_objects), view_name, plural(len(view_objects))))
else:
self.parsing_vms_the_first_time = False
log.debug("Iterating over all virtual machines a second time ")
for obj in view_objects:
if log.level == DEBUG3:
try:
dump(obj)
except Exception as e:
log.error(e)
view_details.get("view_handler")(obj)
container_view.Destroy()
@staticmethod
def passes_filter(name, include_filter, exclude_filter):
# first includes
if include_filter is not None and not include_filter.match(name):
log.debug(f"Cluster '{name}' did not match include filter '{include_filter.pattern}'. Skipping")
return False
# second excludes
if exclude_filter is not None and exclude_filter.match(name):
log.debug(f"Cluster '{name}' matched exclude filter '{exclude_filter.pattern}'. Skipping")
return False
return True
def add_datacenter(self, obj):
name = get_string_or_none(grab(obj, "name"))
@@ -263,17 +326,8 @@ class VMWareHandler():
log.debug2(f"Parsing vCenter cluster: {name}")
# first includes
if self.cluster_include_filter is not None:
if not self.cluster_include_filter.match(name):
log.debug(f"Cluster '{name}' did not match include filter '{self.cluster_include_filter.pattern}'. Skipping")
return
# second excludes
if self.cluster_exclude_filter is not None:
if self.cluster_exclude_filter.match(name):
log.debug(f"Cluster '{name}' matched exclude filter '{self.cluster_exclude_filter.pattern}'. Skipping")
return
if self.passes_filter(name, self.cluster_include_filter, self.cluster_exclude_filter) is False:
return
# set default site name
site_name = self.site_name
@@ -296,13 +350,17 @@ class VMWareHandler():
key = get_string_or_none(grab(obj, "key"))
name = get_string_or_none(grab(obj, "name"))
vlan_id = grab(obj, "config.defaultPortConfig.vlan.vlanId")
if key is None or name is None:
return
log.debug2(f"Parsing vCenter network: {name}")
self.networks[key] = name
self.networks[key] = {
"name": name,
"vlan_id": vlan_id
}
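# illustrative entry (hypothetical portgroup key):
#   self.networks["dvportgroup-42"] = {"name": "VM Network", "vlan_id": 100}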
def add_host(self, obj):
@@ -321,22 +379,13 @@ class VMWareHandler():
self.processed_host_names.append(name)
# filter hosts
# first includes
if self.host_include_filter is not None:
if not self.host_include_filter.match(name):
log.debug(f"Host '{name}' did not match include filter '{self.host_include_filter.pattern}'. Skipping")
return
# second excludes
if self.host_exclude_filter is not None:
if self.host_exclude_filter.match(name):
log.debug(f"Host '{name}' matched exclude filter '{self.host_exclude_filter.pattern}'. Skipping")
return
if self.passes_filter(name, self.host_include_filter, self.host_exclude_filter) is False:
return
manufacturer = get_string_or_none(grab(obj, "summary.hardware.vendor"))
model = get_string_or_none(grab(obj, "summary.hardware.model"))
product_name = get_string_or_none(grab(obj, "config.product.name"))
product_version = get_string_or_none(grab(obj, "config.product.version"))
product_name = get_string_or_none(grab(obj, "summary.config.product.name"))
product_version = get_string_or_none(grab(obj, "summary.config.product.version"))
platform = f"{product_name} {product_version}"
@@ -414,9 +463,63 @@ class VMWareHandler():
host_object = self.inventory.add_update_object(NBDevices, data=data, source=self)
host_vswitches = dict()
for vswitch in grab(obj, "config.network.vswitch", fallback=list()):
vswitch_name = grab(vswitch, "name")
vswitch_pnics = [str(x) for x in grab(vswitch, "pnic", fallback=list())]
if vswitch_name is not None:
log.debug2(f"Found vSwitch {vswitch_name}")
host_vswitches[vswitch_name] = {
"mtu": grab(vswitch, "mtu"),
"pnics": vswitch_pnics
}
host_pswitches = dict()
for pswitch in grab(obj, "config.network.proxySwitch", fallback=list()):
pswitch_uuid = grab(pswitch, "dvsUuid")
pswitch_name = grab(pswitch, "dvsName")
pswitch_pnics = [str(x) for x in grab(pswitch, "pnic", fallback=list())]
if pswitch_uuid is not None:
log.debug2(f"Found proxySwitch {pswitch_name}")
host_pswitches[pswitch_uuid] = {
"name": pswitch_name,
"mtu": grab(pswitch, "mtu"),
"pnics": pswitch_pnics
}
self.dvs_mtu[pswitch_uuid] = grab(pswitch, "mtu")
host_portgroups = dict()
for pgroup in grab(obj, "config.network.portgroup", fallback=list()):
pgroup_name = grab(pgroup, "spec.name")
if pgroup_name is not None:
log.debug2(f"Found portGroup {pgroup_name}")
host_portgroups[pgroup_name] = {
"vlan_id": grab(pgroup, "spec.vlanId"),
"vswitch": grab(pgroup, "spec.vswitchName")
}
for pnic in grab(obj, "config.network.pnic", fallback=list()):
log.debug2("Parsing {}: {}".format(grab(pnic, "_wsdlName"), grab(pnic, "device")))
pnic_name = grab(pnic, "device")
pnic_key = grab(pnic, "key")
log.debug2("Parsing {}: {}".format(grab(pnic, "_wsdlName"), pnic_name))
pnic_mtu = None
pnic_link_speed = grab(pnic, "linkSpeed.speedMb")
if pnic_link_speed is None:
@@ -424,7 +527,30 @@ class VMWareHandler():
if pnic_link_speed is None:
pnic_link_speed = grab(pnic, "validLinkSpecification.0.speedMb")
pnic_link_speed_text = f"{pnic_link_speed}Mbps " if pnic_link_speed is not None else ""
# determine link speed text
pnic_description = ""
if pnic_link_speed is not None:
if pnic_link_speed >= 1000:
pnic_description = "%iGb/s " % int(pnic_link_speed / 1000)
else:
pnic_description = f"{pnic_link_speed}Mb/s "
pnic_description = f"{pnic_description} pNIC"
# check virtual switches for interface data
for vs_name, vs_data in host_vswitches.items():
if pnic_key in vs_data.get("pnics", list()):
pnic_description = f"{pnic_description} ({vs_name})"
pnic_mtu = vs_data.get("mtu")
# check proxy switches for interface data
for ps_uuid, ps_data in host_pswitches.items():
if pnic_key in ps_data.get("pnics", list()):
ps_name = ps_data.get("name")
pnic_description = f"{pnic_description} ({ps_name})"
pnic_mtu = ps_data.get("mtu")
pnic_speed_type_mapping = {
100: "100base-tx",
@@ -435,93 +561,128 @@ class VMWareHandler():
}
pnic_data = {
"name": grab(pnic, "device"),
"name": pnic_name,
"device": host_object,
"mac_address": normalize_mac_address(grab(pnic, "mac")),
"enabled": bool(grab(pnic, "spec.linkSpeed")),
"description": f"{pnic_link_speed_text}Physical Interface",
"enabled": bool(grab(pnic, "linkSpeed")),
"description": pnic_description,
"type": pnic_speed_type_mapping.get(pnic_link_speed, "other")
}
if pnic_mtu is not None:
pnic_data["mtu"] = pnic_mtu
self.inventory.add_update_object(NBInterfaces, data=pnic_data, source=self)
for vnic in grab(obj, "config.network.vnic", fallback=list()):
log.debug2("Parsing {}: {}".format(grab(vnic, "_wsdlName"), grab(vnic, "device")))
vnic_name = grab(vnic, "device")
log.debug2("Parsing {}: {}".format(grab(vnic, "_wsdlName"), vnic_name))
vnic_portgroup = grab(vnic, "portgroup")
vnic_portgroup_data = host_portgroups.get(vnic_portgroup)
vnic_description = vnic_portgroup
if vnic_portgroup_data is not None:
vnic_vlan_id = vnic_portgroup_data.get("vlan_id")
vnic_vswitch = vnic_portgroup_data.get("vswitch")
vnic_description = f"{vnic_description} ({vnic_vswitch}, vlan ID: {vnic_vlan_id})"
vnic_data = {
"name": grab(vnic, "device"),
"name": vnic_name,
"device": host_object,
"mac_address": normalize_mac_address(grab(vnic, "spec.mac")),
"mtu": grab(vnic, "spec.mtu"),
"description": grab(vnic, "portgroup"),
"description": vnic_description,
"type": "virtual"
}
vnic_object = self.inventory.add_update_object(NBInterfaces, data=vnic_data, source=self)
vnic_ip = "{}/{}".format(grab(vnic, "spec.ip.ipAddress"), grab(vnic, "spec.ip.subnetMask"))
# check if interface has the default route
if grab(vnic, "spec.ipRouteSpec") is not None:
vnic_object.is_primary = True
if format_ip(vnic_ip) is None:
logging.error(f"IP address '{vnic_ip}' for {vnic_object.get_display_name()} invalid!")
continue
vnic_ips = list()
ip_permitted = False
vnic_ips.append("{}/{}".format(grab(vnic, "spec.ip.ipAddress"), grab(vnic, "spec.ip.subnetMask")))
ip_address_object = ip_address(grab(vnic, "spec.ip.ipAddress"))
for permitted_subnet in self.permitted_subnets:
if ip_address_object in permitted_subnet:
ip_permitted = True
break
for ipv6_a in grab(vnic, "spec.ip.ipV6Config.ipV6Address", fallback=list()):
if ip_permitted is False:
log.debug(f"IP address {vnic_ip} not part of any permitted subnet. Skipping.")
continue
vnic_ips.append("{}/{}".format(grab(ipv6_a, "ipAddress"), grab(ipv6_a, "prefixLength")))
vnic_ip_data = {
"address": format_ip(vnic_ip),
"assigned_object_id": vnic_object,
}
# add all interface IPs
for vnic_ip in vnic_ips:
self.inventory.add_update_object(NBIPAddresses, data=vnic_ip_data, source=self)
if format_ip(vnic_ip) is None:
logging.error(f"IP address '{vnic_ip}' for {vnic_object.get_display_name()} invalid!")
continue
ip_permitted = False
ip_address_object = ip_interface(vnic_ip).ip
for permitted_subnet in self.permitted_subnets:
if ip_address_object in permitted_subnet:
ip_permitted = True
break
if ip_permitted is False:
log.debug(f"IP address {vnic_ip} not part of any permitted subnet. Skipping.")
continue
vnic_ip_data = {
"address": format_ip(vnic_ip),
"assigned_object_id": vnic_object,
}
self.inventory.add_update_object(NBIPAddresses, data=vnic_ip_data, source=self)
def add_virtual_machine(self, obj):
name = get_string_or_none(grab(obj, "name"))
log.debug2(f"Parsing vCenter host: {name}")
# get VM UUID
vm_uuid = grab(obj, "config.uuid")
if name in self.processed_vm_names:
log.warning(f"Virtual machine '{name}' already parsed. Make sure to use unique host names. Skipping")
if vm_uuid is None or vm_uuid in self.processed_vm_uuid:
return
self.processed_vm_names.append(name)
log.debug2(f"Parsing vCenter VM: {name}")
# first includes
if self.vm_include_filter is not None:
if not self.vm_include_filter.match(name):
log.debug(f"Virtual machine '{name}' did not match include filter '{self.vm_include_filter.pattern}'. Skipping")
return
# get VM power state
status = "active" if get_string_or_none(grab(obj, "runtime.powerState")) == "poweredOn" else "offline"
# second excludes
if self.vm_exclude_filter is not None:
if self.vm_exclude_filter.match(name):
log.debug(f"Virtual Machine '{name}' matched exclude filter '{self.vm_exclude_filter.pattern}'. Skipping")
return
# ignore offline VMs during first run
if self.parsing_vms_the_first_time == True and status == "offline":
log.debug2(f"Ignoring {status} VM '{name}' on first run")
return
# filter VMs
if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:
return
cluster = get_string_or_none(grab(obj, "runtime.host.parent.name"))
if cluster is None:
log.error(f"Requesting cluster for Virtual Machine '{name}' failed. Skipping.")
return
if name in self.processed_vm_names:
log.warning(f"Virtual machine '{name}' already parsed. Make sure to use unique VM names. Skipping")
return
# add to processed VMs
self.processed_vm_uuid.append(vm_uuid)
self.processed_vm_names.append(name)
if cluster in self.standalone_hosts:
cluster = "Standalone ESXi Host"
platform = grab(obj, "config.guestFullName")
platform = get_string_or_none(grab(obj, "guest.guestFullName", fallback=platform))
status = "active" if get_string_or_none(grab(obj, "runtime.powerState")) == "poweredOn" else "offline"
hardware_devices = grab(obj, "config.hardware.device", fallback=list())
@@ -551,6 +712,38 @@ class VMWareHandler():
device_nic_data = list()
device_ip_addresses = dict()
default_route_4_int_mac = None
default_route_6_int_mac = None
for route in grab(obj, "guest.ipStack.0.ipRouteConfig.ipRoute", fallback=list()):
# we found a default route
if grab(route, "prefixLength") == 0:
ip_a = None
try:
ip_a = ip_address(grab(route, "network"))
except ValueError:
pass
if ip_a is None:
continue
gateway_device = grab(route, "gateway.device", fallback="")
nics = grab(obj, "guest.net", fallback=list())
#print(grab(nics, f"{gateway_device}"))
gateway_device_mac = normalize_mac_address(grab(nics, f"{gateway_device}.macAddress"))
if ip_a.version == 4 and gateway_device_mac is not None:
default_route_4_int_mac = gateway_device_mac
elif ip_a.version == 6 and gateway_device_mac is not None:
default_route_6_int_mac = gateway_device_mac
#print(default_route_4_int_mac)
#print(default_route_6_int_mac)
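# illustrative outcome (hypothetical values): an IPv4 default route whose
# gateway device points at the guest NIC with MAC 00:50:56:aa:bb:cc sets
# default_route_4_int_mac to that NIC's normalized MAC address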
# get vm interfaces
for vm_device in hardware_devices:
@@ -564,7 +757,17 @@ class VMWareHandler():
log.debug2(f"Parsing device {device_class}: {int_mac}")
int_network_name = self.networks.get(grab(vm_device, "backing.port.portgroupKey"))
int_portgroup_data = self.networks.get(grab(vm_device, "backing.port.portgroupKey"))
int_network_name = grab(int_portgroup_data, "name")
int_network_vlan_id = grab(int_portgroup_data, "vlan_id")
int_dvswitch_uuid = grab(vm_device, "backing.port.switchUuid")
int_mtu = None
if int_dvswitch_uuid is not None:
int_mtu = self.dvs_mtu.get(int_dvswitch_uuid)
int_connected = grab(vm_device, "connectable.connected")
int_label = grab(vm_device, "deviceInfo.label", fallback="")
@@ -608,17 +811,53 @@ class VMWareHandler():
if int_network_name is not None:
int_full_name = f"{int_full_name} ({int_network_name})"
int_description = f"{int_label} ({device_class})"
if int_network_vlan_id is not None:
int_description = f"{int_description} (vlan ID: {int_network_vlan_id})"
vm_nic_data = {
"name": int_full_name,
"mac_address": int_mac,
"description": f"{int_label} ({device_class})",
"description": int_description,
"enabled": int_connected,
}
if int_mtu is not None:
vm_nic_data["mtu"] = int_mtu
device_nic_data.append(vm_nic_data)
device_ip_addresses[int_full_name] = int_ip_addresses
#####
# add reported guest IP if
# * VM has one interface and
# * this one interface has no IPv4
"""
if len(device_nic_data) == 1 and guest_ip is not None:
ip_a = None
try:
ip_a = ip_address(guest_ip)
except ValueError:
pass
if ip_a is not None:
prefix_len = 32 if ip_a.version == 4 else 128
nic_name = grab(device_nic_data, "0.name")
add_this_ip = True
for nic_ip in device_ip_addresses[nic_name]:
if nic_ip.split("/")[0] == guest_ip:
add_this_ip = False
break
if add_this_ip is True:
device_ip_addresses[int_full_name].append(f"{guest_ip}/{prefix_len}")
"""
# now we collected all the device data
# let's try to find a matching object in the following order
# * try to match name
@@ -631,9 +870,34 @@ class VMWareHandler():
# if nothing of the above worked then it's probably a new VM
vm_object = None
vm_object_candidates = list()
# check existing VMs for matches
for vm in self.inventory.get_all_items(NBVMs):
if vm.source is not None:
continue
if grab(vm, "data.name") != vm_data.get("name"):
continue
# name and cluster match exactly, we most likely found the correct VM
if grab(vm, "data.cluster.data.name") == vm_data.get("cluster.name"):
vm_object = vm
break
vm_object_candidates.append(vm)
if vm_object is None:
for vm_interface in self.inventory.get_all_items(NBVMInterfaces):
# check all interfaces against all MACs and try to find candidates
pass
if vm_object is None:
vm_object = self.inventory.add_update_object(NBVMs, data=vm_data, source=self)
else:
vm_object.update(data=vm_data, source=self)
for vm_nic_data in device_nic_data:
@@ -706,3 +970,4 @@ class VMWareHandler():
# EOF

View File

@@ -13,9 +13,8 @@ from module.common.logging import setup_logging
from module.common.configuration import get_config_file, open_config_file, get_config
from module.netbox.connection import NetBoxHandler
from module.netbox.inventory import NetBoxInventory
from module.netbox.object_classes import *
from module.sources import *
from module.netbox.object_classes import NBPrefixes
from module.sources import instanciate_sources
import pprint
@@ -35,8 +34,21 @@ default_config_file_path = "./settings.ini"
"""
ToDo:
* host "Management" interface is Primary
* return more than one object if more than one is found and somehow add them to the returned objects. Maybe related?
* Add purge option
* documentation
* describe migration (rename tags)
* proper naming to assign sites to clusters
* connection details
* installation
* Standalone Host declaration
* source module structure
* how a vm is picked
* how interfaces are named
* how objects are abducted (taken over by this program)
* thanks to the original owner of the ideas
* ensure NTP is set up properly between all instances (pruning delay)
* primary IP assignment
* complain about prefix length mismatch
* test all log levels
"""
def main():
@@ -85,7 +97,14 @@ def main():
netbox_settings = get_config(config_handler, section="netbox", valid_settings=NetBoxHandler.settings)
# establish NetBox connection
NB_handler = NetBoxHandler(cli_args=args, settings=netbox_settings, inventory=inventory)
NB_handler = NetBoxHandler(settings=netbox_settings, inventory=inventory)
# if purge was selected we go ahead and remove all items which were managed by this tool
if args.purge is True:
NB_handler.just_delete_all_the_things()
# that's it, we are done here
exit(0)
# instantiate source handlers and get attributes
log.info("Initializing sources")
@@ -100,7 +119,7 @@ def main():
for source in sources:
netbox_objects_to_query.extend(source.dependend_netbox_objects)
# we need to collect prefixes as well to so which ip belongs to which prefix
# we need to collect prefixes as well to see which IP belongs to which prefix
netbox_objects_to_query.append(NBPrefixes)
# request NetBox data
@@ -119,7 +138,7 @@ def main():
source.apply()
# add/remove tags to/from all inventory items
inventory.tag_all_the_things(sources, NB_handler)
inventory.tag_all_the_things(NB_handler)
# update all IP addresses
inventory.update_all_ip_addresses()
@@ -142,3 +161,5 @@ def main():
if __name__ == "__main__":
main()
# EOF

View File

@@ -1,18 +1,44 @@
[common]
log_level = INFO
# Places all logs in a rotating file if True
log_to_file = True
#log_to_file = False
log_file = log/netbox_sync.log
#log_file = log/netbox_sync.log
# define different sources
# can also be defined multiple times with different settings
# currently supported
# * vmware : VMware vCenter
[netbox]
api_token = XYZ
host_fqdn = netbox.example.com
#port = 443
#disable_tls = false
#validate_tls_certs = true
#prune_enabled = true
#prune_delay_in_days = 30
#default_netbox_result_limit = 200
#timeout = 30
#max_retry_attempts = 4
#####################
# S O U R C E S #
#####################
[source/my-example]
type = vmware
@@ -37,32 +63,17 @@ permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fe80::/64
#netbox_host_device_role = Server
#netbox_vm_device_role = Server
#cluster_site_relation =
#cluster_site_relation = Cluster_NYC = New York, Cluster_FFM = Frankfurt
# Attempt to collect asset tags from vCenter hosts
collect_hardware_asset_tag = True
# ToDo:
# * add following options
#collect_hardware_asset_tag = True
#dns_name_lookup = True
#custom_dns_servers = 192.168.1.11, 192.168.1.12
[netbox]
api_token = XYZ
host_fqdn = netbox.example.com
#port = 443
#disable_tls = false
#validate_tls_certs = true
#prune_enabled = true
# prune_delay_in_days = 30
# EOF