Fixes a number of logic errors and adds object caching by default

This is the first (hopefully) fully working version of this syncing
script.

Now the documentation needs some extensive love. There is probably also
a lot of dead code which needs to be removed.
Ricardo Bartels
2020-11-13 23:53:14 +01:00
parent fca25a8376
commit 4e2cb031d7
7 changed files with 531 additions and 404 deletions
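The heart of this change is the new per-class object cache: each NetBox object class is pickled to its own cache file, and on the next run only a brief list of existing IDs plus the objects changed since the cached 'last_updated' timestamp are fetched and merged back in. A minimal sketch of that idea, with simplified helper names rather than the exact code from the diff below:

import os
import pickle

def load_cached_results(cache_file):
    # return the cached object list, or None if the cache can't be used
    if not (os.path.exists(cache_file) and os.access(cache_file, os.R_OK)):
        return None
    try:
        with open(cache_file, "rb") as f:
            return pickle.load(f)
    except Exception:
        return None

def merge_cache_with_delta(cached, brief_results, updated_results):
    # keep cached objects which still exist in NetBox and were not updated
    # since the cached timestamp, then append the freshly fetched objects
    existing_ids = {x.get("id") for x in brief_results}
    changed_ids = {x.get("id") for x in updated_results}
    merged = [x for x in cached
              if x.get("id") in existing_ids and x.get("id") not in changed_ids]
    merged.extend(updated_results)
    return merged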
+188 -63
@@ -5,6 +5,7 @@ import requests
from http.client import HTTPConnection
import urllib3
import pickle
import os
from packaging import version
@@ -17,9 +18,6 @@ from module.common.logging import get_logger, DEBUG3
log = get_logger()
# ToDo:
# * primary ip
class NetBoxHandler:
"""
Handles NetBox connection state and interaction with API
@@ -45,14 +43,16 @@ class NetBoxHandler:
primary_tag = "NetBox-synced"
orphaned_tag = f"{primary_tag}: Orphaned"
cache_directory = None
use_netbox_caching = True
inventory = None
instance_tags = None
instance_interfaces = {}
instance_virtual_interfaces = {}
# testing option
use_netbox_caching_for_testing = False
resolved_dependencies = set()
def __init__(self, settings=None, inventory=None):
@@ -91,6 +91,45 @@ class NetBoxHandler:
do_error_exit(f"Netbox API version '{self.api_version}' not supported. "
f"Minimum API version: {self.minimum_api_version}")
self.setup_caching()
def setup_caching(self):
cache_folder_name = "cache"
base_dir = os.sep.join(__file__.split(os.sep)[0:-3])
if cache_folder_name[0] != os.sep:
cache_folder_name = f"{base_dir}/{cache_folder_name}"
self.cache_directory = os.path.realpath(cache_folder_name)
# check if directory is a file
if os.path.isfile(self.cache_directory):
log.warning(f"The cache directory ({self.cache_directory}) seems to be file.")
self.use_netbox_caching = False
# check if directory exists
if not os.path.exists(self.cache_directory):
# try to create directory
try:
os.makedirs(self.cache_directory, 0o700)
except OSError:
log.warning(f"Unable to create cache directory: {self.cache_directory}")
self.use_netbox_caching = False
except Exception as e:
log.warning(f"Unknown exception while creating cache directory {self.cache_directory}: {e}")
self.use_netbox_caching = False
# check if directory is writable
if not os.access(self.cache_directory, os.X_OK | os.W_OK):
log.warning(f"Error writing to cache directory: {self.cache_directory}")
self.use_netbox_caching = False
if self.use_netbox_caching is False:
log.warning("NetBox caching DISABLED")
else:
log.debug(f"Successfully configured cache directory: {self.cache_directory}")
def parse_config_settings(self, config_settings):
validation_failed = False
@@ -153,7 +192,7 @@ class NetBoxHandler:
return result
def request(self, object_class, req_type="GET", data=None, params=None, nb_id=None):
def request(self, object_class, req_type="GET", data=None, params=dict(), nb_id=None):
result = None
@@ -164,11 +203,15 @@ class NetBoxHandler:
if nb_id is not None:
request_url += f"{nb_id}/"
if params is None:
if not isinstance(params, dict):
log.debug(f"Params passed to NetBox request need to be a dict, got: {params}")
params = dict()
if req_type == "GET":
params["limit"] = self.default_netbox_result_limit
if "limit" not in params.keys():
params["limit"] = self.default_netbox_result_limit
# always exclude config context
params["exclude"] = "config_context"
# prepare request
@@ -179,7 +222,6 @@ class NetBoxHandler:
# issue request
response = self.single_request(this_request)
try:
result = response.json()
except json.decoder.JSONDecodeError:
@@ -210,6 +252,9 @@ class NetBoxHandler:
log.info(f"NetBox successfully {action} {object_class.name} object '{object_name}'.")
if response.status_code == 204:
result = True
# token issues
elif response.status_code == 403:
@@ -271,6 +316,7 @@ class NetBoxHandler:
def query_current_data(self, netbox_objects_to_query=None):
if netbox_objects_to_query is None:
raise AttributeError(f"Attribute netbox_objects_to_query is: '{netbox_objects_to_query}'")
@@ -280,34 +326,111 @@ class NetBoxHandler:
if nb_object_class not in NetBoxObject.__subclasses__():
raise AttributeError(f"Class '{nb_object_class.__name__}' must be a subclass of '{NetBoxObject.__name__}'")
# if objects were requested multiple times but have already been retrieved
if nb_object_class in self.resolved_dependencies:
continue
# initialize cache variables
cached_nb_data = None
if self.use_netbox_caching_for_testing is True:
cache_file = f"{self.cache_directory}{os.sep}{nb_object_class.__name__}.cache"
cache_this_class = False
latest_update = None
# check if cache file is accessible
if self.use_netbox_caching is True:
cache_this_class = True
if os.path.exists(cache_file) and not os.access(cache_file, os.R_OK):
log.warning("Got no permission to read existing cache file: {cache_file}")
cache_this_class = False
if os.path.exists(cache_file) and not os.access(cache_file, os.W_OK):
log.warning("Got no permission to write to existing cache file: {cache_file}")
cache_this_class = False
# read data from cache file
if cache_this_class is True:
try:
cached_nb_data = pickle.load( open( f"cache/{nb_object_class.__name__}.cache", "rb" ) )
cached_nb_data = pickle.load( open( cache_file, "rb" ) )
except Exception:
pass
nb_data = dict()
if cached_nb_data is None:
# get date of latest update in cache file
if cached_nb_data is not None:
latest_update_list = [x.get("last_updated") for x in cached_nb_data if x.get("last_updated") is not None]
if len(latest_update_list) > 0:
latest_update = sorted(latest_update_list)[-1]
log.debug(f"Successfully read cached data with {len(cached_nb_data)} '{nb_object_class.name}%s', last updated '{latest_update}'" % plural(len(cached_nb_data)))
else:
cache_this_class = False
full_nb_data = None
brief_nb_data = None
updated_nb_data = None
# no cache data found
if latest_update is None:
# get all objects of this class
log.debug(f"Requesting {nb_object_class.name}s from NetBox")
nb_data = self.request(nb_object_class)
log.debug(f"Requesting all {nb_object_class.name}s from NetBox")
full_nb_data = self.request(nb_object_class)
if full_nb_data.get("results") is None:
log.error(f"Result data from NetBox for object {nb_object_class.__name__} missing!")
do_error_exit("Reading data from NetBox failed.")
if self.use_netbox_caching_for_testing is True:
pickle.dump(nb_data.get("results"), open( f"cache/{nb_object_class.__name__}.cache", "wb" ) )
else:
nb_data["results"] = cached_nb_data
# request a brief list of existing objects
log.debug(f"Requesting a brief list of {nb_object_class.name}s from NetBox")
brief_nb_data = self.request(nb_object_class, params={"brief":1, "limit": 500})
log.debug("NetBox returned %d results." % len(brief_nb_data.get("results", list())))
if nb_data.get("results") is None:
log.warning(f"Result data from NetBox for object {nb_object_class.__name__} missing!")
continue
log.debug(f"Requesting the last updates since {latest_update} of {nb_object_class.name}s from NetBox")
updated_nb_data = self.request(nb_object_class, params={"last_updated__gte": latest_update})
log.debug("NetBox returned %d results." % len(updated_nb_data.get("results", list())))
log.debug(f"Processing %s returned {nb_object_class.name}%s" % (len(nb_data.get("results")),plural(len(nb_data.get("results")))))
if brief_nb_data.get("results") is None or updated_nb_data.get("results") is None:
log.error(f"Result data from NetBox for object {nb_object_class.__name__} missing!")
do_error_exit("Reading data from NetBox failed.")
for object_data in nb_data.get("results"):
# read a full set from NetBox
nb_objects = list()
if full_nb_data is not None:
nb_objects = full_nb_data.get("results")
# read the delta from NetBox and merge it with the cached data
else:
currently_existing_ids = [x.get("id") for x in brief_nb_data.get("results")]
changed_ids = [x.get("id") for x in updated_nb_data.get("results")]
for object in cached_nb_data:
if object.get("id") in currently_existing_ids and object.get("id") not in changed_ids:
nb_objects.append(object)
nb_objects.extend(updated_nb_data.get("results"))
if cache_this_class is True:
try:
pickle.dump(nb_objects, open( cache_file, "wb" ) )
log.debug("Successfully cached %d objects." % (len(nb_objects)))
except Exception as e:
log.warning(f"Failed to write NetBox data to cache file: {e}")
log.debug(f"Processing %s returned {nb_object_class.name}%s" % (len(nb_objects),plural(len(nb_objects))))
for object_data in nb_objects:
self.inventory.add_item_from_netbox(nb_object_class, data=object_data)
# mark this object class as retrieved
self.resolved_dependencies.add(nb_object_class)
return
def inizialize_basic_data(self):
@@ -342,12 +465,15 @@ class NetBoxHandler:
# resolve dependencies
for dependency in object.get_dependencies():
if dependency not in self.inventory.resolved_dependencies:
if dependency not in self.resolved_dependencies:
log.debug2("Resolving dependency: %s" % (dependency.name))
self.update_object(dependency)
# unset data if requested
if unset is True and len(object.unset_items) > 0:
if unset is True:
if len(object.unset_items) == 0:
continue
unset_data = {x: None for x in object.unset_items}
@@ -366,21 +492,14 @@ class NetBoxHandler:
continue
returned_object_data = None
data_to_patch = dict()
unresolved_dependency_data = dict()
if object.is_new is True:
object.updated_items = object.data.keys()
for key, value in object.data.items():
if key in object.updated_items:
if key == "tags":
data_to_patch[key] = [{"name": d.get_display_name()} for d in value]
elif isinstance(value, NetBoxObject):
if isinstance(value, (NetBoxObject,NBObjectList)):
if value.get_nb_reference() is None:
unresolved_dependency_data[key] = value
@@ -391,18 +510,23 @@ class NetBoxHandler:
data_to_patch[key] = value
issued_request = False
if object.is_new is True:
log.info("Creating new NetBox '%s' object: %s" % (object.name, object.get_display_name()))
returned_object_data = None
if len(data_to_patch.keys()) > 0:
returned_object_data = self.request(nb_object_sub_class, req_type="POST", data=data_to_patch)
# default is a new object
nb_id = None
req_type = "POST"
action = "Creating new"
issued_request = True
# if it's not a new object then update it
if object.is_new is False:
nb_id = object.nb_id
req_type = "PATCH"
action = "Updating"
if object.is_new is False and len(object.updated_items) > 0:
log.info("%s NetBox '%s' object '%s' with data: %s" % (action, object.name, object.get_display_name(), data_to_patch))
log.info("Updating NetBox '%s' object '%s' with data: %s" % (object.name, object.get_display_name(), data_to_patch))
returned_object_data = self.request(nb_object_sub_class, req_type="PATCH", data=data_to_patch, nb_id=object.nb_id)
returned_object_data = self.request(nb_object_sub_class, req_type=req_type, data=data_to_patch, nb_id=nb_id)
issued_request = True
@@ -410,37 +534,42 @@ class NetBoxHandler:
object.update(data = returned_object_data, read_from_netbox=True)
# add unresolved dependencies back to object
if len(unresolved_dependency_data.keys()) > 0:
object.update(data = unresolved_dependency_data)
object.resolve_relations()
elif issued_request is True:
log.error(f"Request Failed for {nb_object_sub_class.name}. Used data: {data_to_patch}")
# add unresolved dependencies back to object
if len(unresolved_dependency_data.keys()) > 0:
log.debug2("Adding unresolved dependencies back to object: %s" % list(unresolved_dependency_data.keys()))
object.update(data=unresolved_dependency_data)
object.resolve_relations()
# add class to resolved dependencies
self.inventory.resolved_dependencies = list(set(self.inventory.resolved_dependencies + [nb_object_sub_class] ))
self.resolved_dependencies.add(nb_object_sub_class)
def update_instance(self):
log.info("Updating changed data in NetBox")
# update all items in NetBox but unset items first
self.inventory.resolved_dependencies = list()
log.debug("First run, unset attributes if necessary.")
self.resolved_dependencies = set()
for nb_object_sub_class in NetBoxObject.__subclasses__():
self.update_object(nb_object_sub_class, unset=True)
# update all items
self.inventory.resolved_dependencies = list()
log.debug("Second run, update all items")
self.resolved_dependencies = set()
for nb_object_sub_class in NetBoxObject.__subclasses__():
self.update_object(nb_object_sub_class)
# run again to update objects with previously unresolved dependencies
self.inventory.resolved_dependencies = list()
log.debug("Third run, update all items with previous unresolved items")
self.resolved_dependencies = set()
for nb_object_sub_class in NetBoxObject.__subclasses__():
self.update_object(nb_object_sub_class)
# ToDo: check for objects with unresolved relations
def prune_data(self):
@@ -470,7 +599,7 @@ class NetBoxHandler:
# only need the date including seconds
date_last_update = date_last_update[0:19]
log.debug2(f"Object '{object.get_display_name()}' is Orphaned. Last time changed: {date_last_update}")
log.debug2(f"Object '{object.name}' '{object.get_display_name()}' is Orphaned. Last time changed: {date_last_update}")
# check prune delay.
last_updated = None
@@ -516,8 +645,8 @@ class NetBoxHandler:
if nb_object_sub_class == NBTags:
continue
# object has no tags so we can't be sure it was created with this tool
if NBTags not in nb_object_sub_class.data_model.values():
# object has no tags so we can't be sure it was created with this program
if NBTagList not in nb_object_sub_class.data_model.values():
continue
for object in self.inventory.get_all_items(nb_object_sub_class):
@@ -526,36 +655,32 @@ class NetBoxHandler:
if getattr(object, "deleted", False) is True:
continue
found_objects_to_delete = True
if self.primary_tag in object.get_tags():
log.info(f"{nb_object_sub_class.name} '{object.get_display_name()}' will be deleted now")
"""
# Todo:
# * Needs testing
result = self.request(nb_object_sub_class, req_type="DELETE", nb_id=object.nb_id)
if result is not None:
object.deleted = True
"""
if found_objects_to_delete is False:
# ToDo: test delete all
# get tag objects
primary_tag = self.inventory.add_update_object(NBTags, data = {"name": self.primary_tag})
primary_tag = self.inventory.get_by_data(NBTags, data = {"name": self.primary_tag})
orpahned_tag = self.inventory.get_by_data(NBTags, data = {"name": self.orphaned_tag})
# try to delete them
log.info(f"{NBTags.name} '{primary_tag.get_display_name()}' will be deleted now")
#self.request(NBTags, req_type="DELETE", nb_id=primary_tag.nb_id)
self.request(NBTags, req_type="DELETE", nb_id=primary_tag.nb_id)
log.info(f"{NBTags.name} '{orpahned_tag.get_display_name()}' will be deleted now")
#self.request(NBTags, req_type="DELETE", nb_id=orpahned_tag.nb_id)
self.request(NBTags, req_type="DELETE", nb_id=orpahned_tag.nb_id)
log.info("Successfully deleted all objects which were sync by this program.")
log.info("Successfully deleted all objects which were synced and tagged by this program.")
break
else:
+2 -163
@@ -186,80 +186,13 @@ class NetBoxInventory:
if netbox_handler.primary_tag in object.get_tags():
object.add_tags(netbox_handler.orphaned_tag)
def update_all_ip_addresses(self):
def query_ptr_records_for_all_ips(self):
"""
def _return_longest_match(ip_to_match=None, list_of_prefixes=None):
if ip_to_match is None or list_of_prefixes is None:
return
if not isinstance(ip_to_match, (IPv4Address, IPv6Address)):
try:
ip_to_match = ip_address(ip_to_match)
except ValueError:
return
if not isinstance(list_of_prefixes, list):
return
sanatized_list_of_prefixes = list()
for prefix in list_of_prefixes:
if not isinstance(prefix, (IPv4Network, IPv6Network)):
try:
sanatized_list_of_prefixes.append(ip_network(prefix))
except ValueError:
return
else:
sanatized_list_of_prefixes.append(prefix)
current_longest_matching_prefix_length = 0
current_longest_matching_prefix = None
for prefix in sanatized_list_of_prefixes:
if ip_to_match in prefix and \
prefix.prefixlen >= current_longest_matching_prefix_length:
current_longest_matching_prefix_length = prefix.prefixlen
current_longest_matching_prefix = prefix
return current_longest_matching_prefix
"""
#log.info("Trying to math IPs to existing prefixes")
"""
all_prefixes = self.get_all_items(NBPrefixes)
all_addresses = self.get_all_items(NBIPAddresses)
"""
log.debug("Starting to look up PTR records for IP addresses")
# store IP addresses to look them up in bulk
ip_lookup_dict = dict()
"""
# prepare prefixes
# dict of simple prefixes to pass to function for longest match
prefixes_per_site = dict()
# dict of prefix objects so we don't need to search for them again
prefixes_per_site_objects = dict()
for this_prefix in all_prefixes:
# name of the site or None (as string)
prefix_site = str(grab(this_prefix, "data.site.data.name"))
if prefixes_per_site.get(prefix_site) is None:
prefixes_per_site[prefix_site] = list()
prefixes_per_site_objects[prefix_site] = dict()
prefix = ip_network(grab(this_prefix, "data.prefix"))
prefixes_per_site[prefix_site].append(prefix)
prefixes_per_site_objects[prefix_site][str(prefix)] = this_prefix
"""
# iterate over all IP addresses and try to match them to a prefix
for ip in self.get_all_items(NBIPAddresses):
@@ -282,100 +215,6 @@ class NetBoxInventory:
ip_lookup_dict[ip.source].get("ips").append(ip_a)
"""
object_site = "None"
assigned_device_vm = None
# name of the site or None (as string)
# -> NBInterfaces -> NBDevices -> NBSites
if grab(ip, "data.assigned_object_type") == "dcim.interface":
object_site = str(grab(ip, "data.assigned_object_id.data.device.data.site.data.name"))
assigned_device_vm = grab(ip, "data.assigned_object_id.data.device")
# -> NBVMInterfaces -> NBVMs -> NBClusters -> NBSites
elif grab(ip, "data.assigned_object_type") == "virtualization.vminterface":
object_site = str(grab(ip, "data.assigned_object_id.data.virtual_machine.data.cluster.data.site.data.name"))
assigned_device_vm = grab(ip, "data.assigned_object_id.data.virtual_machine")
# set/update/remove primary IP addresses
ip_version = 6 if ":" in ip_a else 4
if grab(ip, "source.set_primary_ip") == "always" and ip.is_primary == True:
for object_type in [NBDevices, NBVMs]:
for devices_vms in self.get_all_items(object_type):
# device has no primary IP of this version
this_primary_ip = grab(devices_vms, f"data.primary_ip{ip_version}")
if this_primary_ip is None:
continue
if not isinstance(this_primary_ip, dict):
continue
# device has the same object assigned
if ip.is_new is False and ip.nb_id == this_primary_ip.get("id") and devices_vms != assigned_device_vm:
devices_vms.unset_attribute(f"primary_ip{ip_version}")
log.debug(f"Setting '{assigned_device_vm.get_display_name()}' attribute 'primary_ip{ip_version}' to '{ip.get_display_name()}'")
assigned_device_vm.update(data = {f"primary_ip{ip_version}": ip})
elif grab(ip, "source.set_primary_ip") != "never" and ip.is_primary == True:
if grab(assigned_device_vm, f"data.primary_ip{ip_version}") is None:
log.debug(f"Setting '{assigned_device_vm.get_display_name()}' attribute 'primary_ip{ip_version}' to '{ip.get_display_name()}'")
assigned_device_vm.update(data = {f"primary_ip{ip_version}": ip})
log.debug2("Trying to find prefix for IP: %s" % ip.get_display_name())
log.debug2(f"Site name for this IP: {object_site}")
# test site prefixes first
matching_site_name = object_site
matching_site_prefix = _return_longest_match(ip_a, prefixes_per_site.get(object_site))
# nothing was found then check prefixes with site name
if matching_site_prefix is None:
matching_site_name = "None"
matching_site_prefix = _return_longest_match(ip_a, prefixes_per_site.get(matching_site_name))
# no matching prefix found, give up
if matching_site_prefix is None:
continue
log.debug2(f"Found IP '{ip_a}' matches prefix '{matching_site_prefix}' in site '{matching_site_name.replace('None', 'undefined')}'")
# get matching prefix object
prefix_object = prefixes_per_site_objects.get(matching_site_name).get(str(matching_site_prefix))
if prefix_object is None:
continue
# check if prefix net size and ip address prefix length match
if matching_site_prefix.prefixlen != int(ip_prefix_length):
interface_object = grab(ip, "data.assigned_object_id")
log.warning(f"IP prefix length of '{ip_a}/{ip_prefix_length}' ({interface_object.get_display_name()}) doesn't match network prefix length '{matching_site_prefix}'!")
data = dict()
vrf = grab(prefix_object, "data.vrf.id")
tenant = grab(prefix_object, "data.tenant.id")
if vrf is not None and str(vrf) != str(grab(ip, "data.vrf.id")):
data["vrf"] = vrf
# only overwrite tenant if not already defined
# ToDo: document behavior
if tenant is not None and grab(ip, "data.tenant.id") is None and str(tenant) != str(grab(ip, "data.tenant.id")):
data["tenant"] = tenant
if len(data.keys()) > 0:
ip.update(data=data)
"""
log.debug("Starting to look up PTR records for IP addresses")
# now perform DNS requests to look up DNS names for IP addresses
for source, data in ip_lookup_dict.items():
+213 -100
@@ -36,6 +36,14 @@ class NetBoxObject():
# store provided inventory handle
self.inventory = inventory
# initialize empty data dict
self.data = dict()
# add empty lists for list items
for key, data_type in self.data_model.items():
if data_type in NBObjectList.__subclasses__():
self.data[key] = data_type()
# store source handle
if source is not None:
self.source = source
@@ -67,7 +75,7 @@ class NetBoxObject():
if isinstance(dvalue, list):
new_dvalue = list()
for possible_option in dvalue:
if possible_option in NetBoxObject.__subclasses__() + [IPv4Network, IPv6Network]:
if type(possible_option) == type:
new_dvalue.append(str(possible_option))
else:
new_dvalue.append(possible_option)
@@ -86,12 +94,12 @@ class NetBoxObject():
data = dict()
for dkey, dvalue in value.items():
# if value is class name then print class name
# if value is class name then print class representation
if isinstance(dvalue, (NetBoxObject, IPv4Network, IPv6Network)):
dvalue = repr(dvalue)
if dkey == "tags":
dvalue = [x.get_display_name() for x in dvalue]
if isinstance(dvalue, NBObjectList):
dvalue = [repr(x) for x in dvalue]
data[dkey] = dvalue
@@ -220,14 +228,17 @@ class NetBoxObject():
if type_check_faild is True:
continue
# tags need to be treated as list of dictionaries, tags are only added
if defined_value_type == NBTagList:
value = self.compile_tags(value)
# VLANs will overwrite the whole list of current VLANs
if defined_value_type == NBVLANList:
value = self.compile_vlans(value)
# this is meant to be a reference to a different object
if defined_value_type in NetBoxObject.__subclasses__():
# tags need to be treated as list of dictionaries
if defined_value_type == NBTags:
self.add_tags(value)
continue
if not isinstance(value, NetBoxObject):
# try to find object.
value = self.inventory.add_update_object(defined_value_type, data=value)
@@ -244,50 +255,42 @@ class NetBoxObject():
parsed_data["slug"] = self.format_slug(text=parsed_data.get(self.primary_key), max_len=self.data_model.get("slug"))
# this is a new set of data
if self.data is None:
self.data = parsed_data
# update all data items
for key, new_value in parsed_data.items():
# add empty tag list if no tags were provided
if "tags" in self.data_model.keys() and data.get("tags") is None:
self.data["tags"] = list()
# nothing changed, continue with next key
current_value = self.data.get(key)
if current_value == new_value:
continue
# see if data just got updated and mark it as such.
else:
# get current value str
if isinstance(current_value, (NetBoxObject, NBObjectList)):
current_value_str = str(current_value.get_display_name())
for key, new_value in parsed_data.items():
# if data model is a list then we need to read the netbox data value
elif isinstance(self.data_model.get(key), list) and isinstance(current_value, dict):
current_value_str = str(current_value.get("value"))
# nothing changed, continue with next key
current_value = self.data.get(key)
if current_value == new_value:
continue
elif key.startswith("primary_ip") and isinstance(current_value, dict):
current_value_str = str(current_value.get("address"))
# get current value str
if isinstance(current_value, NetBoxObject):
current_value_str = str(current_value.get_display_name())
else:
current_value_str = str(current_value).replace("\r","")
# if data model is a list then we need to read the netbox data value
elif isinstance(self.data_model.get(key), list) and isinstance(current_value, dict):
current_value_str = str(current_value.get("value"))
# get new value str
if isinstance(new_value, (NetBoxObject, NBObjectList)):
new_value_str = str(new_value.get_display_name())
else:
new_value_str = str(new_value).replace("\r","")
elif key.startswith("primary_ip") and isinstance(current_value, dict):
current_value_str = str(current_value.get("address"))
else:
current_value_str = str(current_value).replace("\r","")
# just check again if values might match now
if current_value_str == new_value_str:
continue
# get new value str
if isinstance(new_value, NetBoxObject):
new_value_str = str(new_value.get_display_name())
else:
new_value_str = str(new_value).replace("\r","")
# just check again if values might match now
if current_value_str == new_value_str:
continue
self.data[key] = new_value
self.updated_items.append(key)
self.data[key] = new_value
self.updated_items.append(key)
if self.is_new is False:
log.debug(f"{self.name.capitalize()} '{display_name}' attribute '{key}' changed from '{current_value_str}' to '{new_value_str}'")
self.resolve_relations()
@@ -325,35 +328,36 @@ class NetBoxObject():
def resolve_relations(self):
for key, value in self.data_model.items():
for key, data_type in self.data_model.items():
if self.data.get(key) is None:
continue
if key.startswith("primary_ip"):
value = NBIPAddresses
data_type = NBIPAddresses
# continue if value is not an NetBox object
if value not in NetBoxObject.__subclasses__():
# continue if data_type is not a NetBox object
if data_type not in NetBoxObject.__subclasses__() + NBObjectList.__subclasses__():
continue
data_value = self.data.get(key)
resolved_data = None
if value == NBTags:
if data_type in NBObjectList.__subclasses__():
resolved_tag_list = list()
for tag in data_value:
resolved_object_list = data_type()
for item in data_value:
if isinstance(tag, NetBoxObject):
tag_object = tag
if isinstance(item, data_type.member_type):
item_object = item
else:
tag_object = self.inventory.get_by_data(value, data=tag)
item_object = self.inventory.get_by_data(data_type.member_type, data=item)
if tag_object is not None:
resolved_tag_list.append(tag_object)
if item_object is not None:
resolved_object_list.append(item_object)
resolved_data = resolved_object_list
resolved_data = resolved_tag_list
else:
if data_value is None:
continue
@@ -367,7 +371,7 @@ class NetBoxObject():
elif isinstance(data_value, dict):
data_to_find = data_value
resolved_data = self.inventory.get_by_data(value, data=data_to_find)
resolved_data = self.inventory.get_by_data(data_type, data=data_to_find)
if resolved_data is not None:
self.data[key] = resolved_data
@@ -380,27 +384,33 @@ class NetBoxObject():
def get_dependencies(self):
return [x for x in self.data_model.values() if x in NetBoxObject.__subclasses__()]
r = [x for x in self.data_model.values() if x in NetBoxObject.__subclasses__()]
r.extend([x.member_type for x in self.data_model.values() if x in NBObjectList.__subclasses__()])
return r
def get_tags(self):
return [x.get_display_name() for x in self.data.get("tags", list())]
def update_tags(self, tags, remove=False):
def compile_tags(self, tags, remove=False):
if tags is None or NBTags not in self.data_model.values():
if tags is None or NBTagList not in self.data_model.values():
return
action = "Adding" if remove is False else "Removing"
# list of parsed tag strings
sanatized_tag_strings = list()
log.debug2(f"{action} Tags: {tags}")
new_tags = list()
log.debug2(f"Compiling TAG list")
new_tag_list = NBTagList()
def extract_tags(this_tags):
if isinstance(this_tags, str):
new_tags.append(this_tags)
if isinstance(this_tags, NBTags):
sanatized_tag_strings.append(this_tags.get_display_name())
elif isinstance(this_tags, str):
sanatized_tag_strings.append(this_tags)
elif isinstance(this_tags, dict) and this_tags.get("name") is not None:
new_tags.append(this_tags.get("name"))
sanatized_tag_strings.append(this_tags.get("name"))
if isinstance(tags, list):
for tag in tags:
@@ -408,39 +418,63 @@ class NetBoxObject():
else:
extract_tags(tags)
# current list of tag strings
current_tag_strings = self.get_tags()
log.debug2(f"Parsed tag list: {new_tags}")
new_tags = list()
removed_tags = list()
current_tags = self.get_tags()
for tag_name in sanatized_tag_strings:
tag_has_changed = False
for tag_name in new_tags:
if tag_name not in current_tags and remove == False:
# add tag
# add tag
if tag_name not in current_tag_strings and remove == False:
tag = self.inventory.add_update_object(NBTags, data={"name": tag_name})
self.data["tags"].append(tag)
if self.is_new is False:
self.updated_items.append("tags")
new_tags.append(tag)
tag_has_changed = True
if tag_name in current_tags and remove == True:
if tag_name in current_tag_strings and remove == True:
tag = self.inventory.get_by_data(NBTags, data={"name": tag_name})
self.data["tags"].remove(tag)
if self.is_new is False:
self.updated_items.append("tags")
removed_tags.append(tag)
tag_has_changed = True
current_tags = grab(self, "data.tags", fallback=NBTagList())
new_tags = self.get_tags()
if len(new_tags) > 0:
if tag_has_changed is True:
log.debug(f"{self.name.capitalize()} '{self.get_display_name()}' attribute 'tags' changed from '{current_tags}' to '{new_tags}'")
for tag in new_tags + current_tags:
new_tag_list.append(tag)
elif len(removed_tags) > 0:
for tag in current_tags:
if tag not in removed_tags:
new_tag_list.append(tag)
else:
new_tag_list = current_tags
return new_tag_list
def update_tags(self, tags, remove=False):
if tags is None or NBTagList not in self.data_model.values():
return
action = "Adding" if remove is False else "Removing"
log.debug2(f"{action} Tags: {tags}")
current_tags = grab(self, "data.tags", fallback=NBTagList())
new_tags = self.compile_tags(tags, remove=remove)
if str(current_tags.get_display_name()) != str(new_tags.get_display_name()):
self.data["tags"] = new_tags
self.updated_items.append("tags")
log.debug(f"{self.name.capitalize()} '{self.get_display_name()}' attribute 'tags' changed from '{current_tags.get_display_name()}' to '{new_tags.get_display_name()}'")
def add_tags(self, tags_to_add):
self.update_tags(tags_to_add)
@@ -448,6 +482,34 @@ class NetBoxObject():
def remove_tags(self, tags_to_remove):
self.update_tags(tags_to_remove, remove=True)
def compile_vlans(self, vlans):
if vlans is None or NBVLANList not in self.data_model.values():
return
data_key = "tagged_vlans"
log.debug2(f"Compiling VLAN list")
new_vlan_list = NBVLANList()
for vlan in vlans:
if isinstance(vlan, NBVLANs):
new_vlan_object = vlan
elif isinstance(vlan, dict):
new_vlan_object = self.inventory.add_update_object(NBVLANs, data=vlan)
else:
log.error(f"Unable to parse provided VLAN data: {vlan}")
continue
# VLAN already in list, must have been submitted twice
if new_vlan_object in new_vlan_list:
continue
new_vlan_list.append(new_vlan_object)
return new_vlan_list
def unset_attribute(self, attribute_name=None):
if attribute_name is None:
@@ -479,6 +541,18 @@ class NetBoxObject():
return self.nb_id
class NBObjectList(list):
pass
# def __int__(slef, *args, **kwargs):
# self.members = super().__init__(args[0])
# def __iter__(self):
# for item in self.members:
# yield item
def get_display_name(self):
return sorted([x.get_display_name() for x in self])
class NBTags(NetBoxObject):
name = "tag"
@@ -491,6 +565,25 @@ class NBTags(NetBoxObject):
"description": 200
}
class NBTagList(NBObjectList):
member_type = NBTags
def get_nb_reference(self):
"""
return None if one tag is unresolvable
Once the tag was created in NetBox it can be assigned to objects
"""
return_list = list()
for tag in self:
if tag.nb_id == 0:
return None
return_list.append({"name": tag.get_display_name()})
return return_list
class NBTenants(NetBoxObject):
name = "tenant"
api_path = "tenancy/tenants"
@@ -500,7 +593,7 @@ class NBTenants(NetBoxObject):
"slug": 50,
"comments": str,
"description": 200,
"tags": NBTags
"tags": NBTagList
}
class NBSites(NetBoxObject):
@@ -512,7 +605,7 @@ class NBSites(NetBoxObject):
"slug": 50,
"comments": str,
"tenant": NBTenants,
"tags": NBTags
"tags": NBTagList
}
class NBVrfs(NetBoxObject):
@@ -523,7 +616,7 @@ class NBVrfs(NetBoxObject):
"name": 50,
"description": 200,
"tenant": NBTenants,
"tags": NBTags
"tags": NBTagList
}
class NBVLANs(NetBoxObject):
@@ -538,7 +631,7 @@ class NBVLANs(NetBoxObject):
"site": NBSites,
"description": 200,
"tenant": NBTenants,
"tags": NBTags
"tags": NBTagList
}
def get_display_name(self, data=None, including_second_key=False):
@@ -585,6 +678,24 @@ class NBVLANs(NetBoxObject):
super().update(data=data, read_from_netbox=read_from_netbox, source=source)
class NBVLANList(NBObjectList):
member_type = NBVLANs
def get_nb_reference(self):
"""
return None if one VLAN is unresolvable
Once the VLAN was created in NetBox it can be assigned to objects
"""
return_list = list()
for vlan in self:
if vlan.nb_id == 0:
return None
return_list.append(vlan.nb_id)
return return_list
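The two NBObjectList subclasses above serialize differently when referenced in an API payload: a tag list resolves to a list of {"name": ...} dicts, a VLAN list to a list of NetBox IDs, and both return None as long as any member is still unresolved (nb_id == 0), which defers the request to a later pass. A standalone illustration of that pattern, using a stand-in class rather than the real NBTags:

class FakeTag:
    # stand-in for NBTags; nb_id == 0 means "not yet created in NetBox"
    def __init__(self, name, nb_id=0):
        self.name, self.nb_id = name, nb_id

    def get_display_name(self):
        return self.name

def tag_list_reference(tags):
    # mirrors NBTagList.get_nb_reference(): bail out if any tag is unresolved
    result = []
    for tag in tags:
        if tag.nb_id == 0:
            return None
        result.append({"name": tag.get_display_name()})
    return result

print(tag_list_reference([FakeTag("NetBox-synced", nb_id=7)]))  # [{'name': 'NetBox-synced'}]
print(tag_list_reference([FakeTag("brand-new")]))               # None, resolved in a later run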
class NBPrefixes(NetBoxObject):
name = "IP prefix"
api_path = "ipam/prefixes"
@@ -596,7 +707,7 @@ class NBPrefixes(NetBoxObject):
"vlan": NBVLANs,
"vrf": NBVrfs,
"description": 200,
"tags": NBTags
"tags": NBTagList
}
def update(self, data=None, read_from_netbox=False, source=None):
@@ -636,7 +747,7 @@ class NBDeviceTypes(NetBoxObject):
"part_number": 50,
"description": 200,
"manufacturer": NBManufacturers,
"tags": NBTags
"tags": NBTagList
}
class NBPlatforms(NetBoxObject):
@@ -694,7 +805,7 @@ class NBClusters(NetBoxObject):
"type": NBClusterTypes,
"group": NBClusterGroups,
"site": NBSites,
"tags": NBTags
"tags": NBTagList
}
@@ -715,7 +826,7 @@ class NBDevices(NetBoxObject):
"asset_tag": 50,
"primary_ip4": object,
"primary_ip6": object,
"tags": NBTags
"tags": NBTagList
}
class NBVMs(NetBoxObject):
@@ -735,7 +846,7 @@ class NBVMs(NetBoxObject):
"comments": str,
"primary_ip4": object,
"primary_ip6": object,
"tags": NBTags
"tags": NBTagList
}
class NBVMInterfaces(NetBoxObject):
@@ -752,8 +863,9 @@ class NBVMInterfaces(NetBoxObject):
"mtu": int,
"mode": [ "access", "tagged", "tagged-all" ],
"untagged_vlan": NBVLANs,
"tagged_vlans": NBVLANList,
"description": 200,
"tags": NBTags
"tags": NBTagList
}
class NBInterfaces(NetBoxObject):
@@ -773,9 +885,10 @@ class NBInterfaces(NetBoxObject):
"mtu": int,
"mode": [ "access", "tagged", "tagged-all" ],
"untagged_vlan": NBVLANs,
"tagged_vlans": NBVLANList,
"description": 200,
"connection_status": bool,
"tags": NBTags
"tags": NBTagList
}
@@ -790,7 +903,7 @@ class NBIPAddresses(NetBoxObject):
"assigned_object_id": [ NBInterfaces, NBVMInterfaces ],
"description": 200,
"dns_name": 255,
"tags": NBTags,
"tags": NBTagList,
"tenant": NBTenants,
"vrf": NBVrfs
}
@@ -848,7 +961,7 @@ class NBIPAddresses(NetBoxObject):
This is hard coded in here. Update it if the data_model attribute changes!
"""
return [ NBInterfaces, NBVMInterfaces, NBTags ]
return [ NBInterfaces, NBVMInterfaces, NBTags, NBTenants, NBVrfs ]
+40 -22
@@ -41,6 +41,7 @@ class VMWareHandler():
]
settings = {
"enabled": True,
"host_fqdn": None,
"port": 443,
"username": None,
@@ -100,13 +101,20 @@ class VMWareHandler():
self.parse_config_settings(settings)
self.create_session()
self.source_tag = f"Source: {name}"
self.site_name = f"vCenter: {name}"
if self.session is not None:
self.init_successfull = True
if self.enabled is False:
log.info(f"Source '{name}' is currently disabled. Skipping")
return
self.create_session()
if self.session is None:
log.info(f"Source '{name}' is currently unavailable. Skipping")
return
self.init_successfull = True
def parse_config_settings(self, config_settings):
@@ -215,13 +223,14 @@ class VMWareHandler():
atexit.register(Disconnect, instance)
self.session = instance.RetrieveContent()
except (gaierror, vim.fault.InvalidLogin, OSError) as e:
except (gaierror, OSError) as e:
log.error(
f"Unable to connect to vCenter instance '{self.host_fqdn}' on port {self.port}. "
f"Reason: {e}"
)
return False
except vim.fault.InvalidLogin as e:
log.error(f"Unable to connect to vCenter instance '{self.host_fqdn}' on port {self.port}. {e.msg}")
return False
log.info(f"Successfully connected to vCenter '{self.host_fqdn}'")
@@ -256,10 +265,10 @@ class VMWareHandler():
"view_type": vim.ClusterComputeResource,
"view_handler": self.add_cluster
},
"virtual switch": {
"view_type": vim.DistributedVirtualSwitch,
"view_handler": self.add_virtual_switch
},
# "virtual switch": {
# "view_type": vim.DistributedVirtualSwitch,
# "view_handler": self.add_virtual_switch
# },
"network": {
"view_type": vim.dvs.DistributedVirtualPortgroup,
"view_handler": self.add_port_group
@@ -276,6 +285,7 @@ class VMWareHandler():
"view_type": vim.VirtualMachine,
"view_handler": self.add_virtual_machine
}
}
for view_name, view_details in object_mapping.items():
@@ -349,7 +359,7 @@ class VMWareHandler():
# check if site was provided in config
config_name = "host_site_relation" if object_type == NBDevices else "cluster_site_relation"
site_relations = getattr(self, config_name, list())
site_relations = grab(self, config_name, fallback=list())
for site_relation in site_relations:
object_regex = site_relation.get("object_regex")
@@ -436,6 +446,7 @@ class VMWareHandler():
def _matches_device_primary_ip(device_primary_ip, ip_needle):
ip = None
if device_primary_ip is not None and ip_needle is not None:
if isinstance(device_primary_ip, dict):
ip = grab(device_primary_ip, "address")
@@ -653,7 +664,8 @@ class VMWareHandler():
if device_vm_object is None:
log.debug(f"No exiting {object_type.name} object. Creating a new {object_type.name}.")
object_name = object_data.get(object_type.primary_key)
log.debug(f"No exiting {object_type.name} object for {object_name}. Creating a new {object_type.name}.")
device_vm_object = self.inventory.add_update_object(object_type, data=object_data, source=self)
else:
device_vm_object.update(data=object_data, source=self)
@@ -1113,6 +1125,7 @@ class VMWareHandler():
# check vlans on this pnic
pnic_vlans = list()
for pg_name, pg_data in host_portgroups.items():
if pnic_name in pg_data.get("nics", list()):
@@ -1145,12 +1158,11 @@ class VMWareHandler():
# determine interface mode for non VM traffic NICs
if len(pnic_vlans) > 0:
if len(pnic_vlans) == 1 and pnic_vlans[0].get("vid") == 0:
vlan_ids = list(set([x.get("vid") for x in pnic_vlans]))
if len(vlan_ids) == 1 and vlan_ids[0] == 0:
pnic_data["mode"] = "access"
elif 0 in [x.get("vid") for x in pnic_vlans]:
pnic_mode = "tagged"
else:
pnic_mode = "tagged-all"
pnic_data["mode"] = "tagged"
tagged_vlan_list = list()
for pnic_vlan in pnic_vlans:
@@ -1192,7 +1204,10 @@ class VMWareHandler():
if vnic_portgroup_data is not None:
vnic_vlan_id = vnic_portgroup_data.get("vlan_id")
vnic_vswitch = vnic_portgroup_data.get("vswitch")
vnic_description = f"{vnic_description} ({vnic_vswitch}, vlan ID: {vnic_vlan_id})"
if vnic_vlan_id != 0:
vnic_description = f"{vnic_description} ({vnic_vswitch}, vlan ID: {vnic_vlan_id})"
else:
vnic_description = f"{vnic_description} ({vnic_vswitch})"
vnic_data = {
"name": vnic_name,
@@ -1204,7 +1219,7 @@ class VMWareHandler():
"mode": "access",
}
if vnic_portgroup_data is not None:
if vnic_portgroup_data is not None and vnic_vlan_id != 0:
vnic_data["untagged_vlan"] = {
"name": f"ESXi {vnic_portgroup} (ID: {vnic_vlan_id}) ({site_name})",
@@ -1218,7 +1233,10 @@ class VMWareHandler():
# check if interface has the default route or is described as management interface
vnic_is_primary = False
if "management" in vnic_description.lower() or grab(vnic, "spec.ipRouteSpec") is not None:
if "management" in vnic_description.lower() or \
"mgmt" in vnic_description.lower() or \
grab(vnic, "spec.ipRouteSpec") is not None:
vnic_is_primary = True
vnic_ips[vnic_name] = list()
@@ -1399,9 +1417,9 @@ class VMWareHandler():
int_network_vlan_ids = grab(int_portgroup_data, "vlan_ids")
if len(int_network_vlan_ids) == 1:
int_mode == "access"
int_mode = "access"
else:
int_mode == "tagged-all"
int_mode = "tagged-all"
# ToDo:
# reading mtu from settings for host DVS is unreliable
+6 -21
@@ -47,9 +47,6 @@ ToDo:
* ensure NTP is set up properly between all instances (pruning delay)
* test all log levels
* check for ToDo/Fixme/pprint statements
* new netbox class list of objects
* add tagged_vlans list to interfaces
* change tags object to list object
"""
def main():
@@ -113,26 +110,14 @@ def main():
# all sources are unavailable
if len(sources) == 0:
do_error_exit("No working sources found. Exit.")
log.error("No working sources found. Exit.")
exit(1)
# collect all dependent object classes
netbox_objects_to_query = list()
for source in sources:
netbox_objects_to_query.extend(source.dependend_netbox_objects)
# we need to collect prefixes as well to see which IP belongs to which prefix
# ToDo:
# * add dependencies somewhere else
# * might be obsolete if update_all_ip_addresses is removed
netbox_objects_to_query.append(NBPrefixes)
netbox_objects_to_query.append(NBTenants)
netbox_objects_to_query.append(NBVrfs)
netbox_objects_to_query.append(NBVLANs)
# request NetBox data
# ToDo: remove set and check in function if data has already been fetched
log.info("Querying necessary objects from Netbox. This might take a while.")
NB_handler.query_current_data(list(set(netbox_objects_to_query)))
for source in sources:
NB_handler.query_current_data(source.dependend_netbox_objects)
log.info("Finished querying necessary objects from Netbox")
# resolve object relations within the initial inventory
@@ -149,7 +134,7 @@ def main():
inventory.tag_all_the_things(NB_handler)
# update all IP addresses
inventory.update_all_ip_addresses()
inventory.query_ptr_records_for_all_ips()
# update data in NetBox
NB_handler.update_instance()
+3 -12
@@ -1,13 +1,4 @@
aiodns==2.0.0
certifi==2019.11.28
cffi==1.14.0
chardet==3.0.4
idna==2.8
pycares==3.1.1
pycparser==2.19
pyvmomi==6.7.3
requests==2.24.0
six==1.13.0
typing==3.7.4.1
urllib3==1.25.7
packaging
requests==2.24.0
pyvmomi==6.7.3
aiodns==2.0.0
+79 -23
@@ -1,56 +1,114 @@
### Welcome to the NetBox-Sync configuration file.
# The values in this file override the default values used by the system.
# If a config option is not specified the default value applies. The commented
# out lines show the configuration field and the default value used. Uncommenting
# a line and changing the value will change the value used at runtime when the
# process is restarted.
###
### [common]
###
### Controls the parameters for logging.
###
[common]
log_level = INFO
# Logs will always be printed to stdout/stderr.
# Logging can be set to the following log levels:
# ERROR: Fatal errors which stop a regular run
# WARNING: Warning messages won't stop the syncing process but are mostly
#          worth a look.
# INFO: Information about objects that will be created/updated/deleted in NetBox
# DEBUG: Will log information about retrieved data, changes in internal
#        data structures and the parsed config
# DEBUG2: Will also log information about how/why data is parsed or skipped.
# DEBUG3: Logs all source and NetBox queries/results to stdout. Very useful for
# troubleshooting, but will log any sensitive data contained within a query.
#
# log_level = INFO
# Places all logs in a rotating file if True
#log_to_file = False
# Enabling this option will write all logs to the log file defined in "log_file"
# log_to_file = False
#log_file = log/netbox_sync.log
# Destination of the log file if "log_to_file" is enabled.
# The log file will be rotated a maximum of 5 times once it reaches a size of 10 MB
# log_file = log/netbox_sync.log
# define different sources
# can also be defined multiple times with different settings
# currently supported
# * vmware : VMware vcenter
###
### [netbox]
###
### Controls the connection parameters to your NetBox instance
###
[netbox]
# Requires a NetBox API token with full permissions on all objects except:
# * auth
# * secrets
# * users
api_token = XYZ
# Requires a hostname or IP which points to your NetBox instance
host_fqdn = netbox.example.com
#port = 443
# Define the port your NetBox instance is listening on. If "disable_tls" is
# set to "true" this option might be set to 80
# port = 443
#disable_tls = false
#validate_tls_certs = true
# Whether TLS encryption is enabled or disabled.
# disable_tls = false
#prune_enabled = true
# Enforces TLS certificate validation. If this system doesn't trust the NetBox
# web server certificate then this option needs to be changed
# validate_tls_certs = true
#prune_delay_in_days = 30
# Whether items which were created by this program but can't be found in any
# source anymore will be deleted or not.
# prune_enabled = true
#default_netbox_result_limit = 200
# Orphaned objects will first be tagged before they get deleted. Once that
# many days have passed the object will actually be deleted.
# prune_delay_in_days = 30
#timeout = 30
# The maximum number of objects returned in a single request. If a NetBox
# instance responds very quickly, this value can be raised.
# default_netbox_result_limit = 200
#max_retry_attempts = 4
# The maximum time a query is allowed to execute before being killed and considered failed.
# timeout = 30
# The number of times a failed request will be reissued. Once the maximum is reached the
# syncing process will be stopped completely.
# max_retry_attempts = 4
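For orientation, the [netbox] defaults above end up as headers and query parameters on every GET the script issues (see the request() changes in the first file). A rough hand-rolled equivalent using requests; hostname and token are the example values from this file, and the real client additionally handles retries and pagination:

import requests

headers = {"Authorization": "Token XYZ"}             # api_token
params = {
    "limit": 200,                                    # default_netbox_result_limit
    "exclude": "config_context",                     # always set by the script
}
response = requests.get(
    "https://netbox.example.com/api/dcim/devices/",  # host_fqdn + API path
    headers=headers, params=params, timeout=30,      # timeout
)
response.raise_for_status()
devices = response.json().get("results", [])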
#####################
# S O U R C E S #
#####################
###
### [sources/*]
###
### Controls the parameters of a defined source. The string past the slash
### will be used as the source's name. Sources can be defined multiple times to
### represent different sources. It is planned to support different types of sources.
### Currently supported
### * vmware : VMware vCenter
[source/my-example]
#
# enabled = true
type = vmware
host_fqdn = vcenter.example.com
port = 443
# port = 443
username = vcenteruser
password = supersecret
permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fd00::/8
# filters are all treated as regular expressions
#host_exclude_filter =
#host_include_filter =
@@ -82,8 +140,6 @@ permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fd00::/8
# * when-undefined (default) (only sets primary IP if undefined, will cause ERRORs if the same IP is assigned more than once to different hosts and the IP is set as the object's primary IP)
# * never (don't set any primary IPs, will cause the same ERRORs as "when-undefined")
set_primary_ip = always
# set_primary_ip = when-undefined
# EOF