fixes handling of IP address objects, adds lots of data validation

This commit is contained in:
Ricardo Bartels
2020-10-29 16:16:45 +01:00
parent 921fde0753
commit 3eede8e90d
9 changed files with 558 additions and 339 deletions
+18 -10
View File
@@ -1,7 +1,7 @@
import sys
from modules import plural
from module import plural
def grab(structure=None, path=None, separator=".", fallback=None):
"""
@@ -9,7 +9,7 @@ def grab(structure=None, path=None, separator=".", fallback=None):
"." separated path information. If a part of a path
is not not present then this function returns the
value of fallback (default: "None").
example structure:
data_structure = {
"rows": [{
@@ -26,7 +26,7 @@ def grab(structure=None, path=None, separator=".", fallback=None):
"rows.0.elements.0.distance.value"
example return value:
15193
Parameters
----------
structure: dict, list, object
@@ -38,7 +38,7 @@ def grab(structure=None, path=None, separator=".", fallback=None):
contains the default (.) separator.
fallback: dict, list, str, int
data to return if no match was found.
Returns
-------
str, dict, list
@@ -64,7 +64,7 @@ def grab(structure=None, path=None, separator=".", fallback=None):
for attribute in r_path.split(separator):
if isinstance(r_structure, dict):
r_structure = {k.lower(): v for k, v in r_structure.items()}
try:
if isinstance(r_structure, list):
data = r_structure[int(attribute)]
@@ -72,7 +72,7 @@ def grab(structure=None, path=None, separator=".", fallback=None):
data = r_structure.get(attribute.lower())
else:
data = getattr(r_structure, attribute)
except Exception:
return fallback
@@ -83,13 +83,13 @@ def grab(structure=None, path=None, separator=".", fallback=None):
return traverse(structure, path)
def dump(obj):
    """Print every readable attribute of *obj* as 'obj.<name> = <value>'."""
    for name in dir(obj):
        # dir() may list names whose lookup fails; only print readable ones
        if not hasattr(obj, name):
            continue
        print(f"obj.{name} = {getattr(obj, name)}")
def do_error_exit(log_text):
"""log an error and exit with return code 1
Parameters
@@ -118,4 +118,12 @@ def get_relative_time(delta):
return ", ".join(return_string)
def get_string_or_none(text=None):
    """
    Return str(*text*) stripped of surrounding whitespace.

    Returns None when *text* is None or when the stripped string is empty.
    """
    if text is None:
        return None
    stripped = str(text).strip()
    return stripped if len(stripped) > 0 else None
# EOF
+1 -9
View File
@@ -3,17 +3,8 @@ from ipaddress import ip_network, ip_interface
import aiodns
import logging
def format_ip(ip_addr):
"""
Formats IPv4 addresses and subnet to IP with CIDR standard notation.
:param ip_addr: IP address with subnet; example `192.168.0.0/255.255.255.0`
:type ip_addr: str
:return: IP address with CIDR notation; example `192.168.0.0/24`
:rtype: str
"""
try:
return ip_interface(ip_addr).compressed
except Exception:
@@ -32,3 +23,4 @@ def normalize_mac_address(mac_address=None):
return mac_address
# EOF
+33 -30
View File
@@ -42,16 +42,19 @@ class NetBoxHandler:
"timeout": 30,
"max_retry_attempts": 4
}
primary_tag = "NetBox-synced"
orphaned_tag = f"{primary_tag}: Orphaned"
inventory = None
instance_tags = None
instance_interfaces = {}
instance_virtual_interfaces = {}
# testing option
use_netbox_caching_for_testing = False
def __init__(self, cli_args=None, settings=None, inventory=None):
self.settings = settings
@@ -59,7 +62,7 @@ class NetBoxHandler:
# set primary tag
setattr(self.inventory, "primary_tag", self.primary_tag)
self.parse_config_settings(settings)
proto = "https"
@@ -73,7 +76,7 @@ class NetBoxHandler:
self.url = f"{proto}://{self.host_fqdn}{port}/api/"
self.session = self.create_session()
# check for minimum version
if version.parse(self.get_api_version()) < version.parse(self.minimum_api_version):
do_error_exit(f"Netbox API version '{self.api_version}' not supported. "
@@ -110,7 +113,7 @@ class NetBoxHandler:
session = requests.Session()
session.headers.update(header)
log.debug("Created new Session for NetBox.")
log.debug("Created new requests Session for NetBox.")
return session
@@ -132,6 +135,7 @@ class NetBoxHandler:
result = str(response.headers["API-Version"])
log.info(f"Successfully connected to NetBox '{self.host_fqdn}'")
log.debug(f"Detected NetBox API v{result}.")
return result
@@ -149,9 +153,9 @@ class NetBoxHandler:
if params is None:
params = dict()
params["limit"] = self.default_netbox_result_limit
# prepare request
this_request = self.session.prepare_request(
requests.Request(req_type, request_url, params=params, json=data)
@@ -195,7 +199,7 @@ class NetBoxHandler:
elif response.status_code >= 400 and response.status_code < 500:
log.error(f"NetBox returned: {this_request.method} {this_request.path_url} {response.reason}")
log.debug(f"NetBox returned body: {result}")
log.error(f"NetBox returned body: {result}")
result = None
elif response.status_code >= 500:
@@ -231,44 +235,45 @@ class NetBoxHandler:
do_error_exit(f"Giving up after {self.max_retry_attempts} retries.")
log.debug2("Received HTTP Status %s.", req.status_code)
return req
def query_current_data(self, netbox_objects_to_query=None):
if netbox_objects_to_query is None:
raise AttributeError(f"Argument netbox_objects_to_query is: '{netbox_objects_to_query}'")
raise AttributeError(f"Attribute netbox_objects_to_query is: '{netbox_objects_to_query}'")
# query all dependencies
for nb_object_class in netbox_objects_to_query:
if nb_object_class not in NetBoxObject.__subclasses__():
raise AttributeError(f"Class '{nb_object_class.__name__}' must be a subclass of '{NetBoxObject.__name__}'")
cached_nb_data = None
try:
cached_nb_data = pickle.load( open( f"cache/{nb_object_class.__name__}.cache", "rb" ) )
#pprint.pprint(cached_nb_data)
except Exception:
pass
if self.use_netbox_caching_for_testing is True:
try:
cached_nb_data = pickle.load( open( f"cache/{nb_object_class.__name__}.cache", "rb" ) )
except Exception:
pass
nb_data = dict()
if cached_nb_data is None:
# get all objects of this class
log.debug(f"Requesting {nb_object_class.name}s from NetBox")
nb_data = self.request(nb_object_class)
pickle.dump(nb_data.get("results"), open( f"cache/{nb_object_class.__name__}.cache", "wb" ) )
if self.use_netbox_caching_for_testing is True:
pickle.dump(nb_data.get("results"), open( f"cache/{nb_object_class.__name__}.cache", "wb" ) )
else:
nb_data["results"] = cached_nb_data
if nb_data.get("results") is None:
log.warning(f"Result data from NetBox for object {nb_object_class.__name__} missing!")
continue
log.debug(f"Processing %s returned {nb_object_class.name}%s" % (len(nb_data.get("results")),plural(len(nb_data.get("results")))))
for object_data in nb_data.get("results"):
self.inventory.add_item_from_netbox(nb_object_class, data=object_data)
@@ -314,16 +319,14 @@ class NetBoxHandler:
if object.is_new is True:
object.updated_items = object.data.keys()
for key, value in object.data.items():
if key in object.updated_items:
object_type = object.data_model.get(key)
if object_type == NBTags:
if key == "tags":
data_to_patch[key] = [{"name": d.get_display_name()} for d in value]
elif object_type in NetBoxObject.__subclasses__():
elif isinstance(value, NetBoxObject):
data_to_patch[key] = value.get_nb_reference()
if value.nb_id == 0:
@@ -341,7 +344,7 @@ class NetBoxHandler:
log.info("Creating new NetBox '%s' object: %s" % (object.name, object.get_display_name()))
returned_object_data = self.request(nb_object_sub_class, req_type="POST", data=data_to_patch)
issued_request = True
if object.is_new is False and len(object.updated_items) > 0:
@@ -351,14 +354,14 @@ class NetBoxHandler:
returned_object_data = self.request(nb_object_sub_class, req_type="PATCH", data=data_to_patch, nb_id=object.nb_id)
issued_request = True
if returned_object_data is not None:
object.update(data = returned_object_data, read_from_netbox=True)
elif issued_request is True:
log.error(f"Request Failed for {nb_object_sub_class.name}. Used data: {data_to_patch}")
pprint.pprint(object.to_dict())
# pprint.pprint(object.to_dict())
# add class to resolved dependencies
self.inventory.resolved_dependencies = list(set(self.inventory.resolved_dependencies + [nb_object_sub_class] ))
+58 -55
View File
@@ -1,41 +1,40 @@
import logging
import json
import pprint
import json
from ipaddress import ip_address, ip_network, ip_interface
from module.netbox.object_classes import *
from module.common.logging import get_logger
log = get_logger()
class NetBoxInventorySearchResult:
members = list()
class NetBoxInventory:
base_structure = dict()
resolved_dependencies = list()
primary_tag = None
def __init__(self):
for object_type in NetBoxObject.__subclasses__():
self.base_structure[object_type.name] = list()
def get_by_id(self, object_type, id=None):
if object_type not in NetBoxObject.__subclasses__():
raise AttributeError("'%s' object must be a sub class of '%s'." %
(object_type.__name__, NetBoxObject.__name__))
if id is None or self.base_structure[object_type.name] is None:
return None
for object in self.base_structure[object_type.name]:
if object.nb_id == id:
return object
@@ -52,14 +51,14 @@ class NetBoxInventory:
if self.base_structure[object_type.name] is None:
return None
if not isinstance(data, dict):
# ToDo:
# * proper handling
log.error("data is not dict")
pprint.pprint(data)
exit(0)
# shortcut if data contains valid id
data_id = data.get("id")
if data_id is not None and data_id != 0:
@@ -70,62 +69,62 @@ class NetBoxInventory:
object_name_to_find = None
results = list()
for object in self.base_structure[object_type.name]:
# Todo:
# * try to compare second key if present.
if object_name_to_find is None:
object_name_to_find = object.get_display_name(data)
#print(f"get_by_data(): Object Display Name: {object_name_to_find}")
if object_name_to_find == object.get_display_name():
results.append(object)
# found exactly one match
# ToDo:
# * add force secondary key if one object has a secondary key
if len(results) == 1:
#print(f"found exact match: {object_name_to_find}")
return results[0]
# compare secondary key
elif len(results) > 1:
object_name_to_find = None
for object in results:
if object_name_to_find is None:
object_name_to_find = object.get_display_name(data, including_second_key=True)
#print(f"get_by_data(): Object Display Name: {object_name_to_find}")
if object_name_to_find == object.get_display_name(including_second_key=True):
return object
# try to match all data attributes
else:
for object in self.base_structure[object_type.name]:
all_items_match = True
for attr_name, attr_value in data.items():
if object.data.get(attr_name) != attr_value:
all_items_match = False
break
if all_items_match == True:
return object
"""
if data.get(object_type.primary_key) is not None and \
object.resolve_attribute(object_type.primary_key) == object.resolve_attribute(object_type.primary_key, data=data):
# object type has a secondary key, lets check if it matches
if getattr(object_type, "secondary_key", None) is not None and data.get(object_type.secondary_key) is not None:
if object.resolve_attribute(object_type.secondary_key) == object.resolve_attribute(object_type.secondary_key, data=data):
return_data.append(object)
# object has no secondary key but the same name, add to list
else:
return_data.append(object)
@@ -136,17 +135,17 @@ class NetBoxInventory:
"""
only to be used if data is read from NetBox and added to inventory
"""
# create new object
new_object = object_type(data, read_from_netbox=True, inventory=self)
# add to inventory
self.base_structure[object_type.name].append(new_object)
return
def add_update_object(self, object_type, data=None, read_from_netbox=False, source=None):
if data is None:
# ToDo:
# * proper error handling
@@ -162,33 +161,31 @@ class NetBoxInventory:
log.debug(f"Created new {this_object.name} object: {this_object.get_display_name()}")
else:
# ToDo:
# * resolve relations if updated from netbox
this_object.update(data, read_from_netbox=read_from_netbox, source=source)
log.debug("Updated %s object: %s" % (this_object.name, this_object.get_display_name()))
return this_object
def resolve_relations(self):
"""Ask every inventory object to resolve its references to other objects."""
log.debug("Start resolving relations")
# iterate over all known NetBox object classes and every stored instance
for object_type in NetBoxObject.__subclasses__():
for object in self.base_structure.get(object_type.name, list()):
object.resolve_relations()
log.debug("Finished resolving relations")
def get_all_items(self, object_type):
"""
Return the list of all inventory objects of *object_type*.

Parameters
----------
object_type: class
    must be a subclass of NetBoxObject

Raises
------
AttributeError
    if object_type is not a NetBoxObject subclass
"""
if object_type not in NetBoxObject.__subclasses__():
raise AttributeError("'%s' object must be a sub class of '%s'." %
(object_type.__name__, NetBoxObject.__name__))
# fall back to an empty list if this type has no entries yet
return self.base_structure.get(object_type.name, list())
def tag_all_the_things(self, sources, netbox_handler):
# ToDo:
@@ -199,43 +196,49 @@ class NetBoxInventory:
# * DONE: objects tagged as orphaned but are present again (remove)
source_tags = [x.source_tag for x in sources]
for object_type in NetBoxObject.__subclasses__():
if self.base_structure[object_type.name] is None:
continue
for object in self.base_structure[object_type.name]:
# if object was found in source
if object.source is not None:
object.add_tags([netbox_handler.primary_tag, object.source.source_tag])
# if object was orphaned remove tag again
if netbox_handler.orphaned_tag in object.get_tags():
object.remove_tags(netbox_handler.orphaned_tag)
# if object was tagged by a source in previous runs but is not present
# anymore then add the orphaned tag
else:
for source_tag in source_tags:
if source_tag in object.get_tags():
object.add_tags(netbox_handler.orphaned_tag)
def update_all_ip_addresses(self):
    """
    Collect all prefix and IP address objects from the inventory.

    NOTE(review): looks like work in progress — the collected objects are
    not processed any further yet.
    """
    # bug fix: the original referenced the undefined name 'slef' (typo for
    # 'self'), which raised a NameError as soon as this method was called
    all_prefixes = self.get_all_items(NBPrefixes)
    address = self.get_all_items(NBIPAddresses)
def to_dict(self):
"""
Serialize the whole inventory as a dict: one list of plain-dict object
representations per NetBox object class, keyed by the class' name.
"""
output = dict()
for nb_object_class in NetBoxObject.__subclasses__():
output[nb_object_class.name] = list()
# each object knows how to serialize itself
for object in self.base_structure[nb_object_class.name]:
output[nb_object_class.name].append(object.to_dict())
return output
def __str__(self):
# pretty-printed JSON dump of the whole inventory (handy for debugging)
return json.dumps(self.to_dict(), sort_keys=True, indent=4)
# EOF
+192 -111
View File
@@ -1,4 +1,5 @@
import json
import logging
@@ -15,10 +16,9 @@ class NetBoxObject():
"is_new": True,
"nb_id": 0,
"updated_items": list(),
"is_present_in_source": False,
"source": None,
}
# keep handle to inventory instance to append objects on demand
inventory = None
@@ -33,62 +33,75 @@ class NetBoxObject():
# store provided inventory handle
self.inventory = inventory
# store source handle
if source is not None:
self.source = source
self.update(data=data, read_from_netbox=read_from_netbox)
def __repr__(self):
return "<%s instance '%s' at %s>" % (self.__class__.__name__, self.get_display_name(), id(self))
def to_dict(self):
out = dict()
for key in dir(self):
value = getattr(self, key)
if "__" in key:
continue
if callable(value) is True:
continue
if key in ["inventory", "default_attributes"]:
if key in ["inventory", "default_attributes", "data_model_relation"]:
continue
if key == "source":
value = getattr(value, "name", None)
if key == "data_model":
data_model = dict()
for dkey, dvalue in value.items():
if isinstance(dvalue, list):
new_dvalue = list()
for possible_option in dvalue:
if possible_option in NetBoxObject.__subclasses__():
new_dvalue.append(str(possible_option))
else:
new_dvalue.append(possible_option)
dvalue = new_dvalue
# if value is class name then print class name
if type(dvalue) == type:
dvalue = str(dvalue)
data_model[dkey] = dvalue
value = data_model
if key == "data":
data = dict()
for dkey, dvalue in value.items():
# if value is class name then print class name
if isinstance(dvalue, NetBoxObject):
dvalue = repr(dvalue)
if dkey == "tags":
dvalue = [x.get_display_name() for x in dvalue]
data[dkey] = dvalue
value = data
out[key] = value
return out
def __str__(self):
return json.dumps(self.to_dict(), sort_keys=True, indent=4)
def __iter__(self):
for key, value in self.to_dict():
yield (key, value)
@@ -105,8 +118,8 @@ class NetBoxObject():
"""
if text is None or len(text) == 0:
raise AttributeError("Argument 'text' can't be None or empty!")
raise AttributeError("Argument 'text' can't be None or empty!")
permitted_chars = (
"abcdefghijklmnopqrstuvwxyz" # alphabet
"0123456789" # numbers
@@ -119,18 +132,18 @@ class NetBoxObject():
# Strip unacceptable characters
text = "".join([c for c in text.lower() if c in permitted_chars])
# Enforce max length
return text[0:max_len]
def update(self, data=None, read_from_netbox=False, source=None):
if data is None:
return
if not isinstance(data, dict):
raise AttributeError("Argument 'data' needs to be a dict!")
raise AttributeError("Argument 'data' needs to be a dict!")
if data.get("id") is not None:
self.nb_id = data.get("id")
@@ -138,65 +151,71 @@ class NetBoxObject():
self.is_new = False
self.data = data
self.updated_items = list()
return
if source is not None:
self.source = source
display_name = self.get_display_name(data)
log.debug(f"Parsing '{self.name}' data structure: {display_name}")
parsed_data = dict()
for key, value in data.items():
if key not in self.data_model.keys():
log.error(f"Found undefined data model key '{key}' for object '{self.__class__.__name__}'")
continue
# skip unset values
if value is None:
log.info(f"Found unset/empty key: {key}")
continue
# check data model to see how we have to parse the value
defined_value_type = self.data_model.get(key)
# value must be a string witch a certain max length
if isinstance(defined_value_type, int):
if not isinstance(value, str):
log.error(f"Invalid data type for '{self.__class__.__name__}.{key}' (must be str), got: '{value}'")
continue
value = value[0:defined_value_type]
if key == "slug":
value = self.format_slug(text=value, max_len=defined_value_type)
else:
value = value[0:defined_value_type]
if isinstance(defined_value_type, list):
if value not in defined_value_type:
if isinstance(value, NetBoxObject):
if type(value) not in defined_value_type:
log.error(f"Invalid data type for '{key}' (must be one of {defined_value_type}), got: '{type(value)}'")
continue
elif value not in defined_value_type:
log.error(f"Invalid data type for '{key}' (must be one of {defined_value_type}), got: '{value}'")
continue
# just check the type of the value
type_check_faild = False
for valid_type in [bool, str, int]:
if defined_value_type == valid_type and not isinstance(value, valid_type):
log.error(f"Invalid data type for '{key}' (must be {valid_type.__name__}), got: '{value}'")
type_check_faild = True
break
if type_check_faild is True:
continue
# this is meant to be reference to a different object
if defined_value_type in NetBoxObject.__subclasses__():
# tags need to be treated as list of dictionaries
if defined_value_type == NBTags:
self.add_tags(value)
@@ -205,15 +224,14 @@ class NetBoxObject():
if not isinstance(value, NetBoxObject):
# try to find object.
value = self.inventory.add_update_object(defined_value_type, data=value)
# add to parsed data dict
parsed_data[key] = value
# add/update slug
# if data model contains a slug we need to handle it
# so far slug is always referenced to "name"
if "slug" in self.data_model.keys() and data.get("slug") is None and data.get(self.primary_key) is not None:
if "slug" in self.data_model.keys() and parsed_data.get("slug") is None and parsed_data.get(self.primary_key) is not None:
parsed_data["slug"] = self.format_slug(text=parsed_data.get(self.primary_key), max_len=self.data_model.get("slug"))
# this is a new set of data
@@ -223,104 +241,108 @@ class NetBoxObject():
# add empty tag list if not tags were provided
if "tags" in self.data_model.keys() and data.get("tags") is None:
self.data["tags"] = list()
# see if data just got updated and mark it as such.
else:
for key, new_value in parsed_data.items():
# nothing changed, continue with next key
current_value = self.data.get(key)
if current_value == new_value:
continue
if self.data_model.get(key) in NetBoxObject.__subclasses__():
if isinstance(current_value, NetBoxObject):
current_value_str = current_value.get_display_name()
else:
current_value_str = str(current_value)
new_value_str = new_value.get_display_name()
# if data model is a list then we need to read the netbox data value
# get current value str
if isinstance(current_value, NetBoxObject):
current_value_str = str(current_value.get_display_name())
# if data model is a list then we need to read the netbox data value
elif isinstance(self.data_model.get(key), list) and isinstance(current_value, dict):
current_value_str = str(current_value.get("value"))
new_value_str = str(new_value)
else:
current_value_str = str(current_value).replace("\r","")
# get new value str
if isinstance(new_value, NetBoxObject):
new_value_str = str(new_value.get_display_name())
else:
new_value_str = str(new_value).replace("\r","")
# just check again if values might match now
if current_value_str == new_value_str:
continue
self.data[key] = new_value
self.updated_items.append(key)
log.info(f"{self.name.capitalize()} '{display_name}' attribute '{key}' changed from '{current_value_str}' to '{new_value_str}'")
self.resolve_relations()
def get_display_name(self, data=None, including_second_key=False):
"""
Return a human readable name for this object, based on its primary key.
If a secondary key is defined and enforced (or explicitly requested) its
value is appended in parentheses.

Parameters
----------
data: dict
    alternative data set to derive the name from (defaults to self.data)
including_second_key: bool
    if True, always include the secondary key value in the name

Returns
-------
str or None
    display name, or None if no data is available
"""
this_data_set = data
if data is None:
this_data_set = self.data
if this_data_set is None:
return None
my_name = this_data_set.get(self.primary_key)
secondary_key = getattr(self, "secondary_key", None)
enforce_secondary_key = getattr(self, "enforce_secondary_key", False)
if secondary_key is not None and (enforce_secondary_key is True or including_second_key is True):
secondary_key_value = this_data_set.get(secondary_key)
# the secondary key may itself be a NetBoxObject; use its display name then
if isinstance(secondary_key_value, NetBoxObject):
secondary_key_value = secondary_key_value.get_display_name()
if secondary_key_value is not None:
#import pprint
#pprint.pprint(this_data_set)
my_name = f"{my_name} ({secondary_key_value})"
return my_name
def resolve_relations(self):
for key, value in self.data_model.items():
if self.data.get(key) is None:
continue
# continue if value is not an NetBox object
if value not in NetBoxObject.__subclasses__():
continue
data_value = self.data.get(key)
resolved_data = None
if value == NBTags:
resolved_tag_list = list()
for tag in data_value:
if isinstance(tag, NetBoxObject):
tag_object = tag
else:
tag_object = self.inventory.get_by_data(value, data=tag)
if tag_object is not None:
resolved_tag_list.append(tag_object)
resolved_data = resolved_tag_list
else:
if data_value is None:
continue
if isinstance(data_value, NetBoxObject):
resolved_data = data_value
else:
@@ -331,91 +353,102 @@ class NetBoxObject():
data_to_find = data_value
resolved_data = self.inventory.get_by_data(value, data=data_to_find)
if resolved_data is not None:
self.data[key] = resolved_data
else:
log.error(f"Problems resolving relation '{key}' for object '%s' and value '%s'" % (self.get_display_name(), data_value))
def raw(self):
return self.data
def get_dependencies(self):
return [x for x in self.data_model.values() if x in NetBoxObject.__subclasses__()]
def get_tags(self):
return [x.get_display_name() for x in self.data.get("tags", list())]
def update_tags(self, tags, remove=False):
if tags is None or NBTags not in self.data_model.values():
return
action = "Adding" if remove is False else "Removing"
log.debug(f"{action} Tag: {tags}")
log.debug2(f"{action} Tags: {tags}")
new_tags = list()
def extract_tags(this_tags):
if isinstance(this_tags, str):
new_tags.append(this_tags)
elif isinstance(this_tags, dict) and this_tags.get("name") is not None:
new_tags.append(this_tags.get("name"))
if isinstance(tags, list):
for tag in tags:
extract_tags(tag)
else:
extract_tags(tags)
log.debug(f"Tag list: {new_tags}")
log.debug2(f"Tag list: {new_tags}")
current_tags = self.get_tags()
tag_has_changed = False
for tag_name in new_tags:
if tag_name not in current_tags and remove == False:
# add tag
tag = self.inventory.add_update_object(NBTags, data={"name": tag_name})
self.data["tags"].append(tag)
if self.is_new is False:
self.updated_items.append("tags")
tag_has_changed = True
if tag_name in current_tags and remove == True:
tag = self.inventory.get_by_data(NBTags, data={"name": tag_name})
self.data["tags"].remove(tag)
if self.is_new is False:
self.updated_items.append("tags")
tag_has_changed = True
new_tags = self.get_tags()
log.info(f"{self.name.capitalize()} '{self.get_display_name()}' attribute 'tags' changed from '{current_tags}' to '{new_tags}'")
if tag_has_changed is True:
log.debug(f"{self.name.capitalize()} '{self.get_display_name()}' attribute 'tags' changed from '{current_tags}' to '{new_tags}'")
def add_tags(self, tags_to_add):
"""Convenience wrapper around update_tags() to add tags to this object."""
self.update_tags(tags_to_add)
def remove_tags(self, tags_to_remove):
"""Convenience wrapper around update_tags() to remove tags from this object."""
self.update_tags(tags_to_remove, remove=True)
def get_nb_reference(self):
"""
Return the reference used when this object appears inside another
object's NetBox payload.

Returns
-------
NetBoxObject or int
    the object itself while it does not exist in NetBox yet
    (nb_id == 0), otherwise its NetBox ID
"""
# FIXME (original author's note): unverified whether returning 'self'
# here works for all callers — confirm downstream handling
if self.nb_id == 0:
return self
return self.nb_id
@@ -454,7 +487,7 @@ class NBDeviceTypes(NetBoxObject):
"manufacturer": NBManufacturers,
"tags": NBTags
}
class NBPlatforms(NetBoxObject):
name = "platform"
api_path = "dcim/platforms"
@@ -465,7 +498,7 @@ class NBPlatforms(NetBoxObject):
"manufacturer": NBManufacturers,
"description": 200
}
class NBClusterTypes(NetBoxObject):
name = "cluster type"
api_path = "virtualization/cluster-types"
@@ -475,7 +508,7 @@ class NBClusterTypes(NetBoxObject):
"slug": 50,
"description": 200
}
class NBClusterGroups(NetBoxObject):
name = "cluster group"
api_path = "virtualization/cluster-groups"
@@ -541,7 +574,7 @@ class NBDevices(NetBoxObject):
"asset_tag": 50,
"tags": NBTags
}
class NBVMs(NetBoxObject):
name = "virtual machine"
@@ -603,11 +636,59 @@ class NBIPAddresses(NetBoxObject):
primary_key = "address"
data_model = {
"address": str,
"assigned_object_type": str,
"assigned_object_id": int,
"assigned_object_type": ["dcim.interface", "virtualization.vminterface"],
"assigned_object_id": [ NBInterfaces, NBVMInterfaces ],
"description": 200,
"tags": NBTags
}
# add relation between two attributes
data_model_relation = {
"dcim.interface": NBInterfaces,
"virtualization.vminterface": NBVMInterfaces,
NBInterfaces: "dcim.interface",
NBVMInterfaces: "virtualization.vminterface"
}
def resolve_relations(self):
"""
Resolve the assigned-object relation before delegating to the generic
NetBoxObject.resolve_relations(): the class behind "assigned_object_id"
depends on the value of "assigned_object_type".
"""
o_id = self.data.get("assigned_object_id")
o_type = self.data.get("assigned_object_type")
# this needs special treatment as the object type depends on a second model key
if o_type is not None and o_type not in self.data_model.get("assigned_object_type"):
log.error("Attribute 'assigned_object_type' for '%s' invalid: %s" % \
(self.get_display_name(), o_type))
do_error_exit("Error while resolving relations for %s" % self.get_display_name())
if isinstance(o_id, int):
# map the type string to its NetBox class and look the object up by ID
self.data["assigned_object_id"] = self.inventory.get_by_id(self.data_model_relation.get(o_type), id=o_id)
super().resolve_relations()
def update(self, data=None, read_from_netbox=False, source=None):
    """
    Update this IP address object while keeping "assigned_object_type" and
    "assigned_object_id" consistent with each other.

    Parameters
    ----------
    data: dict
        new data to apply to this object
    read_from_netbox: bool
        True if the data originates from NetBox itself
    source: object
        source handler the data came from
    """
    # bug fix: the signature defaults data=None, but the original called
    # data.get(...) unconditionally and raised AttributeError; mirror the
    # base class behavior and return early on missing data
    if data is None:
        return

    # renamed local 'object' -> 'assigned_object' to avoid shadowing the builtin
    assigned_type = data.get("assigned_object_type")
    assigned_object = data.get("assigned_object_id")

    # we got an object data structure where we have to find the object
    if read_from_netbox is False:
        if not isinstance(assigned_object, NetBoxObject):
            data["assigned_object_id"] = \
                self.inventory.add_update_object(self.data_model_relation.get(assigned_type), data=assigned_object)
        else:
            # derive the type string from the object's class
            data["assigned_object_type"] = self.data_model_relation.get(type(assigned_object))

    super().update(data=data, read_from_netbox=read_from_netbox, source=source)

    # we need to tell NetBox which object type this is meant to be
    if "assigned_object_id" in self.updated_items:
        self.updated_items.append("assigned_object_type")
class NBPrefixes(NetBoxObject):
+36 -14
View File
@@ -10,58 +10,80 @@ from module.common.configuration import get_config
from module.common.logging import get_logger
def validate_source(source_class=None):
    """
    Validate that a source class exposes all attributes the rest of the
    program relies on.

    Parameters
    ----------
    source_class: class
        source class to validate

    Raises
    ------
    AttributeError
        if any required attribute is missing
    """
    # fixed misspelled local name: necessary_atrtributes -> required_attributes
    # (attribute name strings below are kept as-is; they are the project's API)
    required_attributes = [
        "dependend_netbox_objects",
        "init_successfull",
        "inventory",
        "name",
        "settings",
        "source_tag",
        "source_type",
    ]

    for attribute in required_attributes:
        # getattr() raises AttributeError if the attribute is not present
        getattr(source_class, attribute)
def instanciate_sources(config_handler=None, inventory=None):
log = get_logger()
if config_handler is None:
raise Exception("No config handler defined!")
if inventory is None:
raise Exception("No inventory defined!")
# first validate all available sources
for possible_source_class in valid_sources:
validate_source(possible_source_class)
sources = list()
# iterate over sources and validate them
for source_section in config_handler.sections():
# a source section needs to start with "source/"
if not source_section.startswith("source/"):
continue
# get type of source
source_type = config_handler.get(source_section, "type", fallback=None)
if source_type is None:
log.error(f"Source {source_section} option 'type' is undefined")
config_error = True
source_class = None
for possible_source_class in valid_sources:
validate_source(possible_source_class)
source_class_type = getattr(possible_source_class, "source_type", None)
if source_class_type is None:
raise AttributeError("'%s' class attribute 'source_type' not defined." %
(source_class_type.__name__))
if source_class_type == source_type:
source_class = possible_source_class
break
if source_class is None:
log.error(f"Unknown source type '{source_type}' defined for '{source_section}'")
config_error = True
continue
source_config = get_config(config_handler, section=source_section, valid_settings=source_class.settings)
source_handler = source_class(name=source_section.replace("source/",""),
inventory=inventory,
inventory=inventory,
settings=source_config)
# add to list of source handlers
if source_handler.init_successfull is True:
sources.append(source_handler)
return sources
# EOF
+191 -101
View File
@@ -10,7 +10,7 @@ from pyVim.connect import SmartConnectNoSSL, Disconnect
from pyVmomi import vim
from module.netbox.object_classes import *
from module.common.misc import grab, do_error_exit, dump
from module.common.misc import grab, do_error_exit, dump, get_string_or_none
from module.common.support import normalize_mac_address, format_ip
from module import plural
from module.common.logging import get_logger
@@ -29,34 +29,20 @@ class VMWareHandler():
NBDeviceRoles,
NBSites,
NBClusters,
#]
#"""
NBDevices,
NBVMs,
NBVMInterfaces,
NBInterfaces,
NBIPAddresses,
] #"""
session = None
inventory = None
init_successfull = False
source_type = "vmware"
source_tag = None
site_name = None
networks = dict()
standalone_hosts = list()
]
settings = {
"host_fqdn": None,
"port": 443,
"username": None,
"password": None,
"cluster_exclude_filter": None,
"cluster_include_filter": None,
"host_exclude_filter": None,
"host_include_filter": None,
"vm_exclude_filter": None,
@@ -64,9 +50,28 @@ class VMWareHandler():
"netbox_host_device_role": "Server",
"netbox_vm_device_role": "Server",
"permitted_subnets": None,
"collect_hardware_asset_tag": True
"collect_hardware_asset_tag": True,
"cluster_site_relation": None
}
init_successfull = False
inventory = None
name = None
source_tag = None
source_type = "vmware"
# internal vars
session = None
site_name = None
networks = dict()
standalone_hosts = list()
processed_host_names = list()
processed_vm_names = list()
def __init__(self, name=None, settings=None, inventory=None):
if name is None:
@@ -109,21 +114,37 @@ class VMWareHandler():
validation_failed = True
config_settings["permitted_subnets"] = permitted_subnets
# check include and exclude filter expressions
for setting in [x for x in config_settings.keys() if "filter" in x]:
if config_settings.get(setting) is None or config_settings.get(setting).strip() == "":
continue
re_compiled = None
try:
re_compiled = re.compile(config_settings.get(setting))
except Exception as e:
log.error(f"Problem parsing parsing regular expression for '{setting}': {e}")
log.error(f"Problem parsing regular expression for '{setting}': {e}")
validation_failed = True
config_settings[setting] = re_compiled
if config_settings.get("cluster_site_relation") is not None:
relation_data = dict()
for relation in config_settings.get("cluster_site_relation").split(","):
cluster_name = relation.split("=")[0].strip()
site_name = relation.split("=")[1].strip()
if len(cluster_name) == 0 or len(site_name) == 0:
log.error("Config option 'cluster_site_relation' malformed got '{cluster_name}' for cluster_name and '{site_name}' for site name.")
validation_failed = True
relation_data[cluster_name] = site_name
config_settings["cluster_site_relation"] = relation_data
if validation_failed is True:
do_error_exit("Config validation failed. Exit!")
@@ -135,7 +156,7 @@ class VMWareHandler():
if self.session is not None:
return True
log.info(f"Starting vCenter connection to '{self.host_fqdn}'")
log.debug(f"Starting vCenter connection to '{self.host_fqdn}'")
try:
instance = SmartConnectNoSSL(
@@ -214,7 +235,7 @@ class VMWareHandler():
log.error(f"Creating vCenter view for '{view_name}s' failed!")
continue
log.info("vCenter returned '%d' %s%s" % (len(view_objects), view_name, plural(len(view_objects))))
log.debug("vCenter returned '%d' %s%s" % (len(view_objects), view_name, plural(len(view_objects))))
for obj in view_objects:
@@ -224,62 +245,106 @@ class VMWareHandler():
def add_datacenter(self, obj):
if grab(obj, "name") is None:
name = get_string_or_none(grab(obj, "name"))
if name is None:
return
self.inventory.add_update_object(NBClusterGroups,
data = { "name": obj.name }, source=self)
log.debug2(f"Parsing vCenter datacenter: {name}")
self.inventory.add_update_object(NBClusterGroups, data = { "name": name }, source=self)
def add_cluster(self, obj):
if grab(obj, "name") is None or grab(obj, "parent.parent.name") is None:
name = get_string_or_none(grab(obj, "name"))
group = get_string_or_none(grab(obj, "parent.parent.name"))
if name is None or group is None:
return
self.inventory.add_update_object(NBClusters,
data = {
"name": obj.name,
"type": { "name": "VMware ESXi" },
"group": { "name": obj.parent.parent.name }
},
source=self)
log.debug2(f"Parsing vCenter cluster: {name}")
# first includes
if self.cluster_include_filter is not None:
if not self.cluster_include_filter.match(name):
log.debug(f"Cluster '{name}' did not match include filter '{self.cluster_include_filter.pattern}'. Skipping")
return
# second excludes
if self.cluster_exclude_filter is not None:
if self.cluster_exclude_filter.match(name):
log.debug(f"Cluster '{name}' matched exclude filter '{self.cluster_exclude_filter.pattern}'. Skipping")
return
# set default site name
site_name = self.site_name
# check if site was provided in config
site_relation = getattr(self, "cluster_site_relation", None)
if site_relation is not None and site_relation.get(name) is not None:
site_name = site_relation.get(name)
data = {
"name": name,
"type": { "name": "VMware ESXi" },
"group": { "name": group },
"site": { "name": site_name}
}
self.inventory.add_update_object(NBClusters, data = data, source=self)
def add_network(self, obj):
if grab(obj, "key") is None or grab(obj, "name") is None:
key = get_string_or_none(grab(obj, "key"))
name = get_string_or_none(grab(obj, "name"))
if key is None or name is None:
return
self.networks[obj.key] = obj.name
log.debug2(f"Parsing vCenter network: {name}")
self.networks[key] = name
def add_host(self, obj):
# ToDo:
# * find Host based on device mac addresses
name = grab(obj, "name")
name = get_string_or_none(grab(obj, "name"))
# parse data
log.debug2(f"Parsing vCenter host: {name}")
if name in self.processed_host_names:
log.warning(f"Host '{name}' already parsed. Make sure to use unique host names. Skipping")
return
self.processed_host_names.append(name)
# filter hosts
# first includes
if self.host_include_filter is not None:
if not self.host_include_filter.match(name):
log.debug(f"Host '{name}' did not match include filter '{self.host_include_filter.pattern}'. Skipping")
return
# second excludes
if self.host_exclude_filter is not None:
if self.host_exclude_filter.match(name):
log.debug(f"Host '{name}' matched exclude filter '{self.host_exclude_filter.pattern}'. Skipping")
return
manufacturer = grab(obj, "summary.hardware.vendor")
model = grab(obj, "summary.hardware.model")
platform = "{} {}".format(grab(obj, "config.product.name"), grab(obj, "config.product.version"))
cluster = grab(obj, "parent.name")
status = "active" if grab(obj, "summary.runtime.connectionState") == "connected" else "offline"
manufacturer = get_string_or_none(grab(obj, "summary.hardware.vendor"))
model = get_string_or_none(grab(obj, "summary.hardware.model"))
product_name = get_string_or_none(grab(obj, "config.product.name"))
product_version = get_string_or_none(grab(obj, "config.product.version"))
platform = f"{product_name} {product_version}"
status = "offline"
if get_string_or_none(grab(obj, "summary.runtime.connectionState")) == "connected":
status = "active"
# prepare identifiers
identifiers = grab(obj, "summary.hardware.otherIdentifyingInfo")
identifier_dict = dict()
@@ -294,7 +359,7 @@ class VMWareHandler():
for serial_num_key in [ "EnclosureSerialNumberTag", "SerialNumberTag", "ServiceTag"]:
if serial_num_key in identifier_dict.keys():
serial = identifier_dict.get(serial_num_key)
serial = get_string_or_none(identifier_dict.get(serial_num_key))
break
# add asset tag if desired and present
@@ -310,6 +375,17 @@ class VMWareHandler():
if this_asset_tag.lower() not in [x.lower() for x in banned_tags]:
asset_tag = this_asset_tag
# manage site and cluster
cluster = get_string_or_none(grab(obj, "parent.name"))
# set default site name
site_name = self.site_name
# check if site was provided in config
site_relation = getattr(self, "cluster_site_relation", None)
if site_relation is not None and site_relation.get(cluster) is not None:
site_name = site_relation.get(cluster)
# handle standalone hosts
if cluster == name:
# Store the host so that we can check VMs against it
@@ -325,8 +401,7 @@ class VMWareHandler():
"name": manufacturer
}
},
"platform": {"name": platform},
"site": {"name": self.site_name},
"site": {"name": site_name},
"cluster": {"name": cluster},
"status": status
}
@@ -335,6 +410,8 @@ class VMWareHandler():
data["serial"] = serial
if asset_tag is not None:
data["asset_tag"] = asset_tag
if platform is not None:
data["platform"] = {"name": platform}
host_object = self.inventory.add_update_object(NBDevices, data=data, source=self)
@@ -369,11 +446,11 @@ class VMWareHandler():
self.inventory.add_update_object(NBInterfaces, data=pnic_data, source=self)
for vnic in grab(obj, "config.network.vnic", fallback=list()):
log.debug2("Parsing {}: {}".format(grab(vnic, "_wsdlName"), grab(vnic, "device")))
vnic_data = {
"name": grab(vnic, "device"),
"device": host_object,
@@ -386,11 +463,11 @@ class VMWareHandler():
vnic_object = self.inventory.add_update_object(NBInterfaces, data=vnic_data, source=self)
vnic_ip = "{}/{}".format(grab(vnic, "spec.ip.ipAddress"), grab(vnic, "spec.ip.subnetMask"))
if format_ip(vnic_ip) is None:
log.error(f"IP address '{vnic_ip}' for {vnic_object.get_display_name()} invalid!")
continue
ip_permitted = False
ip_address_object = ip_address(grab(vnic, "spec.ip.ipAddress"))
@@ -402,46 +479,50 @@ class VMWareHandler():
if ip_permitted is False:
log.debug(f"IP address {vnic_ip} not part of any permitted subnet. Skipping.")
continue
vnic_ip_data = {
"address": format_ip(vnic_ip),
"assigned_object_id": vnic_object.nb_id,
"assigned_object_type": "dcim.interface"
"assigned_object_id": vnic_object,
}
self.inventory.add_update_object(NBIPAddresses, data=vnic_ip_data, source=self)
def add_virtual_machine(self, obj):
# ToDo:
# * find VM based on device mac addresses
name = grab(obj, "name")
name = get_string_or_none(grab(obj, "name"))
log.debug2(f"Parsing vCenter virtual machine: {name}")
# filter VMs
if name in self.processed_vm_names:
log.warning(f"Virtual machine '{name}' already parsed. Make sure to use unique virtual machine names. Skipping")
return
self.processed_vm_names.append(name)
# first includes
if self.vm_include_filter is not None:
if not self.vm_include_filter.match(name):
log.debug(f"Virtual machine '{name}' did not match include filter '{self.vm_include_filter.pattern}'. Skipping")
return
# second excludes
if self.vm_exclude_filter is not None:
if self.vm_exclude_filter.match(name):
log.debug(f"Virtual Machine '{name}' matched exclude filter '{self.vm_exclude_filter.pattern}'. Skipping")
return
cluster = grab(obj, "runtime.host.parent.name")
cluster = get_string_or_none(grab(obj, "runtime.host.parent.name"))
if cluster is None:
log.error(f"Requesting cluster for Virtual Machine '{name}' failed. Skipping.")
return
if cluster in self.standalone_hosts:
cluster = "Standalone ESXi Host"
platform = grab(obj, "config.guestFullName")
platform = grab(obj, "guest.guestFullName", fallback=platform)
platform = get_string_or_none(grab(obj, "guest.guestFullName", fallback=platform))
status = "active" if grab(obj, "runtime.powerState") == "poweredOn" else "offline"
status = "active" if get_string_or_none(grab(obj, "runtime.powerState")) == "poweredOn" else "offline"
hardware_devices = grab(obj, "config.hardware.device", fallback=list())
@@ -449,26 +530,29 @@ class VMWareHandler():
if isinstance(comp, vim.vm.device.VirtualDisk)
]) / 1024 / 1024)
annotation = get_string_or_none(grab(obj, "config.annotation"))
data = {
"name": grab(obj, "name"),
"name": name,
"cluster": {"name": cluster},
"role": {"name": self.settings.get("netbox_vm_device_role")},
"status": status,
"memory": grab(obj, "config.hardware.memoryMB"),
"vcpus": grab(obj, "config.hardware.numCPU"),
"disk": disk,
"comments": grab(obj, "config.annotation")
"disk": disk
}
if cluster is not None:
data["cluster"] = {"name": cluster}
if platform is not None:
data["platform"] = {"name": platform}
if annotation is not None:
data["comments"] = annotation
vm_object = self.inventory.add_update_object(NBVMs, data=data, source=self)
# ToDo:
# * get current interfaces and compare description (primary key in vCenter)
# get vm interfaces
for vm_device in hardware_devices:
@@ -485,7 +569,7 @@ class VMWareHandler():
int_label = grab(vm_device, "deviceInfo.label", fallback="")
int_name = "vNIC {}".format(int_label.split(" ")[-1])
int_ip_addresses = list()
for guest_nic in grab(obj, "guest.net", fallback=list()):
@@ -496,8 +580,28 @@ class VMWareHandler():
int_network_name = grab(guest_nic, "network", fallback=int_network_name)
int_connected = grab(guest_nic, "connected", fallback=int_connected)
for ip in grab(guest_nic, "ipConfig.ipAddress", fallback=list()):
int_ip_addresses.append(f"{ip.ipAddress}/{ip.prefixLength}")
# grab all valid interface ip addresses
for int_ip in grab(guest_nic, "ipConfig.ipAddress", fallback=list()):
int_ip_address = f"{int_ip.ipAddress}/{int_ip.prefixLength}"
if format_ip(int_ip_address) is None:
log.error(f"IP address '{int_ip_address}' for {vm_object.get_display_name()} invalid!")
continue
ip_permitted = False
ip_address_object = ip_address(int_ip_address.split("/")[0])
for permitted_subnet in self.permitted_subnets:
if ip_address_object in permitted_subnet:
ip_permitted = True
break
if ip_permitted is False:
log.debug(f"IP address {int_ip_address} not part of any permitted subnet. Skipping.")
continue
int_ip_addresses.append(int_ip_address)
if int_network_name is not None:
@@ -511,36 +615,22 @@ class VMWareHandler():
"enabled": int_connected,
}
# we are trying multiple strategies to find the correct interface
#
vm_nic_object = self.inventory.get_by_data(NBVMInterfaces, data={"mac_address": int_mac})
if vm_nic_object is not None:
vm_nic_object.update(data=vm_nic_data, source=self)
else:
vm_nic_object = self.inventory.add_update_object(NBVMInterfaces, data=vm_nic_data, source=self)
for int_ip_address in int_ip_addresses:
if format_ip(int_ip_address) is None:
logging.error(f"IP address '{int_ip_address}' for {vm_nic_object.get_display_name()} invalid!")
continue
ip_permitted = False
ip_address_object = ip_address(int_ip_address.split("/")[0])
for permitted_subnet in self.permitted_subnets:
if ip_address_object in permitted_subnet:
ip_permitted = True
break
if ip_permitted is False:
log.debug(f"IP address {int_ip_address} not part of any permitted subnet. Skipping.")
continue
# apply ip filter
vm_nic_ip_data = {
"address": format_ip(int_ip_address),
"assigned_object_id": vm_nic_object.nb_id,
"assigned_object_type": "virtualization.vminterface"
"assigned_object_id": vm_nic_object,
}
self.inventory.add_update_object(NBIPAddresses, data=vm_nic_ip_data, source=self)