initial commit

ALL WORK IN PROGRESS
This commit is contained in:
Ricardo Bartels
2020-10-26 16:35:57 +01:00
commit edfa48ddcf
18 changed files with 2477 additions and 0 deletions

14
.gitignore vendored Normal file
View File

@@ -0,0 +1,14 @@
# Exclude all hidden files
.*
# Except those related to git
!.git*
# python cache
__pycache__
# log dir
log/*
# settings file
settings.ini

3
README.md Normal file
View File

@@ -0,0 +1,3 @@
This script syncs data from various sources to NetBox
WIP

2
module/__init__.py Normal file
View File

@@ -0,0 +1,2 @@
def plural(x):
    """Return "s" if *x* is not exactly 1, else "" (helper for plural words)."""
    # PEP 8 (E731): a named def is preferred over assigning a lambda to a name
    return "s" if x != 1 else ""

View File

@@ -0,0 +1,3 @@

View File

@@ -0,0 +1,47 @@
import os
from os.path import realpath
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from module.common.logging import valid_log_levels
def parse_command_line(version=None,
                       self_description=None,
                       version_date=None,
                       default_config_file_path=None,
                       ):
    """Parse command line arguments.

    Also adds current version and version date to the description.

    Parameters
    ----------
    version: str
        program version shown in the help description
    self_description: str
        short description of the program
    version_date: str
        release date shown in the help description
    default_config_file_path: str
        config file path used when -c/--config is omitted

    Returns
    -------
    argparse.Namespace
        object with all parsed command line arguments
    """

    # define command line options
    description = f"{self_description}\nVersion: {version} ({version_date})"

    parser = ArgumentParser(
        description=description,
        formatter_class=RawDescriptionHelpFormatter)

    # BUG FIX: f-string instead of "+" concatenation, which raised a
    # TypeError whenever default_config_file_path was None;
    # also fixes the "create"/"created" typo in the purge help text
    parser.add_argument("-c", "--config", default=default_config_file_path, dest="config_file",
                        help="points to the config file to read config data from "
                             f"which is not installed under the default path '{default_config_file_path}'",
                        metavar="settings.ini")
    parser.add_argument("-l", "--log_level", choices=valid_log_levels, dest="log_level",
                        help="set log level (overrides config)")
    parser.add_argument("-p", "--purge", action="store_true",
                        help="Remove (almost) all synced objects which were created by this script. "
                             "This is helpful if you want to start fresh or stop using this script.")

    args = parser.parse_args()

    # resolve a user supplied relative config file path against the
    # current working directory
    if args.config_file != default_config_file_path and not args.config_file.startswith("/"):
        args.config_file = realpath(os.path.join(os.getcwd(), args.config_file))

    return args
# EOF

View File

@@ -0,0 +1,98 @@
import configparser
from os.path import realpath
from module.common.misc import grab, do_error_exit
from module.common.logging import get_logger
log = get_logger()
def get_config_file(config_file):
    """Return the absolute, resolved path of *config_file*.

    Relative paths are anchored at the project base directory (three
    levels above this module). Exits the program if *config_file* is unset.
    """

    if config_file is None or config_file == "":
        do_error_exit("ERROR: Config file not defined.")

    # absolute paths are used as-is, everything else is anchored
    # at the project base directory
    if not config_file.startswith("/"):
        base_dir = "/".join(__file__.split("/")[0:-3])
        config_file = f"{base_dir}/{config_file}"

    return realpath(config_file)
def open_config_file(config_file):
if config_file is None or config_file == "":
do_error_exit("ERROR: Config file not defined.")
# setup config parser and read config
config_handler = configparser.ConfigParser(strict=True, allow_no_value=True)
# noinspection PyBroadException
try:
config_handler.read_file(open(config_file))
except configparser.Error as e:
do_error_exit(f"ERROR: Problem while config file parsing: {e}")
# noinspection PyBroadException
except Exception:
do_error_exit(f"ERROR: Unable to open file '{config_file}'")
return config_handler
def get_config(config_handler=None, section=None, valid_settings=None):
    """Parse and do basic validation of one config file section.

    Parameters
    ----------
    config_handler: configparser.ConfigParser
        parsed config file handler
    section: str
        name of the config section to read
    valid_settings: dict
        mapping of valid setting names to their default values

    Returns
    -------
    dict
        a dictionary with all config options parsed from the config file
    """

    def get_config_option(section, item, default=None):
        # read one option (bools via getboolean) and store it in config_dict
        if isinstance(default, bool):
            value = config_handler.getboolean(section, item, fallback=default)
        else:
            value = config_handler.get(section, item, fallback=default)

        if value == "":
            value = None

        config_dict[item] = value

        # take care of not logging sensitive data
        # BUG FIX: only mask string values; the original crashed with a
        # TypeError when a token/password option was unset (None)
        for sensitive_item in ["token", "pass"]:
            if isinstance(value, str) and sensitive_item.lower() in item.lower():
                value = value[0:3] + "***"

        log.debug(f"Config: {section}.{item} = {value}")

    config_dict = {}
    config_error = False  # currently informational only

    if valid_settings is None:
        log.error("No valid settings passed to config parser!")

    # read specified section
    if section is not None:
        if section not in config_handler.sections():
            # BUG FIX: this message was missing its f-string prefix and
            # always logged the literal text "{section}"
            log.error(f"Section '{section}' not found in config_file")
            config_error = True
        else:
            for config_item, default_value in valid_settings.items():
                get_config_option(section, config_item, default=default_value)

    return config_dict
# EOF

83
module/common/logging.py Normal file
View File

@@ -0,0 +1,83 @@
import logging
from logging.handlers import RotatingFileHandler
from module.common.misc import do_error_exit
# define the custom DEBUG2 log level (numeric value 6, more verbose than DEBUG)
DEBUG2 = 6  # extended messages

# define valid log levels (ordered from most to least verbose)
valid_log_levels = [ "DEBUG2", "DEBUG", "INFO", "WARNING", "ERROR"]

# register the custom level name with the logging module
logging.addLevelName(DEBUG2, "DEBUG2")

def debug2(self, message, *args, **kws):
    """Log *message* at the custom DEBUG2 level (bound to logging.Logger below)."""
    if self.isEnabledFor(DEBUG2):
        # Yes, logger takes its '*args' as 'args'.
        self._log(DEBUG2, message, args, **kws)

# make debug2() available on every Logger instance
logging.Logger.debug2 = debug2
def get_logger():
    """Return the program wide "Netbox-Sync" logger instance."""
    logger_name = "Netbox-Sync"
    return logging.getLogger(logger_name)
def setup_logging(log_level=None, log_file=None):
    """Set up the "Netbox-Sync" logger with a stream and optional file handler.

    Parameters
    ----------
    log_level: str
        one of the levels in valid_log_levels (case insensitive)
    log_file: str
        path of the log file; relative paths are anchored at the project
        base directory. If None, no file handler is added.

    Returns
    -------
    logging.Logger
        the configured logger instance
    """

    if log_level is None or log_level == "":
        do_error_exit("ERROR: log level undefined or empty. Check config please.")

    # check set log level against self defined log level array
    log_level = log_level.upper()
    if log_level not in valid_log_levels:
        do_error_exit(f"ERROR: Invalid log level: {log_level}")

    # translate the level name into its numeric value
    # BUG FIX: the original compared the raw (possibly lower case) string to
    # "DEBUG2"; "debug2" passed validation but resolved to no numeric level
    # and crashed logger.setLevel()
    if log_level == "DEBUG2":
        numeric_log_level = DEBUG2
    else:
        numeric_log_level = getattr(logging, log_level)

    log_format = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')

    # create logger instance
    logger = get_logger()
    logger.setLevel(numeric_log_level)

    # setup stream handler
    log_stream = logging.StreamHandler()
    log_stream.setFormatter(log_format)
    logger.addHandler(log_stream)

    # setup rotating log file handler
    if log_file is not None:
        # base directory is three levels up from this module
        if not log_file.startswith("/"):
            base_dir = "/".join(__file__.split("/")[0:-3])
            log_file = f"{base_dir}/{log_file}"

        try:
            log_file_handler = RotatingFileHandler(
                filename=log_file,
                maxBytes=10 * 1024 * 1024,  # 10 MByte per file
                backupCount=5
            )
        except Exception as e:
            do_error_exit(f"ERROR: Problems setting up log file: {e}")
        log_file_handler.setFormatter(log_format)
        logger.addHandler(log_file_handler)

    return logger

103
module/common/misc.py Normal file
View File

@@ -0,0 +1,103 @@
import sys
def grab(structure=None, path=None, separator=".", fallback=None):
    """
    get data from a complex object/json structure with a
    "." separated path information. If a part of a path
    is not present then this function returns the
    value of fallback (default: "None").

    example structure:
        data_structure = {
          "rows": [{
            "elements": [{
              "distance": {
                "text": "94.6 mi",
                "value": 152193
              },
              "status": "OK"
            }]
          }]
        }
    example path:
        "rows.0.elements.0.distance.value"
    example return value:
        152193

    Parameters
    ----------
    structure: dict, list, object
        object structure to extract data from
    path: str
        nested path to extract
    separator: str
        path separator to use. Helpful if a path element
        contains the default (.) separator.
    fallback: dict, list, str, int
        data to return if no match was found.

    Returns
    -------
    str, dict, list
        the desired path element if found, otherwise fallback
    """

    # BUG FIX: the None check now happens before path.split() is used,
    # the original raised AttributeError for path=None
    if structure is None or path is None:
        return fallback

    attributes = path.split(separator)

    # keep the original safeguard of at most 100 path levels
    if len(attributes) > 100:
        return fallback

    # walk the path iteratively (the original used an equivalent but
    # convoluted recursion that consumed one element per call)
    data = structure
    for attribute in attributes:
        try:
            if isinstance(data, list):
                data = data[int(attribute)]
            elif isinstance(data, dict):
                # dict keys are matched case insensitively
                data = {k.lower(): v for k, v in data.items()}.get(attribute.lower())
            else:
                data = getattr(data, attribute)
        except Exception:
            return fallback

    return data if data is not None else fallback
def dump(obj):
    """Debug helper: print every attribute of *obj* together with its value."""
    for attribute in dir(obj):
        if not hasattr(obj, attribute):
            continue
        print("obj.%s = %s" % (attribute, getattr(obj, attribute)))
def do_error_exit(log_text):
    """log an error to stderr and exit with return code 1

    Parameters
    ----------
    log_text : str
        the text to log as error
    """

    print(log_text, file=sys.stderr)
    # sys.exit() instead of the site-module helper exit(); exit() is meant
    # for interactive sessions and may not be available in all environments
    sys.exit(1)
# EOF

34
module/common/support.py Normal file
View File

@@ -0,0 +1,34 @@
from ipaddress import ip_network, ip_interface
import aiodns
import logging
def format_ip(ip_addr):
    """
    Formats IPv4 addresses and subnet to IP with CIDR standard notation.

    :param ip_addr: IP address with subnet; example `192.168.0.0/255.255.255.0`
    :type ip_addr: str
    :return: IP address with CIDR notation; example `192.168.0.0/24`,
        or None if *ip_addr* cannot be parsed
    :rtype: str
    """
    try:
        interface = ip_interface(ip_addr)
    except Exception:
        return None
    return interface.compressed
def normalize_mac_address(mac_address=None):
    """Return *mac_address* upper-cased and colon-separated; None passes through."""
    if mac_address is None:
        return None

    normalized = mac_address.upper()

    # already colon separated, nothing more to do
    if ":" in normalized:
        return normalized

    # insert a colon after every second character
    octets = [normalized[pos:pos + 2] for pos in range(0, len(normalized), 2)]
    return ":".join(octets)

View File

365
module/netbox/connection.py Normal file
View File

@@ -0,0 +1,365 @@
import requests
import json
import logging
import pickle
from packaging import version
import pprint
from module import plural
from module.common.misc import grab, do_error_exit, dump
from module.netbox.object_classes import *
from module.common.logging import get_logger
log = get_logger()
# ToDo:
# * compare version
# * reset debugging level for requests
class NetBoxHandler:
    """
    Handles NetBox connection state and interaction with API
    """

    # connection defaults
    verify_tls = True
    timeout = 30
    max_retry_attempts = 4
    default_netbox_result_limit = 200

    # minimum NetBox API version this script supports
    minimum_api_version = "2.9"

    # default settings, overlaid with the config file section in __init__
    settings = {
        "api_token": None,
        "host_fqdn": None,
        "port": None,
        "disable_tls": False,
        "validate_tls_certs": True,
        "prune_enabled": False,
        "prune_delay_in_days": 30
    }

    # tags used to mark objects managed by this script
    primary_tag = "NetBox-synced"
    orphaned_tag = f"{primary_tag}: Orphaned"

    inventory = None
    instance_tags = None
    instance_interfaces = {}
    instance_virtual_interfaces = {}

    def __init__(self, cli_args=None, settings=None, inventory=None):
        """Set up connection parameters, create the session and check the API version.

        Parameters
        ----------
        cli_args: argparse.Namespace
            parsed command line arguments (currently unused)
        settings: dict
            settings parsed from the config file section
        inventory: NetBoxInventory
            inventory instance to fill with NetBox data
        """

        # BUG FIX: merge the passed settings over the class defaults; the
        # original replaced the defaults completely, so any key missing
        # from the config silently became None
        self.settings = {**NetBoxHandler.settings, **(settings or {})}
        self.inventory = inventory

        # BUG FIX: fresh per-instance dicts; as plain class attributes
        # these would be shared between all instances
        self.instance_interfaces = {}
        self.instance_virtual_interfaces = {}

        # set primary tag on the inventory
        setattr(self.inventory, "primary_tag", self.primary_tag)

        proto = "https"
        if bool(self.settings.get("disable_tls", False)) is True:
            proto = "http"

        port = ""
        if self.settings.get("port", None) is not None:
            port = ":{}".format(self.settings.get("port"))

        self.url = f"{proto}://%s{port}/api/" % self.settings.get("host_fqdn")

        self.verify_tls = bool(self.settings.get("validate_tls_certs"))

        self.session = self.create_session()

        # check for minimum supported API version
        # BUG FIX: the original referenced self.api_version which was never
        # assigned and raised AttributeError whenever the check failed
        self.api_version = self.get_api_version()
        if version.parse(self.api_version) < version.parse(self.minimum_api_version):
            do_error_exit(f"Netbox API version '{self.api_version}' not supported. "
                          f"Minimum API version: {self.minimum_api_version}")

    def create_session(self):
        """
        Create a requests session with the API token auth header set.

        :return: session to use for all NetBox requests
        :rtype: requests.Session
        """
        header = {"Authorization": "Token {}".format(self.settings.get("api_token"))}

        session = requests.Session()
        session.headers.update(header)

        log.debug("Created new HTTP Session for NetBox.")

        return session

    def get_api_version(self):
        """
        Determine the current NetBox API version.

        Exits the program if the request fails.

        :return: NetBox API version, e.g. "2.9"
        :rtype: str
        """
        response = None
        try:
            response = self.session.get(
                self.url,
                timeout=self.timeout,
                verify=self.verify_tls)
        except Exception as e:
            do_error_exit(str(e))

        result = str(response.headers["API-Version"])

        log.debug(f"Detected NetBox API v{result}.")

        return result

    def request(self, object_class, req_type="GET", data=None, params=None, nb_id=None):
        """Issue a request against the NetBox API and return the parsed JSON.

        Parameters
        ----------
        object_class: NetBoxObject subclass
            provides the API path of the addressed object type
        req_type: str
            HTTP method (GET, POST, PATCH, DELETE)
        data: dict
            JSON body to send
        params: dict
            additional query parameters
        nb_id: int
            NetBox id to address a single object

        Returns
        -------
        dict or None
            parsed JSON response; None on 4xx client errors or empty body
        """
        result = None

        request_url = f"{self.url}{object_class.api_path}/"

        # append NetBox ID to address a single object
        if nb_id is not None:
            request_url += f"{nb_id}/"

        if params is None:
            params = dict()
        params["limit"] = self.default_netbox_result_limit

        # prepare request
        this_request = self.session.prepare_request(
            requests.Request(req_type, request_url, params=params, json=data)
        )

        # issue request (with retries)
        response = self.single_request(this_request)

        log.debug2("Received HTTP Status %s.", response.status_code)

        try:
            result = response.json()
        except json.decoder.JSONDecodeError:
            pass

        if response.status_code == 200:
            # retrieve and merge paginated results
            if this_request.method == "GET" and result is not None:
                while response.json().get("next") is not None:
                    this_request.url = response.json().get("next")
                    log.debug2("NetBox results are paginated. Getting next page")

                    response = self.single_request(this_request)
                    result["results"].extend(response.json().get("results"))

        elif response.status_code in [201, 204]:
            action = "created" if response.status_code == 201 else "deleted"
            # ToDo:
            #   * switch to object naming schema based on primary keys
            log.info(
                f"NetBox successfully {action} {object_class.name} object '%s'." % (result.get(object_class.primary_key))
            )

        # token issues
        elif response.status_code == 403:
            do_error_exit("NetBox returned: %s: %s" % (response.reason, grab(result, "detail")))

        # we screwed up the request (client error)
        elif response.status_code >= 400 and response.status_code < 500:
            log.error(f"NetBox returned: {this_request.method} {this_request.path_url} {response.reason}")
            log.debug(f"NetBox returned body: {result}")
            result = None

        elif response.status_code >= 500:
            do_error_exit(f"NetBox returned: {response.status_code} {response.reason}")

        return result

    def single_request(self, this_request):
        """Send a prepared request, retrying up to max_retry_attempts times
        on connection errors or read timeouts. Exits when all attempts fail.
        """
        req = None
        for _ in range(self.max_retry_attempts):

            log_message = f"Sending {this_request.method} to '{this_request.url}'"
            if this_request.body is not None:
                log_message += f" with data '{this_request.body}'."

            log.debug2(log_message)

            try:
                req = self.session.send(this_request,
                                        timeout=self.timeout, verify=self.verify_tls)
            except (ConnectionError, requests.exceptions.ConnectionError,
                    requests.exceptions.ReadTimeout):
                log.warning(f"Request failed, trying again: {log_message}")
                continue
            else:
                break
        else:
            do_error_exit(f"Giving up after {self.max_retry_attempts} retries.")

        return req

    def query_current_data(self, netbox_objects_to_query=None):
        """Fetch all current objects of the given classes from NetBox
        (or a local pickle cache) and add them to the inventory.

        Parameters
        ----------
        netbox_objects_to_query: list of NetBoxObject subclasses
            object classes to request from NetBox
        """
        if netbox_objects_to_query is None:
            raise AttributeError(f"Argument netbox_objects_to_query is: '{netbox_objects_to_query}'")

        # query all dependencies
        for nb_object_class in netbox_objects_to_query:

            if nb_object_class not in NetBoxObject.__subclasses__():
                raise AttributeError(f"Class '{nb_object_class.__name__}' must be a subclass of '{NetBoxObject.__name__}'")

            # try a cached result first
            # NOTE(review): the cache is never invalidated and the "cache"
            # directory must exist beforehand — looks like a development aid;
            # confirm before shipping
            cached_nb_data = None
            try:
                cached_nb_data = pickle.load(open(f"cache/{nb_object_class.__name__}.cache", "rb"))
            except Exception:
                pass

            nb_data = dict()
            if cached_nb_data is None:
                # get all objects of this class
                log.debug(f"Requesting {nb_object_class.name}s from NetBox")
                nb_data = self.request(nb_object_class)
                pickle.dump(nb_data.get("results"), open(f"cache/{nb_object_class.__name__}.cache", "wb"))
            else:
                nb_data["results"] = cached_nb_data

            if nb_data.get("results") is None:
                log.warning(f"Result data from NetBox for object {nb_object_class.__name__} missing!")
                continue

            log.debug(f"Processing %s returned {nb_object_class.name}%s" % (len(nb_data.get("results")), plural(len(nb_data.get("results")))))

            for object_data in nb_data.get("results"):
                self.inventory.add_item_from_netbox(nb_object_class, data=object_data)

        return

    def inizialize_basic_data(self):
        """Make sure the tags used by this script exist in NetBox.

        (Method name with its typo kept for interface compatibility.)
        """
        log.debug("Checking/Adding NetBox Sync dependencies")

        # NOTE(review): the trailing conditional applies to the whole
        # description expression, so the description is completely empty when
        # pruning is disabled — possibly the condition was meant to apply
        # only to the format() argument; confirm intent
        self.inventory.add_update_object(NBTags, data={
            "name": self.orphaned_tag,
            "color": "607d8b",
            "description": "The source which has previously "
                           "provided the object no longer "
                           "states it exists.{}".format(
                               " An object with the 'Orphaned' tag will "
                               "remain in this state until it ages out "
                               "and is automatically removed."
                           ) if bool(self.settings.get("prune_enabled", False)) else ""
        })

        self.inventory.add_update_object(NBTags, data={
            "name": self.primary_tag,
            "description": "Created and used by NetBox Sync Script "
                           "to keep track of created items."
        })

    def update_object(self, nb_object_sub_class):
        """Create/update all inventory objects of one class in NetBox,
        recursively updating dependency classes first."""

        for nb_object in self.inventory.get_all_items(nb_object_sub_class):

            # resolve dependencies first
            for dependency in nb_object.get_dependencies():
                if dependency not in self.inventory.resolved_dependencies:
                    log.debug2("Resolving dependency: %s" % (dependency.name))
                    self.update_object(dependency)

            returned_object_data = None

            patch_issue = False
            data_to_patch = dict()

            # a brand new object submits all of its data
            if nb_object.is_new is True:
                nb_object.updated_items = list(nb_object.data.keys())

            for key, value in nb_object.data.items():
                if key in nb_object.updated_items:

                    object_type = nb_object.data_model.get(key)

                    # tags are submitted as a list of name dicts
                    if object_type == NBTags:
                        data_to_patch[key] = [{"name": d.get_display_name()} for d in value]

                    # references to other objects are submitted as NetBox references
                    elif object_type in NetBoxObject.__subclasses__():
                        data_to_patch[key] = value.get_nb_reference()
                        if value.nb_id == 0:
                            log.error(f"Unable to find a NetBox reference to {value.name} '{value.get_display_name()}'. Might be a dependency issue.")
                            patch_issue = True
                    else:
                        data_to_patch[key] = value

            # skip objects with unresolved references
            if patch_issue is True:
                continue

            issued_request = False
            if nb_object.is_new is True:
                log.info("Creating new NetBox '%s' object: %s" % (nb_object.name, nb_object.get_display_name()))
                returned_object_data = self.request(nb_object_sub_class, req_type="POST", data=data_to_patch)
                issued_request = True

            if nb_object.is_new is False and len(nb_object.updated_items) > 0:
                log.info("Updating NetBox '%s' object '%s' with data: %s" % (nb_object.name, nb_object.get_display_name(), data_to_patch))
                returned_object_data = self.request(nb_object_sub_class, req_type="PATCH", data=data_to_patch, nb_id=nb_object.nb_id)
                issued_request = True

            if returned_object_data is not None:
                nb_object.update(data=returned_object_data, read_from_netbox=True)
            elif issued_request is True:
                log.error(f"Request Failed for {nb_object_sub_class.name}. Used data: {data_to_patch}")
                pprint.pprint(nb_object.to_dict())

        # add class to resolved dependencies
        self.inventory.resolved_dependencies = list(set(self.inventory.resolved_dependencies + [nb_object_sub_class]))

    def update_instance(self):
        """Push all changed inventory data to NetBox."""
        log.info("Updating changed data in NetBox")

        # update all items in NetBox accordingly
        for nb_object_sub_class in NetBoxObject.__subclasses__():
            self.update_object(nb_object_sub_class)

        # prune objects
        #self.prune_instance()
        return

241
module/netbox/inventory.py Normal file
View File

@@ -0,0 +1,241 @@
import logging
import json
import pprint
from module.netbox.object_classes import *
from module.common.logging import get_logger
log = get_logger()
class NetBoxInventorySearchResult:
    """Result container for inventory searches (currently unused, WIP).

    NOTE(review): 'members' is a mutable class attribute and is therefore
    shared between all instances; initialize it in __init__ if instances
    are ever meant to hold independent member lists.
    """
    # shared class-level list — see note above
    members = list()
class NetBoxInventory:
    """In-memory inventory of all NetBox objects, grouped by object type."""

    # kept as class attributes for backwards compatibility; real containers
    # are created per instance in __init__
    base_structure = dict()
    resolved_dependencies = list()
    primary_tag = None

    def __init__(self):
        # BUG FIX: create per-instance containers; as plain class attributes
        # all instances shared (and reset) the very same dict/list
        self.base_structure = dict()
        self.resolved_dependencies = list()
        for object_type in NetBoxObject.__subclasses__():
            self.base_structure[object_type.name] = list()

    def get_by_id(self, object_type, id=None):
        """Return the inventory object of *object_type* with NetBox id *id*, or None."""
        if object_type not in NetBoxObject.__subclasses__():
            raise AttributeError("'%s' object must be a sub class of '%s'." %
                                 (object_type.__name__, NetBoxObject.__name__))

        if id is None or self.base_structure[object_type.name] is None:
            return None

        for item in self.base_structure[object_type.name]:
            if item.nb_id == id:
                return item
        return None

    def get_by_data(self, object_type, data=None):
        """Find an inventory object of *object_type* matching *data*.

        Matching order: NetBox id, primary key display name (disambiguated
        by the secondary key on multiple hits), otherwise a full attribute
        comparison. Returns None if nothing matches.
        """
        if object_type not in NetBoxObject.__subclasses__():
            raise AttributeError("'%s' object must be a sub class of '%s'." %
                                 (object_type.__name__, NetBoxObject.__name__))

        if data is None:
            return None

        if self.base_structure[object_type.name] is None:
            return None

        if not isinstance(data, dict):
            # ToDo:
            #   * proper handling instead of hard exit
            log.error("data is not dict")
            pprint.pprint(data)
            exit(0)

        # shortcut if data contains a valid id
        data_id = data.get("id")
        if data_id is not None and data_id != 0:
            return self.get_by_id(object_type, id=data_id)

        # try to find by primary key (display name)
        if data.get(object_type.primary_key) is not None:

            object_name_to_find = None
            results = list()
            for item in self.base_structure[object_type.name]:
                # Todo:
                #   * try to compare second key if present.
                if object_name_to_find is None:
                    object_name_to_find = item.get_display_name(data)
                if object_name_to_find == item.get_display_name():
                    results.append(item)

            # found exactly one match
            # ToDo:
            #   * add force secondary key if one object has a secondary key
            if len(results) == 1:
                return results[0]

            # multiple matches: disambiguate via the secondary key
            elif len(results) > 1:
                object_name_to_find = None
                for item in results:
                    if object_name_to_find is None:
                        object_name_to_find = item.get_display_name(data, including_second_key=True)
                    if object_name_to_find == item.get_display_name(including_second_key=True):
                        return item

        # no primary key in data: try to match all data attributes
        else:
            for item in self.base_structure[object_type.name]:
                all_items_match = True
                for attr_name, attr_value in data.items():
                    if item.data.get(attr_name) != attr_value:
                        all_items_match = False
                        break
                if all_items_match is True:
                    return item

        return None

    def add_item_from_netbox(self, object_type, data=None):
        """
        only to be used if data is read from NetBox and added to inventory
        """
        # create new object and append it to the inventory
        new_object = object_type(data, read_from_netbox=True, inventory=self)
        self.base_structure[object_type.name].append(new_object)
        return

    def add_update_object(self, object_type, data=None, read_from_netbox=False, source=None):
        """Add a new object or update an existing matching one.

        Returns the created/updated object, or None if *data* is missing.
        """
        if data is None:
            # ToDo:
            #   * proper error handling
            log.error("NO DATA")
            return None

        this_object = self.get_by_data(object_type, data=data)

        if this_object is None:
            this_object = object_type(data, read_from_netbox=read_from_netbox, inventory=self, source=source)
            self.base_structure[object_type.name].append(this_object)
            if read_from_netbox is False:
                log.debug(f"Created new {this_object.name} object: {this_object.get_display_name()}")
        else:
            # ToDo:
            #   * resolve relations if updated from netbox
            this_object.update(data, read_from_netbox=read_from_netbox, source=source)
            log.debug("Updated %s object: %s" % (this_object.name, this_object.get_display_name()))

        return this_object

    def resolve_relations(self):
        """Resolve raw references to inventory objects for every stored item."""
        log.debug("Start resolving relations")
        for object_type in NetBoxObject.__subclasses__():
            for item in self.base_structure.get(object_type.name, list()):
                item.resolve_relations()
        log.debug("Finished resolving relations")

    def get_all_items(self, object_type):
        """Return the list of all inventory objects of *object_type*."""
        if object_type not in NetBoxObject.__subclasses__():
            raise AttributeError("'%s' object must be a sub class of '%s'." %
                                 (object_type.__name__, NetBoxObject.__name__))
        return self.base_structure.get(object_type.name, list())

    def tag_all_the_things(self, sources, netbox_handler):
        """Maintain the script's bookkeeping tags on all inventory objects.

        Objects found in a source get the primary and source tags (and lose
        the orphaned tag); objects previously tagged by a source but no
        longer present get the orphaned tag.
        """
        source_tags = [x.source_tag for x in sources]

        for object_type in NetBoxObject.__subclasses__():
            if self.base_structure[object_type.name] is None:
                continue
            for item in self.base_structure[object_type.name]:
                # if object was found in source
                if item.source is not None:
                    item.add_tags([netbox_handler.primary_tag, item.source.source_tag])

                    # if object was orphaned, remove the tag again
                    if netbox_handler.orphaned_tag in item.get_tags():
                        item.remove_tags(netbox_handler.orphaned_tag)

                # if object was tagged by a source in previous runs but is
                # not present anymore then add the orphaned tag
                else:
                    for source_tag in source_tags:
                        if source_tag in item.get_tags():
                            item.add_tags(netbox_handler.orphaned_tag)

    def to_dict(self):
        """Return the whole inventory as a JSON serializable dict."""
        output = dict()
        for nb_object_class in NetBoxObject.__subclasses__():
            output[nb_object_class.name] = list()
            for item in self.base_structure[nb_object_class.name]:
                output[nb_object_class.name].append(item.to_dict())
        return output

    def __str__(self):
        """Pretty-printed JSON dump of the whole inventory."""
        return json.dumps(self.to_dict(), sort_keys=True, indent=4)
# EOF

View File

@@ -0,0 +1,624 @@
import json
import logging
import pprint
from module.common.misc import grab, do_error_exit, dump
from module.common.logging import get_logger
log = get_logger()
class NetBoxObject():
    """Base class for all NetBox object type representations.

    Subclasses are expected to define (at least) the class attributes
    'name', 'api_path', 'primary_key' and 'data_model'.
    """

    # per-instance attribute defaults; copied (not referenced) in __init__
    # so mutable values are never shared between instances
    default_attributes = {
        "data": None,                    # parsed object data
        "is_new": True,                  # True until read from / created in NetBox
        "nb_id": 0,                      # NetBox database id (0 = not yet created)
        "updated_items": list(),         # data keys changed since the last sync
        "is_present_in_source": False,   # seen in the current source run
        "source": None,                  # handle of the source providing the object
    }
    # keep handle to inventory instance to append objects on demand
    inventory = None
def __init__(self, data=None, read_from_netbox=False, inventory=None, source=None):
# inherit and create default attributes from parent
for attr_key, attr_value in self.default_attributes.items():
if isinstance(attr_value, (list, dict, set)):
setattr(self, attr_key, attr_value.copy())
else:
setattr(self, attr_key, attr_value)
# store provided inventory handle
self.inventory = inventory
# store source handle
if source is not None:
self.source = source
self.update(data=data, read_from_netbox=read_from_netbox)
def __repr__(self):
return "<%s instance '%s' at %s>" % (self.__class__.__name__, self.get_display_name(), id(self))
def to_dict(self):
    """Return a serializable dict of this object's public attributes.

    Skips dunder attributes, callables and internal handles; class
    references inside 'data_model' and nested NetBoxObject instances
    inside 'data' are rendered as strings.
    """
    out = dict()
    for key in dir(self):
        value = getattr(self, key)
        # skip dunder/private machinery
        if "__" in key:
            continue
        # skip methods
        if callable(value) is True:
            continue
        # skip internal handles
        if key in ["inventory", "default_attributes"]:
            continue
        if key == "data_model":
            data_model = dict()
            for dkey, dvalue in value.items():
                # if value is class name then print class name
                if type(dvalue) == type:
                    dvalue = str(dvalue)
                data_model[dkey] = dvalue
            value = data_model
        if key == "data":
            # NOTE(review): raises AttributeError when self.data is None
            # (object created without data) — presumably only called after
            # data was set; confirm with callers
            data = dict()
            for dkey, dvalue in value.items():
                # nested NetBoxObject instances are rendered via repr()
                if isinstance(dvalue, NetBoxObject):
                    dvalue = repr(dvalue)
                if dkey == "tags":
                    dvalue = [x.get_display_name() for x in dvalue]
                data[dkey] = dvalue
            value = data
        out[key] = value
    return out
def __str__(self):
return json.dumps(self.to_dict(), sort_keys=True, indent=4)
def __iter__(self):
for key, value in self.to_dict():
yield (key, value)
@staticmethod
def format_slug(text=None, max_len=50):
"""
Format string to comply to NetBox slug acceptable pattern and max length.
:param text: Text to be formatted into an acceptable slug
:type text: str
:return: Slug of allowed characters [-a-zA-Z0-9_] with max length of 50
:rtype: str
"""
if text is None or len(text) == 0:
raise AttributeError("Argument 'text' can't be None or empty!")
permitted_chars = (
"abcdefghijklmnopqrstuvwxyz" # alphabet
"0123456789" # numbers
"_-" # symbols
)
# Replace separators with dash
for sep in [" ", ",", "."]:
text = text.replace(sep, "-")
# Strip unacceptable characters
text = "".join([c for c in text.lower() if c in permitted_chars])
# Enforce max length
return text[0:max_len]
def update(self, data=None, read_from_netbox=False, source=None):
    """Parse *data* into this object and track which attributes changed.

    Parameters
    ----------
    data: dict
        raw object data, either straight from NetBox or from a source
    read_from_netbox: bool
        if True, *data* is trusted NetBox output and stored as-is
    source: object
        handle of the source which provided the data
    """
    if data is None:
        return
    if not isinstance(data, dict):
        raise AttributeError("Argument 'data' needs to be a dict!")
    if data.get("id") is not None:
        self.nb_id = data.get("id")
    # data read from NetBox is stored unparsed and marks the object as existing
    if read_from_netbox is True:
        self.is_new = False
        self.data = data
        self.updated_items = list()
        return
    if source is not None:
        self.source = source
    display_name = self.get_display_name(data)
    log.debug(f"Parsing '{self.name}' data structure: {display_name}")
    parsed_data = dict()
    for key, value in data.items():
        if key not in self.data_model.keys():
            log.error(f"Found undefined data model key '{key}' for object '{self.__class__.__name__}'")
            continue
        # skip unset values
        if value is None:
            log.info(f"Found unset/empty key: {key}")
            continue
        # check data model to see how we have to parse the value
        defined_value_type = self.data_model.get(key)
        # an int in the data model means: value must be a string with that max length
        if isinstance(defined_value_type, int):
            if not isinstance(value, str):
                log.error(f"Invalid data type for '{self.__class__.__name__}.{key}' (must be str), got: '{value}'")
                continue
            value = value[0:defined_value_type]
            # slugs get reformatted; everything else is truncated
            # (NOTE(review): the else branch truncates a second time — harmless)
            if key == "slug":
                value = self.format_slug(text=value, max_len=defined_value_type)
            else:
                value = value[0:defined_value_type]
        # a list in the data model means: value must be one of the choices
        if isinstance(defined_value_type, list):
            if value not in defined_value_type:
                log.error(f"Invalid data type for '{key}' (must be one of {defined_value_type}), got: '{value}'")
                continue
        # just check the type of the value
        type_check_faild = False
        for valid_type in [bool, str, int]:
            if defined_value_type == valid_type and not isinstance(value, valid_type):
                log.error(f"Invalid data type for '{key}' (must be {valid_type.__name__}), got: '{value}'")
                type_check_faild = True
                break
        if type_check_faild is True:
            continue
        # this is meant to be reference to a different object
        if defined_value_type in NetBoxObject.__subclasses__():
            # tags need to be treated as list of dictionaries
            if defined_value_type == NBTags:
                self.add_tags(value)
                continue
            if not isinstance(value, NetBoxObject):
                # try to find the referenced object in the inventory
                value = self.inventory.add_update_object(defined_value_type, data=value)
        # add to parsed data dict
        parsed_data[key] = value
    # add/update slug
    # if data model contains a slug we need to handle it
    # so far slug is always referenced to "name"
    if "slug" in self.data_model.keys() and data.get("slug") is None and data.get(self.primary_key) is not None:
        parsed_data["slug"] = self.format_slug(text=parsed_data.get(self.primary_key), max_len=self.data_model.get("slug"))
    # this is a new set of data
    if self.data is None:
        self.data = parsed_data
        # add empty tag list if no tags were provided
        if "tags" in self.data_model.keys() and data.get("tags") is None:
            self.data["tags"] = list()
    # see if data just got updated and mark it as such.
    else:
        for key, new_value in parsed_data.items():
            # nothing changed, continue with next key
            current_value = self.data.get(key)
            if current_value == new_value:
                continue
            # render both values to comparable strings
            if self.data_model.get(key) in NetBoxObject.__subclasses__():
                if isinstance(current_value, NetBoxObject):
                    current_value_str = current_value.get_display_name()
                else:
                    current_value_str = str(current_value)
                new_value_str = new_value.get_display_name()
            # if data model is a list then we need to read the netbox data value
            elif isinstance(self.data_model.get(key), list) and isinstance(current_value, dict):
                current_value_str = str(current_value.get("value"))
                new_value_str = str(new_value)
            else:
                current_value_str = str(current_value).replace("\r","")
                new_value_str = str(new_value).replace("\r","")
            # just check again if values might match now
            if current_value_str == new_value_str:
                continue
            self.data[key] = new_value
            self.updated_items.append(key)
            log.info(f"{self.name.capitalize()} '{display_name}' attribute '{key}' changed from '{current_value_str}' to '{new_value_str}'")
    self.resolve_relations()
def get_display_name(self, data=None, including_second_key=False):
this_data_set = data
if data is None:
this_data_set = self.data
if this_data_set is None:
return None
my_name = this_data_set.get(self.primary_key)
secondary_key = getattr(self, "secondary_key", None)
enforce_secondary_key = getattr(self, "enforce_secondary_key", False)
if secondary_key is not None and (enforce_secondary_key is True or including_second_key is True):
secondary_key_value = this_data_set.get(secondary_key)
if isinstance(secondary_key_value, NetBoxObject):
secondary_key_value = secondary_key_value.get_display_name()
if secondary_key_value is not None:
#import pprint
#pprint.pprint(this_data_set)
my_name = f"{my_name} ({secondary_key_value})"
return my_name
def resolve_relations(self):
    """Resolve raw references in ``self.data`` to NetBoxObject instances.

    For every key whose data model declares a NetBoxObject subclass,
    replace plain ids/dicts in ``self.data`` with the matching object
    from the inventory. Logs an error for relations that cannot be
    resolved.
    """
    for key, relation_class in self.data_model.items():

        # only keys modeled as NetBox objects need resolving
        if relation_class not in NetBoxObject.__subclasses__():
            continue

        data_value = self.data.get(key)

        resolved_data = None
        if relation_class == NBTags:
            # tags resolve to a list of tag objects;
            # guard against a missing/None tag list
            resolved_tag_list = list()
            for tag in data_value or list():
                if isinstance(tag, NetBoxObject):
                    tag_object = tag
                else:
                    tag_object = self.inventory.get_by_data(relation_class, data=tag)

                if tag_object is not None:
                    resolved_tag_list.append(tag_object)

            resolved_data = resolved_tag_list
        else:
            if data_value is None:
                continue

            if isinstance(data_value, NetBoxObject):
                resolved_data = data_value
            else:
                # an int is a NetBox id, a dict is a lookup query
                data_to_find = None
                if isinstance(data_value, int):
                    data_to_find = {"id": data_value}
                elif isinstance(data_value, dict):
                    data_to_find = data_value

                resolved_data = self.inventory.get_by_data(relation_class, data=data_to_find)

        if resolved_data is not None:
            self.data[key] = resolved_data
        else:
            log.error(f"Problems resolving relation '{key}' for object "
                      f"'{self.get_display_name()}' and value '{data_value}'")
def raw(self):
    """Return the raw data dict backing this object."""
    return self.data
def get_dependencies(self):
    """Return the NetBoxObject subclasses this object's data model references."""
    known_subclasses = NetBoxObject.__subclasses__()
    return [model for model in self.data_model.values() if model in known_subclasses]
def get_tags(self):
    """Return the display names of all tags attached to this object."""
    tag_names = list()
    for tag in self.data.get("tags", list()):
        tag_names.append(tag.get_display_name())
    return tag_names
def update_tags(self, tags, remove=False):
    """Add tags to or remove tags from this object.

    *tags* may be a string, a dict with a "name" key, or a list of
    either. Set *remove=True* to remove the given tags instead of
    adding them. Does nothing for object types without a tag field.
    """
    # nothing to do if no tags were passed or this type has no tags
    if tags is None or NBTags not in self.data_model.values():
        return

    action = "Adding" if remove is False else "Removing"

    log.debug(f"{action} Tag: {tags}")

    # normalize the input into a flat list of tag names
    tag_names = list()

    def extract_tags(this_tags):
        if isinstance(this_tags, str):
            tag_names.append(this_tags)
        elif isinstance(this_tags, dict) and this_tags.get("name") is not None:
            tag_names.append(this_tags.get("name"))

    if isinstance(tags, list):
        for tag in tags:
            extract_tags(tag)
    else:
        extract_tags(tags)

    log.debug(f"Tag list: {tag_names}")

    current_tags = self.get_tags()
    # make sure a tag list exists before appending/removing (avoids KeyError)
    tag_list = self.data.setdefault("tags", list())
    changed = False

    for tag_name in tag_names:

        if remove is False and tag_name not in current_tags:
            # add tag, creating it in the inventory if needed
            tag = self.inventory.add_update_object(NBTags, data={"name": tag_name})
            tag_list.append(tag)
            changed = True
            if self.is_new is False:
                self.updated_items.append("tags")

        elif remove is True and tag_name in current_tags:
            tag = self.inventory.get_by_data(NBTags, data={"name": tag_name})
            # only remove if the lookup actually matched a list member
            if tag in tag_list:
                tag_list.remove(tag)
                changed = True
                if self.is_new is False:
                    self.updated_items.append("tags")

    # only report a change when the tag set actually changed
    if changed is True:
        new_tags = self.get_tags()
        log.info(f"{self.name.capitalize()} '{self.get_display_name()}' attribute 'tags' "
                 f"changed from '{current_tags}' to '{new_tags}'")
def add_tags(self, tags_to_add):
    """Add the given tag(s) to this object."""
    self.update_tags(tags_to_add)
def remove_tags(self, tags_to_remove):
    """Remove the given tag(s) from this object."""
    self.update_tags(tags_to_remove, remove=True)
def get_nb_reference(self):
    """Return how this object is usually referenced.

    Default: the NetBox id — or the object itself while it has not been
    created in NetBox yet (id still 0).
    """
    return self if self.nb_id == 0 else self.nb_id
class NBTags(NetBoxObject):
    """NetBox tag (API endpoint extras/tags)."""
    name = "tag"
    api_path = "extras/tags"
    primary_key = "name"
    # int values are presumably NetBox field length limits — confirm
    data_model = {
        "name": 100,
        "slug": 100,
        "color": 6,
        "description": 200
    }
class NBManufacturers(NetBoxObject):
    """NetBox manufacturer (API endpoint dcim/manufacturers)."""
    name = "manufacturer"
    api_path = "dcim/manufacturers"
    primary_key = "name"
    # int values are presumably NetBox field length limits — confirm
    data_model = {
        "name": 50,
        "slug": 50,
        "description": 200
    }
class NBDeviceTypes(NetBoxObject):
    """NetBox device type (API endpoint dcim/device-types)."""
    name = "device type"
    api_path = "dcim/device-types"
    primary_key = "model"
    # NetBoxObject subclasses mark relations resolved via resolve_relations()
    data_model = {
        "model": 50,
        "slug": 50,
        "part_number": 50,
        "description": 200,
        "manufacturer": NBManufacturers,
        "tags": NBTags
    }
class NBPlatforms(NetBoxObject):
    """NetBox platform (API endpoint dcim/platforms)."""
    name = "platform"
    api_path = "dcim/platforms"
    primary_key = "name"
    data_model = {
        "name": 100,
        "slug": 100,
        "manufacturer": NBManufacturers,
        "description": 200
    }
class NBClusterTypes(NetBoxObject):
    """NetBox cluster type (API endpoint virtualization/cluster-types)."""
    name = "cluster type"
    api_path = "virtualization/cluster-types"
    primary_key = "name"
    data_model = {
        "name": 50,
        "slug": 50,
        "description": 200
    }
class NBClusterGroups(NetBoxObject):
    """NetBox cluster group (API endpoint virtualization/cluster-groups)."""
    name = "cluster group"
    api_path = "virtualization/cluster-groups"
    primary_key = "name"
    data_model = {
        "name": 50,
        "slug": 50,
        "description": 200
    }
class NBDeviceRoles(NetBoxObject):
    """NetBox device role (API endpoint dcim/device-roles)."""
    name = "device role"
    api_path = "dcim/device-roles"
    primary_key = "name"
    # plain types (bool/str/int) declare the expected Python type of a value
    data_model = {
        "name": 50,
        "slug": 50,
        "color": 6,
        "description": 200,
        "vm_role": bool
    }
class NBSites(NetBoxObject):
    """NetBox site (API endpoint dcim/sites)."""
    name = "site"
    api_path = "dcim/sites"
    primary_key = "name"
    data_model = {
        "name": 50,
        "slug": 50,
        "comments": str,
        "tags": NBTags
    }
class NBClusters(NetBoxObject):
    """NetBox cluster (API endpoint virtualization/clusters)."""
    name = "cluster"
    api_path = "virtualization/clusters"
    primary_key = "name"
    data_model = {
        "name": 100,
        "comments": str,
        "type": NBClusterTypes,
        "group": NBClusterGroups,
        "site": NBSites,
        "tags": NBTags
    }
class NBDevices(NetBoxObject):
    """NetBox device (API endpoint dcim/devices).

    Display names include the site (secondary key) when requested.
    """
    name = "device"
    api_path = "dcim/devices"
    primary_key = "name"
    secondary_key = "site"
    # list values enumerate the allowed choices for that field
    data_model = {
        "name": 64,
        "device_type": NBDeviceTypes,
        "device_role": NBDeviceRoles,
        "platform": NBPlatforms,
        "serial": 50,
        "site": NBSites,
        "status": ["offline", "active", "planned", "staged", "failed", "inventory", "decommissioning"],
        "cluster": NBClusters,
        "asset_tag": 50,
        "tags": NBTags
    }
class NBVMs(NetBoxObject):
    """NetBox virtual machine (API endpoint virtualization/virtual-machines).

    Display names include the cluster (secondary key) when requested.
    """
    name = "virtual machine"
    api_path = "virtualization/virtual-machines"
    primary_key = "name"
    secondary_key = "cluster"
    # list values enumerate the allowed choices for that field
    data_model = {
        "name": 64,
        "status": ["offline", "active", "planned", "staged", "failed", "decommissioning"],
        "cluster": NBClusters,
        "role": NBDeviceRoles,
        "platform": NBPlatforms,
        "vcpus": int,
        "memory": int,
        "disk": int,
        "comments": str,
        "tags": NBTags
    }
class NBVMInterfaces(NetBoxObject):
    """NetBox VM interface (API endpoint virtualization/interfaces).

    The owning VM (secondary key) is always part of the display name.
    """
    name = "virtual machine interface"
    api_path = "virtualization/interfaces"
    primary_key = "name"
    secondary_key = "virtual_machine"
    enforce_secondary_key = True
    data_model = {
        "name": 64,
        "virtual_machine": NBVMs,
        "enabled": bool,
        "mac_address": str,
        "description": 200,
        "tags": NBTags
    }
class NBInterfaces(NetBoxObject):
    """NetBox device interface (API endpoint dcim/interfaces).

    The owning device (secondary key) is always part of the display name.
    """
    name = "interface"
    api_path = "dcim/interfaces"
    primary_key = "name"
    secondary_key = "device"
    enforce_secondary_key = True
    # list values enumerate the allowed choices for that field
    data_model = {
        "name": 64,
        "device": NBDevices,
        "label": 64,
        "type": ["virtual", "100base-tx", "1000base-t", "10gbase-t", "25gbase-x-sfp28", "40gbase-x-qsfpp", "other"],
        "enabled": bool,
        "mac_address": str,
        "mgmt_only": bool,
        "mtu": int,
        "description": 200,
        "connection_status": bool,
        "tags": NBTags
    }
class NBIPAddresses(NetBoxObject):
    """NetBox IP address (API endpoint ipam/ip-addresses)."""
    name = "IP address"
    api_path = "ipam/ip-addresses"
    primary_key = "address"
    # assigned_object_type/id bind the address to an interface object
    data_model = {
        "address": str,
        "assigned_object_type": str,
        "assigned_object_id": int,
        "description": 200,
        "tags": NBTags
    }
class NBPrefixes(NetBoxObject):
    """NetBox IP prefix (API endpoint ipam/prefixes)."""
    name = "IP prefix"
    api_path = "ipam/prefixes"
    primary_key = "prefix"
    data_model = {
        "prefix": str,
        "site": NBSites,
        "description": 200,
        "tags": NBTags
    }
# EOF

View File

@@ -0,0 +1,67 @@
# define all available sources here
from .vmware.connection import VMWareHandler

# list of all source handler classes that can be selected via the
# "type" option of a "source/*" config section
valid_sources = [ VMWareHandler ]

###############

from module.common.configuration import get_config
from module.common.logging import get_logger
def instanciate_sources(config_handler=None, inventory=None):
    """Create a source handler instance for every configured source section.

    Reads all config sections starting with "source/", matches their
    'type' option against the known source classes and instantiates the
    matching class. Only handlers which initialized successfully are
    returned.

    Raises an Exception if config_handler or inventory is missing.
    """
    log = get_logger()

    if config_handler is None:
        raise Exception("No config handler defined!")

    if inventory is None:
        raise Exception("No inventory defined!")

    sources = list()

    # iterate over sources and validate them
    for source_section in config_handler.sections():

        # a source section needs to start with "source/"
        if not source_section.startswith("source/"):
            continue

        # get type of source
        source_type = config_handler.get(source_section, "type", fallback=None)

        if source_type is None:
            log.error(f"Source {source_section} option 'type' is undefined")
            # skip this section, nothing can be matched without a type
            continue

        source_class = None
        for possible_source_class in valid_sources:
            source_class_type = getattr(possible_source_class, "source_type", None)
            if source_class_type is None:
                # a source class without 'source_type' is a programming error;
                # report the offending class, not the missing attribute value
                raise AttributeError("'%s' class attribute 'source_type' not defined." %
                                     possible_source_class.__name__)
            if source_class_type == source_type:
                source_class = possible_source_class
                break

        if source_class is None:
            log.error(f"Unknown source type '{source_type}' defined for '{source_section}'")
            continue

        source_config = get_config(config_handler, section=source_section,
                                   valid_settings=source_class.settings)

        source_handler = source_class(name=source_section.replace("source/", ""),
                                      inventory=inventory,
                                      config=source_config)

        # add to list of source handlers
        if source_handler.init_successfull is True:
            sources.append(source_handler)

    return sources
# EOF

View File

@@ -0,0 +1,578 @@
import atexit
from socket import gaierror
from ipaddress import ip_address, ip_network, ip_interface
import re
import pprint
from pyVim.connect import SmartConnectNoSSL, Disconnect
from pyVmomi import vim
from module.netbox.object_classes import *
from module.common.misc import grab, do_error_exit, dump
from module.common.support import normalize_mac_address, format_ip
from module import plural
from module.common.logging import get_logger
log = get_logger()
class VMWareHandler():
    """Source handler which reads objects from a VMware vCenter instance
    and populates them into the shared NetBox inventory.
    """

    # NetBox object types this source creates or depends on
    dependend_netbox_objects = [
        NBTags,
        NBManufacturers,
        NBDeviceTypes,
        NBPlatforms,
        NBClusterTypes,
        NBClusterGroups,
        NBDeviceRoles,
        NBSites,
        NBClusters,
        NBDevices,
        NBVMs,
        NBVMInterfaces,
        NBInterfaces,
        NBIPAddresses,
    ]

    # vCenter session handle, created by create_session()
    session = None
    # shared NetBox inventory this source writes to
    inventory = None
    # set to True once a vCenter session was established
    init_successfull = False
    # config 'type' value that selects this handler
    source_type = "vmware"
    # tag applied to every object created by this source
    source_tag = None
    # name of the virtual site housing objects from this vCenter
    site_name = None

    # NOTE(review): mutable class-level containers are shared across all
    # instances of this class — presumably meant to be per instance; confirm
    networks = dict()
    standalone_hosts = list()

    # valid settings for a "source/*" config section, with their defaults
    settings = {
        "host_fqdn": None,
        "port": 443,
        "username": None,
        "password": None,
        "host_exclude_filter": None,
        "host_include_filter": None,
        "vm_exclude_filter": None,
        "vm_include_filter": None,
        "netbox_host_device_role": "Server",
        "netbox_vm_device_role": "Server",
        "permitted_subnets": None,
        "collect_hardware_asset_tag": True
    }
def __init__(self, name=None, config=None, inventory=None):
    """Parse the source config and establish a vCenter session.

    name: config section name (without the "source/" prefix)
    config: parsed settings dict for this source section
    inventory: shared NetBox inventory to populate

    Raises ValueError if name is undefined.
    """
    if name is None:
        # message was missing the f-string prefix and printed '{name}' literally
        raise ValueError(f"Invalid value for attribute 'name': '{name}'.")

    self.inventory = inventory
    self.name = name

    # use instance-level containers so multiple sources don't share state
    self.networks = dict()
    self.standalone_hosts = list()

    self.parse_config(config)
    self.create_session()

    self.source_tag = f"Source: {name}"
    self.site_name = f"vCenter: {name}"

    if self.session is not None:
        self.init_successfull = True
def parse_config(self, config):
    """Validate the source config and expose each setting as an attribute.

    Parses permitted subnets into ip_network objects and compiles all
    *filter* settings into regular expressions. Exits the program if
    any validation fails.
    """
    validation_failed = False

    # these settings must not be empty
    for setting in ["host_fqdn", "port", "username", "password"]:
        if config.get(setting) is None:
            log.error(f"Config option '{setting}' in 'source/{self.name}' can't be empty/undefined")
            validation_failed = True

    # check permitted ip subnets
    if config.get("permitted_subnets") is None:
        log.info(f"Config option 'permitted_subnets' in 'source/{self.name}' is undefined. "
                 f"No IP addresses will be populated to Netbox!")
    else:
        config["permitted_subnets"] = \
            [x.strip() for x in config.get("permitted_subnets").split(",") if x.strip() != ""]

        permitted_subnets = list()
        for permitted_subnet in config["permitted_subnets"]:
            try:
                permitted_subnets.append(ip_network(permitted_subnet))
            except Exception as e:
                log.error(f"Problem parsing permitted subnet: {e}")
                validation_failed = True

        config["permitted_subnets"] = permitted_subnets

    # compile include and exclude filter expressions
    for setting in [x for x in config.keys() if "filter" in x]:
        if config.get(setting) is None or config.get(setting).strip() == "":
            continue

        re_compiled = None
        try:
            re_compiled = re.compile(config.get(setting))
        except Exception as e:
            # fixed duplicated word ("parsing parsing") in the log message
            log.error(f"Problem parsing regular expression for '{setting}': {e}")
            validation_failed = True

        config[setting] = re_compiled

    if validation_failed is True:
        do_error_exit("Config validation failed. Exit!")

    # expose every known setting as an attribute (e.g. self.host_fqdn)
    for setting in self.settings.keys():
        setattr(self, setting, config.get(setting))
def create_session(self):
    """Establish a session to the configured vCenter.

    Returns True on success (or if a session already exists),
    False if the connection attempt failed.
    """
    if self.session is not None:
        return True

    log.info(f"Starting vCenter connection to '{self.host_fqdn}'")

    try:
        # NOTE(review): SmartConnectNoSSL skips TLS certificate validation
        instance = SmartConnectNoSSL(
            host=self.host_fqdn,
            port=self.port,
            user=self.username,
            pwd=self.password
        )
        # make sure the session is closed on interpreter exit
        atexit.register(Disconnect, instance)
        self.session = instance.RetrieveContent()
    except (gaierror, vim.fault.InvalidLogin, OSError) as e:
        log.error(
            f"Unable to connect to vCenter instance '{self.host_fqdn}' on port {self.port}. "
            f"Reason: {e}"
        )
        return False

    log.info(f"Successfully connected to vCenter '{self.host_fqdn}'")

    return True
def apply(self):
    """Query all relevant object types from vCenter and feed each returned
    object into its handler, which adds/updates it in the inventory.
    """
    self.inizialize_basic_data()

    log.info(f"Query data from vCenter: '{self.host_fqdn}'")

    # Mapping of object type keywords to view types and handlers.
    # Order matters: datacenters/clusters/networks are parsed before
    # the hosts and VMs that reference them.
    object_mapping = {
        "datacenter": {
            "view_type": vim.Datacenter,
            "view_handler": self.add_datacenter
        },
        "cluster": {
            "view_type": vim.ClusterComputeResource,
            "view_handler": self.add_cluster
        },
        "network": {
            "view_type": vim.Network,
            "view_handler": self.add_network
        },
        "host": {
            "view_type": vim.HostSystem,
            "view_handler": self.add_host
        },
        "virtual machine": {
            "view_type": vim.VirtualMachine,
            "view_handler": self.add_virtual_machine
        }
    }

    for view_name, view_details in object_mapping.items():

        # reconnect if the session went away
        if self.session is None:
            log.info("No existing vCenter session found.")
            self.create_session()

        view_data = {
            "container": self.session.rootFolder,
            "type": [view_details.get("view_type")],
            "recursive": True
        }

        try:
            container_view = self.session.viewManager.CreateContainerView(**view_data)
        except Exception as e:
            log.error(f"Problem creating vCenter view for '{view_name}s': {e}")
            continue

        view_objects = grab(container_view, "view")

        if view_objects is None:
            log.error(f"Creating vCenter view for '{view_name}s' failed!")
            continue

        log.info("vCenter returned '%d' %s%s" % (len(view_objects), view_name, plural(len(view_objects))))

        # hand each returned object to its handler
        for obj in view_objects:
            view_details.get("view_handler")(obj)

        # release the server-side view
        container_view.Destroy()
def add_datacenter(self, obj):
    """Register a vCenter datacenter as a NetBox cluster group."""
    datacenter_name = grab(obj, "name")
    if datacenter_name is None:
        return
    self.inventory.add_update_object(NBClusterGroups,
                                     data={"name": datacenter_name}, source=self)
def add_cluster(self, obj):
    """Register a vCenter compute cluster as a NetBox cluster,
    grouped under its datacenter.
    """
    cluster_name = grab(obj, "name")
    group_name = grab(obj, "parent.parent.name")
    if cluster_name is None or group_name is None:
        return
    cluster_data = {
        "name": cluster_name,
        "type": {"name": "VMware ESXi"},
        "group": {"name": group_name}
    }
    self.inventory.add_update_object(NBClusters, data=cluster_data, source=self)
def add_network(self, obj):
    """Remember a vCenter network's key -> name mapping for VM interface naming."""
    network_key = grab(obj, "key")
    network_name = grab(obj, "name")
    if network_key is None or network_name is None:
        return
    self.networks[network_key] = network_name
def add_host(self, obj):
    """Parse a vCenter host system and add/update it in the inventory.

    Also adds the host's physical (pnic) and virtual (vnic) interfaces
    and any vnic IP addresses that fall into a permitted subnet.
    """
    # ToDo:
    # * find Host based on device mac addresses

    name = grab(obj, "name")

    log.debug2(f"Parsing vCenter host: {name}")

    # filter hosts: includes first, then excludes
    if self.host_include_filter is not None:
        if not self.host_include_filter.match(name):
            log.debug(f"Host '{name}' did not match include filter '{self.host_include_filter.pattern}'. Skipping")
            return

    if self.host_exclude_filter is not None:
        if self.host_exclude_filter.match(name):
            log.debug(f"Host '{name}' matched exclude filter '{self.host_exclude_filter.pattern}'. Skipping")
            return

    manufacturer = grab(obj, "summary.hardware.vendor")
    model = grab(obj, "summary.hardware.model")
    platform = "{} {}".format(grab(obj, "config.product.name"), grab(obj, "config.product.version"))
    cluster = grab(obj, "parent.name")
    status = "active" if grab(obj, "summary.runtime.connectionState") == "connected" else "offline"

    # collect identifiers (serial numbers, asset tags, ...) into a dict
    identifiers = grab(obj, "summary.hardware.otherIdentifyingInfo")
    identifier_dict = dict()
    if identifiers is not None:
        for item in identifiers:
            value = grab(item, "identifierValue", fallback="")
            if len(str(value).strip()) > 0:
                identifier_dict[grab(item, "identifierType.key")] = str(value).strip()

    # try to find a serial number, first matching key wins
    serial = None
    for serial_num_key in ["EnclosureSerialNumberTag", "SerialNumberTag", "ServiceTag"]:
        if serial_num_key in identifier_dict.keys():
            serial = identifier_dict.get(serial_num_key)
            break

    # add asset tag if desired and present, ignoring common placeholder values
    asset_tag = None
    if bool(self.collect_hardware_asset_tag) is True and "AssetTag" in identifier_dict.keys():
        banned_tags = ["Default string", "NA", "N/A", "None", "Null", "oem", "o.e.m",
                       "to be filled by o.e.m.", "Unknown"]
        this_asset_tag = identifier_dict.get("AssetTag")
        if this_asset_tag.lower() not in [x.lower() for x in banned_tags]:
            asset_tag = this_asset_tag

    # standalone hosts report themselves as their own cluster
    if cluster == name:
        # Store the host so that we can check VMs against it
        self.standalone_hosts.append(cluster)
        cluster = "Standalone ESXi Host"

    data = {
        "name": name,
        "device_role": {"name": self.netbox_host_device_role},
        "device_type": {
            "model": model,
            "manufacturer": {
                "name": manufacturer
            }
        },
        "platform": {"name": platform},
        "site": {"name": self.site_name},
        "cluster": {"name": cluster},
        "status": status
    }

    # fixed: these used ':' (annotation statements) instead of '=',
    # so serial and asset_tag were silently never set
    if serial is not None:
        data["serial"] = serial
    if asset_tag is not None:
        data["asset_tag"] = asset_tag

    host_object = self.inventory.add_update_object(NBDevices, data=data, source=self)

    # add physical interfaces
    for pnic in grab(obj, "config.network.pnic", fallback=list()):

        log.debug2("Parsing {}: {}".format(grab(pnic, "_wsdlName"), grab(pnic, "device")))

        # the link speed can live in different places depending on the NIC
        pnic_link_speed = grab(pnic, "linkSpeed.speedMb")
        if pnic_link_speed is None:
            pnic_link_speed = grab(pnic, "spec.linkSpeed.speedMb")
        if pnic_link_speed is None:
            pnic_link_speed = grab(pnic, "validLinkSpecification.0.speedMb")

        pnic_link_speed_text = f"{pnic_link_speed}Mbps " if pnic_link_speed is not None else ""

        # map link speed (Mbps) to a NetBox interface type
        pnic_speed_type_mapping = {
            100: "100base-tx",
            1000: "1000base-t",
            10000: "10gbase-t",
            25000: "25gbase-x-sfp28",
            40000: "40gbase-x-qsfpp"
        }

        pnic_data = {
            "name": grab(pnic, "device"),
            "device": host_object,
            "mac_address": normalize_mac_address(grab(pnic, "mac")),
            "enabled": bool(grab(pnic, "spec.linkSpeed")),
            "description": f"{pnic_link_speed_text}Physical Interface",
            "type": pnic_speed_type_mapping.get(pnic_link_speed, "other")
        }

        self.inventory.add_update_object(NBInterfaces, data=pnic_data, source=self)

    # add virtual (vmkernel) interfaces and their IP addresses
    for vnic in grab(obj, "config.network.vnic", fallback=list()):

        log.debug2("Parsing {}: {}".format(grab(vnic, "_wsdlName"), grab(vnic, "device")))

        vnic_data = {
            "name": grab(vnic, "device"),
            "device": host_object,
            "mac_address": normalize_mac_address(grab(vnic, "spec.mac")),
            "mtu": grab(vnic, "spec.mtu"),
            "description": grab(vnic, "portgroup"),
            "type": "virtual"
        }

        vnic_object = self.inventory.add_update_object(NBInterfaces, data=vnic_data, source=self)

        vnic_ip = "{}/{}".format(grab(vnic, "spec.ip.ipAddress"), grab(vnic, "spec.ip.subnetMask"))
        if format_ip(vnic_ip) is None:
            # fixed: was logging.error on the never-imported 'logging' module
            log.error(f"IP address '{vnic_ip}' for {vnic_object.get_display_name()} invalid!")
            continue

        # only populate IPs which are part of a permitted subnet
        ip_permitted = False
        ip_address_object = ip_address(grab(vnic, "spec.ip.ipAddress"))
        for permitted_subnet in self.permitted_subnets:
            if ip_address_object in permitted_subnet:
                ip_permitted = True
                break

        if ip_permitted is False:
            log.debug(f"IP address {vnic_ip} not part of any permitted subnet. Skipping.")
            continue

        vnic_ip_data = {
            "address": format_ip(vnic_ip),
            "assigned_object_id": vnic_object.nb_id,
            "assigned_object_type": "dcim.interface"
        }

        self.inventory.add_update_object(NBIPAddresses, data=vnic_ip_data, source=self)
def add_virtual_machine(self, obj):
    """Parse a vCenter VM and add/update it in the inventory.

    Also adds the VM's network interfaces and any of their IP addresses
    that fall into a permitted subnet.
    """
    # ToDo:
    # * find VM based on device mac addresses

    name = grab(obj, "name")

    log.debug2(f"Parsing vCenter host: {name}")

    # filter VMs: includes first, then excludes
    if self.vm_include_filter is not None:
        if not self.vm_include_filter.match(name):
            log.debug(f"Virtual machine '{name}' did not match include filter '{self.vm_include_filter.pattern}'. Skipping")
            return

    if self.vm_exclude_filter is not None:
        if self.vm_exclude_filter.match(name):
            log.debug(f"Virtual Machine '{name}' matched exclude filter '{self.vm_exclude_filter.pattern}'. Skipping")
            return

    # VMs on standalone hosts land in the default standalone cluster
    cluster = grab(obj, "runtime.host.parent.name")
    if cluster in self.standalone_hosts:
        cluster = "Standalone ESXi Host"

    # prefer the guest-reported platform name over the configured one
    platform = grab(obj, "config.guestFullName")
    platform = grab(obj, "guest.guestFullName", fallback=platform)

    status = "active" if grab(obj, "runtime.powerState") == "poweredOn" else "offline"

    hardware_devices = grab(obj, "config.hardware.device", fallback=list())

    # total disk size in GB, summed over all virtual disks (KB -> GB)
    disk = int(sum([getattr(comp, "capacityInKB", 0) for comp in hardware_devices
                    if isinstance(comp, vim.vm.device.VirtualDisk)
                    ]) / 1024 / 1024)

    data = {
        "name": grab(obj, "name"),
        "role": {"name": self.settings.get("netbox_vm_device_role")},
        "status": status,
        "memory": grab(obj, "config.hardware.memoryMB"),
        "vcpus": grab(obj, "config.hardware.numCPU"),
        "disk": disk,
        "comments": grab(obj, "config.annotation")
    }

    if cluster is not None:
        data["cluster"] = {"name": cluster}
    if platform is not None:
        data["platform"] = {"name": platform}

    vm_object = self.inventory.add_update_object(NBVMs, data=data, source=self)

    # ToDo:
    # * get current interfaces and compare description (primary key in vCenter)

    # get vm interfaces
    for vm_device in hardware_devices:

        int_mac = normalize_mac_address(grab(vm_device, "macAddress"))

        # devices without a mac address are not network interfaces
        if int_mac is None:
            continue

        log.debug2("Parsing device {}: {}".format(grab(vm_device, "_wsdlName"), grab(vm_device, "macAddress")))

        int_network_name = self.networks.get(grab(vm_device, "backing.port.portgroupKey"))
        int_connected = grab(vm_device, "connectable.connected")
        int_label = grab(vm_device, "deviceInfo.label", fallback="")

        int_name = "vNIC {}".format(int_label.split(" ")[-1])

        # guest tools data (matched by mac) overrides device-level data
        int_ip_addresses = list()
        for guest_nic in grab(obj, "guest.net", fallback=list()):
            if int_mac != normalize_mac_address(grab(guest_nic, "macAddress")):
                continue

            int_network_name = grab(guest_nic, "network", fallback=int_network_name)
            int_connected = grab(guest_nic, "connected", fallback=int_connected)

            for ip in grab(guest_nic, "ipConfig.ipAddress", fallback=list()):
                int_ip_addresses.append(f"{ip.ipAddress}/{ip.prefixLength}")

        if int_network_name is not None:
            int_name = f"{int_name} ({int_network_name})"

        vm_nic_data = {
            "name": int_name,
            "virtual_machine": vm_object,
            "mac_address": int_mac,
            "description": int_label,
            "enabled": int_connected,
        }

        # try to match an existing interface by mac before creating one
        vm_nic_object = self.inventory.get_by_data(NBVMInterfaces, data={"mac_address": int_mac})

        if vm_nic_object is not None:
            vm_nic_object.update(data=vm_nic_data, source=self)
        else:
            vm_nic_object = self.inventory.add_update_object(NBVMInterfaces, data=vm_nic_data, source=self)

        for int_ip_address in int_ip_addresses:

            if format_ip(int_ip_address) is None:
                # fixed: was logging.error on the never-imported 'logging' module
                log.error(f"IP address '{int_ip_address}' for {vm_nic_object.get_display_name()} invalid!")
                continue

            # only populate IPs which are part of a permitted subnet
            ip_permitted = False
            ip_address_object = ip_address(int_ip_address.split("/")[0])
            for permitted_subnet in self.permitted_subnets:
                if ip_address_object in permitted_subnet:
                    ip_permitted = True
                    break

            if ip_permitted is False:
                log.debug(f"IP address {int_ip_address} not part of any permitted subnet. Skipping.")
                continue

            vm_nic_ip_data = {
                "address": format_ip(int_ip_address),
                "assigned_object_id": vm_nic_object.nb_id,
                "assigned_object_type": "virtualization.vminterface"
            }

            self.inventory.add_update_object(NBIPAddresses, data=vm_nic_ip_data, source=self)
def inizialize_basic_data(self):
    """Create the base objects every sync from this source relies on:
    the source tag, the virtual vCenter site, the standalone-host
    cluster and the default "Server" device role.
    """
    # add source identification tag
    self.inventory.add_update_object(NBTags, data={
        "name": self.source_tag,
        "description": f"Marks sources synced from vCenter {self.name} "
                       f"({self.host_fqdn}) to this NetBox Instance."
    })

    # virtual site housing all objects from this vCenter
    self.inventory.add_update_object(NBSites, data={
        "name": self.site_name,
        "comments": f"A default virtual site created to house objects "
                    "that have been synced from this vCenter instance."
    })

    # default cluster for standalone ESXi hosts
    self.inventory.add_update_object(NBClusters, data={
        "name": "Standalone ESXi Host",
        "type": {"name": "VMware ESXi"},
        "comments": "A default cluster created to house standalone "
                    "ESXi hosts and VMs that have been synced from "
                    "vCenter."
    })

    # default role assigned to hosts and VMs
    self.inventory.add_update_object(NBDeviceRoles, data={
        "name": "Server",
        "color": "9e9e9e",
        "vm_role": True
    })
# EOF

140
netbox-sync.py Executable file
View File

@@ -0,0 +1,140 @@
#!/usr/bin/env python3
self_description = \
"""
Sync objects from various sources to Netbox
"""
from datetime import date, datetime
from module.common.misc import grab
from module.common.cli_parser import parse_command_line
from module.common.logging import setup_logging
from module.common.configuration import get_config_file, open_config_file, get_config
from module.netbox.connection import NetBoxHandler
from module.netbox.inventory import NetBoxInventory
#from module.netbox.object_classes import *
from module.sources import *
import pprint
__version__ = "0.0.1"
__version_date__ = "2020-10-01"
__author__ = "Ricardo Bartels <ricardo.bartels@telekom.de>"
__description__ = "NetBox Sync"
__license__ = "MIT"
__url__ = "https://github.com/bb-Ricardo/unknown"

# fallback log level used when neither config file nor cli define one
default_log_level = "WARNING"
# config file location used when -c/--config is not given
default_config_file_path = "./settings.ini"

"""
ToDo:
* host "Management" interface is Primary
* return more then one object if found more then one and add somehow to returned objects. Maybe related?
* Add purge option
"""
def main():
    """Entry point: parse cli/config, connect to NetBox, sync all sources."""
    # do_error_exit was used below but never imported at module level
    from module.common.misc import do_error_exit

    start_time = datetime.now()

    sources = list()

    # parse command line
    args = parse_command_line(self_description=self_description,
                              version=__version__,
                              version_date=__version_date__,
                              default_config_file_path=default_config_file_path)

    # get config file path
    config_file = get_config_file(args.config_file)

    # get config handler
    config_handler = open_config_file(config_file)

    # determine log level: default < config file < cli option
    log_level = default_log_level
    log_level = config_handler.get("common", "log_level", fallback=log_level)
    if grab(args, "log_level") is not None:
        log_level = grab(args, "log_level")

    log_file = None
    if bool(config_handler.getboolean("common", "log_to_file", fallback=False)) is True:
        log_file = config_handler.get("common", "log_file", fallback=None)

    # setup logging
    log = setup_logging(log_level, log_file)

    # now we are ready to go
    log.info("Starting " + __description__)
    log.debug(f"Using config file: {config_file}")

    # initialize an empty inventory which will be used to hold and reference all objects
    inventory = NetBoxInventory()

    # get config for netbox handler and establish NetBox connection
    netbox_settings = get_config(config_handler, section="netbox",
                                 valid_settings=NetBoxHandler.settings)
    NB_handler = NetBoxHandler(cli_args=args, settings=netbox_settings, inventory=inventory)

    # instantiate source handlers and get attributes
    sources = instanciate_sources(config_handler, inventory)

    # all sources are unavailable
    if len(sources) == 0:
        do_error_exit("No working sources found. Exit.")

    # retrieve all dependent object classes
    netbox_objects_to_query = list()
    for source in sources:
        netbox_objects_to_query.extend(source.dependend_netbox_objects)

    # request NetBox data (deduplicated)
    NB_handler.query_current_data(list(set(netbox_objects_to_query)))

    # resolve object relations within the initial inventory
    inventory.resolve_relations()

    # initialize basic data needed for syncing
    NB_handler.inizialize_basic_data()

    # loop over sources and patch netbox data
    for source in sources:
        source.apply()

    # add/remove tags to/from all inventory items
    inventory.tag_all_the_things(sources, NB_handler)

    """
    ToDo (not implemented yet):
    nb.set_primary_ips()

    # Optional tasks
    if settings.POPULATE_DNS_NAME:
        nb.set_dns_names()
    """

    # update data in NetBox
    NB_handler.update_instance()

    # finish
    log.info("Completed NetBox Sync! Total execution time %s." % (datetime.now() - start_time))


if __name__ == "__main__":
    main()

13
requirements.txt Normal file
View File

@@ -0,0 +1,13 @@
aiodns==2.0.0
certifi==2019.11.28
cffi==1.14.0
chardet==3.0.4
idna==2.8
pycares==3.1.1
pycparser==2.19
pyvmomi==6.7.3
requests==2.24.0
six==1.13.0
typing==3.7.4.1
urllib3==1.25.7
packaging

62
settings-example.ini Normal file
View File

@@ -0,0 +1,62 @@
[common]
log_level = INFO
# Places all logs in a rotating file if True
log_to_file = True
log_file = log/netbox_sync.log
# define different sources
[source/my-example]
# currently supported
# * vmware : VMware vcenter
type = vmware
host_fqdn = vcenter.example.com
port = 443
username = vcenteruser
password = supersecret
host_exclude_filter =
host_include_filter =
vm_exclude_filter =
vm_include_filter =
netbox_host_device_role = Server
netbox_vm_device_role = Server
permitted_subnets = 172.16.0.0/12, 10.0.0.0/8, 192.168.0.0/16, fe80::/64
# Attempt to collect asset tags from vCenter hosts
collect_hardware_asset_tag = True
# ToDo:
# * add following options
dns_name_lookup = True
custom_dns_servers = 192.168.1.11, 192.168.1.12
[netbox]
api_token = XYZ
disable_tls = true
validate_tls_certs = true
host_fqdn = netbox.example.com
port = 443
prune_enabled = true
prune_delay_in_days = 30
# EOF