Initial commit

Port hostdb from original Python 2 sources to Python 3
master
Thomas Quinot 2 years ago
commit 7fe5ca09fe

@ -0,0 +1,671 @@
#! /usr/bin/env python
import re
import string
import jinja2
import yaml
from ipaddr import get_mixed_type_key, Bytes, \
IPv4Address, IPv6Address, IPAddress, IPNetwork
from .revzones import reverse_name
from . import log
from . import yaml_loader
class DBClass(type):
    """Metaclass for all objects from the configuration database.
    This metaclass provides a dictionary collecting all instances
    of each concrete database object class.
    """
    def __new__(mcs, name, bases, dict):
        # Create the class normally, then attach the shared instance
        # registry where applicable.
        cls = super(DBClass, mcs).__new__(mcs, name, bases, dict)
        # DBObject is derived from object, and serves as an abstract base
        # class: it has no instances dict. Each class directly derived from
        # DBObject has one. Further derived classes share their parent's
        # instances dict (e.g. RevZone shares the Zone instance dict).
        if len(bases) > 0 and bases[0] != object:
            # Reuse the parent's registry if it has one so subclasses
            # share a single dict; otherwise start a fresh one.
            instances_dict = getattr(bases[0], '_instances', {})
            # Expose the registry both under a pluralized class name
            # (e.g. Host -> Host.hosts) and the generic '_instances'.
            setattr(cls, name.lower() + 's', instances_dict)
            setattr(cls, '_instances', instances_dict)
        return cls
class DBObject(object, metaclass=DBClass):
    """Root (abstract) class for all objects from the configuration
    database.
    """
    def add_attr(self, k, v):
        # Record attribute k with value v; 'flags' given as a
        # comma-separated string are split into a list first.
        if k == 'flags' and not isinstance(v, list):
            v = v.split(',')
        if isinstance(self.attrs.get(k), list):
            # List-valued attribute: accumulate instead of replacing.
            if isinstance(v, list):
                self.attrs[k].extend(v)
            else:
                self.attrs[k].append(v)
        else:
            self.attrs[k] = v
    def add_flag(self, f):
        # Flags are stored as the list-valued 'flags' attribute.
        self.add_attr('flags', f)
    def has_flag(self, f):
        return f in self.attrs.get('flags', [])
    def instance_key(self):
        '''Unique key to identify instances'''
        return self.name
    def __init__(self, name, **kwargs):
        # name: object name (or a dict for singletons, see below);
        # kwargs: initial attributes; each value may be scalar or list.
        self.name = name
        if 'attrs' not in self.__dict__:
            # Subclasses may have pre-seeded self.attrs before calling us.
            self.attrs = {}
            self.attrs['flags'] = []
        for k in list(kwargs.keys()):
            v = kwargs[k]
            if not isinstance(v, list):
                v = [v]
            for vv in v:
                self.add_attr(k, vv)
        if isinstance(name, dict):
            # Singleton
            instance_name = None
        else:
            instance_name = self.instance_key()
        # Register in the per-class registry maintained by DBClass.
        if instance_name in type(self)._instances:
            raise Exception("Duplicate %s: %s" % (type(self), instance_name))
        type(self)._instances[instance_name] = self
class Vars(DBObject):
    """Global template variables (singleton; looked up as Vars.varss[None])."""
    pass
class Group(DBObject):
    """A named collection of Host objects."""
    def __init__(self, name, **kwargs):
        self.hosts = []
        # List of Host objects
        super(Group, self).__init__(name, **kwargs)
    def get_attr(self, key):
        # Like dict.get, defaulting to the empty string.
        return self.attrs.get(key, '')
    def add_host(self, host):
        self.hosts.append(host)
    def del_host(self, host):
        self.hosts.remove(host)
class OrgUnit(DBObject):
    """Organizational unit; may carry attributes such as a default 'domain'."""
    pass
def strip_view(s):
    """Remove view part from a <domain>_<view> specification.
    """
    # Keep everything before the first underscore; the whole string
    # is returned when no underscore is present.
    head, _sep, _tail = s.partition('_')
    return head
class Zone(DBObject):
    """A DNS zone: a collection of resource records plus zone attributes."""
    def __init__(self, name, **kwargs):
        # rrs maps a record key (owner name relative to the zone root,
        # or '@') to a list of {'type': ..., 'value': ...} dicts.
        self.rrs = {}
        self.has_root_cname = 0
        self.attrs = {'mx': []}
        super(Zone, self).__init__(name, **kwargs)
    def add_rr(self, key, type, value):
        """Add a resource record and return the normalized record key.

        The zone name (view part stripped) is removed from the end of
        *key* so records are stored relative to the zone root ('@').
        Duplicate records are silently ignored.
        """
        # Fixed: use a raw string and re.escape the zone name — zone
        # names contain dots, which are regex metacharacters and
        # previously matched any character.
        m = re.match(r"(?i)((.*)\.|)" + re.escape(strip_view(self.name)), key)
        if m:
            key = m.group(1)
            if key == "":
                key = "@"
            else:
                # Strip dot
                key = key[:-1]
        if key not in self.rrs:
            self.rrs[key] = []
        new_rr = {'type': type, 'value': value}
        if self.rrs[key].count(new_rr) == 0:
            self.rrs[key].append(new_rr)
        return key
    def add_a(self, key, value):
        # Choose A or AAAA according to the address type.
        if isinstance(value, IPv6Address):
            self.add_rr(key, 'AAAA', value)
        else:
            self.add_rr(key, 'A', value)
    def add_mxes(self, fqdn):
        # Add all of this zone's configured MX records for fqdn.
        for mx in self.attrs['mx']:
            self.add_rr(fqdn, 'MX', mx)
    def add_ptr(self, inaddr, fqdn):
        # PTR targets must be absolute names.
        if fqdn[-1] != '.':
            fqdn = fqdn + '.'
        self.add_rr(inaddr, 'PTR', fqdn)
    def add_cname(self, alias, name):
        # Name may be an absolute name, or a name relative
        # to the root of this zone.
        self_suffix = "." + strip_view(self.name) + "."
        if name.endswith(self_suffix):
            name = name[:-len(self_suffix)]
        alias_key = self.add_rr(alias, 'CNAME', name)
        if alias_key == "@":
            # A zone whose apex is a CNAME must not also receive MX
            # records; the caller checks this flag.
            self.has_root_cname = 1
class RevZone(Zone):
    """Reverse zone, named after the reverse form of the prefix it covers.
    Shares the Zone instance registry (see DBClass)."""
    def __init__(self, name, **kwargs):
        # name is an IP prefix; the zone name is its
        # in-addr.arpa / ip6.arpa reverse form.
        self.prefix = IPNetwork(name)
        super(RevZone, self).__init__(reverse_name(self.prefix), **kwargs)
class LDAPTemplate(DBObject):
    """A named jinja2 template used to render LDAP (LDIF) entries."""
    @staticmethod
    def ipvfilter(a, v):
        # jinja2 filter 'ipv': keep only addresses of IP version v.
        # Lists are filtered recursively, dropping non-matching entries;
        # a single non-matching address yields None.
        if isinstance(a, list):
            return [_f for _f in map(LDAPTemplate.ipvfilter, a, [v] * len(a)) if _f]
        elif a.version == v:
            return str(a)
    def __init__(self, name, **kwargs):
        self._tmpl = {}
        self._vars = {}
        self.env = jinja2.Environment()
        self.env.filters['ipv'] = LDAPTemplate.ipvfilter
        # 'requires' lists variables that must be set for render() to run.
        self.attrs = {'requires': []}
        super(LDAPTemplate, self).__init__(name, **kwargs)
    def add_attr(self, key, value):
        # The 'template' attribute is compiled to a jinja2 template object.
        if key == 'template':
            value = self.env.from_string(value)
        super(LDAPTemplate, self).add_attr(key, value)
    def render(self, tmpl_key, **kwargs):
        # Render the template with kwargs; returns None (after a verbose
        # log line) when any required variable is missing or empty.
        for required in self.attrs['requires']:
            if not kwargs.get(required):
                log.vv("No LDAP %s entry for %s (missing %s)" %
                       (tmpl_key, kwargs['name'], required))
                return
        return self.attrs['template'].render(**kwargs)
def ldap_render(ldif, obj=None, **kwargs):
    """Render an LDAP entry for *obj* and append it to list *ldif*.

    The template key comes from kwargs['template'] when given, otherwise
    from the first class_map entry matching obj's type.  Returns True
    when an entry was produced, False otherwise.
    """
    # (fixed: removed dead 'res = []' initialization — res is always
    # reassigned from the template render below)
    # Template variables: global Vars singleton, overridden by the
    # object's own attributes, overridden by explicit kwargs.
    obj_vars = dict(Vars.varss[None].attrs)
    if obj is not None:
        obj_vars.update(obj.__dict__)
        obj_vars.update(obj_vars.pop('attrs'))
    obj_vars.update(kwargs)
    if 'template' in kwargs:
        tmpl_key = kwargs.pop('template')
    else:
        tmpl_key = next(k for (k, Cl) in class_map if isinstance(obj, Cl))
    tmpl = LDAPTemplate.ldaptemplates.get(tmpl_key)
    if tmpl is None:
        return False
    res = tmpl.render(tmpl_key, **obj_vars)
    if not res:
        return False
    # Make sure entries are newline-terminated so they concatenate cleanly.
    if res[-1] != '\n':
        res = res + '\n'
    ldif.append(res)
    return True
# Find the zone(s) in which to output name d for the given view.
# If a view is requested, and views are specified for the domain,
# then only that view is returned.
# If no view is specified for the domain, return the (only) zone for the
# domain.
# If no view is requested, and views are specified for the domain,
# returns ALL views.
def find_zones_for_domain(d, default_view=None, orig_d=None):
    """Return the list of Zone objects in which name *d* belongs.

    Climbs up the domain hierarchy (stripping leading labels) until a
    matching zone (or set of per-view zones) is found; returns [] when
    none is.  orig_d carries the original name across recursive calls.
    """
    if orig_d is None:
        orig_d = d
    # Strip trailing dot in FQDN
    if d[-1] == ".":
        d = d[:-1]
    d = d.lower()
    # If a view is specified, first try to find a specific zone for the view
    if default_view is not None:
        dv = d + '_' + default_view
        if dv in Zone.zones:
            return [Zone.zones[dv]]
    # No specific view, look for a view-less zone
    if d in Zone.zones:
        return [Zone.zones[d]]
    # No view specified: look for zones with view for domain
    if default_view is None:
        view_prefix = d + '_'
        views = [Zone.zones[z]
                 for z in Zone.zones if z.startswith(view_prefix)]
        if len(views) > 0:
            return views
    # No zone found so far, climb up to parent domain
    dot = d.find(".")
    if dot < 0:
        return []
    return find_zones_for_domain(d[dot + 1:], default_view, orig_d)
def find_reverse_zone(ipa):
    '''Return the reverse zone for ipa'''
    # Longest-prefix match over all RevZone instances; among equal
    # prefix lengths the first zone encountered wins (strict '>').
    best = None
    best_len = 0
    for zone in list(Zone.zones.values()):
        if not isinstance(zone, RevZone):
            continue
        if ipa in zone.prefix and zone.prefix.prefixlen > best_len:
            best_len = zone.prefix.prefixlen
            best = zone
    return best
class IPnet:
    """An IP prefix with per-host bookkeeping for hosts(5) and HTML
    output, and (IPv4 only) a free-address list."""

    # All IPnet instances ever created share this class-level list.
    ipnets = []

    def __str__(self):
        return str(self.prefix)

    def __init__(self, prefix):
        self.prefix = IPNetwork(prefix)
        # Both dicts are indexed by host id (the address's host bits).
        self.hosts_lines = {}
        self.html_hosts_lines = {}
        self._hostmask = int(self.prefix.hostmask)
        if self.prefix.version == 4:
            # Unallocated host ids; entries are removed by mark_used().
            self.freelist = [self.host_id(h) for h in self.prefix]

    def key(self):
        """Key function for comparisons and sorting
        """
        return get_mixed_type_key(self.prefix)

    def mark_used(self, addr):
        """Remove addr from the free list (IPv4 only); warn on reuse."""
        if self.prefix.version == 4:
            try:
                self.freelist.remove(self.host_id(addr))
            except Exception:
                print("Warning: %s already used in %s" % (addr, str(self)))

    def host_id(self, ipa):
        """Return the host bits of ipa within this prefix."""
        # _hostmask is already an int (see __init__).
        return int(ipa) & self._hostmask

    def set_hosts_line(self, addr, line):
        """Record a hosts(5) line for addr; extra names for the same
        address are appended to the existing line."""
        hid = self.host_id(addr)
        if hid in self.hosts_lines:
            self.hosts_lines[hid] += ' %s' % line
        else:
            self.hosts_lines[hid] = '%s\t%s' % (str(addr), line)

    def set_html_hosts_line(self, addr, line):
        """Record (append) an HTML table row for addr."""
        hid = self.host_id(addr)
        if hid not in self.html_hosts_lines:
            self.html_hosts_lines[hid] = ''
        self.html_hosts_lines[hid] += line

    def list_hosts(self):
        """Return hosts(5) lines for the prefix (IPv4: one line per
        address, unused addresses commented out)."""
        if self.prefix.version == 4:
            hosts_list = []
            for h in self.prefix:
                try:
                    hosts_list.append(self.hosts_lines[self.host_id(h)] + '\n')
                except KeyError:
                    hosts_list.append("#%s unused\n" % (h))
            return hosts_list
        else:
            return [l + '\n' for l in list(self.hosts_lines.values())]

    def html_list_hosts(self):
        """ Same as list_hosts, but return an HTML table, and collapse the
        unused entries. """
        if self.prefix.version != 4:
            print("%s is-a %s" % (self.prefix, self.__class__))
            # Fixed: attribute was misspelled 'html_host_lines', which
            # raised AttributeError for any non-IPv4 prefix.
            return list(self.html_hosts_lines.items())
        hosts_list = []
        in_unused_range = False
        unused_first = ""
        unused_last = ""
        for h in self.prefix:
            hid = self.host_id(h)
            line = self.html_hosts_lines.get(hid, None)
            if line is None:
                # Extend (or open) the current run of unused addresses.
                if in_unused_range:
                    unused_last = h
                else:
                    unused_first = h
                    unused_last = unused_first
                    in_unused_range = True
                continue
            if in_unused_range:
                # Flush the pending unused range as a single table row.
                if unused_first == unused_last:
                    label = str(unused_first)
                else:
                    label = ("%s<br>&nbsp;<small>to</small><br>%s"
                             % (unused_first, unused_last))
                hosts_list.append(
                    '<tr class="unused"><td>' +
                    label +
                    '</td><td>unused</td><td>&nbsp;</td></tr>\n')
                in_unused_range = False
            hosts_list.append(line)
        return hosts_list
class Net(DBObject):
    """A logical network: one or more IP prefixes plus attributes."""
    def __init__(self, name, **kwargs):
        # DHCP fixed-host declaration lines, accumulated by the caller.
        self.dhcp_fixed = []
        self.ipnets = []
        # List of (IPnet, dict) tuples
        # Prefixes subject to bidirectional NAT (IPNetwork objects).
        self.binat = []
        super(Net, self).__init__(name, **kwargs)
    def prefixes(self, v=None, len=None):
        '''Return prefixes of self.
        If v or len are specified, only return prefixes with
        the corresponding version or length.'''
        return [ipn.prefix for (ipn, _) in self.ipnets
                if (v is None or ipn.prefix.version == v)
                and (len is None or ipn.prefix.prefixlen == len)]
    def add_attr(self, key, value):
        # 'addr' may be a single prefix, or a dict mapping prefix ->
        # per-prefix attribute overrides.
        if key == "addr":
            if isinstance(value, dict):
                self.ipnets.extend([(IPnet(k), value[k]) for k in value])
            else:
                self.ipnets.append((IPnet(value), {}))
        elif key == "binat":
            self.binat.append(IPNetwork(value))
        else:
            super(Net, self).add_attr(key, value)
class Host(DBObject):
    """A host (or expanded host interface) with addresses, aliases,
    MAC addresses and group memberships."""
    def instance_key(self):
        """Provide unique names across entire database
        Two hosts in different organizational units can have the same
        name, so differentiate them using their internal object ids."""
        return "%s.%d" % (self.name, id(self))
    def __init__(self, name, **kwargs):
        # [alias, kind] pairs; kind is one of "cname", "a", "mx".
        self.aliases = []
        # IPAddress objects (deduplicated by add_addr).
        self.addrs = []
        # [type, value] pairs, e.g. ["ethernet", "aa:bb:cc:dd:ee:ff"].
        self.mac_addrs = []
        # Names of groups this host belongs to.
        self.groups = []
        self.attrs = {'interface': [], 'dhcp': []}
        super(Host, self).__init__(name, **kwargs)
    def add_mac_addr(self, type, value):
        self.mac_addrs.append([type, value])
    def add_alias(self, alias, kind):
        self.aliases.append([alias, kind])
    def add_addr(self, addr):
        # Parse and deduplicate.
        a = IPAddress(addr)
        if a not in self.addrs:
            self.addrs.append(a)
    def add_attr(self, key, value):
        # A decommissioned host ignores any further attributes.
        if 'decommissioned' in self.attrs:
            return
        if key == "alias":
            self.add_alias(value, "cname")
        elif key == "valias":
            # Virtual alias: both an address record and an MX.
            self.add_alias(value, "a")
            self.add_alias(value, "mx")
        elif key == "aalias":
            self.add_alias(value, "a")
        elif key == "mxalias":
            self.add_alias(value, "mx")
        elif key == "addr":
            self.add_addr(value)
        elif key == "ether":
            self.add_mac_addr("ethernet", value)
        elif key == "groups":
            # Parse the list of groups
            r = re.compile("([^,]+),?")
            for g in r.findall(value):
                group = g.strip()
                self.groups.append(group)
                if group in Group.groups:
                    Group.groups[group].add_host(self)
                else:
                    print("Warning, host %s is assigned to unknown group %s" \
                          % (self.name, group))
        elif key == "decommissioned":
            # Remove entry from hosts table, and also from any group that
            # references it.
            self.attrs['decommissioned'] = value
            del Host.hosts[self.instance_key()]
            for g in self.groups:
                if g in Group.groups:
                    Group.groups[g].del_host(self)
        else:
            super(Host, self).add_attr(key, value)
    def get_attr(self, key):
        # Like dict.get, defaulting to the empty string.
        if key in self.attrs:
            return self.attrs[key]
        else:
            return ""
    def has_address_in(self, prefix):
        # True when any of the host's addresses falls within prefix.
        for a in self.addrs:
            if a in prefix:
                return True
        return False
    def html(self, addr):
        # One HTML table row linking to this host's detail page.
        return """<tr><td>%s</td>
<td><a href="machines/%s.html">%s</a></td><td>%s</td></tr>
""" % (addr, self.name, self.name, self.get_attr("purpose") or "&nbsp;")
def find_nets(addr):
    '''Return all nets that match addr and have the longest prefix length'''
    # Result entries are (Net, IPnet, per-prefix-props) tuples.
    ipa = IPAddress(addr)
    matchlen = 0
    matches = []
    for n in list(Net.nets.values()):
        for ipn, ipnprops in n.ipnets:
            if ipa in ipn.prefix:
                # A strictly longer match discards everything collected
                # so far; equal-length matches accumulate.
                if ipn.prefix.prefixlen > matchlen:
                    matches = []
                    matchlen = ipn.prefix.prefixlen
                if ipn.prefix.prefixlen == matchlen:
                    matches.append((n, ipn, ipnprops))
    return matches
def nat_addr(ipnet, ipa):
    """Map ipa into prefix ipnet, keeping ipa's host bits.
    NOTE(review): assumes int(ipnet) has no host bits set (i.e. the
    prefix is given as its network address) — confirm with callers."""
    m = ipnet.hostmask
    return IPv4Address(int(ipnet) | (int(ipa) & int(m)))
def find_nat(addr):
    """Return the binat translation of addr, or None when addr is not
    an IPv4 address inside any network's binat prefix."""
    candidate = IPAddress(addr)
    if not isinstance(candidate, IPv4Address):
        return None
    for net in list(Net.nets.values()):
        v4_prefixes = net.prefixes(v=4)
        for binat_prefix in net.binat:
            if candidate in binat_prefix:
                # Translate into the network's first IPv4 prefix.
                return nat_addr(v4_prefixes[0], candidate)
    return None
def autoconfig_address(p, e):
    """Return the IPv6 SLAAC address for MAC address *e* ('aa:bb:...')
    within /64 prefix *p* (modified EUI-64 interface identifier).

    Raises Exception when p is not an IPv6 /64.
    """
    if p.version != 6 or p.prefixlen != 64:
        raise Exception("Invalid prefix for SLAAC: %s" % (p,))
    eui48 = [int(b, 16) for b in e.split(':')]
    eui48[0] = eui48[0] & ~3
    # Insert ff:fe in the middle and set the universal/local bit.
    eui64 = [eui48[0] | 2] + eui48[1:3] + [0xff, 0xfe] + eui48[3:]
    # Python 3 fix: build the interface-id with bytes(list_of_ints);
    # the old b''.join(map(chr, eui64)) raised TypeError because chr()
    # returns str in Python 3.
    return IPv6Address(Bytes(p.packed[:8] + bytes(eui64)))
def is_slac(ipa):
    """True when ipa looks like a SLAAC (EUI-64) IPv6 address, i.e. has
    the ff:fe marker bytes in the middle of its interface identifier.

    Python 3 fix: compare the packed slice against a bytes literal —
    bytes == str is always False in Python 3, so the original
    comparison against '\\xff\\xfe' never matched.
    """
    return isinstance(ipa, IPv6Address) and ipa.packed[11:13] == b'\xff\xfe'
def get_fqdn(name, addr, nprops):
    """Build the fully-qualified name for *name* at address *addr*.

    nprops supplies the network's 'domain' and, for IPv6 addresses,
    an optional 'suffix6' appended to the first name component.
    An already-absolute name (trailing dot) keeps its own domain but
    loses the trailing dot.
    """
    domain = nprops['domain']
    suffix = nprops.get('suffix6', '') if addr.version == 6 else ''
    if name.endswith('.'):
        # Absolute name: inject the suffix into the leading label.
        labels = name[:-1].split('.')
        labels[0] += suffix
        return '.'.join(labels)
    return name + suffix + "." + domain
# For a generate block, expand a template of the form:
# $
# or with optional modifiers:
# ${offset[,width[,base]]}
#
# Modifiers change the offset from the iterator, field width and base.
# Modifiers are introduced by a { immediately following the $ as
# ${offset[,width[,base]]}. e.g. ${-20,3,d} which subtracts 20 from the current
# value, prints the result as a decimal in a zero padded field of with 3.
# Available output forms are decimal (d), octal (o) and hexadecimal (x or X
# for uppercase). The default modifier is ${0,0,d}.
# (consistent with BIND's $GENERATE directive).
# Fixed: raw string — '\$' and '\{' are invalid escape sequences in a
# plain Python 3 string literal (DeprecationWarning, later SyntaxError).
generator_pattern = re.compile(r'\$(\{([0-9-]*)(,([0-9]*)(,([doxX]))?)?\})?')
def expand_index(m, index):
    """Expand one generator-pattern match for loop value *index*.

    Returns index+offset formatted with the requested zero-padded
    width and base (defaults: offset 0, width 0, decimal).
    """
    offset = int(m.group(2)) if m.group(2) else 0
    width = int(m.group(4)) if m.group(4) else 0
    base = m.group(6) if m.group(6) else 'd'
    # Build e.g. '%03d' for width 3, base 'd'.
    return ("%%0%d%c" % (width, base)) % (index + offset)
def expand_index_refs(obj, index):
    """Recursively expand $-references in obj (str / list / dict) for
    the given generator index, returning a new structure."""
    if isinstance(obj, dict):
        return {key: expand_index_refs(val, index)
                for key, val in obj.items()}
    if isinstance(obj, list):
        return [expand_index_refs(item, index) for item in obj]
    # Leaf: substitute every generator reference in the string.
    return generator_pattern.sub(lambda m: expand_index(m, index), obj)
def enter_object(obj):
    """Instantiate the database object described by dict *obj*.

    The first class_map key present in obj selects the class; its value
    becomes the object name and the remaining keys its attributes.
    Logs an error when no class_map key matches.
    """
    db_obj = None
    for k, Cl in class_map:
        if k in obj:
            db_obj = Cl(obj.pop(k), **obj)
            break
    if db_obj is None:
        log.err("Invalid object:\n%s" % obj)
# Note: order is significant:
# 'ldap' object has 'host' and 'group' attributes
# 'host' objects may have a 'net' attribute
# Maps the YAML key identifying an object to the DBObject subclass
# that is instantiated for it (see enter_object / ldap_render).
class_map = [('ldap', LDAPTemplate),
             ('org-unit', OrgUnit),
             ('host', Host),
             ('net', Net),
             ('group', Group),
             ('zone', Zone),
             ('rev-zone', RevZone),
             ('vars', Vars)]
def load_db(f):
    """Load the YAML host database from stream *f* and instantiate
    every object it describes; 'generate' blocks are expanded over
    their inclusive numeric range."""
    for obj in yaml.load(f, Loader=yaml_loader.UniqueKeyLoader):
        if 'generate' not in obj:
            enter_object(obj)
            continue
        gen = obj['generate']
        lo, hi = (int(bound) for bound in gen.pop('range').split('-'))
        for index in range(lo, hi + 1):
            enter_object(expand_index_refs(gen, index))

@ -0,0 +1,311 @@
#! /usr/bin/env python
# ldapdip
# LDAP Diff and Patch
# $Id: ldapdip.py 253716 2017-01-18 14:47:26Z quinot $
import argparse
import ldap
import ldap.modlist
import ldapurl
import ldif
import os.path
import sys
import yaml
# Global configuration (merged from config files and -O overrides).
conf = {}
# Set to False by handle_exception(); drives the process exit status.
success = True
# Always exclude internal attributes
# (useful when comparing slapcat dumps)
exclude = set(map(str.lower, [
    'structuralObjectClass',
    'entryUUID',
    'creatorsName',
    'createTimestamp',
    'entryCSN',
    'modifiersName',
    'modifyTimestamp',
    'contextCSN'
]))
# If non-empty, consider only these attributes
include = set()
def vv(str):
    """Output str if verbose mode is specified."""
    # NOTE(review): parameter shadows the builtin 'str'; kept as-is.
    if conf.get('verbose'):
        print(str, file=sys.stderr)
# Editors provide functions to be called for each operation required to
# make the OLD tree identical to the NEW tree.
class LDIFEditor:
    """The LDIF Editor produces an LDIF change record stream."""
    def __init__(self, outf):
        # outf: writable file object receiving the LDIF records.
        self._ldw = ldif.LDIFWriter(outf)
    def output_entry(self, dn, arg):
        self._ldw.unparse(dn, arg)
    def add_entry(self, dn, modlist):
        self.output_entry(dn, modlist)
    def mod_entry(self, dn, modlist):
        self.output_entry(dn, modlist)
    def del_entry(self, dn):
        self.output_entry(dn, {'changetype': ['delete']})
class LDAPEditor:
    """The LDAP Editor applies the modifications to an LDAP server."""
    def __init__(self, lo):
        # lo: a bound LDAP connection object.
        self._lo = lo
    def add_entry(self, dn, modlist):
        self._lo.add_s(dn, modlist)
    def mod_entry(self, dn, modlist):
        self._lo.modify_s(dn, modlist)
    def del_entry(self, dn):
        self._lo.delete_s(dn)
def open_ldap(uri, update):
    """Open LDAP server URI. If update is True, also return an LDAPEditor
    based on this connection."""
    vv('Querying LDAP server: %s' % uri)
    lo = ldap.initialize(uri)
    # Bind credentials and search parameters come from the global conf.
    lo.simple_bind_s(conf.get('binddn', ''), conf.get('bindpw', ''))
    args = {}
    lf = conf.get('filter', None)
    if lf is not None:
        args['filterstr'] = lf
    entries = {}
    base = conf.get('base', '')
    if not isinstance(base, list):
        base = [base]
    for b in base:
        # Results from multiple search bases are merged by DN.
        lr = lo.search_s(b, ldap.SCOPE_SUBTREE, **args)
        entries.update(dict(lr))
    return entries, (LDAPEditor(lo) if update else None)
class LDIFDictLoader(ldif.LDIFParser):
    """LDIF parser that populates a dict of entries indexed by DN"""
    def __init__(self, *args):
        ldif.LDIFParser.__init__(self, *args)
        self.entries = {}
    def handle(self, dn, entry):
        # Callback invoked by LDIFParser.parse() for each record.
        self.entries[dn] = entry
def open_ldif(uri, update):
    """Open an LDIF file. update must be False (an LDIF file cannot be
    edited), and no editor is ever returned."""
    if update:
        raise Exception('cannot update %s' % uri)
    vv('Loading LDIF file: %s' % uri)
    # Context manager closes the file handle deterministically (the
    # original left it open until garbage collection).
    with open(uri) as f:
        parser = LDIFDictLoader(f)
        parser.parse()
    return parser.entries, None
def open_tree(tree, update):
    '''Open an LDAP URI or LDIF file, or return a tree object unchanged.'''
    if not isinstance(tree, str):
        # Already-loaded tree: pass through, no editor.
        return tree, None
    opener = open_ldap if ldapurl.isLDAPUrl(tree) else open_ldif
    return opener(tree, update)
def handle_exception(dn, op, e):
    """Output an exception message and set global variable success to False"""
    # success drives the process exit status (see __main__ guard).
    global success
    print("Exception raised trying to %s %s:\n%s" % (op, dn, e), file=sys.stderr)
    success = False
def cleanup(e):
    """Remove from entry e all attributes that are to be ignored"""
    # Snapshot the keys since we mutate the dict while iterating.
    for attr_name in list(e.keys()):
        lowered = attr_name.lower()
        # Drop excluded attributes, and (when an include set is given)
        # anything not explicitly included.
        if lowered in exclude or (include and lowered not in include):
            e.pop(attr_name)
def dn_key(dn):
    """Key function used for sorting DNs by rightmost component first."""
    # e.g. 'cn=a,dc=x' -> ['dc=x', 'cn=a'], so parents sort before children.
    return list(reversed(ldap.dn.explode_dn(dn)))
def diff_entries(old_tree, new_tree, update, out, editor=None):
    """Recursively compare the given trees and compute modification operations
    that must be applied to the old tree to make it identical to the new one.
    If out is not None, send modification operations there;
    if out is None, and update is True, apply modifications
    to old_tree directly.
    If editor is not None, changes are also sent to that editor."""
    # Only open the old tree for update when changes are applied in place.
    old_entries, ed = open_tree(old_tree, update and out is None)
    new_entries, _ = open_tree(new_tree, False)
    if out is not None:
        ed = LDIFEditor(out)
    # Every editor in this list receives every change.
    editors = []
    if ed is not None:
        editors.append(ed)
    if editor is not None:
        if isinstance(editor, list):
            editors.extend(editor)
        else:
            editors.append(editor)
    old_dns = set(old_entries.keys())
    new_dns = set(new_entries.keys())
    # Note: deletions need to be processed in reverse order because you
    # can only delete a leaf object (i.e. one that has no children).
    for dn in sorted(old_dns - new_dns, key=dn_key, reverse=True):
        for ed in editors:
            try:
                ed.del_entry(dn)
            except Exception as e:
                handle_exception(dn, 'delete', e)
    # Entries present on both sides: emit a modify when they differ.
    for dn in sorted(old_dns & new_dns, key=dn_key):
        oe = old_entries[dn]
        cleanup(oe)
        ne = new_entries[dn]
        cleanup(ne)
        # Compute differences between oe and ne as a list of
        # LDAP modification operations that transform oe into ne.
        # Note that oe and ne might be identical in LDAP sense
        # (i.e. yield an empty mod list) even if oe != ne because
        # some differences, such as the order of values for
        # multi-valued attributes, are irrelevant for LDAP.
        mod = ldap.modlist.modifyModlist(oe, ne)
        if len(mod) > 0:
            for ed in editors:
                ed.mod_entry(dn, mod)
    # Additions are processed parents-first (forward DN order).
    for dn in sorted(new_dns - old_dns, key=dn_key):
        for ed in editors:
            try:
                ed.add_entry(dn, ldap.modlist.addModlist(new_entries[dn]))
            except Exception as e:
                handle_exception(dn, 'add', e)
def update_incl_excl(ie, args):
    """Add all attributes in comma-separated list args to set ie"""
    ie.update(attr
              for arg in args
              for attr in arg.lower().split(','))
def main():
    """Command-line entry point: parse arguments, load configuration,
    then diff (and optionally sync) the two directories."""
    global conf
    parser = argparse.ArgumentParser(
        description='Compute differences between two LDAP directories')
    parser.add_argument('old', help='old directory URI')
    parser.add_argument('new', help='new directory URI')
    parser.add_argument('--update', '-u', action='store_true', default=False,
                        help='update to OLD to sync it with NEW')
    parser.add_argument('--verbose', '-v', action='store_true', default=False,
                        help='write verbose messages to stderr')
    parser.add_argument('--output', '-o', action='store', default='-',
                        help='output LDIF file (default stdout)')
    parser.add_argument('--conf', '-c', action='append',
                        default=['ldapdip.conf'],
                        help='configuration file')
    parser.add_argument('--option', '-O', action='append', default=[],
                        help='override configuration option')
    parser.add_argument('--include', '-i', action='append', default=[],
                        help='include only these attributes in comparison')
    parser.add_argument('--exclude', '-x', action='append', default=[],
                        help='exclude these attributes from comparison')
    args = parser.parse_args()
    conf = {}
    # Load default config file (ldapdip.conf), as well as any additional
    # one specified on the command line.
    for conf_file in args.conf:
        if os.path.exists(conf_file):
            # Fixed: yaml.safe_load — the config is plain data, and
            # yaml.load without an explicit Loader is deprecated and
            # unsafe on untrusted files; the context manager also closes
            # the file handle (previously leaked).
            with open(conf_file, 'r') as f:
                conf.update(yaml.safe_load(f.read()))
    # Configuration overrides from command line
    if args.verbose:
        conf['verbose'] = True
    for confopt in args.option:
        # Split at the first '=' so option values may themselves
        # contain '=' characters.
        eq = confopt.index('=')
        conf[confopt[:eq]] = confopt[eq + 1:]
    # Inclusion/exclusion processing
    # Should also allow setting these from conf???
    update_incl_excl(include, args.include)
    update_incl_excl(exclude, args.exclude)
    # Prepare output
    if args.update:
        # Updating directory in place, no LDIF output
        out = None
    else:
        out = sys.stdout if args.output == '-' else open(args.output, 'w')
    # Work!
    diff_entries(args.old, args.new, args.update, out)
if __name__ == '__main__':
    main()
    # Exit non-zero when any LDAP operation failed (handle_exception
    # clears the module-level 'success' flag).
    if not success:
        sys.exit(1)

@ -0,0 +1,12 @@
import sys
verbose = False
def err(msg):
    """Unconditionally write msg (newline-terminated) to stderr."""
    print(msg, file=sys.stderr)
def vv(msg):
    """Write msg to stderr only when module-level 'verbose' is set."""
    if verbose:
        err(msg)

@ -0,0 +1,767 @@
#! /usr/bin/env python
# hostdb
# Hosts database processing
# $Id: main.py 265777 2019-01-15 11:33:46Z quinot $
from . import db as db
from . import log as log
from .db import OrgUnit, Host, Group, Net, Zone
import argparse
import os
import re
import socket
import sys
import time
from ipaddr import IPv4Address, IPv6Address, IPAddress
from .revzones import reverse_name
#reload(sys)
#sys.setdefaultencoding('utf-8')
autogen_marker = ";;; Automatically generated -- do not edit!\n"
dhcpd_conf_template = 'dhcpd.conf.in'
html_subdir = 'html'
csv_subdir = 'csv'
csv_sep = ','
html_machines_subdir = os.path.join(html_subdir, 'machines')
def html_header(title, subdir=0):
    """Return the html header for a page.
    :param str title: Title of the page
    :param int subdir: Depth of subdirectory containing the page
    (relative to that containing hosts.css).
    """
    # Climb 'subdir' levels to reach the stylesheet at the tree root.
    components = [".."] * subdir + ["hosts.css"]
    css_path = os.path.join(*components)
    return """<html><head><meta charset="UTF-8"><title>%s</title>
<link href="%s" rel="stylesheet" type="text/css">
</head><body>""" % (title, css_path)
# Cached serial for this run; computed once by get_newserial().
newserial = None
def get_newserial():
    """Return the zone serial number to use for this run (cached).

    The serial is date-based (YYYYMMDD00); when the previous serial
    read from the SERIAL file is already at or past today's base value,
    it is simply incremented.  The SERIAL file is rewritten in place.
    """
    global newserial
    if newserial is not None:
        # Already computed during this run.
        return newserial
    today = int(time.strftime("%Y%m%d00", time.localtime(time.time())))
    try:
        serial_file = open("SERIAL", "r+")
        oldserial = int(serial_file.readline())
    except IOError:
        # Case where serial file does not exist yet: create one, and use today
        # as the new serial.
        serial_file = open("SERIAL", "w")
        oldserial = today - 1
    if (today - oldserial) > 0:
        newserial = str(today)
    else:
        newserial = str(oldserial + 1)
    serial_file.seek(0)
    serial_file.truncate()
    # (fixed: removed redundant str() around an already-str expression)
    serial_file.write(newserial + '\n')
    serial_file.close()
    return newserial
def gen_file(name, lines):
    """(Re)generate file *name* from the iterable *lines*.

    Content is written to name.new first, the previous version is kept
    as name~, and the result is made read-only.
    """
    # Context manager flushes and closes before the rename below
    # (the original relied on refcounting to close the handle).
    with open(name + '.new', 'w') as f:
        f.writelines(lines)
    try:
        os.rename(name, name + '~')
    except OSError:
        # No previous version to back up.
        pass
    os.rename(name + '.new', name)
    os.chmod(name, 0o444)
def main():
# 0. Command line
parser = argparse.ArgumentParser('Process the hosts database')
parser.add_argument('-f', '--force', action='store_true', default=False,
help='force bumping serial number')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='verbose output')
args = parser.parse_args()
log.verbose = args.verbose
# 1. Parse input
db.load_db(sys.stdin)
# 2. Populate zones and hostfile
hostfile = [
"### Automatically generated, do not edit!\n",
"\n",
"::1\tlocalhost localhost.my.domain\n",
"127.0.0.1\tlocalhost localhost.my.domain\n"]
dhcpfile = ["### Automatically generated, do not edit!\n", "\n"] \
+ open(dhcpd_conf_template, 'r').readlines()
ldif = ["### Automatically generated, do not edit!\n", "\n"]
# Generate any required entries for base OUs
for ou in list(OrgUnit.orgunits.values()):
db.ldap_render(ldif, ou)
# Set default domain for networks based on their OU
for n in list(Net.nets.values()):
if 'domain' not in n.attrs \
and 'ou' in n.attrs \
and 'domain' in OrgUnit.orgunits[n.attrs['ou']].attrs:
n.add_attr('domain', OrgUnit.orgunits[n.attrs['ou']].attrs['domain'])
hostfile_html = [html_header("Hosts")]
# Generate additional hosts for interfaces
for h in list(Host.hosts.values()):
for i in h.get_attr('interface'):
# Prefix name and aliases with host name if they start with an hyphen
for i_attr in i:
if (i_attr == 'name' or i_attr.endswith('alias')) \
and i[i_attr][0] == '-':
i[i_attr] = '%s%s' % (h.name, i[i_attr])
# Add new host
Host(i.pop('name'), **i)
# Group processing pass 1 (before host processing)
# * optionally, generate aliases of the form "<groupname>-<hostid>"
for g in list(Group.groups.values()):
if g.has_flag("aliases"):
last = 0
for h in g.hosts:
h.add_alias("%s-%d" % (g.name, last), "cname")
last = last + 1
# Host processing main loop
for h in list(Host.hosts.values()):
no_ldap = h.has_flag('noldap')
# Add implicit NAT addresses
real_addrs = h.addrs[:]
if not h.has_flag("nonat"):
for a in real_addrs:
nat_a = db.find_nat(a)
if nat_a:
h.add_attr('addr', nat_a)
# If no address is specified, try to fetch external ones
if len(h.addrs) == 0 and h.name[-1] == '.':
for ai in socket.getaddrinfo(h.name, None):
# If ai's address family is unknown (e.g. case of Python built
# without IPv6 support) then ai[4][0] is the address family (int),
# not the canonical address representation (str).
if isinstance(ai[4][0], str):
h.add_addr(ai[4][0])
dhcp_addresses = {}
primary_net = None
for a in h.addrs:
# print "%s -> %s" % (h.name, a)
# Identify what network this address belongs to.
# This is normally just the unique network with the longest
# address match. However some IP prefixes appear in more than
# one network (e.g. in the case of several distinct logical
# subnets that share the same broadcast domain, and therefore
# the same IPv6 SLAAC prefix). In this case, the ambiguity
# can be resolved if another address of the same host uniquely
# identifies one of these networks ("primary" network).
# Note that interfaces have already been expanded to distinct
# Host objects, so the primary network for a base Host is
# (correctly) not taken into account for its dependent interfaces.
n, ipnet, ipnprops = None, None, None
nets = db.find_nets(a)
ambiguous = (len(nets) > 1)
for n, ipnet, ipnprops in nets:
# The primary net is the first one that is unambiguously
# identified for this host.
if primary_net is None and len(nets) == 1:
primary_net = n
# If one or more nets were found, and the primary one
# is among them, return it.
if primary_net == n:
ambiguous = False
break
# If more that one net was found, but none matches the
# primary one, we have an unresolvable ambiguity.
if ambiguous:
n = None
if n is None:
try:
n = Net.nets[h.attrs['net']]
except KeyError:
log.err("Can't find network for %s (%s)" % (h.name, a))
n, ipnet = None, None
ipa = IPAddress(a)
if ipnet is not None:
ipnet.mark_used(ipa)
host_nicks = []
if n is None:
fqdn = h.name
# If network is unknown, assume any name is for external view
nprops = {'view': 'ext'}
else:
# Set organizational unit from network
if 'ou' in n.attrs:
n_ou = n.attrs['ou']
h_ou = h.attrs.setdefault('ou', n_ou)
if h_ou != n_ou:
print("Host %s OU %s inconsistent with network OU %s" \
% (h.name, h_ou, n_ou))
if h.name[-1] != '.':
host_nicks.extend([h.name, '%s.%s' % (h.name, n.name)])
# Get network attributes, but allow per-IPnet overrides
nprops = dict(n.attrs)
if ipnprops is not None:
nprops.update(ipnprops)
fqdn = db.get_fqdn(h.name, ipa, nprops)
if n.has_flag("dhcp") and not db.is_slac(ipa):
if n not in dhcp_addresses:
dhcp_addresses[n] = []
dhcp_addresses[n].append(a)
if h.has_flag("64ac") and n.has_flag("64ac"):
for p in n.prefixes(v=6, len=64):
if isinstance(ipa, IPv4Address):
h.addrs.append(IPv6Address(str(p.ip) + str(a)))
elif n.has_flag('slaac') and not h.has_flag('noslaac'):
for p in n.prefixes(v=6, len=64):
if not h.has_address_in(p):
for (htype, haddr) in h.mac_addrs:
if htype == 'ethernet':
h.addrs.append(db.autoconfig_address(p, haddr))
host_nicks = [fqdn] + host_nicks
# DNS direct records
view = nprops.get('view', None)
if not h.has_flag("nodirect"):
for dz in db.find_zones_for_domain(fqdn, view):
dz.add_a(fqdn, ipa)
if h.has_flag("acme-cname") and "acme-cname" in dz.attrs:
dz.add_cname("_acme-challenge." + fqdn,
fqdn + "." + dz.attrs["acme-cname"])
if not h.has_flag("nomx"):
dz.add_mxes(fqdn)
for [alias, kind] in h.aliases:
if n is not None:
if alias[-1] != '.':
host_nicks.extend([alias, '%s.%s' % (alias, n.name)])
alias = db.get_fqdn(alias, ipa, nprops)
for alias_dz in db.find_zones_for_domain(alias, view):
if kind == "mx":
alias_dz.add_mxes(alias)
else:
# "a" or "cname"
host_nicks.append(alias)
if kind == "a":
alias_dz.add_a(alias, ipa)
else:
alias_dz.add_cname(alias, fqdn + ".")
if h.has_flag("acme-cname"):
alias_dz.add_cname("_acme-challenge." + alias,
alias + dz.attrs["acme-cname"])
# DNS reverse records
if (ipnet is not None) and not (h.has_flag('noreverse') or
n.has_flag('noreverse')):
rz = db.find_reverse_zone(ipnet.prefix)
if rz is not None:
rz.add_ptr(reverse_name(rz.prefix, ipa), fqdn)
hostline = ''
for nick in host_nicks:
if len(hostline) > 0:
hostline += ' '
if nick[-1] == '.':
nick = nick[:-1]
hostline = hostline + re.sub('@\.', '', nick)
if ipnet is not None:
ipnet.set_html_hosts_line(ipa, h.html(a))
ipnet.set_hosts_line(ipa, hostline)
else:
hostfile.append('%s\t%s\n' % (ipa, hostline))
# DHCP
alias_index = 0
for (mac_type, mac_value) in h.mac_addrs:
for (n, d_addresses) in list(dhcp_addresses.items()):
n.dhcp_fixed.append(
'host %s-%u {\n option host-name "%s";\n hardware %s %s;\n'
% (h.name, alias_index, h.name, mac_type, mac_value))
# Pending debugging, IPv6 addresses are commented out
for (cl, prefix, suffix) in \
[(IPv4Address, '', ''), (IPv6Address, '#', '6')]:
cl_addresses = [str(a)
for a in d_addresses if isinstance(a, cl)]
if len(cl_addresses) > 0:
n.dhcp_fixed.append(
"%s fixed-address%s %s;\n" %
(prefix, suffix, ', '.join(cl_addresses)))
for o in h.attrs.get('dhcp'):
n.dhcp_fixed.append(' ' + o + ';\n')
n.dhcp_fixed.append('}\n\n')
alias_index = alias_index + 1
# Generate LDAP entry
if not (no_ldap or 'ou' not in h.attrs):
if db.ldap_render(ldif, h):
for alias, kind in h.aliases:
if kind != 'mx':
db.ldap_render(
ldif, h, template='alias', alias=alias, kind=kind)
else:
# Host did not render: set noldap flag to prevent it from
# being referenced in a host group.
# print "%s failed to render for LDAP" % h.name
h.add_flag('noldap')
for z in list(Zone.zones.values()):
if not z.has_root_cname:
z.add_mxes("@")
# Group processing pass 2 (after host processing):
# * generate LDAP entry
# * generate Ansible inventory
# * remove hosts from "ungrouped" set
ungrouped_hosts = [h for h in list(Host.hosts.values())
if not h.has_flag('noldap')]
ansible_inventory = []
for g in list(Group.groups.values()):
ansible_inventory.extend(["", "[%s]" % (g.name)])
ldap_hosts = {}
# Dict indexed by OU, elements are lists of Host objects
for h in g.hosts:
if h in ungrouped_hosts:
ungrouped_hosts.remove(h)
if not (h.has_flag('noldap') or 'ou' not in h.attrs):
ldap_hosts.setdefault(h.attrs['ou'], []).append(h)
ansible_inventory.append(h.instance_key())
for g.ou, g.ldap_hosts in list(ldap_hosts.items()):
db.ldap_render(ldif, g)
open('ansible_inventory', 'w').write(
'\n'.join([h.instance_key() for h in ungrouped_hosts] + ansible_inventory))
# 3. Output files
open('hostdb.ldif', 'w').write('\n'.join(ldif))
for z in list(Zone.zones.values()):
# Zone head file
hfn = "hd." + z.name
# Generated zone file
zfn = "db." + z.name
# Read zone head, identify serial placeholder
nserial_seen = 0
nzone = [autogen_marker, "\n"]
ln = len(nzone)
for l in open(hfn, "r").readlines():
nzone.append(l)
if re.match("\s+SERIALNO\s+", l):
nserial_seen = 1
serial_ln = ln
serial_placeholder = l
ln = ln + 1
if not nserial_seen:
print("Could not find serial number placeholder in %s!" % (hfn))
sys.exit(1)
# Try to load old zone file, and replace serial with placeholder (used to
# detect if this zone has changed)
try:
ozone_file = open(zfn, "r")
ozone = ozone_file.readlines()
ozone_file.close()
for ln in range(0, len(ozone)):
if re.match("\s+[0-9]+\s+;\s+serial", ozone[ln]):
ozone[ln] = serial_placeholder
except IOError:
print("No existing zone info for " + zfn)
ozone = []
nzone.append("\n")
nzone.append(";;; Body of zone " + z.name + "\n")
keys = list(z.rrs.keys())
keys.sort()
# Emit the resource records for each key, sorted for stable output.
# The first RR of a key carries the key itself in column 1; subsequent
# RRs for the same key are padded with tabs so the columns line up.
for k in keys:
    first = 1
    for rr in z.rrs[k]:
        if first:
            nzone.append('\n')
            key = k
            first = 0
        else:
            # Continuation line: replace the key with one tab per 8
            # columns of key width.  NOTE: must be integer (floor)
            # division — in Python 3, "/" yields a float and
            # str * float raises TypeError (Python 2 porting bug).
            key = "\t" * (len(k) // 8)
        nzone.append("%s\tIN\t" % (key) + "%(type)s\t%(value)s\n" % rr)
if nzone != ozone or args.force:
# Record forced indication
forced = " (serial bump forced)" if nzone == ozone else ""
# Now substitute serial in new zone
nzone[serial_ln] = re.sub(
"SERIALNO", get_newserial(), nzone[serial_ln])
nzone_file = open(zfn + ".new", "w")
nzone_file.writelines(nzone)
nzone_file.close()
try:
os.rename(zfn, zfn + "~")
except OSError:
pass
os.rename(zfn + ".new", zfn)
os.chmod(zfn, 0o444)
zstatus = "updated%s." % forced
else:
zstatus = "unchanged."
log.vv("%s %s" % (zfn, zstatus))
xlat_slash_underscore = str.maketrans("/", "_")
# Hosts header
hostfile_html.append("<p><ul>")
for net in list(Net.nets.values()):
info = []
if 'purpose' in net.attrs:
info.append(net.attrs['purpose'])
if 'addr' in net.__dict__:
info.append(str(net.addr))
hostfile_html.append("""<li><a href="#%s">%s</a> (%s)</li>
""" % (net.name, net.name, ", ".join(info)))
hostfile_html.append('</ul><small>')
for net in list(Net.nets.values()):
addr_usage = ''
ipn4 = []
total_addresses = 0
free_addresses = 0
for ipn, _ in sorted(net.ipnets,
key=lambda ipn__: ipn__[0].key()):
hostfile.append('\n# %s - %s\n\n' % (net.name, ipn.prefix))
hostfile.extend(ipn.list_hosts())
if ipn.prefix.version == 4:
total_addresses += ipn.prefix.numhosts
free_addresses += len(ipn.freelist)
ipn4.append(ipn)
used_addresses = total_addresses - free_addresses
if total_addresses > 0:
addr_usage = "%d/%d available addresses" % \
(free_addresses, total_addresses)
addr = " (%s)" % ', '.join(map(str, ipn4))
hostfile_html.append("""
<td><a name="%s"></a><h3>%s%s</h3>
%s
<table class="hosts">
""" % (net.name, net.name, addr, addr_usage))
if len(ipn4) > 0:
for n in ipn4:
hostfile_html.extend(n.html_list_hosts())
else:
for h in [h for h in list(Host.hosts.values()) if 'net' in h.attrs and h.attrs['net'] == net.name]:
for a in h.addrs:
hostfile_html.append(h.html(a))
hostfile_html.append("\n</table></td>")
for net in list(Net.nets.values()):
if len(net.dhcp_fixed) > 0:
try:
fixed_index = dhcpfile.index(
"# FIXED ASSIGNMENTS FOR %s\n" % (net.name))
dhcpfile[fixed_index + 1:fixed_index + 1] = net.dhcp_fixed
except ValueError:
print("Could not find where to generate fixed DHCP assignments" \
+ " for %s\n" \
% (net.name))
raise
gen_file('hosts', hostfile)
gen_file('dhcpd.conf', dhcpfile)
# 4. Generate HTML view of the machines
# Create the HTML subdirectories if they do not exist
for path in [html_subdir, html_machines_subdir]:
if not os.path.isdir(path):
os.mkdir(path)
def html_line_for_host(host):
    """Render one HTML table row for *host*: a link to its detail page
    plus its purpose (or a placeholder when no purpose is set).
    """
    raw_purpose = host.get_attr("purpose")
    description = "(no description)" if raw_purpose == "" else raw_purpose
    return ('<tr><td><a href="machines/{0}.html">{0}</a></td>\n'
            '<td>{1}</td>\n'
            '</tr>\n').format(host.name, description)
# Generate one file for each group
for g in list(Group.groups.values()):
groupdata = html_header(g.name) + """<h2>Group '%s'</h2> <h3>%s</h3>
""" % (g.name, g.get_attr("purpose"))
# Write the hosts table
groupdata += '<table class="hosts">'
g.hosts.sort()
for h in g.hosts:
groupdata += html_line_for_host(h)
groupdata += '</table>'
gen_file(html_subdir + os.sep + g.name + ".html", groupdata)
# Generate one file for all the machines that do not belong to any group
groupdata = html_header("Ungrouped")
groupdata += """<h2>Devices not associated to any group</h2>"""
groupdata += '<table class="hosts">'
for h in list(Host.hosts.values()):
if h.groups == []:
groupdata += html_line_for_host(h)
groupdata += '</table>'
gen_file(html_subdir + os.sep + "ungrouped.html", groupdata)
# Generate one file for each host, with all the details
for h in list(Host.hosts.values()):
hostdata = html_header(h.name, subdir=1) + \
"<h2>%s</h2>""" % h.name
purpose = h.get_attr("purpose")
if purpose != "":
hostdata += """ Purpose:<ul><li>%s</li></ul>
""" % purpose
if h.groups != []:
hostdata += """ Groups: <ul>%s</ul>
""" % ("".join(["<li><a href=../%s.html>%s</a></li>"
% (x, x) for x in h.groups]))
if h.aliases != []:
hostdata += """ Aliases: <ul>%s</ul>
""" % ("".join(['<li>' + x[0] + ' (' + x[1] + ')</li>' for x in h.aliases]))
if h.addrs != []:
hostdata += """ Addresses: <ul>%s</ul>
""" % ("".join(["<li>%s</li>" % (a) for a in h.addrs]))
if h.mac_addrs != []:
hostdata += """ MAC addresses: <ul>%s</ul>
""" % ("<li> ".join(['<li><b>' + x[0] + '</b>: ' + x[1] + '</li>' for x in h.mac_addrs]))
if h.attrs != {}:
hostdata += """ Attributes: <ul> """
for j in h.attrs:
hostdata += "<li><b>%s</b>: %s</li>" % (j, h.attrs[j])
hostdata += "</ul>"
gen_file(html_machines_subdir + os.sep + h.name + ".html", hostdata)
# Generate one table of all machines, ordered by IP address
gen_file(html_subdir + os.sep + "hosts.html", hostfile_html)
# Generate a table ordered by commission date
dated = [] # contains tuples ('date', 'hostname')
undated = [] # contains list of hostnames
for h in list(Host.hosts.values()):
if 'commissioned' in h.attrs:
dated.append((h.attrs['commissioned'], h.name))
else:
if not ('role' in h.groups):
undated.append(h.name)
dated.sort()
html = html_header("Hosts by date of commission") + """
<h2>Dated hosts</h2>
<table class="hosts">
"""
for h in dated:
html += """<tr>
<td>%s</td><td><a href="machines/%s.html">%s</a></td>
</tr>""" % (h[0], h[1], h[1])
html += """</table><h2>Non-dated hosts</h2><table class="hosts">"""
for h in undated:
html += '<tr><td><a href="machines/%s.html">%s</a></td></tr>' % (h, h)
gen_file(html_subdir + os.sep + "by_date.html", html)
# Generate an index file
index = html_header("Hosts") + """
<h1>Machine inventory</h2>
<h2>View by IP address</h2>
<ul>
<li> <a href="hosts.html">Hosts</a> </li>
</ul>
<h2>View by group</h2>
<ul>
<li> <a href="ungrouped.html">ungrouped</a>: not belonging to any group
</li>
"""
group_purpose = [(g.name, g.get_attr("purpose"))
for g in list(Group.groups.values())]
group_purpose.sort()
for g_name, g_purpose in group_purpose:
index += '<li><a href="%s.html">%s</a>: %s</li>' % (
g_name, g_name, g_purpose)
index += """</ul>
<h2>View by date of commission</h2>
<ul>
<li> <a href="by_date.html">Hosts by date</a> </li>
</ul>
<h2>Download CSVs</h2>
<ul>
<li> <a href="inventory.csv">Inventory</a> </li>
</ul>
"""
gen_file(html_subdir + os.sep + "index.html", index)
# Generate a .csv file
csv = ""
# ... csv table headers
csv += csv_sep.join(
["name", "commissioned"] +
[k for k in Group.groups]) + '\n'
# ... csv table body
# Create a sublist of hosts relevant for listing in the inventory
hosts = [h for h in list(Host.hosts.values())
if 'role' not in h.groups and
'virtual' not in h.groups and
'commissioned' in h.attrs]
for h in hosts:
try:
csv += h.name + csv_sep
if 'commissioned' in h.attrs:
csv += "%s%s" % (h.attrs['commissioned'], csv_sep)
else:
csv += csv_sep
for g in Group.groups:
if g in h.groups:
csv += '1' + csv_sep
else:
csv += '0' + csv_sep
except Exception:
log.err("Failed to generate CSV for " +
h.name + str(h.attrs))
csv += '\n'
if not os.path.isdir(csv_subdir):
os.mkdir(csv_subdir)
gen_file(csv_subdir + os.sep + "inventory.csv", csv)
# Script entry point: run the full hostdb generation driver.
if __name__ == "__main__":
    main()

@ -0,0 +1,63 @@
# revzones
# Compute reverse names for IP networks and hosts
from ipaddr import IPv4Network, IPv6Network, IPAddress
def reverse_name(ipnet, ipa=None):
    """Return the DNS reverse-mapping name for network *ipnet*, or for
    host address *ipa* within *ipnet* when *ipa* is given.

    IPv4 uses 8-bit decimal labels under in-addr.arpa; IPv6 uses 4-bit
    hex nibbles under ip6.arpa.  A prefix length that is not a multiple
    of the label width yields an RFC 2317-style "<value>-<prefixlen>"
    label for the partial trailing group.
    """
    if ipnet.version == 6:
        zone, step, fmt, addrlen = "ip6.arpa", 4, 'x', 128
    else:
        zone, step, fmt, addrlen = "in-addr.arpa", 8, 'd', 32

    def group(packed, bit):
        # Extract the step-bit-wide group starting at MSB-first bit
        # offset `bit` from the packed binary address.
        shift = 8 - (bit % 8 + step)
        return (packed[bit // 8] >> shift) & ((1 << step) - 1)

    prefixlen = ipnet.prefixlen
    net_labels = []
    for bit in range(0, prefixlen, step):
        label = format(group(ipnet.packed, bit), fmt)
        if prefixlen - bit < step:
            # Partial trailing group: RFC 2317-style delegation label.
            label += "-%d" % prefixlen
        net_labels.append(label)

    host_labels = []
    if ipa:
        # The host part restarts at the last group boundary at or below
        # the prefix length, so a partial network group is re-covered.
        for bit in range(prefixlen - prefixlen % step, addrlen, step):
            host_labels.append(format(group(ipa.packed, bit), fmt))

    # Labels are generated most-significant first; in the DNS name the
    # most-significant group sits rightmost, next to the zone root.
    return ".".join(host_labels[::-1] + net_labels[::-1] + [zone])
# Self-test: print the reverse zone name for each sample network and the
# full reverse record name for a sample host address inside it.  Covers
# IPv4 and IPv6, including prefix lengths not aligned on a label
# boundary (/26, /18, /62), which produce RFC 2317-style labels.
if __name__ == '__main__':
    for (ipnet, ipa) in [
            (IPv4Network('192.168.12.128/26'), '192.168.12.154'),
            (IPv4Network('192.168.12.128/24'), '192.168.12.154'),
            (IPv4Network('192.168.12.128/18'), '192.168.12.154'),
            (IPv4Network('192.168.12.128/16'), '192.168.12.154'),
            (IPv6Network('2001:470:1f0b:1b0c::/64'),
             '2001:470:1f0b:1b0c::1234:5678'),
            (IPv6Network('2001:470:1f0b:1b0c::/62'),
             '2001:470:1f0b:1b0c::1234:5678'),
            (IPv6Network('2001:470:1f0b:1b0c::/48'),
             '2001:470:1f0b:1b0c::1234:5678')]:
        print("Net %s -> %s" % (str(ipnet), reverse_name(ipnet, None)))
        print("Host %s -> %s" % (ipa, reverse_name(ipnet, IPAddress(ipa))))

@ -0,0 +1,36 @@
# YaML loader that rejects duplicate keys in objects
from yaml.constructor import ConstructorError
from yaml.nodes import MappingNode
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
class UniqueKeyLoader(Loader):
    """YAML Loader that raises on duplicate mapping keys instead of
    silently keeping the last occurrence (PyYAML's default behavior).
    """

    def construct_mapping(self, node, deep=False):
        """Build a dict from a YAML mapping node.

        Raises ConstructorError when the node is not a mapping, when a
        key is unhashable, or when a key appears more than once.
        """
        if not isinstance(node, MappingNode):
            raise ConstructorError(
                None, None,
                "expected a mapping node, but found %s" % node.id,
                node.start_mark)
        result = {}
        for k_node, v_node in node.value:
            k = self.construct_object(k_node, deep=deep)
            # Keys must be hashable to go into a dict at all.
            try:
                hash(k)
            except TypeError as exc:
                raise ConstructorError(
                    "while constructing a mapping", node.start_mark,
                    "found unacceptable key (%s)" % exc, k_node.start_mark)
            # Reject keys already seen in this mapping.
            if k in result:
                raise ConstructorError(
                    "while constructing a mapping", node.start_mark,
                    "found duplicate key", k_node.start_mark)
            result[k] = self.construct_object(v_node, deep=deep)
        return result

@ -0,0 +1,11 @@
[project]
name = "hostdb"
version = "0.0.1"
dependencies = [
"ipaddr",
"jinja2",
"pyyaml",
]
[project.scripts]
hostdb = "hostdb.main:main"
Loading…
Cancel
Save