@gerco / countbw.py (secret gist)
Created Apr 27, 2017

#!/usr/bin/python
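# Summarize per-client and per-group bandwidth from Majordomo data files,
# cache the results, and expose them to Zabbix or to shell scripts.
# Example invocations (the notify command is only a placeholder):
#   countbw.py update --on-limits-changed '/path/to/notify-script'
#   countbw.py download --mac 1c:91:48:6e:98:44
#   countbw.py check --group a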
import os
import datetime
import sys
import ipaddress
import json
import pickle
import argparse
kilo = 1024
mega = kilo * kilo
giga = kilo * mega
today = datetime.date.today().isoformat()
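# Majordomo input files and the local result cache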
downsize_filename = '/tmp/majordomo_downsize'
daily_filename = '/tmp/majordomo_db/majordomo_daily_' + today
cachename = '/tmp/countbw.cache'
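# MAC address -> quota group mapping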
groups = {
    '1c:91:48:6e:98:44': 'a',
    'f0:dc:e2:f2:7e:af': 'a',
    'b8:8d:12:12:78:08': 'a',
    '28:6a:ba:df:bd:c9': 'a'
}
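# Per-group limits in bytes; only a download ('dn') limit is configured here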
limits = {
    'a': { 'dn': 512 * mega }
}
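# Traffic to these networks is not counted against group quotas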
exclude = {
    # Apple Computer Inc (for iCloud backups, software updates, etc)
    ipaddress.ip_network(u'2620:149::/36'),
    ipaddress.ip_network(u'17.0.0.0/8')
}
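# True if addr falls inside one of the excluded networks ('other' is never excluded)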
def excluded(addr):
    if addr == 'other': return False
    addr = ipaddress.ip_address(unicode(addr))
    for net in exclude:
        if addr in net:
            return True
    return False
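# Accumulate per-client and per-group up/down byte counts from one Majordomo CSV file:
# fields[1] is the client MAC, fields[2] the remote address, and fields[6]/fields[9]
# feed the 'dn' and 'up' counters respectively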
def count_bw(filename, group_bw={}, client_bw={}):
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split(',')
            mac = fields[1]
            client = client_bw.get(mac, { 'dn': 0, 'up': 0 })
            client_bw[mac] = client
            client['dn'] += int(float(fields[6]))
            client['up'] += int(float(fields[9]))
            # Exclude only for groups since we only compare quota for groups
            if excluded(fields[2]): continue
            grpname = groups.get(mac, None)
            if grpname is not None:
                grp = group_bw.get(grpname, { 'dn': 0, 'up': 0 })
                group_bw[grpname] = grp
                grp['dn'] += int(float(fields[6]))
                grp['up'] += int(float(fields[9]))
    return (group_bw, client_bw)
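# Look up a counter by name, checking group totals before per-client totals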
def get_bw(key, type, *dicts):
    for d in dicts:
        if key in d:
            return d[key][type]
    raise KeyError(key)
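# For each configured limit type, report whether current usage exceeds the limit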
def check_limits(name, limits, group_bw, client_bw):
    return {ltype: get_bw(name, ltype, group_bw, client_bw) > limits[ltype] for ltype in limits.keys()}
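# Recompute usage and limit status from the raw files; I/O errors "fail open" via exit(0)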
def calculate():
    try:
        (group_bw, client_bw) = count_bw(daily_filename, {}, {})
        if os.path.exists(downsize_filename):
            (group_bw, client_bw) = count_bw(downsize_filename, group_bw, client_bw)
        lim_status = {name: check_limits(name, limits[name], group_bw, client_bw) for name in limits.keys()}
        return (group_bw, client_bw, lim_status)
    except IOError as err:
        # Unable to do the calculation. Print error and exit(0) to "fail open"
        print('Unable to calculate: {0}'.format(err))
        sys.exit(0)
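# Map MAC addresses to host names using the DHCP lease file ('*' means no name)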
def read_name_mac_map():
    ret = {}
    with open('/tmp/dhcp.leases', 'r') as f:
        for line in f:
            fields = line.split(' ')
            if fields[3] != '*':
                ret[fields[1]] = fields[3]
    return ret
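# Emit every known client MAC in Zabbix low-level discovery JSON format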
def zabbix_discovery(args):
    names = read_name_mac_map()
    data = { 'data': [
        {'{#MAC}':m, '{#NAME}':names.get(m, m)}
        for m in client_bw.keys()] }
    print(json.dumps(data))
    sys.exit(0)
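# Print a single up/down counter for Zabbix; unknown keys report ZBX_NOTSUPPORTED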
def print_bw(args, type):
    try:
        if args.group is not None:
            print(group_bw[args.group][type])
        elif args.mac is not None:
            print(client_bw[args.mac][type])
    except KeyError:
        print('ZBX_NOTSUPPORTED')
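# Return cached results, falling back to a fresh calculation if the cache cannot be read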
def load_cache():
    try:
        with open(cachename, 'rb') as cachefile:
            obj = pickle.load(cachefile)
            return (obj['groups'], obj['clients'], obj['limits'])
    except Exception:
        # Cache missing or unreadable, calculate the numbers
        return calculate()
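# Recalculate usage, rewrite the cache, and optionally run a command when any
# limit's exceeded state has changed since the previous run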
def update_cache(args):
    (g, c, l) = calculate()
    # Persist the data to the cache
    with open(cachename, 'wb') as f:
        pickle.dump({
            'groups': g,
            'clients': c,
            'limits': l
        }, f, pickle.HIGHEST_PROTOCOL)
    if args.on_limits_changed is not None:
        run_command = False
        # Check whether any limit status has changed; if so, execute the given command
        for key in l:
            for ltype in l[key]:
                if lim_status[key][ltype] != l[key][ltype]:
                    run_command = True
                    print("Limit %s for %s has changed from %s to %s" % (ltype, key, lim_status[key][ltype], l[key][ltype]))
        if run_command:
            os.system(args.on_limits_changed)
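# Exit with status 1 if the named group or client has exceeded any of its limits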
def check_limit(args):
    name = args.group
    if name is None:
        name = args.mac
    if name in lim_status:
        for ltype, value in lim_status[name].iteritems():
            if value:
                sys.exit(1)
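# Print each configured limit with its current usage and exceeded flag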
def show(args):
    for name, l in lim_status.iteritems():
        for ltype, value in l.iteritems():
            print("%s limit %s: actual: %d, max: %d, exceeded: %s" % (
                name,
                ltype,
                get_bw(name, ltype, group_bw, client_bw),
                limits[name][ltype],
                value))
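# Ad-hoc debugging: dump the freshly calculated data structures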
def debug(args):
    print(calculate())
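# Add the mutually exclusive --mac/--group selector shared by several subcommands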
def add_client_match_args(parser):
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--mac', help='MAC address')
    group.add_argument('--group', help='Group name')
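# Load cached data, build the CLI and dispatch to the selected subcommand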
def main():
    global group_bw, client_bw, lim_status
    (group_bw, client_bw, lim_status) = load_cache()
    root_parser = argparse.ArgumentParser(description='Bandwidth counter tool for Majordomo data')
    # dest gives the subcommand a name so that "required" produces a clean error message
    subparsers = root_parser.add_subparsers(dest='command')
    subparsers.required = True
    parser_zbx_disc = subparsers.add_parser('zabbix-discovery', help='Return available information in Zabbix Discovery format')
    parser_zbx_disc.set_defaults(func=zabbix_discovery)
    parser_clt_dn = subparsers.add_parser('download', help='Print data downloaded by a client or group')
    add_client_match_args(parser_clt_dn)
    parser_clt_dn.set_defaults(func=lambda args: print_bw(args, 'dn'))
    parser_clt_up = subparsers.add_parser('upload', help='Print data uploaded by a client or group')
    add_client_match_args(parser_clt_up)
    parser_clt_up.set_defaults(func=lambda args: print_bw(args, 'up'))
    parser_cache = subparsers.add_parser('update', help='Update the bandwidth usage and status cache')
    parser_cache.add_argument('--on-limits-changed', help='Shell command to execute if any of the limits have been newly exceeded')
    parser_cache.set_defaults(func=update_cache)
    parser_check = subparsers.add_parser('check', help='Check a group or client against its limits; exits with status 1 if a limit is exceeded')
    add_client_match_args(parser_check)
    parser_check.set_defaults(func=check_limit)
    parser_show = subparsers.add_parser('show', help='Show configured limits and current state')
    parser_show.set_defaults(func=show)
    parser_debug = subparsers.add_parser('debug', help='Whatever debug function I want to run')
    parser_debug.set_defaults(func=debug)
    args = root_parser.parse_args()
    args.func(args)
    sys.exit(0)

if __name__ == '__main__':
    main()