@jpf, created April 18, 2014 17:53
Monitors and displays changes made to system preferences on Mac OS X
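# Poll `defaults read` (the full dump of every preference domain) at a fixed
# interval, parse the old-style plist text it prints, and report any keys that
# were added, changed, or deleted since the previous poll. Requires the macOS
# `defaults` command-line tool.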
import time
import subprocess
import re
import collections


class NSPlistParser():
    '''Thanks to @rndmcnlly for writing this!'''

    def __init__(self):
        # these will all get interpreted as raw literal values in the parser
        self.BARE_STRING = r"\w+"
        # self.QUOTED_STRING = r"\"(\\\"|[^\"])*\""
        self.QUOTED_STRING = r'"([^"\\]|(\\\\.)|(\\.))*"'
        self.HEX_BLOCK = r"[0-9a-f]+"
        self.BINARY = r"<[ 0-9a-f]*>"
        self.NUMBER = r"-?\d+(\.\d*)?"
        self.LEXICON = [
            ('L_BRACE', r"\{"),
            ('R_BRACE', r"\}"),
            ('L_PAREN', r"\("),
            ('R_PAREN', r"\)"),
            ('EQUALS', r"="),
            ('COMMA', r","),
            ('SEMICOLON', r";"),
            ('LITERAL', "|".join([self.BARE_STRING,
                                  r'"\\{3,}"',
                                  self.QUOTED_STRING,
                                  self.HEX_BLOCK,
                                  self.BINARY,
                                  self.NUMBER])),
            ('NEWLINE', r"\n"),
            ('SKIP', r"\s+"),
        ]
        self.Token = collections.namedtuple('Token',
                                            ['typ', 'val', 'line', 'pos'])

    def tokenize(self, s):
        # Build one regex with a named group per token type, then scan the
        # input with it, yielding Token tuples and tracking line/column.
        token_regex = '|'.join(['(?P<%s>%s)' % pair for pair in self.LEXICON])
        get_token = re.compile(token_regex).match
        line = 1
        pos = line_start = 0
        mo = get_token(s)
        while mo is not None:
            typ = mo.lastgroup
            if typ == 'NEWLINE':
                line_start = pos
                line += 1
            elif typ != 'SKIP':
                val = mo.group(typ)
                token = self.Token(typ, val, line, mo.start() - line_start)
                yield token
            pos = mo.end()
            mo = get_token(s, pos)
        if pos != len(s):
            raise RuntimeError('Unexpected character %r on line %d' %
                               (s[pos], line))

    def parse(self, tokens):
        # Recursive-descent parser over the token stream produced by tokenize().
        context = {'current_token': None}

        def advance():
            try:
                context['current_token'] = next(tokens)
            except StopIteration:
                context['current_token'] = None

        def get_type():
            return context['current_token'].typ

        def get_text():
            return context['current_token'].val

        def get_line():
            return context['current_token'].line

        def matches(t):
            return get_type() == t

        def consume(t):
            if not matches(t):
                err_str = 'Unexpected token %s (%s) on line %d; expected %s'
                err_msg = err_str % (get_type(), get_text(), get_line(), t)
                raise RuntimeError(err_msg)
            text = get_text()
            advance()
            return text

        def parse_dict():
            consume('L_BRACE')
            items = {}
            while not matches('R_BRACE'):
                key = consume('LITERAL')
                consume('EQUALS')
                val = parse_value()
                consume('SEMICOLON')
                # FIXME: unquote quoted strings
                items[key] = val
            consume('R_BRACE')
            return items

        def parse_list():
            consume('L_PAREN')
            items = []
            while not matches('R_PAREN'):
                items.append(parse_value())
                if not matches('R_PAREN'):
                    consume('COMMA')
            consume('R_PAREN')
            return items

        def parse_value():
            if matches('L_BRACE'):
                return parse_dict()
            elif matches('L_PAREN'):
                return parse_list()
            else:
                return consume('LITERAL')

        advance()
        return parse_value()


class DictChangeDetector():
    def __init__(self):
        self.previous = False
        self.current = False
        self.changes_found = []

    def update(self, data):
        if self.current:
            self.previous = self.current
            self.current = data
            self.find_changes()
        else:
            self.current = data

    def find_changes(self):
        # Walk the current snapshot recursively, comparing it against the
        # previous one; record (key path, key, new value) for anything that
        # changed, and mark keys that disappeared as 'DELETED'.
        def changes(prev, cur, stack=[]):
            found = []
            for key in cur.keys():
                # only recurse when both sides are dict-like
                if (hasattr(cur[key], 'keys') and key in prev
                        and hasattr(prev[key], 'keys')):
                    stack.append(key)
                    found.extend(changes(prev[key], cur[key], stack))
                    stack.pop()
                else:
                    if key in prev and prev[key] == cur[key]:
                        continue
                    else:
                        found.append((list(stack), key, cur[key]))
            for missing_key in set(prev.keys()) - set(cur.keys()):
                found.append((list(stack), missing_key, 'DELETED'))
            return found
        self.changes_found = changes(self.previous, self.current)

    def changed(self):
        if len(self.changes_found):
            return True
        else:
            return False

    def print_changes(self):
        for change in self.changes_found:
            (stack, key, value) = change
            print("Change: {} {} {}".format(stack, key, value))


p = NSPlistParser()
detector = DictChangeDetector()
seconds = 5
print("Checking defaults every {} seconds".format(seconds))
while True:
    # `defaults read` dumps every preference domain; universal_newlines makes
    # check_output return text rather than bytes on Python 3.
    output = subprocess.check_output(['defaults', 'read'],
                                     universal_newlines=True)
    defaults = p.parse(p.tokenize(output))
    detector.update(defaults)
    if detector.changed():
        detector.print_changes()
    time.sleep(seconds)
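For a one-off look at a single preference domain rather than continuous monitoring, the same parser can be reused directly. A minimal sketch, assuming the classes above are in scope (same file or imported), with com.apple.dock used purely as an example domain:

import subprocess

parser = NSPlistParser()
# Read one domain instead of the full dump; any domain name works here.
raw = subprocess.check_output(['defaults', 'read', 'com.apple.dock'],
                              universal_newlines=True)
dock_prefs = parser.parse(parser.tokenize(raw))
for key in sorted(dock_prefs):
    print("{}: {}".format(key, dock_prefs[key]))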