@codersquid
Last active September 11, 2017 19:37
Simple script to parse S3 access logs; it is brittle and does not handle errors.
"""
The MIT License (MIT)
Copyright (c) 2014 Research Compendia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import print_function
import argparse
from collections import Counter, defaultdict
import datetime
import os
import re
"""
parses lines from s3 access logs
from a python prompt you could do the following
import s3parse
fh = open('biglogs')
nomatch, op_group, op_count = s3parse.match_logfile(fh)
See how many of each type of request there were in the log
In [58]: op_count
Out[58]: Counter({'REST.GET.OBJECT': 100933, 'REST.HEAD.OBJECT': 85939,
'REST.PUT.OBJECT': 71028, 'REST.GET.BUCKET': 1236, 'REST.DELETE.OBJECT': 833,
'REST.GET.LOCATION': 50, 'REST.GET.VERSIONING': 46, 'REST.GET.BUCKETPOLICY':
13, 'REST.GET.WEBSITE': 12, 'REST.GET.LOGGING_STATUS': 12,
'REST.GET.LIFECYCLE': 12, 'REST.GET.CORS': 12, 'REST.GET.ACL': 12,
'REST.GET.REQUEST_PAYMENT': 12, 'REST.GET.TAGGING': 12,
'REST.GET.NOTIFICATION': 12, 'REST.OPTIONS.PREFLIGHT': 5,
'REST.PUT.LOGGING_STATUS': 1})
Get a list of all the GET requests
rest_get_object = op_group['REST.GET.OBJECT']
Get a list of all the HTTP OK status GET requests
ok_list = [x for x in rest_get_object if x['status'] == '200']
Get a list of all the requests for anything in the materials bucket
materials_list = [x for x in ok_list if x['file_name'].startswith('material')]
Get a list of all the requests for anything in the articles bucket
article_list = [x for x in ok_list if x['file_name'].startswith('article')]
Count number of bytes for all of the materials requests
x = 0
for m in materials_list: x += int(m['object_size'])
show them in a human friendly form
s3parse.bytes2human(x)
"""
log_pattern = re.compile(r"""
(?P<bucket_owner>\S+)\s
(?P<bucket>\S+)\s
(?P<date>\[\d\d/\w\w\w/\d\d\d\d:\d\d:\d\d.+\])\s
(?P<ip>\S+)\s
(?P<requester>\S+)\s
(?P<requestid>\S+)\s
(?P<operation>\S+)\s
(?P<file_name>[\w\d\.\-_/]+)\s
\"(?P<request_uri>[^\"]*)\"\s
(?P<status>\d\d\d)\s
(?P<errorcode>[\-\w]+)\s
(?P<bytes_sent>[\d\-]+)\s
(?P<object_size>[\d\-]+)\s
(?P<total_time>[\d\-]+)\s
(?P<turnaround_time>[\d\-]+)\s
"(?P<referrer>[^\"]*)"\s
"(?P<user_agent>[^\"]*)"\s
(?P<versionid>\S+)\s
""",
re.VERBOSE)
agentpattern = re.compile(r'googlebot|bingbot|slurp|yahooseeker|baiduspider|iaskspider|s3|aws', re.I)
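# A minimal sanity check for log_pattern, assuming a synthetic log line in
# the 2014-era S3 access log format (every field value below is made up):
#
#   sample = ('owner mybucket [06/Feb/2014:00:00:38 +0000] 192.0.2.3 '
#             'requester REQID REST.GET.OBJECT articles/paper.pdf '
#             '"GET /articles/paper.pdf HTTP/1.1" 200 - 2662992 2662992 '
#             '70 10 "-" "curl/7.30.0" - \n')
#   log_pattern.search(sample).groupdict()['operation']  # 'REST.GET.OBJECT'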
def match_logfile(fh):
    """Parse an S3 access log file handle line by line.

    Returns a tuple (nomatch, op_group, op_count, matches): the lines that
    did not match the pattern, the parsed results grouped by operation, a
    Counter of operations, and the full list of parsed results.
    """
    nomatch = []
    matches = []
    op_count = Counter()
    op_group = defaultdict(list)
    for line in fh:
        match = log_pattern.search(line)
        if match is None:
            nomatch.append(line)
            continue
        result = match.groupdict()
        result['datetime'] = match_date(result['date'])
        result['material'] = result['file_name'].startswith('material')
        result['article'] = result['file_name'].startswith('article')
        result['ignore_agent'] = ignore_user_agent(result['user_agent'])
        matches.append(result)
        op = result['operation']
        op_group[op].append(result)
        op_count[op] += 1
    return nomatch, op_group, op_count, matches
def filter_startswith(results, filename):
    return [request for request in results if request['file_name'].startswith(filename)]
def group_by_status(results):
    status_group = defaultdict(list)
    for r in results:
        status_group[r['status']].append(r)
    return status_group
def sum_object_size(results):
    # object_size is '-' for requests with no object, so skip non-numeric values
    size = 0
    for r in results:
        value = r.get('object_size', '0')
        if value.isdigit():
            size += int(value)
    return size
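# For example (values are illustrative):
#   sum_object_size([{'object_size': '10'}, {'object_size': '-'}])  # 10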
def match_date(datestr):
    date = datetime.datetime.strptime(datestr, '[%d/%b/%Y:%H:%M:%S +0000]')
    return date
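# For example, the format yields a naive datetime (the +0000 is matched
# literally):
#   match_date('[06/Feb/2014:00:00:38 +0000]')
#   # datetime.datetime(2014, 2, 6, 0, 0, 38)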
def ignore_user_agent(agentstr):
    return agentpattern.search(agentstr) is not None
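# For example (agent strings are illustrative):
#   ignore_user_agent('Mozilla/5.0 (compatible; Googlebot/2.1)')  # True
#   ignore_user_agent('curl/7.30.0')                              # False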
# http://goo.gl/zeJZl
def bytes2human(n, format="%(value)i%(symbol)s"):
    """
    >>> bytes2human(10000)
    '9K'
    >>> bytes2human(100001221)
    '95M'
    """
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        # each successive symbol is another factor of 1024 (2**10)
        prefix[s] = 1 << (i + 1) * 10
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return format % locals()
    return format % dict(symbol=symbols[0], value=n)
def report(results, filtertype):
    filtered_list = filter_startswith(results.get('REST.GET.OBJECT', []), filtertype)
    status_group = group_by_status(filtered_list)
    request_count = len(status_group.get('200', []))
    count206 = len(status_group.get('206', []))
    size = sum_object_size(status_group.get('200', []))
    size206 = sum_object_size(status_group.get('206', []))
    print('%s downloads for %s' % (request_count + count206, filtertype))
    print('%s bytes requested: %s (%s)' % (filtertype, size, bytes2human(size)))
    print('%s bytes requested with 206: %s (%s)' % (filtertype, size206, bytes2human(size206)))
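# A sketch of calling report directly from a prompt, given the op_group
# returned by match_logfile:
#
#   with open('biglogs') as fh:
#       nomatch, op_group, op_count, matches = match_logfile(fh)
#   report(op_group, 'materials')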
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="scrape s3 access logfile")
    parser.add_argument('logfile', help='logfile')
    args = parser.parse_args()
    assert os.path.isfile(args.logfile), 'logfile is not a valid file'
    with open(args.logfile) as fh:
        nomatch, op_group, op_count, matches = match_logfile(fh)
    for op, count in op_count.items():
        print('%s: %s' % (op, count))
    report(op_group, 'materials')
    report(op_group, 'articles')