# TxP - Create MAU/DAU table for all Tests
# coding: utf-8
# In[1]:
import boto3, botocore
from requests import get as fetch
from pprint import pprint
from collections import defaultdict, namedtuple
from pyspark.sql import Row
import csv, os.path, json
from datetime import datetime as dt, timedelta, date
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark.sql.functions import *
from boto3.s3.transfer import S3Transfer
from moztelemetry import get_pings, get_pings_properties, standards
get_ipython().magic(u'pylab inline')
# ## Define functions for the task
# In[2]:
def fmt(the_date, date_format="%Y%m%d"):
    return dt.strftime(the_date, date_format)


def convert_date(p):
    # Parse the ping's ISO-8601 creationDate and replace it with a Unix
    # timestamp in nanoseconds. Pings with malformed or missing dates pass
    # through unchanged (the original returned the exception object, which
    # broke the downstream .get() calls).
    try:
        p['creationDate'] = dt.strptime(p.get('creationDate'), "%Y-%m-%dT%H:%M:%S.%fZ")
        if p['creationDate']:
            p['creationDate'] = standards.unix_time_nanos(p['creationDate'])
    except Exception:
        pass  # leave the original value in place
    return p
def remove_disabled(tests):
    # Drop the 'last_disabled' bookkeeping entry so only test ids remain.
    if tests is not None:
        tests.pop('last_disabled', None)
    return tests
def get_enabled_tests(p, active_tests):
    tests = p.get('payload/tests', None)
    enabled_tests = remove_disabled(tests)
    if enabled_tests is not None:
        # XXX Need to make sure they haven't uninstalled
        p['active_tests'] = [x for x in enabled_tests if x in active_tests]
        del p['payload/tests']
    return p
def process_pings(p, active_tests):
    p = convert_date(p)
    p = get_enabled_tests(p, active_tests)
    return p
def expand_tests(ping):
    # Fan a ping out into one (ping, test) pair per enabled test. Pings that
    # never got an 'active_tests' key (no payload/tests) expand to nothing
    # instead of raising a KeyError.
    return ((ping, test) for test in ping.get('active_tests', []))
def create_rows(x):
    ping, test = x
    d = {
        'clientId': ping['clientId'],
        'activityTimestamp': ping['creationDate'],
        'submission_date_s3': ping['meta/submissionDate'],
        'test': test
    }
    return Row(**d)
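# Illustration with hypothetical values: a ping reporting two enabled tests
# expands into two Rows, one per test.
#
# >>> ping = {'clientId': 'abc', 'creationDate': 1462060800000000000,
# ...         'meta/submissionDate': '20160501',
# ...         'active_tests': ['test-a@mozilla.com', 'test-b@mozilla.com']}
# >>> [r.test for r in map(create_rows, expand_tests(ping))]
# ['test-a@mozilla.com', 'test-b@mozilla.com']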
def update_engagement_csv(dataset, old_filename, new_filename,
                          cutoff_days=30, date_format="%Y%m%d"):
    # Note: the original signature also listed `active_tests` after the
    # default arguments, which is a SyntaxError; the function is called
    # without it below, so it reads the module-level `active_tests` instead.
    cutoff_date = dt.utcnow() - timedelta(cutoff_days)
    cutoff = fmt(cutoff_date, date_format)
    print "Cutoff date: {}".format(cutoff)

    fields = ["test", "day", "dau", "mau", "generated_on"]
    should_write_header = True
    potential_updates = {}
    # Carry over rows we won't touch. Rows inside the cutoff window are
    # keyed on (test, day); keying on day alone would collapse rows across
    # tests.
    if os.path.exists(old_filename):
        with open(old_filename) as csv_old:
            reader = csv.DictReader(csv_old)
            with open(new_filename, "w") as csv_new:
                writer = csv.DictWriter(csv_new, fields)
                writer.writeheader()
                should_write_header = False
                for row in reader:
                    if row['day'] < cutoff:
                        writer.writerow(row)
                    else:
                        potential_updates[(row['test'], row['day'])] = row

    with open(new_filename, "a") as csv_new:
        writer = csv.DictWriter(csv_new, fields)
        if should_write_header:
            writer.writeheader()
        for test in active_tests:
            for i in range(cutoff_days, 0, -1):
                target_day = fmt(dt.utcnow() - timedelta(i), date_format)
                key = (test, target_day)
                if key in potential_updates and not should_be_updated(potential_updates[key]):
                    # It's fine as-is.
                    writer.writerow(potential_updates[key])
                else:
                    # Update it.
                    print "We should update data for {t} on {d}".format(t=test, d=target_day)
                    record = {"test": test, "day": target_day,
                              "generated_on": fmt(dt.utcnow(), date_format)}
                    per_test = dataset.filter(dataset.test == test)
                    print "Starting dau {} at {}".format(target_day, dt.utcnow())
                    record["dau"] = standards.dau(per_test, target_day)
                    print "Finished dau {} at {}".format(target_day, dt.utcnow())
                    print "Starting mau {} at {}".format(target_day, dt.utcnow())
                    record["mau"] = standards.mau(per_test, target_day)
                    print "Finished mau {} at {}".format(target_day, dt.utcnow())
                    writer.writerow(record)
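# The loop above calls should_be_updated(), which this notebook never
# defines (it presumably lived in an earlier revision or a shared helper).
# Below is a minimal stand-in, assuming a row should be recomputed until a
# full 28-day MAU window has elapsed past its `day`, since late-arriving
# pings can still change the counts before then. The 28-day window and the
# logic are assumptions, not the original implementation.
def should_be_updated(record, mau_days=28, date_format="%Y%m%d"):
    day = dt.strptime(record["day"], date_format)
    generated = dt.strptime(record["generated_on"], date_format)
    # Recompute if the row was generated before the MAU window for its day
    # had fully closed; after that the counts should be stable.
    return generated < day + timedelta(mau_days)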
# # Configure task variables
# In[3]:
sqlContext = SQLContext(sc)
data_bucket = "net-mozaws-prod-us-west-2-pipeline-analysis"
engagement_basename = "txp_tests_engagement_ratio.csv"
new_engagement_basename = "txp_tests_engagement_ratio.{}.csv".format(dt.strftime(dt.utcnow(), "%Y%m%d"))
s3path = "rweiss/TxP_tests_maudau"
data_version = "v1"
# Store the latest complete file in a subdir for hive / presto compatibility
engagement_key = "{}/{}/{}".format(s3path, data_version, engagement_basename)
new_engagement_key = "{}/{}".format(s3path, new_engagement_basename)
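# With the values above, the keys resolve to:
#   engagement_key     -> "rweiss/TxP_tests_maudau/v1/txp_tests_engagement_ratio.csv"
#   new_engagement_key -> "rweiss/TxP_tests_maudau/txp_tests_engagement_ratio.<YYYYMMDD>.csv"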
client = boto3.client('s3', 'us-west-2')
transfer = S3Transfer(client)
# # Retrieve list of current tests in the field
# In[4]:
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
# r.json() already returns a dict, so no dict() wrapper is needed.
active_tests = [el['addon_id'] for el in r.json()['results']]
print active_tests
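# active_tests is a list of add-on ids for the currently fielded Test Pilot
# experiments; the exact ids vary over time as experiments launch and retire.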
# # Retrieve TxP pings and create DataFrame
# In[5]:
pings = get_pings(sc, doc_type="testpilot", app="Firefox")
subset = get_pings_properties(pings, ["clientId",
"creationDate",
"meta/submissionDate",
"payload/tests"])
subset = subset.map(lambda p: process_pings(p, active_tests))
rows = subset.flatMap(expand_tests).map(create_rows)
dataset = sqlContext.createDataFrame(rows)
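# Optional sanity check: the DataFrame should expose the four columns that
# update_engagement_csv reads -- activityTimestamp, clientId,
# submission_date_s3, and test.
dataset.printSchema()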
# In[8]:
try:
    transfer.download_file(data_bucket, engagement_key, engagement_basename)
except botocore.exceptions.ClientError as e:
    # If the file wasn't there, that's ok. Otherwise, abort!
    if e.response['Error']['Code'] != "404":
        raise e
    else:
        print "Did not find an existing file at '{}'".format(engagement_key)
get_ipython().magic(u'time update_engagement_csv(dataset, engagement_basename, new_engagement_basename, cutoff_days=35)')
# ## Upload the updated csv file to S3
# In[9]:
# Update the day-specific file:
transfer.upload_file(new_engagement_basename, data_bucket, new_engagement_key)
# Update the "main" file
transfer.upload_file(new_engagement_basename, data_bucket, engagement_key)