# coding: utf-8
# # Telemetry - Test Pilot KPI Validity Check
#
# In this notebook we follow the standard for computing MAU and DAU as captured in [this notebook](https://gist.github.com/mreid-moz/8b2c2b1c6594d658ca5e), in order to address [bug #1264049](https://bugzilla.mozilla.org/show_bug.cgi?id=1264049). The notebook produces a CSV that will be made available on [sql.telemetry.mozilla.org](https://sql.telemetry.mozilla.org).
# In[1]:
import boto3, botocore
import pyspark
from requests import get as fetch
from collections import defaultdict
from pyspark.sql import Row
import csv, os.path, json
from datetime import datetime as dt, timedelta, date
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark.sql.functions import *
from boto3.s3.transfer import S3Transfer
from moztelemetry import get_pings, get_pings_properties, standards
get_ipython().magic(u'pylab inline')
# ## Define functions for the task
# In[47]:
def fmt(the_date, date_format="%Y%m%d"):
    return dt.strftime(the_date, date_format)

def convert_date(p):
    # Parse the ISO-8601 creationDate string and convert it to unix nanos.
    # On failure the exception is returned instead of the ping so a bad record
    # doesn't kill the whole map.
    try:
        p['creationDate'] = dt.strptime(p.get('creationDate'), "%Y-%m-%dT%H:%M:%S.%fZ")
        if p['creationDate']:
            p['creationDate'] = standards.unix_time_nanos(p['creationDate'])
        return p
    except Exception as e:
        return e
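# A quick illustration (toy timestamp, not from real data) of the parse step
# convert_date applies before handing the value to standards.unix_time_nanos:
example_ts = "2016-05-01T12:34:56.789Z"
dt.strptime(example_ts, "%Y-%m-%dT%H:%M:%S.%fZ")  # -> datetime(2016, 5, 1, 12, 34, 56, 789000)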
def get_num_tests(p):
    # Guard against pings without a test payload; len(None) would raise.
    tests = p.get('payload/tests', None)
    p['num_tests'] = len(tests) if tests is not None else 0
    return p
def get_tests(p):
    r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
    active_tests = set([el['addon_id'] for el in dict(r.json())['results']])
    result = p.get('payload/tests', None)
    if result is not None:
        p['active_tests'] = [x for x in result if x in active_tests] # XXX Need to make sure they haven't uninstalled
    else:
        p['active_tests'] = []
    p['payload/tests'] = None
    return p

def process_pings(p):
    p = convert_date(p)
    p = get_num_tests(p)
    p = get_tests(p)
    return p
def should_be_updated(record,
                      target_col="day",
                      generated_col="generated_on",
                      date_format="%Y%m%d"):
    target = dt.strptime(record[target_col], date_format)
    generated = dt.strptime(record[generated_col], date_format)

    # Don't regenerate data that was already updated today.
    today = fmt(dt.utcnow(), date_format)
    if record[generated_col] >= today:
        return False

    diff = generated - target
    return diff.days <= 10
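# Hedged illustration on made-up rows (dates are arbitrary): a row generated
# shortly after its target day falls inside the 10-day window and gets
# recomputed, while a row already generated today is left alone.
should_be_updated({"day": "20160401", "generated_on": "20160402"})  # expected True (recompute)
should_be_updated({"day": fmt(dt.utcnow() - timedelta(5)), "generated_on": fmt(dt.utcnow())})  # expected False (fresh)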
def update_engagement_csv(dataset, old_filename, new_filename,
                          cutoff_days=30, date_format="%Y%m%d"):
    cutoff_date = dt.utcnow() - timedelta(cutoff_days)
    cutoff = fmt(cutoff_date, date_format)
    print "Cutoff date: {}".format(cutoff)

    fields = ["day", "dau", "mau", "generated_on"]

    r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
    active_tests = [el['addon_id'] for el in dict(r.json())['results']]
    active_test_names = [x.replace('mozilla.com', '').replace('@', '') for x in active_tests]
    fields.extend(["dau_" + x for x in active_test_names])
    fields.extend(["mau_" + x for x in active_test_names])

    should_write_header = True
    potential_updates = {}
    # Carry over rows we won't touch
    if os.path.exists(old_filename):
        with open(old_filename) as csv_old:
            reader = csv.DictReader(csv_old)
            with open(new_filename, "w") as csv_new:
                writer = csv.DictWriter(csv_new, fields)
                writer.writeheader()
                should_write_header = False
                for row in reader:
                    if row['day'] < cutoff:
                        writer.writerow(row)
                    else:
                        potential_updates[row['day']] = row

    with open(new_filename, "a") as csv_new:
        writer = csv.DictWriter(csv_new, fields)
        if should_write_header:
            writer.writeheader()

        for i in range(cutoff_days, 0, -1):
            target_day = fmt(dt.utcnow() - timedelta(i), date_format)
            if target_day in potential_updates and not should_be_updated(potential_updates[target_day]):
                # It's fine as-is.
                writer.writerow(potential_updates[target_day])
            else:
                # Update it.
                print "We should update data for {}".format(target_day)
                record = {"day": target_day, "generated_on": fmt(dt.utcnow(), date_format)}

                print "Starting dau {} at {}".format(target_day, dt.utcnow())
                record["dau"] = standards.dau(dataset, target_day)
                print "Finished dau {} at {}".format(target_day, dt.utcnow())
                print "Starting mau {} at {}".format(target_day, dt.utcnow())
                record["mau"] = standards.mau(dataset, target_day)
                print "Finished mau {} at {}".format(target_day, dt.utcnow())

                for el in active_tests:
                    key = el.replace('mozilla.com', '').replace("@", "")
                    test_df = dataset[array_contains(dataset.active_tests, el)]
                    print "Starting test dau {} at {}".format(target_day, dt.utcnow())
                    record['dau_' + key] = standards.dau(test_df, target_day)
                    print "Finished test dau {} at {}".format(target_day, dt.utcnow())
                    print "Starting test mau {} at {}".format(target_day, dt.utcnow())
                    record["mau_" + key] = standards.mau(test_df, target_day)
                    print "Finished test mau {} at {}".format(target_day, dt.utcnow())

                print record
                # Write the freshly computed row so it lands in the new CSV.
                writer.writerow(record)
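# Hedged illustration of the carry-over/recompute decision on made-up rows
# (no files touched, toy dates only): rows older than the cutoff are carried
# over verbatim, rows inside the window are re-examined with should_be_updated.
toy_cutoff = fmt(dt.utcnow() - timedelta(30))
toy_rows = [
    {"day": "20150101", "generated_on": "20150102"},                              # long before the cutoff
    {"day": fmt(dt.utcnow() - timedelta(5)), "generated_on": fmt(dt.utcnow())}]   # recent, generated today
for toy_row in toy_rows:
    if toy_row["day"] < toy_cutoff:
        print "carry over", toy_row["day"]
    elif not should_be_updated(toy_row):
        print "keep as-is", toy_row["day"]
    else:
        print "recompute", toy_row["day"]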
# # Configure task variables
# In[3]:
data_bucket = "net-mozaws-prod-us-west-2-pipeline-analysis"
engagement_basename = "testpilot_engagement_ratio.csv"
new_engagement_basename = "testpilot_engagement_ratio.{}.csv".format(dt.strftime(dt.utcnow(), "%Y%m%d"))
s3path = "mreid/TxP_maudau"
data_version = "v1"
# Store the latest complete file in a subdir for hive / presto compatibility
engagement_key = "{}/{}/{}".format(s3path, data_version, engagement_basename)
new_engagement_key = "{}/{}".format(s3path, new_engagement_basename)
client = boto3.client('s3', 'us-west-2')
transfer = S3Transfer(client)
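# For reference, the values above resolve to the following S3 locations
# (spelled out from the key templates, purely as a reading aid):
#   s3://net-mozaws-prod-us-west-2-pipeline-analysis/mreid/TxP_maudau/v1/testpilot_engagement_ratio.csv
#   s3://net-mozaws-prod-us-west-2-pipeline-analysis/mreid/TxP_maudau/testpilot_engagement_ratio.<YYYYMMDD>.csv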
# # Retrieve list of current tests in the field
# In[4]:
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = [el['addon_id'] for el in dict(r.json())['results']]
#active_tests = dict.fromkeys([el['addon_id'] for el in dict(r.json())['results']])
#active_tests = {key: 0 for key in active_tests}
# # Retrieve TxP pings and create DataFrame
# In[5]:
pings = get_pings(sc, doc_type="testpilot", app="Firefox")
subset = get_pings_properties(pings, ["clientId",
"creationDate",
"meta/submissionDate",
"payload/tests"])
# Convert date string to appropriate format and count number of current tests
subset = subset.map(lambda p : process_pings(p))
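# After process_pings, each element is a plain dict shaped roughly like the
# sketch below (values are illustrative, not real data):
# {"clientId": "<uuid>",
#  "creationDate": <unix time in nanoseconds>,
#  "meta/submissionDate": "20160501",
#  "payload/tests": None,
#  "num_tests": 1,
#  "active_tests": ["<addon_id>"]}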
# In[6]:
subset.take(3)
# In[10]:
sqlContext = SQLContext(sc)
rows = subset.map(lambda p: Row(clientId=p['clientId'],
activityTimestamp=p['creationDate'],
submission_date_s3=p['meta/submissionDate'],
num_tests=p['num_tests'],
active_tests=p['active_tests']))
dataset = sqlContext.createDataFrame(rows)
dataset.take(1)
# # Filter datasets for any tests and specific tests
# In[22]:
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = [el['addon_id'] for el in dict(r.json())['results']]
# In[33]:
fields = ["day", "dau_txp", "mau_txp", "generated_on"]
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = [el['addon_id'] for el in dict(r.json())['results']]
# Keep the full addon ids in active_tests for filtering below; use the
# stripped names only for column labels.
active_test_names = [x.replace('mozilla.com', '').replace('@', '') for x in active_tests]
fields.extend(["dau_" + x for x in active_test_names])
fields.extend(["mau_" + x for x in active_test_names])
# In[15]:
total_txp_gross = dataset.select('clientId').distinct().count()
# In[16]:
total_txp_net = dataset[dataset.num_tests > 0].select('clientId').distinct().count()
# In[17]:
test_count_dfs = defaultdict(pyspark.sql.DataFrame)
for el in active_tests:
    test_count_dfs[el] = dataset[array_contains(dataset.active_tests, el)]
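# Minimal sketch of the array_contains filter on a throwaway DataFrame with
# hypothetical addon ids (not real experiments), just to show the mechanics:
toy_df = sqlContext.createDataFrame([
    Row(clientId="a", active_tests=["example-experiment@mozilla.com"]),
    Row(clientId="b", active_tests=["another-experiment@mozilla.com"])])
toy_df[array_contains(toy_df.active_tests, "example-experiment@mozilla.com")].count()  # expected 1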
# In[18]:
test_counts = {key: 0 for key in active_tests}
for k, v in test_count_dfs.iteritems():
    test_counts[k] = v.select('clientId').distinct().count()  # standards.count_distinct_clientids(v)
# In[19]:
print "Clients with test pilot = {}".format(total_txp_gross)
print "Clients with at least 1 test = {}".format(total_txp_net)
for k, v in test_counts.iteritems():
print "Clients with {test} = {count}".format(test=k, count=v)
# # Fetch existing data
# In[48]:
try:
    transfer.download_file(data_bucket, engagement_key, engagement_basename)
except botocore.exceptions.ClientError as e:
    # If the file wasn't there, that's ok. Otherwise, abort!
    if e.response['Error']['Code'] != "404":
        raise e
    else:
        print "Did not find an existing file at '{}'".format(engagement_key)
#%time update_engagement_csv(dataset_w_tests, engagement_basename, new_engagement_basename, cutoff_days=35)
get_ipython().magic(u'time update_engagement_csv(dataset, engagement_basename, new_engagement_basename, cutoff_days=35)')
# In[ ]:
## Upload the updated csv file to S3
# Update the day-specific file:
transfer.upload_file(new_engagement_basename, data_bucket, new_engagement_key)
# Update the "main" file
transfer.upload_file(new_engagement_basename, data_bucket, engagement_key)
# In[ ]: