Telemetry - Test Pilot KPI Validity Check
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Telemetry - Test Pilot KPI Validity Check\n",
"\n",
"In this notebook we follow the standard method for computing MAU and DAU captured in [this notebook](https://gist.github.com/mreid-moz/8b2c2b1c6594d658ca5e), in order to address [bug #1264049](https://bugzilla.mozilla.org/show_bug.cgi?id=1264049). The output is a CSV that will be made available on [sql.telemetry.mozilla.org](https://sql.telemetry.mozilla.org)."
]
},
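{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a rough orientation, the two helpers below approximate what the `standards.dau` and `standards.mau` helpers from `moztelemetry` are assumed to compute: distinct clients active on a single day versus over a trailing 28-day window. The column names `clientId` and `submission_date_s3` match the DataFrame built later in this notebook; the exact windowing used by `moztelemetry.standards` may differ, and the real computations below use the library helpers."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustrative approximation of the DAU/MAU definitions only; the pipeline below\n",
"# uses moztelemetry's standards.dau / standards.mau.\n",
"from datetime import datetime as dt, timedelta\n",
"\n",
"def approx_dau(df, target_day):\n",
"    # Distinct clients whose ping was submitted on target_day (YYYYMMDD string).\n",
"    return df.filter(df.submission_date_s3 == target_day).select('clientId').distinct().count()\n",
"\n",
"def approx_mau(df, target_day, date_format='%Y%m%d'):\n",
"    # Distinct clients whose pings were submitted in the 28 days ending on target_day.\n",
"    end = dt.strptime(target_day, date_format)\n",
"    start = dt.strftime(end - timedelta(27), date_format)\n",
"    return df.filter((df.submission_date_s3 >= start) & (df.submission_date_s3 <= target_day)).select('clientId').distinct().count()"
]
},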
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Unable to parse whitelist (/home/hadoop/anaconda2/lib/python2.7/site-packages/moztelemetry/histogram-whitelists.json). Assuming all histograms are acceptable.\n",
"Populating the interactive namespace from numpy and matplotlib\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING: pylab import has clobbered these variables: ['rand', 'cbrt', 'rank', 'cosh', 'hypot', 'array', 'tan', 'size', 'isnan', 'randn', 'log', 'floor', 'sum', 'sqrt', 'split', 'rint', 'log10', 'sin', 'repeat', 'log2', 'cos', 'ceil', 'broadcast', 'sinh', 'trunc', 'expm1', 'tanh', 'exp', 'log1p', 'mean']\n",
"`%matplotlib` prevents importing * from pylab and numpy\n"
]
}
],
"source": [
"import pyspark\n",
"import boto3, botocore\n",
"from requests import get as fetch\n",
"from collections import defaultdict\n",
"from pyspark.sql import Row\n",
"import csv, os.path, json\n",
"from datetime import datetime as dt, timedelta, date\n",
"from pyspark.sql import SQLContext\n",
"from pyspark.sql.types import *\n",
"from pyspark.sql.functions import *\n",
"from boto3.s3.transfer import S3Transfer\n",
"from moztelemetry import get_pings, get_pings_properties, standards\n",
"\n",
"%pylab inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define functions for the task"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [],
"source": [
"def fmt(the_date, date_format=\"%Y%m%d\"):\n",
"    return dt.strftime(the_date, date_format)\n",
"\n",
"def convert_date(p):\n",
"    try:\n",
"        p['creationDate'] = dt.strptime(p.get('creationDate'), \"%Y-%m-%dT%H:%M:%S.%fZ\")\n",
"        if p['creationDate']:\n",
"            p['creationDate'] = standards.unix_time_nanos(p['creationDate'])\n",
"        return p\n",
"    except Exception as e:\n",
"        return e\n",
"\n",
"def get_num_tests(p):\n",
"    # Count the tests reported in the ping, guarding against a missing payload/tests field.\n",
"    result = p.get('payload/tests', None)\n",
"    if result is not None:\n",
"        p['num_tests'] = len(result)\n",
"    return p\n",
"\n",
"def get_tests(p):\n",
"    # NOTE: this fetches the experiments API once per ping, on the executors.\n",
"    r = fetch('https://testpilot.firefox.com/api/experiments?format=json')\n",
"    active_tests = set([el['addon_id'] for el in dict(r.json())['results']])\n",
"    result = p.get('payload/tests', None)\n",
"    if result is not None:\n",
"        p['active_tests'] = [x for x in result if x in active_tests]  # XXX Need to make sure they haven't uninstalled\n",
"        p['payload/tests'] = None\n",
"    return p\n",
"\n",
"\n",
"def process_pings(p):\n",
"    p = convert_date(p)\n",
"    p = get_num_tests(p)\n",
"    p = get_tests(p)\n",
"    return p\n",
"\n",
"def should_be_updated(record,\n",
"                      target_col=\"day\",\n",
"                      generated_col=\"generated_on\",\n",
"                      date_format=\"%Y%m%d\"):\n",
"    target = dt.strptime(record[target_col], date_format)\n",
"    generated = dt.strptime(record[generated_col], date_format)\n",
"\n",
"    # Don't regenerate data that was already updated today.\n",
"    today = fmt(dt.utcnow(), date_format)\n",
"    if record[generated_col] >= today:\n",
"        return False\n",
"\n",
"    diff = generated - target\n",
"    return diff.days <= 10\n",
"\n",
"def update_engagement_csv(dataset, old_filename, new_filename,\n",
"                          cutoff_days=30, date_format=\"%Y%m%d\"):\n",
"    cutoff_date = dt.utcnow() - timedelta(cutoff_days)\n",
"    cutoff = fmt(cutoff_date, date_format)\n",
"    print \"Cutoff date: {}\".format(cutoff)\n",
"\n",
"    fields = [\"day\", \"dau\", \"mau\", \"generated_on\"]\n",
"    r = fetch('https://testpilot.firefox.com/api/experiments?format=json')\n",
"    active_tests = [el['addon_id'] for el in dict(r.json())['results']]\n",
"\n",
"    active_test_names = [x.replace('mozilla.com', '').replace('@', '') for x in active_tests]\n",
"    fields.extend([\"dau_\" + x for x in active_test_names])\n",
"    fields.extend([\"mau_\" + x for x in active_test_names])\n",
"\n",
"    should_write_header = True\n",
"    potential_updates = {}\n",
"    # Carry over rows we won't touch\n",
"    if os.path.exists(old_filename):\n",
"        with open(old_filename) as csv_old:\n",
"            reader = csv.DictReader(csv_old)\n",
"            with open(new_filename, \"w\") as csv_new:\n",
"                writer = csv.DictWriter(csv_new, fields)\n",
"                writer.writeheader()\n",
"                should_write_header = False\n",
"                for row in reader:\n",
"                    if row['day'] < cutoff:\n",
"                        writer.writerow(row)\n",
"                    else:\n",
"                        potential_updates[row['day']] = row\n",
"\n",
"    with open(new_filename, \"a\") as csv_new:\n",
"        writer = csv.DictWriter(csv_new, fields)\n",
"        if should_write_header:\n",
"            writer.writeheader()\n",
"\n",
"        for i in range(cutoff_days, 0, -1):\n",
"            target_day = fmt(dt.utcnow() - timedelta(i), date_format)\n",
"            if target_day in potential_updates and not should_be_updated(potential_updates[target_day]):\n",
"                # It's fine as-is.\n",
"                writer.writerow(potential_updates[target_day])\n",
"            else:\n",
"                # Update it.\n",
"                print \"We should update data for {}\".format(target_day)\n",
"                record = {\"day\": target_day, \"generated_on\": fmt(dt.utcnow(), date_format)}\n",
"                print \"Starting dau {} at {}\".format(target_day, dt.utcnow())\n",
"                record[\"dau\"] = standards.dau(dataset, target_day)\n",
"                print \"Finished dau {} at {}\".format(target_day, dt.utcnow())\n",
"                print \"Starting mau {} at {}\".format(target_day, dt.utcnow())\n",
"                record[\"mau\"] = standards.mau(dataset, target_day)\n",
"                print \"Finished mau {} at {}\".format(target_day, dt.utcnow())\n",
"\n",
"                for el in active_tests:\n",
"                    key = el.replace('mozilla.com', '').replace(\"@\", \"\")\n",
"                    test_df = dataset[array_contains(dataset.active_tests, el)]\n",
"                    print \"Starting test dau {} at {}\".format(target_day, dt.utcnow())\n",
"                    record['dau_' + key] = standards.dau(test_df, target_day)\n",
"                    print \"Finished test dau {} at {}\".format(target_day, dt.utcnow())\n",
"                    print \"Starting test mau {} at {}\".format(target_day, dt.utcnow())\n",
"                    record[\"mau_\" + key] = standards.mau(test_df, target_day)\n",
"                    print \"Finished test mau {} at {}\".format(target_day, dt.utcnow())\n",
"\n",
"                print record\n",
"# test_count_dfs = defaultdict(pyspark.sql.DataFrame)\n",
"# for el in active_tests:\n",
"# test_count_dfs[el] = dataset[array_contains(dataset.active_tests, el)]\n",
"\n",
"# test_counts = dict.fromkeys(active_tests)\n",
"# test_counts = {key: 0 for key in test_counts}\n",
"\n",
"# for k, v in dict(test_count_dfs).iteritems():\n",
"# test_counts[k] = v.select('clientId').distinct().count()#standards.count_distinct_clientids(v)\n",
" \n",
"# print \"We should update data for {}\".format(target_day)\n",
"# record = {\"day\": target_day, \"generated_on\": fmt(dt.utcnow(), date_format)}\n",
"# print \"Starting dau {} at {}\".format(target_day, dt.utcnow())\n",
"# record[\"dau\"] = standards.dau(dataset, target_day)\n",
"# print \"Finished dau {} at {}\".format(target_day, dt.utcnow())\n",
"# print \"Starting mau {} at {}\".format(target_day, dt.utcnow())\n",
"# record[\"mau\"] = standards.mau(dataset, target_day)\n",
"# print \"Finished mau {} at {}\".format(target_day, dt.utcnow())\n",
"\n",
"# for el in active_tests:\n",
"# key = el.replace('mozilla.com', '').replace(\"@\", \"\")\n",
"# test_df = dataset[array_contains(dataset.active_tests, el)]\n",
"# print \"Starting dau {} at {}\".format(target_day, dt.utcnow())\n",
"# record['dau_' + key] = standards.dau(test_df, target_day)\n",
"# print \"Finished dau {} at {}\".format(target_day, dt.utcnow())\n",
"# print \"Starting mau {} at {}\".format(target_day, dt.utcnow())\n",
"# record[\"mau_\" + key] = standards.mau(test_df, target_day)\n",
"# print \"Finished mau {} at {}\".format(target_day, dt.utcnow())\n",
" \n",
"# print record"
]
},
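{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of `should_be_updated` on a few made-up records: a row whose `generated_on` is today is never regenerated, a row generated within ten days of its target day is refreshed, and an old, settled row is left alone. The dates below are illustrative only."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Made-up records, purely to illustrate the regeneration window used above.\n",
"sample_rows = [\n",
"    {'day': fmt(dt.utcnow() - timedelta(3)), 'generated_on': fmt(dt.utcnow())},                 # generated today -> False\n",
"    {'day': fmt(dt.utcnow() - timedelta(5)), 'generated_on': fmt(dt.utcnow() - timedelta(1))},  # 4-day lag -> True\n",
"    {'day': '20160101', 'generated_on': '20160301'},                                            # settled long ago -> False\n",
"]\n",
"for row in sample_rows:\n",
"    print row, should_be_updated(row)"
]
},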
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Configure task variables"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"data_bucket = \"net-mozaws-prod-us-west-2-pipeline-analysis\"\n",
"engagement_basename = \"testpilot_engagement_ratio.csv\"\n",
"new_engagement_basename = \"testpilot_engagement_ratio.{}.csv\".format(dt.strftime(dt.utcnow(), \"%Y%m%d\"))\n",
"s3path = \"mreid/TxP_maudau\"\n",
"data_version = \"v1\"\n",
"\n",
"# Store the latest complete file in a subdir for hive / presto compatibility\n",
"engagement_key = \"{}/{}/{}\".format(s3path, data_version, engagement_basename)\n",
"\n",
"new_engagement_key = \"{}/{}\".format(s3path, new_engagement_basename)\n",
"\n",
"client = boto3.client('s3', 'us-west-2')\n",
"transfer = S3Transfer(client)"
]
},
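{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above only configures the S3 locations; the publish step itself is sketched here in outline. `fetch_previous_csv` and `publish_csv` are hypothetical helpers wrapping the `S3Transfer` client configured above (pull the previously published CSV so `update_engagement_csv` can carry rows over, then push the regenerated file), not functions the pipeline already defines."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from botocore.exceptions import ClientError\n",
"\n",
"def fetch_previous_csv(bucket, key, local_name):\n",
"    # Pull the previously published CSV so existing rows can be carried over.\n",
"    try:\n",
"        transfer.download_file(bucket, key, local_name)\n",
"        return True\n",
"    except ClientError:\n",
"        # No previous file yet (first run) or no access; start from scratch.\n",
"        return False\n",
"\n",
"def publish_csv(local_name, bucket, key):\n",
"    # Push a regenerated CSV back to S3.\n",
"    transfer.upload_file(local_name, bucket, key)\n",
"\n",
"# Typical use around the update:\n",
"#   fetch_previous_csv(data_bucket, engagement_key, engagement_basename)\n",
"#   ... run update_engagement_csv(...) ...\n",
"#   publish_csv(new_engagement_basename, data_bucket, new_engagement_key)\n",
"#   publish_csv(new_engagement_basename, data_bucket, engagement_key)"
]
},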
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Retrieve list of current tests in the field"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"r = fetch('https://testpilot.firefox.com/api/experiments?format=json')\n",
"active_tests = [el['addon_id'] for el in dict(r.json())['results']]\n",
"#active_tests = dict.fromkeys([el['addon_id'] for el in dict(r.json())['results']])\n",
"#active_tests = {key: 0 for key in active_tests}"
]
},
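{
"cell_type": "markdown",
"metadata": {},
"source": [
"The addon IDs come back in forms like `tabcentertest1@mozilla.com` or `@activity-streams`, and the same string munging that turns them into column-friendly names is repeated in several cells below; here is a small hypothetical helper capturing that convention."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"def normalize_test_name(addon_id):\n",
"    # e.g. 'tabcentertest1@mozilla.com' -> 'tabcentertest1', '@activity-streams' -> 'activity-streams'\n",
"    return addon_id.replace('mozilla.com', '').replace('@', '')\n",
"\n",
"print [normalize_test_name(x) for x in active_tests]"
]
},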
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Retrieve TxP pings and create DataFrame"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"pings = get_pings(sc, doc_type=\"testpilot\", app=\"Firefox\")\n",
"\n",
"subset = get_pings_properties(pings, [\"clientId\",\n",
"                                      \"creationDate\",\n",
"                                      \"meta/submissionDate\",\n",
"                                      \"payload/tests\"])\n",
"\n",
"# Convert date string to appropriate format and count number of current tests\n",
"subset = subset.map(lambda p : process_pings(p))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"[{'active_tests': [u'@activity-streams'],\n",
" 'clientId': u'6a4f8c6e-da27-5c49-8c70-383b3b779842',\n",
" 'creationDate': 1.4620617149140001e+18,\n",
" 'meta/submissionDate': u'20160501',\n",
" 'num_tests': 1,\n",
" 'payload/tests': None},\n",
" {'active_tests': [u'@activity-streams'],\n",
" 'clientId': u'3a35caf4-f234-9e41-9d94-a27bfe86b1db',\n",
" 'creationDate': 1.4620639625460001e+18,\n",
" 'meta/submissionDate': u'20160501',\n",
" 'num_tests': 1,\n",
" 'payload/tests': None},\n",
" {'active_tests': [],\n",
" 'clientId': u'0e12c78c-2b29-0a49-bed2-2aaa04163ce2',\n",
" 'creationDate': 1.462064152624e+18,\n",
" 'meta/submissionDate': u'20160501',\n",
" 'num_tests': 0,\n",
" 'payload/tests': None}]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"subset.take(3)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"[Row(active_tests=[u'@activity-streams'], activityTimestamp=1.4620617149140001e+18, clientId=u'6a4f8c6e-da27-5c49-8c70-383b3b779842', num_tests=1, submission_date_s3=u'20160501')]"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sqlContext = SQLContext(sc)\n",
"rows = subset.map(lambda p: Row(clientId=p['clientId'],\n",
"                                activityTimestamp=p['creationDate'],\n",
"                                submission_date_s3=p['meta/submissionDate'],\n",
"                                num_tests=p['num_tests'],\n",
"                                active_tests=p['active_tests']))\n",
"\n",
"dataset = sqlContext.createDataFrame(rows)\n",
"dataset.take(1)"
]
},
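{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before breaking the population down by test, a single-day spot check of the standard helpers on the full Test Pilot DataFrame; the target day here is simply yesterday in UTC, and the per-day loop in `update_engagement_csv` later does the same thing for the whole window."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Spot-check the standard DAU/MAU helpers on the full DataFrame for one recent day.\n",
"check_day = fmt(dt.utcnow() - timedelta(1))\n",
"print 'DAU for {}: {}'.format(check_day, standards.dau(dataset, check_day))\n",
"print 'MAU for {}: {}'.format(check_day, standards.mau(dataset, check_day))"
]
},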
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Filter datasets for any tests and specific tests"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"r = fetch('https://testpilot.firefox.com/api/experiments?format=json')\n",
"active_tests = [el['addon_id'] for el in dict(r.json())['results']]"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"fields = [\"day\", \"dau_txp\", \"mau_txp\", \"generated_on\"]\n",
"r = fetch('https://testpilot.firefox.com/api/experiments?format=json')\n",
"active_tests = [el['addon_id'] for el in dict(r.json())['results']]\n",
"active_tests = [x.replace('mozilla.com', '') for x in active_tests]\n",
"active_tests = [x.replace('@', '') for x in active_tests]\n",
"fields.extend([\"dau_\" + x for x in active_tests])\n",
"fields.extend([\"mau_\" + x for x in active_tests])"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"total_txp_gross = dataset.select('clientId').distinct().count()"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"total_txp_net = dataset[dataset.num_tests > 0].select('clientId').distinct().count()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"test_count_dfs = defaultdict(pyspark.sql.DataFrame)\n",
"for el in active_tests:\n",
"    test_count_dfs[el] = dataset[array_contains(dataset.active_tests, el)]"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [
{
"ename": "Py4JJavaError",
"evalue": "An error occurred while calling o324.count.\n: org.apache.spark.SparkException: Job aborted due to stage failure: Task 502 in stage 30.0 failed 4 times, most recent failure: Lost task 502.3 in stage 30.0 (TID 5856, ip-172-31-23-222.us-west-2.compute.internal): org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\tat java.lang.Thread.run(Thread.java:745)\n\nDriver stacktrace:\n\tat org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)\n\tat scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)\n\tat scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)\n\tat org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat scala.Option.foreach(Option.scala:236)\n\tat org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)\n\tat org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)\n\tat org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1929)\n\tat org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:927)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)\n\tat org.apache.spark.rdd.RDD.withScope(RDD.scala:316)\n\tat org.apache.spark.rdd.RDD.collect(RDD.scala:926)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:166)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)\n\tat org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2125)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1537)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1544)\n\tat 
org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1554)\n\tat org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1553)\n\tat org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2138)\n\tat org.apache.spark.sql.DataFrame.count(DataFrame.scala:1553)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat java.lang.reflect.Method.invoke(Method.java:606)\n\tat py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)\n\tat py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)\n\tat py4j.Gateway.invoke(Gateway.java:259)\n\tat py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)\n\tat py4j.commands.CallCommand.execute(CallCommand.java:79)\n\tat py4j.GatewayConnection.run(GatewayConnection.java:209)\n\tat java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat 
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\t... 1 more\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mPy4JJavaError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-18-deab2d409ec3>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mv\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mdict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest_count_dfs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0miteritems\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m \u001b[0mtest_counts\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mk\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mv\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mselect\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'clientId'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdistinct\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;31m#standards.count_distinct_clientids(v)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32m/usr/lib/spark/python/pyspark/sql/dataframe.py\u001b[0m in \u001b[0;36mcount\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 267\u001b[0m \u001b[1;36m2\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 268\u001b[0m \"\"\"\n\u001b[1;32m--> 269\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_jdf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 270\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 271\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mignore_unicode_prefix\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/py4j/java_gateway.pyc\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args)\u001b[0m\n\u001b[0;32m 536\u001b[0m \u001b[0manswer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgateway_client\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msend_command\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcommand\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 537\u001b[0m return_value = get_return_value(answer, self.gateway_client,\n\u001b[1;32m--> 538\u001b[1;33m self.target_id, self.name)\n\u001b[0m\u001b[0;32m 539\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mtemp_arg\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mtemp_args\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/usr/lib/spark/python/pyspark/sql/utils.py\u001b[0m in \u001b[0;36mdeco\u001b[1;34m(*a, **kw)\u001b[0m\n\u001b[0;32m 43\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mdeco\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 45\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 46\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mpy4j\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mprotocol\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mPy4JJavaError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 47\u001b[0m \u001b[0ms\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjava_exception\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtoString\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/py4j/protocol.pyc\u001b[0m in \u001b[0;36mget_return_value\u001b[1;34m(answer, gateway_client, target_id, name)\u001b[0m\n\u001b[0;32m 298\u001b[0m raise Py4JJavaError(\n\u001b[0;32m 299\u001b[0m \u001b[1;34m'An error occurred while calling {0}{1}{2}.\\n'\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 300\u001b[1;33m format(target_id, '.', name), value)\n\u001b[0m\u001b[0;32m 301\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 302\u001b[0m raise Py4JError(\n",
"\u001b[1;31mPy4JJavaError\u001b[0m: An error occurred while calling o324.count.\n: org.apache.spark.SparkException: Job aborted due to stage failure: Task 502 in stage 30.0 failed 4 times, most recent failure: Lost task 502.3 in stage 30.0 (TID 5856, ip-172-31-23-222.us-west-2.compute.internal): org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\tat java.lang.Thread.run(Thread.java:745)\n\nDriver stacktrace:\n\tat org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)\n\tat scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)\n\tat scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)\n\tat org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat scala.Option.foreach(Option.scala:236)\n\tat org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)\n\tat org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)\n\tat org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1929)\n\tat org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:927)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)\n\tat org.apache.spark.rdd.RDD.withScope(RDD.scala:316)\n\tat org.apache.spark.rdd.RDD.collect(RDD.scala:926)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:166)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)\n\tat org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2125)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1537)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1544)\n\tat 
org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1554)\n\tat org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1553)\n\tat org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2138)\n\tat org.apache.spark.sql.DataFrame.count(DataFrame.scala:1553)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat java.lang.reflect.Method.invoke(Method.java:606)\n\tat py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)\n\tat py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)\n\tat py4j.Gateway.invoke(Gateway.java:259)\n\tat py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)\n\tat py4j.commands.CallCommand.execute(CallCommand.java:79)\n\tat py4j.GatewayConnection.run(GatewayConnection.java:209)\n\tat java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt1/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000003/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat 
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\t... 1 more\n"
]
}
],
"source": [
"test_counts = dict.fromkeys(active_tests)\n",
"test_counts = {key: 0 for key in test_counts}\n",
"\n",
"for k, v in dict(test_count_dfs).iteritems():\n",
"    test_counts[k] = v.select('clientId').distinct().count()  # standards.count_distinct_clientids(v)"
]
},
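{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `JSONDecodeError` buried in the traceback above comes from `get_tests`: every ping processed on the executors issues its own request to the Test Pilot API, and any non-JSON response (rate limiting, a transient error page) fails the task. One possible fix, sketched below, is to fetch the active-test set once on the driver and bind it into the mapping function, then rebuild the processed RDD so no HTTP call happens per ping."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch: fetch the experiments list once on the driver instead of once per ping.\n",
"r = fetch('https://testpilot.firefox.com/api/experiments?format=json')\n",
"driver_active_tests = set([el['addon_id'] for el in dict(r.json())['results']])\n",
"\n",
"def get_tests_local(p, active=driver_active_tests):\n",
"    # Same logic as get_tests, but using the pre-fetched set (no HTTP call per ping).\n",
"    result = p.get('payload/tests', None)\n",
"    if result is not None:\n",
"        p['active_tests'] = [x for x in result if x in active]\n",
"        p['payload/tests'] = None\n",
"    return p\n",
"\n",
"subset_fixed = (get_pings_properties(pings, ['clientId',\n",
"                                             'creationDate',\n",
"                                             'meta/submissionDate',\n",
"                                             'payload/tests'])\n",
"                .map(convert_date)\n",
"                .map(get_num_tests)\n",
"                .map(get_tests_local))\n",
"# The DataFrame would then be rebuilt from subset_fixed with the same Row mapping as above."
]
},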
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Clients with test pilot = 202\n",
"Clients with at least 1 test = 125\n",
"Clients with tabcentertest1@mozilla.com = 78\n",
"Clients with universal-search@mozilla.com = 51\n",
"Clients with @activity-streams = 0\n"
]
}
],
"source": [
"print \"Clients with test pilot = {}\".format(total_txp_gross)\n",
"print \"Clients with at least 1 test = {}\".format(total_txp_net)\n",
"for k, v in test_counts.iteritems():\n",
"    print \"Clients with {test} = {count}\".format(test=k, count=v)"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"# Fetch existing data"
]
},
{
"cell_type": "code",
"execution_count": 48,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cutoff date: 20160405\n",
"We should update data for 20160423\n",
"Starting dau 20160423 at 2016-05-10 05:51:27.879864\n",
"Finished dau 20160423 at 2016-05-10 05:52:22.512656\n",
"Starting mau 20160423 at 2016-05-10 05:52:22.513293\n",
"Finished mau 20160423 at 2016-05-10 05:53:16.216172\n",
"Starting test dau 20160423 at 2016-05-10 05:53:16.220592\n"
]
},
{
"ename": "Py4JJavaError",
"evalue": "An error occurred while calling o501.count.\n: org.apache.spark.SparkException: Job aborted due to stage failure: Task 401 in stage 45.0 failed 4 times, most recent failure: Lost task 401.3 in stage 45.0 (TID 9004, ip-172-31-23-223.us-west-2.compute.internal): org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\tat java.lang.Thread.run(Thread.java:745)\n\nDriver stacktrace:\n\tat org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)\n\tat scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)\n\tat scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)\n\tat org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat scala.Option.foreach(Option.scala:236)\n\tat org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)\n\tat org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)\n\tat org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1929)\n\tat org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:927)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)\n\tat org.apache.spark.rdd.RDD.withScope(RDD.scala:316)\n\tat org.apache.spark.rdd.RDD.collect(RDD.scala:926)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:166)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)\n\tat org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2125)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1537)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1544)\n\tat 
org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1554)\n\tat org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1553)\n\tat org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2138)\n\tat org.apache.spark.sql.DataFrame.count(DataFrame.scala:1553)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat java.lang.reflect.Method.invoke(Method.java:606)\n\tat py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)\n\tat py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)\n\tat py4j.Gateway.invoke(Gateway.java:259)\n\tat py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)\n\tat py4j.commands.CallCommand.execute(CallCommand.java:79)\n\tat py4j.GatewayConnection.run(GatewayConnection.java:209)\n\tat java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat 
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\t... 1 more\n",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mPy4JJavaError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-48-ba993ee73064>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 9\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 10\u001b[0m \u001b[1;31m#%time update_engagement_csv(dataset_w_tests, engagement_basename, new_engagement_basename, cutoff_days=35)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 11\u001b[1;33m \u001b[0mget_ipython\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmagic\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mu'time update_engagement_csv(dataset, engagement_basename, new_engagement_basename, cutoff_days=35)'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc\u001b[0m in \u001b[0;36mmagic\u001b[1;34m(self, arg_s)\u001b[0m\n\u001b[0;32m 2334\u001b[0m \u001b[0mmagic_name\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0m_\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmagic_arg_s\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0marg_s\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpartition\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m' '\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2335\u001b[0m \u001b[0mmagic_name\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmagic_name\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlstrip\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprefilter\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mESC_MAGIC\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2336\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_line_magic\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmagic_name\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmagic_arg_s\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2337\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2338\u001b[0m \u001b[1;31m#-------------------------------------------------------------------------\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc\u001b[0m in \u001b[0;36mrun_line_magic\u001b[1;34m(self, magic_name, line)\u001b[0m\n\u001b[0;32m 2255\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'local_ns'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msys\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_getframe\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstack_depth\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mf_locals\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2256\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbuiltin_trap\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2257\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2258\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2259\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m<decorator-gen-60>\u001b[0m in \u001b[0;36mtime\u001b[1;34m(self, line, cell, local_ns)\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/IPython/core/magic.pyc\u001b[0m in \u001b[0;36m<lambda>\u001b[1;34m(f, *a, **k)\u001b[0m\n\u001b[0;32m 191\u001b[0m \u001b[1;31m# but it's overkill for just that one bit of state.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 192\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mmagic_deco\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 193\u001b[1;33m \u001b[0mcall\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mlambda\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mk\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mk\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 194\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 195\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mcallable\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marg\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/IPython/core/magics/execution.pyc\u001b[0m in \u001b[0;36mtime\u001b[1;34m(self, line, cell, local_ns)\u001b[0m\n\u001b[0;32m 1161\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m==\u001b[0m\u001b[1;34m'eval'\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1162\u001b[0m \u001b[0mst\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mclock2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1163\u001b[1;33m \u001b[0mout\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0meval\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mglob\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlocal_ns\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1164\u001b[0m \u001b[0mend\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mclock2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1165\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m<timed eval>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n",
"\u001b[1;32m<ipython-input-47-07af3a94249f>\u001b[0m in \u001b[0;36mupdate_engagement_csv\u001b[1;34m(dataset, old_filename, new_filename, cutoff_days, date_format)\u001b[0m\n\u001b[0;32m 103\u001b[0m \u001b[0mtest_df\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdataset\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0marray_contains\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mactive_tests\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 104\u001b[0m \u001b[1;32mprint\u001b[0m \u001b[1;34m\"Starting test dau {} at {}\"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtarget_day\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutcnow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 105\u001b[1;33m \u001b[0mrecord\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'dau_'\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mkey\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mstandards\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdau\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest_df\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_day\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 106\u001b[0m \u001b[1;32mprint\u001b[0m \u001b[1;34m\"Finished test dau {} at {}\"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtarget_day\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutcnow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 107\u001b[0m \u001b[1;32mprint\u001b[0m \u001b[1;34m\"Starting test mau {} at {}\"\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtarget_day\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mutcnow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/moztelemetry/standards.pyc\u001b[0m in \u001b[0;36mdau\u001b[1;34m(dataframe, target_day, future_days, date_format)\u001b[0m\n\u001b[0;32m 62\u001b[0m filtered = filter_date_range(dataframe, act_col, min_activity, max_activity,\n\u001b[0;32m 63\u001b[0m sub_col, min_submission, max_submission)\n\u001b[1;32m---> 64\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mcount_distinct_clientids\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfiltered\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 65\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 66\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mmau\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdataframe\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_day\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpast_days\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m28\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfuture_days\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdate_format\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"%Y%m%d\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/moztelemetry/standards.pyc\u001b[0m in \u001b[0;36mcount_distinct_clientids\u001b[1;34m(dataframe)\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 45\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mcount_distinct_clientids\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdataframe\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 46\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mdataframe\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mselect\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'clientId'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdistinct\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 47\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 48\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mdau\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdataframe\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_day\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfuture_days\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdate_format\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"%Y%m%d\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/usr/lib/spark/python/pyspark/sql/dataframe.py\u001b[0m in \u001b[0;36mcount\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 267\u001b[0m \u001b[1;36m2\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 268\u001b[0m \"\"\"\n\u001b[1;32m--> 269\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_jdf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 270\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 271\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mignore_unicode_prefix\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/py4j/java_gateway.pyc\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args)\u001b[0m\n\u001b[0;32m 536\u001b[0m \u001b[0manswer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgateway_client\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msend_command\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcommand\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 537\u001b[0m return_value = get_return_value(answer, self.gateway_client,\n\u001b[1;32m--> 538\u001b[1;33m self.target_id, self.name)\n\u001b[0m\u001b[0;32m 539\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 540\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mtemp_arg\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mtemp_args\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/usr/lib/spark/python/pyspark/sql/utils.py\u001b[0m in \u001b[0;36mdeco\u001b[1;34m(*a, **kw)\u001b[0m\n\u001b[0;32m 43\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mdeco\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 45\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 46\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mpy4j\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mprotocol\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mPy4JJavaError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 47\u001b[0m \u001b[0ms\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mjava_exception\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtoString\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m/home/hadoop/anaconda2/lib/python2.7/site-packages/py4j/protocol.pyc\u001b[0m in \u001b[0;36mget_return_value\u001b[1;34m(answer, gateway_client, target_id, name)\u001b[0m\n\u001b[0;32m 298\u001b[0m raise Py4JJavaError(\n\u001b[0;32m 299\u001b[0m \u001b[1;34m'An error occurred while calling {0}{1}{2}.\\n'\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 300\u001b[1;33m format(target_id, '.', name), value)\n\u001b[0m\u001b[0;32m 301\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 302\u001b[0m raise Py4JError(\n",
"\u001b[1;31mPy4JJavaError\u001b[0m: An error occurred while calling o501.count.\n: org.apache.spark.SparkException: Job aborted due to stage failure: Task 401 in stage 45.0 failed 4 times, most recent failure: Lost task 401.3 in stage 45.0 (TID 9004, ip-172-31-23-223.us-west-2.compute.internal): org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\tat java.lang.Thread.run(Thread.java:745)\n\nDriver stacktrace:\n\tat org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)\n\tat scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)\n\tat scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)\n\tat org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)\n\tat scala.Option.foreach(Option.scala:236)\n\tat org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)\n\tat org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)\n\tat org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)\n\tat org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)\n\tat org.apache.spark.SparkContext.runJob(SparkContext.scala:1929)\n\tat org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:927)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)\n\tat org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)\n\tat org.apache.spark.rdd.RDD.withScope(RDD.scala:316)\n\tat org.apache.spark.rdd.RDD.collect(RDD.scala:926)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:166)\n\tat org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)\n\tat org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)\n\tat org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2125)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1537)\n\tat org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1544)\n\tat 
org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1554)\n\tat org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1553)\n\tat org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2138)\n\tat org.apache.spark.sql.DataFrame.count(DataFrame.scala:1553)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)\n\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\n\tat java.lang.reflect.Method.invoke(Method.java:606)\n\tat py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)\n\tat py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:381)\n\tat py4j.Gateway.invoke(Gateway.java:259)\n\tat py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:133)\n\tat py4j.commands.CallCommand.execute(CallCommand.java:79)\n\tat py4j.GatewayConnection.run(GatewayConnection.java:209)\n\tat java.lang.Thread.run(Thread.java:745)\nCaused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 111, in main\n process()\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/worker.py\", line 106, in process\n serializer.dump_stream(func(split_index, iterator), outfile)\n File \"/mnt/yarn/usercache/hadoop/appcache/application_1462820223101_0005/container_1462820223101_0005_01_000005/pyspark.zip/pyspark/serializers.py\", line 263, in dump_stream\n vs = list(itertools.islice(iterator, batch))\n File \"<ipython-input-5-4b11284a51ec>\", line 9, in <lambda>\n File \"<ipython-input-2-8896d6d611da>\", line 32, in process_pings\n File \"<ipython-input-2-8896d6d611da>\", line 21, in get_tests\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/requests/models.py\", line 808, in json\n return complexjson.loads(self.text, **kwargs)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\n return _default_decoder.decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\n obj, end = self.raw_decode(s)\n File \"/home/hadoop/anaconda2/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\n return self.scan_once(s, idx=_w(s, idx).end())\nJSONDecodeError: Expecting value: line 1 column 1 (char 0)\n\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRDD.scala:166)\n\tat org.apache.spark.api.python.PythonRunner$$anon$1.<init>(PythonRDD.scala:207)\n\tat org.apache.spark.api.python.PythonRunner.compute(PythonRDD.scala:125)\n\tat org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:70)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat 
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)\n\tat org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)\n\tat org.apache.spark.rdd.RDD.iterator(RDD.scala:270)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)\n\tat org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)\n\tat org.apache.spark.scheduler.Task.run(Task.scala:89)\n\tat org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\t... 1 more\n"
]
}
],
"source": [
"try:\n",
" transfer.download_file(data_bucket, engagement_key, engagement_basename)\n",
"except botocore.exceptions.ClientError as e:\n",
" # If the file wasn't there, that's ok. Otherwise, abort!\n",
" if e.response['Error']['Code'] != \"404\":\n",
" raise e\n",
" else:\n",
" print \"Did not find an existing file at '{}'\".format(engagement_key)\n",
"\n",
"#%time update_engagement_csv(dataset_w_tests, engagement_basename, new_engagement_basename, cutoff_days=35)\n",
"%time update_engagement_csv(dataset, engagement_basename, new_engagement_basename, cutoff_days=35)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"## Upload the updated csv file to S3\n",
"\n",
"# Update the day-specific file:\n",
"transfer.upload_file(new_engagement_basename, data_bucket, new_engagement_key)\n",
"\n",
"# Update the \"main\" file\n",
"transfer.upload_file(new_engagement_basename, data_bucket, engagement_key)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.11"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
# coding: utf-8
# # Telemetry - Telemetry KPI Validity Check
#
# In this notebook we follow the standard for computing MAU and DAU as captured in [this notebook](https://gist.github.com/mreid-moz/8b2c2b1c6594d658ca5e). This is to address [bug #1264049](https://bugzilla.mozilla.org/show_bug.cgi?id=1264049). This notebook will create a csv that will be made available on [sql.telemetry.mozilla.org](https://sql.telemetry.mozilla.org).
# In[1]:
import boto3, botocore
from requests import get as fetch
from collections import defaultdict
from pyspark.sql import Row
import csv, os.path, json
from datetime import datetime as dt, timedelta, date
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark.sql.functions import *
from boto3.s3.transfer import S3Transfer
from moztelemetry import get_pings, get_pings_properties, standards
get_ipython().magic(u'pylab inline')
# ## Define functions for the task
# In[47]:
def fmt(the_date, date_format="%Y%m%d"):
return dt.strftime(the_date, date_format)
def convert_date(p):
try:
p['creationDate'] = dt.strptime(p.get('creationDate'), "%Y-%m-%dT%H:%M:%S.%fZ")
if p['creationDate']:
p['creationDate'] = standards.unix_time_nanos(p['creationDate'])
return p
except Exception as e:
return e
def get_num_tests(p):
    # Guard against pings with no payload/tests field; len(None) would raise a TypeError.
    result = p.get('payload/tests', None)
    p['num_tests'] = len(result) if result is not None else 0
    return p
def get_tests(p):
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = set([el['addon_id'] for el in dict(r.json())['results']])
result = p.get('payload/tests', None)
if result is not None:
p['active_tests'] = [x for x in result if x in active_tests] # XXX Need to make sure they haven't uninstalled
p['payload/tests'] = None
return p
def process_pings(p):
p = convert_date(p)
p = get_num_tests(p)
p = get_tests(p)
return p
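# A possible hardening (a sketch, not part of the original analysis): the error output
# captured in the notebook above comes from get_tests() calling the Test Pilot API once
# per ping inside the Spark workers and failing when the response body is not JSON.
# Fetching the experiment list once on the driver and passing it in avoids both the
# repeated requests and the worker-side JSONDecodeError. The helper names below are
# illustrative only.
def get_tests_from_set(p, active_test_ids):
    # active_test_ids: a plain set of addon ids fetched once on the driver.
    result = p.get('payload/tests', None)
    if result is not None:
        p['active_tests'] = [x for x in result if x in active_test_ids]
        p['payload/tests'] = None
    return p

def process_pings_with_tests(p, active_test_ids):
    p = convert_date(p)
    p = get_num_tests(p)
    p = get_tests_from_set(p, active_test_ids)
    return p

# Example wiring (using the same `subset` RDD built further down):
# active_test_ids = set(el['addon_id'] for el in fetch('https://testpilot.firefox.com/api/experiments?format=json').json()['results'])
# subset = subset.map(lambda p: process_pings_with_tests(p, active_test_ids))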
def should_be_updated(record,
target_col="day",
generated_col="generated_on",
date_format="%Y%m%d"):
target = dt.strptime(record[target_col], date_format)
generated = dt.strptime(record[generated_col], date_format)
# Don't regenerate data that was already updated today.
today = fmt(dt.utcnow(), date_format)
if record[generated_col] >= today:
return False
diff = generated - target
return diff.days <= 10
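# Quick illustration of should_be_updated (hypothetical dates derived from "now"):
# a row generated within 10 days of its target day, and not already regenerated today,
# is eligible for a refresh.
example_row = {"day": fmt(dt.utcnow() - timedelta(5)),
               "generated_on": fmt(dt.utcnow() - timedelta(3))}
print "should_be_updated example (expected True):", should_be_updated(example_row)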
def update_engagement_csv(dataset, old_filename, new_filename,
cutoff_days=30, date_format="%Y%m%d"):
cutoff_date = dt.utcnow() - timedelta(cutoff_days)
cutoff = fmt(cutoff_date, date_format)
print "Cutoff date: {}".format(cutoff)
fields = ["day", "dau", "mau", "generated_on"]
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = [el['addon_id'] for el in dict(r.json())['results']]
active_test_names = [x.replace('mozilla.com', '').replace('@', '') for x in active_tests]
fields.extend(["dau_" + x for x in active_test_names])
fields.extend(["mau_" + x for x in active_test_names])
should_write_header = True
potential_updates = {}
# Carry over rows we won't touch
if os.path.exists(old_filename):
with open(old_filename) as csv_old:
reader = csv.DictReader(csv_old)
with open(new_filename, "w") as csv_new:
writer = csv.DictWriter(csv_new, fields)
writer.writeheader()
should_write_header = False
for row in reader:
if row['day'] < cutoff:
writer.writerow(row)
else:
potential_updates[row['day']] = row
with open(new_filename, "a") as csv_new:
writer = csv.DictWriter(csv_new, fields)
if should_write_header:
writer.writeheader()
for i in range(cutoff_days, 0, -1):
target_day = fmt(dt.utcnow() - timedelta(i), date_format)
if target_day in potential_updates and not should_be_updated(potential_updates[target_day]):
# It's fine as-is.
writer.writerow(potential_updates[target_day])
else:
# Update it.
print "We should update data for {}".format(target_day)
record = {"day": target_day, "generated_on": fmt(dt.utcnow(), date_format)}
print "Starting dau {} at {}".format(target_day, dt.utcnow())
record["dau"] = standards.dau(dataset, target_day)
print "Finished dau {} at {}".format(target_day, dt.utcnow())
print "Starting mau {} at {}".format(target_day, dt.utcnow())
record["mau"] = standards.mau(dataset, target_day)
print "Finished mau {} at {}".format(target_day, dt.utcnow())
for el in active_tests:
key = el.replace('mozilla.com', '').replace("@", "")
test_df = dataset[array_contains(dataset.active_tests, el)]
print "Starting test dau {} at {}".format(target_day, dt.utcnow())
record['dau_' + key] = standards.dau(test_df, target_day)
print "Finished test dau {} at {}".format(target_day, dt.utcnow())
print "Starting test mau {} at {}".format(target_day, dt.utcnow())
record["mau_" + key] = standards.mau(test_df, target_day)
print "Finished test mau {} at {}".format(target_day, dt.utcnow())
                print record
                writer.writerow(record)  # persist the recomputed row; without this it is only printed
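# For reference, a sketch of the per-test column naming and the resulting CSV layout
# (the addon id here is made up; real column names depend on whatever experiments the
# Test Pilot API returns at run time):
example_addon_id = "exampletest@mozilla.com"  # hypothetical addon id
example_column = "dau_" + example_addon_id.replace('mozilla.com', '').replace('@', '')
print "example per-test column:", example_column  # -> dau_exampletest
# The CSV written by update_engagement_csv then looks like:
#   day,dau,mau,generated_on,dau_<test>,...,mau_<test>,...
# Rows older than cutoff_days are carried over untouched; newer rows are recomputed
# with standards.dau / standards.mau unless they were already regenerated today.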
# # Configure task variables
# In[3]:
data_bucket = "net-mozaws-prod-us-west-2-pipeline-analysis"
engagement_basename = "testpilot_engagement_ratio.csv"
new_engagement_basename = "testpilot_engagement_ratio.{}.csv".format(dt.strftime(dt.utcnow(), "%Y%m%d"))
s3path = "mreid/TxP_maudau"
data_version = "v1"
# Store the latest complete file in a subdir for hive / presto compatibility
engagement_key = "{}/{}/{}".format(s3path, data_version, engagement_basename)
new_engagement_key = "{}/{}".format(s3path, new_engagement_basename)
client = boto3.client('s3', 'us-west-2')
transfer = S3Transfer(client)
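# Sanity check of the derived S3 locations (these follow directly from the settings
# above; the dated key picks up whatever UTC day the notebook runs on):
print "engagement_key:     ", engagement_key      # mreid/TxP_maudau/v1/testpilot_engagement_ratio.csv
print "new_engagement_key: ", new_engagement_key  # mreid/TxP_maudau/testpilot_engagement_ratio.YYYYMMDD.csv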
# # Retrieve list of current tests in the field
# In[4]:
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = [el['addon_id'] for el in dict(r.json())['results']]
#active_tests = dict.fromkeys([el['addon_id'] for el in dict(r.json())['results']])
#active_tests = {key: 0 for key in active_tests}
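# A slightly more defensive variant of the fetch (a sketch): the JSONDecodeError in the
# notebook output above suggests the API can hand back a non-JSON body; failing fast on
# HTTP error statuses makes that failure mode easier to spot than a parse error deep
# inside a Spark task.
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
r.raise_for_status()  # raise immediately on a 4xx/5xx instead of trying to parse an error page
active_tests = [el['addon_id'] for el in r.json()['results']]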
# # Retrieve TxP pings and create DataFrame
# In[5]:
pings = get_pings(sc, doc_type="testpilot", app="Firefox")
subset = get_pings_properties(pings, ["clientId",
"creationDate",
"meta/submissionDate",
"payload/tests"])
# Convert date string to appropriate format and count number of current tests
subset = subset.map(lambda p : process_pings(p))
# In[6]:
subset.take(3)
# In[10]:
sqlContext = SQLContext(sc)
rows = subset.map(lambda p: Row(clientId=p['clientId'],
activityTimestamp=p['creationDate'],
submission_date_s3=p['meta/submissionDate'],
num_tests=p['num_tests'],
active_tests=p['active_tests']))
dataset = sqlContext.createDataFrame(rows)
dataset.take(1)
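# The DataFrame built above should end up with one row per ping and roughly these
# columns (Spark 1.x orders Row keyword fields alphabetically): active_tests (array of
# addon ids), activityTimestamp (unix time in nanoseconds), clientId, num_tests,
# submission_date_s3. Printing the schema confirms what was actually inferred:
dataset.printSchema()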
# # Filter datasets for any tests and specific tests
# In[22]:
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = [el['addon_id'] for el in dict(r.json())['results']]
# In[33]:
fields = ["day", "dau_txp", "mau_txp", "generated_on"]
r = fetch('https://testpilot.firefox.com/api/experiments?format=json')
active_tests = [el['addon_id'] for el in dict(r.json())['results']]
active_tests = [x.replace('mozilla.com', '') for x in active_tests]
active_tests = [x.replace('@', '') for x in active_tests]
fields.extend(["dau_" + x for x in active_tests])
fields.extend(["mau_" + x for x in active_tests])
# In[15]:
total_txp_gross = dataset.select('clientId').distinct().count()
# In[16]:
total_txp_net = dataset[dataset.num_tests > 0].select('clientId').distinct().count()
# In[17]:
test_count_dfs = {}  # a plain dict is enough: every key is assigned explicitly below (and `pyspark` itself is never imported)
for el in active_tests:
test_count_dfs[el] = dataset[array_contains(dataset.active_tests, el)]
# In[18]:
test_counts = {key: 0 for key in active_tests}
for k, v in test_count_dfs.iteritems():
    test_counts[k] = v.select('clientId').distinct().count()  # same count as standards.count_distinct_clientids(v)
# In[19]:
print "Clients with test pilot = {}".format(total_txp_gross)
print "Clients with at least 1 test = {}".format(total_txp_net)
for k, v in test_counts.iteritems():
print "Clients with {test} = {count}".format(test=k, count=v)
# # Fetch existing data
# In[48]:
try:
transfer.download_file(data_bucket, engagement_key, engagement_basename)
except botocore.exceptions.ClientError as e:
# If the file wasn't there, that's ok. Otherwise, abort!
if e.response['Error']['Code'] != "404":
raise e
else:
print "Did not find an existing file at '{}'".format(engagement_key)
#%time update_engagement_csv(dataset_w_tests, engagement_basename, new_engagement_basename, cutoff_days=35)
get_ipython().magic(u'time update_engagement_csv(dataset, engagement_basename, new_engagement_basename, cutoff_days=35)')
# In[ ]:
## Upload the updated csv file to S3
# Update the day-specific file:
transfer.upload_file(new_engagement_basename, data_bucket, new_engagement_key)
# Update the "main" file
transfer.upload_file(new_engagement_basename, data_bucket, engagement_key)
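# Optional verification sketch: confirm both objects exist in S3 after the upload
# (head_object raises a ClientError if the key is missing).
client.head_object(Bucket=data_bucket, Key=engagement_key)
client.head_object(Bucket=data_bucket, Key=new_engagement_key)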
# In[ ]: