Navigation Menu

Skip to content

Instantly share code, notes, and snippets.

@mattwoodrow
Last active December 17, 2018 00:28
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save mattwoodrow/7bba3a9debb6372584e353e9ca7cc9a1 to your computer and use it in GitHub Desktop.
Save mattwoodrow/7bba3a9debb6372584e353e9ca7cc9a1 to your computer and use it in GitHub Desktop.
wr-content-frame-time-reason
Display the source blob
Display the rendered blob
Raw
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
# coding: utf-8
# In[1]:
import ujson as json
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from moztelemetry.dataset import Dataset
get_ipython().magic(u'matplotlib inline')
# We can look at the schema of the dataset we are interested in:
# In[2]:
# NOTE(review): `.schema` appears to list the filterable dimensions for the
# 'telemetry' source (docType, submissionDate, appUpdateChannel, ... -- the
# same keys used in the .where() calls below); confirm against moztelemetry docs.
Dataset.from_source('telemetry').schema
# Let's create a Dataset of Telemetry submissions for a given submission date:
# In[3]:
# Nightly-channel 'main' pings submitted after 2018-12-01.
pings_dataset = (
    Dataset.from_source('telemetry')
    .where(docType='main')
    .where(submissionDate=lambda x: x > '20181201')
    .where(appUpdateChannel="nightly")
)
# Select only the properties we need, then materialize the records.
# NOTE: sample=1. means the FULL population is taken (the original comment
# claimed a 10% sample, which did not match the code).
# In[4]:
pings = (
    pings_dataset
    .select(
        'clientId',
        buildId='application.buildId',
        # Fixed key typo: was 'device_resest'; the field is not read
        # elsewhere in this notebook, so the rename is safe.
        device_reset='payload.processes.gpu.histograms.DEVICE_RESET_REASON',
        frame_time_reason='payload.processes.gpu.histograms.CONTENT_FRAME_TIME_REASON.values',
        experiments='environment.experiments',
        osName='environment.system.os.name',
        gfx='environment.system.gfx')
    .records(sc, sample=1.)
)
# In[5]:
# Spark action: materializes the pipeline once to count the selected pings.
pings.count()
# Caching is fundamental as it allows for an iterative, real-time development workflow:
# In[6]:
cached = pings.cache()
# How many pings are we looking at?
# In[7]:
# Second count runs against the now-cached RDD.
cached.count()
# In[8]:
# Pings whose environment records participation in the WebRender pref-flip
# experiment (guard against a missing/empty experiments block first).
wrExperiment = cached.filter(
    lambda p: "experiments" in p and p["experiments"]
    and "prefflip-webrender-v1-3-1492568" in p["experiments"])
wrExperiment.count()
# In[9]:
# Restrict the cached set to pings that report gfx feature data, including
# the WebRender qualification status (single conjunction; the short-circuit
# preserves the original two-pass filtering semantics).
cached = cached.filter(
    lambda p: "features" in p["gfx"] and "wrQualified" in p["gfx"]["features"])
cached.count()
# In[10]:
wrQualified = cached.filter(lambda p: p["gfx"]["features"]["wrQualified"]["status"] == "available" )
wrQualified.count()
wrQualified = wrQualified.filter(lambda p: len(p["gfx"]["monitors"]) == 1 and p["gfx"]["monitors"][0]["refreshRate"] == 60)
# In[11]:
# Rebuild the experiment subset from the (now feature-filtered) cached RDD.
wrExperiment = cached.filter(
    lambda p: "experiments" in p and p["experiments"]
    and "prefflip-webrender-v1-3-1492568" in p["experiments"])
wrExperiment.map(lambda p: p["gfx"]["features"]["compositor"]).countByValue()
# In[12]:
# Keep only experiment clients whose hardware qualified for WebRender.
wrExperiment = wrExperiment.filter(
    lambda p: p["gfx"]["features"]["wrQualified"]["status"] == "available")
# (Single-monitor/60 Hz restriction intentionally left disabled here.)
# In[13]:
# Distribution of experiment branches among the qualified clients.
wrExperiment.map(
    lambda p: p["experiments"]["prefflip-webrender-v1-3-1492568"]["branch"]
).countByValue()
# In[14]:
treatment = wrExperiment.filter(lambda p: p["experiments"]["prefflip-webrender-v1-3-1492568"]["branch"] == "enabled")
control = wrExperiment.filter(lambda p: p["experiments"]["prefflip-webrender-v1-3-1492568"]["branch"] == "disabled")
treatment.count(), control.count()
# In[15]:
treatment.map(lambda p: p["gfx"]["features"]["compositor"]).countByValue()
# In[16]:
wrEnabled = treatment.filter(lambda p: p["gfx"]["features"]["compositor"] == "webrender")
wrDisabled = control.filter(lambda p: p["gfx"]["features"]["compositor"] == "d3d11")
wrEnabled.count(), wrDisabled.count()
# In[17]:
# Down-sample the (larger) disabled population to roughly match the enabled
# population.  The counts are hoisted into locals: every .count() is a full
# Spark action, and the original recomputed the same two counts for each of
# the three .sample() calls below.
enabled_count = wrEnabled.count()
disabled_count = wrDisabled.count()
disabled_fraction = enabled_count / (disabled_count * 1.0)
wrDisabled2 = wrDisabled.sample(False, disabled_fraction)
wrDisabled3 = wrDisabled.sample(False, disabled_fraction)
wrDisabled = wrDisabled.sample(False, disabled_fraction)
# In[18]:
wrDisabled3.count(), wrDisabled.count(), wrDisabled2.count(), wrEnabled.count()
# In[19]:
# sample() is only approximate, so equalize once more against the smaller
# side.  Fresh counts are needed here: both RDDs were re-sampled above.
enabled_n = wrEnabled.count()
disabled_n = wrDisabled.count()
desired_count = float(min(enabled_n, disabled_n))
wrEnabled = wrEnabled.sample(False, desired_count / float(enabled_n))
wrDisabled = wrDisabled.sample(False, desired_count / float(disabled_n))
(wrEnabled.count(), wrDisabled.count())
# In[20]:
def aggregate_series(s1, s2):
    """Combine two pandas Series by element-wise addition.

    Either argument may be None (the identity for the reduce), in which
    case the other argument is returned unchanged.  Labels present on only
    one side are treated as zero on the other via fill_value.
    """
    if s1 is None or s2 is None:
        return s1 if s2 is None else s2
    return s1.add(s2, fill_value=0)
def _total_frame_time_reasons(rdd):
    # Sum the CONTENT_FRAME_TIME_REASON bucket counts over all pings that
    # actually reported the histogram (missing/empty values are dropped).
    return (
        rdd
        .filter(lambda p: p['frame_time_reason'])
        .map(lambda p: pd.Series(p['frame_time_reason']))
        .reduce(aggregate_series)
    )


aggregated_enabled = _total_frame_time_reasons(wrEnabled)
aggregated_disabled = _total_frame_time_reasons(wrDisabled)
# In[37]:
# Absolute frame counts for the first four failure-reason buckets, side by side.
failure_labels = ["OnTime", "NoVsync", "MissedComposite", "SlowComposite"]
df = pd.DataFrame()
df['enabled'] = aggregated_enabled[:4]
df['disabled'] = aggregated_disabled[:4]
df.index = failure_labels
axes = df.plot(kind='bar', figsize=(15, 7))
axes.set_ylabel("Number of frames")
axes.set_xlabel("CONTENT_FRAME_TIME_REASON failure reason")
# In[38]:
# Totals across every bucket (not just the four plotted above).
(sum(aggregated_enabled), sum(aggregated_disabled))
# In[39]:
df
# In[40]:
# Same comparison, normalised to the percentage of each population's frames
# (vectorized division replaces the original per-element .map; the redundant
# [0:] full-slice before .sum() is dropped -- values are identical).
df = pd.DataFrame()
percent_enabled = 100.0 * aggregated_enabled / aggregated_enabled.sum()
percent_disabled = 100.0 * aggregated_disabled / aggregated_disabled.sum()
df['enabled'] = percent_enabled[:4]
df['disabled'] = percent_disabled[:4]
df.index = ["OnTime", "NoVsync", "MissedComposite", "SlowComposite"]
axes = df.plot(kind='bar', figsize=(15, 7))
axes.set_ylabel("Percentage of frames")
axes.set_xlabel("CONTENT_FRAME_TIME_REASON failure reason")
# In[41]:
df
# In[ ]:
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment