-
-
Save jkreileder/459cf1936e099e2e521cee7d2d4b7acb to your computer and use it in GitHub Desktop.
#!/usr/bin/env python3
"""Export Whoop recovery data to a GoldenCheetah/HRV4Training-compatible CSV."""
import requests  # HTTP client for the Whoop API
import json  # kept for the optional raw-data dump further below
from datetime import datetime  # timestamp parsing
import pytz  # timezone adjusting
import csv  # CSV summary output
import os  # home-directory expansion for the save path
#################################################################
# USER VARIABLES
# SECURITY NOTE(review): credentials are hard-coded; prefer reading them
# from environment variables so they are not committed to disk.
username = "user@gmail.com"
password = "password123"
save_directory = "~/"  # keep trailing slash
#################################################################
# GET ACCESS TOKEN
# Post credentials to the OAuth2 "password" grant endpoint.
r = requests.post("https://api-7.whoop.com/oauth/token", json={
    "grant_type": "password",
    "issueRefresh": False,
    "password": password,
    "username": username
})
# Anything but HTTP 200 means the credentials were rejected.
if r.status_code != 200:
    print("Fail - Credentials rejected.")
    exit()
else:
    print("Success - Credentials accepted")
# Parse the response body once and pull out the user id and bearer token
# needed by every later request (original parsed the JSON twice).
auth = r.json()
userid = auth['user']['id']
access_token = auth['access_token']
#################################################################
# GET DATA
# Download all cycles for the user; the very wide start/end window
# effectively requests the full account history.
url = 'https://api-7.whoop.com/users/{}/cycles'.format(userid)
params = {
    'start': '2000-01-01T00:00:00.000Z',
    'end': '2030-01-01T00:00:00.000Z'
}
headers = {
    'Authorization': 'bearer {}'.format(access_token)
}
r = requests.get(url, params=params, headers=headers)
# Anything but HTTP 200 means the user id / token pair was rejected.
if r.status_code != 200:
    print("Fail - User ID / auth token rejected.")
    exit()
else:
    print("Success - User ID / auth token accepted")
#################################################################
# PARSE/TRANSFORM DATA
# Decode the response body: a JSON list with one object per cycle.
data_raw = r.json()
# Takes a time and offset string and returns a timezone-corrected datetime string
def time_parse(time_string, offset_string):
    """Convert a UTC Whoop timestamp to a local wall-clock time string.

    time_string: ISO timestamp such as '2020-10-28T05:00:00.000Z'
                 (assumed UTC — TODO confirm against the Whoop API).
    offset_string: local UTC offset such as '+0200'.
    Returns 'YYYY-MM-DD HH:MM:SS' in local time.

    Trick: the offset's sign is flipped and appended to the bare
    timestamp, so converting the result back to UTC *adds* the original
    offset — i.e. it shifts the UTC instant to local wall-clock time.
    """
    from datetime import timezone  # stdlib equivalent of pytz.utc
    # Flip the sign of the offset.
    if '-' in offset_string:
        offset_string = offset_string.replace('-', '+')
    else:
        offset_string = offset_string.replace('+', '-')
    # Keep only 'YYYY-MM-DDTHH:MM:SS' (first 19 chars) and append the
    # inverted offset.  [:19] is safe even for inputs with no fractional
    # seconds; the original negative-index slice returned '' for
    # exactly-19-char inputs.
    time_string = time_string[:19] + offset_string
    # Parse as an aware datetime, normalize to UTC, format without tz.
    oldformat = '%Y-%m-%dT%H:%M:%S%z'
    newformat = '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(time_string, oldformat).astimezone(timezone.utc).strftime(newformat)
# Make data object: one summary record per Whoop cycle.
data_summary = []
# Iterate through data
for d in data_raw:
    # Record with the HRV4Training/GoldenCheetah column layout; fields
    # with no Whoop equivalent stay None.
    record = {
        'timestamp_measurement': None,
        'HR': None,
        'AVNN': None,
        'SDNN': None,
        'rMSSD': None,
        'pNN50': None,
        'LF': None,
        'HF': None,
        'HRV4T_Recovery_Points': None
    }
    # .get() keeps a cycle without 'recovery'/'sleep' keys from raising
    # KeyError (the original indexed them directly).
    recovery = d.get('recovery')
    sleep = d.get('sleep')
    # Recovery is only usable when an rMSSD value and a sleep timezone
    # offset are both present.
    if (recovery and
            'timestamp' in recovery and
            isinstance(recovery.get('heartRateVariabilityRmssd'), (int, float)) and
            sleep and
            sleep.get('sleeps') and
            sleep['sleeps'][0].get('timezoneOffset')):
        # This is the timestamp when Whoop processed sleep -
        # not the time of measurement
        record['timestamp_measurement'] = time_parse(
            recovery['timestamp'],
            sleep['sleeps'][0]['timezoneOffset'])
        # Whoop reports rMSSD in seconds; downstream tools expect ms.
        record['rMSSD'] = recovery['heartRateVariabilityRmssd'] * 1000.0
        if isinstance(recovery.get('restingHeartRate'), (int, float)):
            record['HR'] = recovery['restingHeartRate']
        # Recovery score: Whoop's 0-100 scaled to a 10-point scale.
        if isinstance(recovery.get('score'), (int, float)):
            record['HRV4T_Recovery_Points'] = recovery['score'] / 10.0
    # Append record to data summary
    data_summary.append(record)
#################################################################
# WRITE JSON RAW DATA FILE
'''
# Write json file
with open(save_directory + 'whoop_raw.json', 'w') as outfile:
    json.dump(data_raw, outfile)
print("Success - JSON raw data saved.")
'''
#################################################################
# WRITE CSV SUMMARY DATA FILE
# Column order matches the record dicts built above.  Hard-coding the
# list (instead of reading data_summary[0].keys()) avoids an IndexError
# when the API returns no cycles.
fieldnames = ['timestamp_measurement', 'HR', 'AVNN', 'SDNN', 'rMSSD',
              'pNN50', 'LF', 'HF', 'HRV4T_Recovery_Points']
# expanduser() makes the '~/' default work on every platform.
with open(os.path.expanduser(save_directory + 'whoop-goldencheetah.csv'), 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    # Header row, then one row per cycle in a single batched call.
    writer.writeheader()
    writer.writerows(data_summary)
print("Success - CSV summary data saved.")
This is great! Do you mind if I use some of the code and make some alterations for my own use?
I'm having trouble running this.
@theradlabs Feel free to use this :)
@BrentC03 Do you get an error message?
Possible issues I can imagine:
- You don't have the required modules installed
- You probably have to change
save_directory
if you're on Windows.
FYI if you're using Anaconda you'll need to change json to json5, everything worked fine after I made that change.
In case you're interested, I've documented a bit more of the whoop api spec as I've discovered it... https://github.com/pelo-tech/whoop-api-spec
@DovOps how did you determine these API calls?
Just simple chrome debugger activity on http://app.whoop.com and then started some experimentation when editing spec over on a web based openapi editor
@agawronski you can also see similar data structuring I did in a google sheet here: https://github.com/pelo-tech/whoop-google-sheets
@DovOps, cool, thanks. 👍
A+ worked great (on 2020-10-28) thanks!
I ran this just now, and I only got data for HR, rMSSD and HRV4T_Recovery_Points. Has anyone else experienced that as well?
@laitinen that's expected: these are the only values that make sense to import into Golden Cheetah.
Please note: I've canceled my Whoop subscription after 18 months (mainly to due to its accuracy issues, even with the bicep band, and doubts about the significance of its recovery score), so I'm not maintaining this anymore. Unless they change the API, it should continue to work as is, though.
I'm running this script now with Anaconda Python 3.7 and using the same login I use on the website and getting invalid credentials. Anyone else had this issue?
In case this helps, I just checked my API calls in the google sheets code that I posted above, and that still works fine so I don't believe there's been a change to the APIs.
In case this helps, I just checked my API calls in the google sheets code that I posted above, and that still works fine so I don't believe there's been a change to the APIs.
Thanks for getting back to me, worked when I ran the script again this morning. Not sure what the issue was, may've been related to logging onto the WHOOP site concurrently.
I ended up making a wrapper for the API in order to more easily export different kinds of data. You may find it useful here https://github.com/IanMcLaughlin19/unofficialWhoopAPI/
Cool - if I wrote the specs correctly, which I’m sure I didn’t do 100%, you should be able to generate a python (or any other language) client that works with the entire api. Swaggerhub lets you download that directly or via https://editor.swagger.io - might be worth a try so I can correct the schema If something doesn’t work.
@jkreileder this is super cool! Been looking to do something like this for a while. Do you mind if I use some of this code?
@tliggett Feel free to use it for whatever!
@jkreileder I am writing a paper for class and I would like to mention you in the acknowledgements, as this script was super helpful for my project. Would that be okay or would you rather me leave you out of it?
@tliggett that's ok with me. Thanks!
great stuff, i got inspired and create this one
https://gist.github.com/oscarsan/5a31185a364d29744e275070d643c307
It will fetch the workouts between 2 dates and create TCX file in order to import it to TrainingPeaks. GPS points not used.
Sorry, I know this is worded poorly, but you can get all your data, and it should help with your scripts.
Please request this too. If someone wants to clean up my wording and standardize this, please do. But the more people make this request, the more likely they are to automate the process.
Will everyone please contact them and request it, and get them to speed up their process.
Below is what they send me, and i requested 45 days free membership for the delay.
Please also explained the historical SPO2 data they can not provide is the most important data you need. And request a 45 day free membership
This is how I requested the data
I called and explained that the following services all allow access to download your personal data.
Google has it — it's called Takeout: https://takeout.google.com/?pli=1
And I told them the other health apps have it.
Samsung
Apple
Fitbit
And I told them that under my state and federal laws they're required to provide all data related to my health, and that I would submit an open records request and a medical records request, which they're required to release under HIPAA.
And that I would send those forms next if required. You can also remind them that they applied to be medically approved, which means they must follow HIPAA laws. Note: I am sure there are some other reasons that could be added.
They asked for a reason; I said I would just like an option to download all the data WHOOP stores about me, and to check it against my medical conditions.
They then opened a support ticket, and i sent in the same info.
this was their response.
Thanks for reaching out, and thank you for your patience while we responded back to your inquiry. My name is Jillian, and I work on the Data Privacy Team here at WHOOP. We take the privacy and security of our Members' data very seriously.
We do have a process that allows you to export your WHOOP data. Our data export currently generates up to four excel spreadsheets of data. The first is a "metrics" data sheet that includes a daily log of heart rate data collected by your strap throughout the day. The second is a "recovery" data sheet that includes: RHR, HRV, Recovery, and detailed sleep information. The third is a "workouts" data sheet that, to the extent, you have logged workout activities, includes: Workout Strain, Day Strain, Sport, Max HR, Average HR, and calories. The fourth is a "journal" data sheet that, to the extent, you have logged journal responses, will include responses and associated notes. We have recently added skin temperature to our exports; however, we do not currently have historical SPO2. We are working with our internal team to have these added.
In order to export your data, we first need to validate your account. Could you please use the following link to log-in and submit a “Download Request”? https://app.whoop.com/settings/data-management
Below is what they send me, and i requested 45 days free membership for the delay.
Thank you for completing the user verification and submitting your data export request. The reason why the link is not available on our website is because we have a designated team who generates these data exports on the backend. I apologize for any inconvenience that this may cause.
We will aim to fulfill your request within the next 45 days, but we may need additional time depending on how complex your request is for us to complete and the number of requests we receive.
If we need additional time we will inform you within 45 days after we receive your request. Please let us know if you have any additional questions or concerns in the meantime!
Thank you for completing the user verification and submitting your data export request. The reason why the link is not available on our website is because we have a designated team who generates these data exports on the backend. I apologize for any inconvenience that this may cause.
We will aim to fulfill your request within the next 45 days, but we may need additional time depending on how complex your request is for us to complete and the number of requests we receive.
If we need additional time we will inform you within 45 days after we receive your request. Please let us know if you have any additional questions or concerns in the meantime!
This is awesome, thank you for sharing. Is there a way to grab the heart_rate object that tracks all heart rate datapoints? I tried modifying the URL to do so but for some reason I'm getting an 'auth token rejected' error