Small script to get your energy data from slimmemeterportal.nl.
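The script reads its settings from an ini file (the code below looks for grab_days.ini in the working directory); fill in your own login, password, and download directory: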
[DEFAULT]
login = [YOUR LOGIN]
password = [YOUR PASSWORD]
START_URL = https://slimmemeterportal.nl/login
LOGIN_URL = https://slimmemeterportal.nl/user_session
DOWNLOAD = [LOCATION TO STORE THE FILES]
# scrape imports
from bs4 import BeautifulSoup, SoupStrainer
import configparser
import datetime
import glob
import lxml
import os
import requests
import sys
import time
# Cloned and adapted from https://gist.github.com/tvdsluijs/a4ad638f11283a3d82763a47ae210109
def get_data(day, noclobber=True):
    print("Grabbing {}".format(day))
    day_epoch = int(time.mktime(day.timetuple()))
    try:
        if noclobber:
            # Check whether files already exist for this day.
            pattern = str(day) + '_*'
            pattern = os.path.join(DOWNLOAD, pattern)
            filenames = glob.glob(pattern)
            if len(filenames) > 0:
                raise ValueError("{} found, quitting".format(day))
        # Grab data for each commodity
        for commodity in commodities:
            url = "https://slimmemeterportal.nl/cust/consumption/chart.xls?"
            url += "commodity={}&datatype=consumption&range={}&timeslot_start={}".format(commodity, seconds_in_a_day, day_epoch)
            response = s.get(url)
            # Add date to filename, as the default isn't very sortable
            filename = str(day) + '_' + response.headers.get("Content-Disposition").split("filename=")[1].strip('"')
            filename = os.path.join(DOWNLOAD, filename)
            with open(filename, 'wb') as f:
                f.write(response.content)
    except Exception as e:
        print(e)
        sys.exit()
# If days are given on the command line, use those as input
days = []
if len(sys.argv) > 1:
    days = sys.argv[1:]
# Get defaults from the ini file.
# Note: configparser lowercases option names, so START_URL in the ini is read back as 'start_url'.
config = configparser.ConfigParser()
config.read('grab_days.ini')
login = config['DEFAULT']['login']
password = config['DEFAULT']['password']
START_URL = config['DEFAULT']['start_url']
LOGIN_URL = config['DEFAULT']['login_url']
DOWNLOAD = config['DEFAULT']['download']
seconds_in_a_day = 60*60*24
commodities = ['power', 'gas']
# Set up session
# Get authenticity token from start URL
s = requests.Session()
response = s.get(START_URL)
soup = BeautifulSoup(response.text, "lxml")
authenticity_token = soup.find('input', {'name': 'authenticity_token'}).get('value')
# Log in
payload = {
    "utf8": "✓",
    "authenticity_token": authenticity_token,
    "user_session[email]": login,
    "user_session[password]": password,
    "commit": "Inloggen"
}
response = s.post(LOGIN_URL, data=payload)
# Data download
# If no days were given, try to download all historic data
if len(days) == 0:
    # Get first day (yesterday)
    day = datetime.date.today()
    day = day - datetime.timedelta(1)
    # Get all data from the past
    while True:
        get_data(day)
        # Ask for previous day in next run
        day = day - datetime.timedelta(1)
        # Wait a bit, so we do not hammer the site
        time.sleep(5)
else:
    # Grab the entered days, overwriting older ones
    for day in days:
        day = datetime.date.fromisoformat(day)
        get_data(day, noclobber=False)
        # Wait a bit, so we do not hammer the site
        time.sleep(5)
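Usage: run the script without arguments to walk backwards from yesterday and fetch all available history; it exits as soon as it finds a day whose files already exist in the download directory, or when a download fails. Pass one or more ISO dates (YYYY-MM-DD) on the command line to re-download just those days, overwriting any existing files. Note that datetime.date.fromisoformat requires Python 3.7 or newer.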