Skip to content

Instantly share code, notes, and snippets.

@tmonck
Last active May 29, 2022 04:15
Show Gist options
  • Star 9 You must be signed in to star a gist
  • Fork 2 You must be signed in to fork a gist
  • Save tmonck/6a84f9d8999a4e17de1c9b6888d522be to your computer and use it in GitHub Desktop.
Save tmonck/6a84f9d8999a4e17de1c9b6888d522be to your computer and use it in GitHub Desktop.
This gist allows you to have a custom service that you can use in automations to delete snapshots from homeassistant. The __init__.py file and the services .yaml file should be put in <path_to_config>/custom_components/clean_up_backups_service. If you are running >= 0.92 version of homeassistant and haven't followed these instructions you will h…
"""
Support for automating the deletion of snapshots.
"""
import logging
import pytz
from dateutil.parser import parse
import asyncio
import aiohttp
import async_timeout
from urllib.parse import urlparse
from homeassistant.const import (CONF_HOST, CONF_TOKEN)
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'clean_up_backups_service'
ATTR_NAME = 'number_of_backups_to_keep'
DEFAULT_NUM = 0
# Configuration schema: requires the Home Assistant base URL and a long-lived
# access token; the retention count is optional and defaults to 0, which the
# service treats as "keep everything / do nothing".
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_TOKEN): cv.string,
        vol.Optional(ATTR_NAME, default=DEFAULT_NUM): int
    }),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the clean_up_backups_service component.

    Registers a ``clean_up_backups_service.clean_up`` service that keeps the
    newest ``number_of_backups_to_keep`` snapshots and deletes the rest
    through the Hass.io supervisor REST API (``<host>/api/hassio/``).

    Returns True so Home Assistant marks the component as set up.
    """
    conf = config[DOMAIN]
    hassio_url = '{}/api/hassio/'.format(conf.get(CONF_HOST))
    auth_token = conf.get(CONF_TOKEN)
    num_snapshots_to_keep = conf.get(ATTR_NAME, DEFAULT_NUM)
    headers = {'authorization': "Bearer {}".format(auth_token)}

    def isgoodipv4(s):
        """Return True if *s* (optionally ``host:port``) is a dotted-quad IPv4 address."""
        if ':' in s:
            s = s.split(':')[0]
        pieces = s.split('.')
        if len(pieces) != 4:
            return False
        try:
            return all(0 <= int(p) < 256 for p in pieces)
        except ValueError:
            return False

    # A bare IPv4 host cannot present a matching TLS certificate, so skip
    # certificate verification in that case; verify for real hostnames.
    # Hoisted out of the request calls: it is loop-invariant.
    verify_ssl = not isgoodipv4(urlparse(hassio_url).netloc)

    async def async_get_snapshots():
        """Fetch the snapshot list from the supervisor API.

        Returns the list of snapshot dicts, or None when the request fails
        (errors are logged, never raised to the caller).
        """
        _LOGGER.info('Calling get snapshots')
        try:
            # `async with` closes the session on every path; the explicit
            # session.close() calls the old code sprinkled around were
            # redundant.
            async with aiohttp.ClientSession(raise_for_status=True) as session:
                # async_timeout.timeout must be used as an *async* context
                # manager; the `loop=` argument is gone in current releases.
                async with async_timeout.timeout(10):
                    resp = await session.get(
                        hassio_url + 'snapshots', headers=headers, ssl=verify_ssl)
                    data = await resp.json()
                    return data['data']['snapshots']
        except aiohttp.ClientError:
            _LOGGER.error("Client error on calling get snapshots", exc_info=True)
        except asyncio.TimeoutError:
            _LOGGER.error("Client timeout error on get snapshots", exc_info=True)
        except Exception:
            _LOGGER.error("Unknown exception thrown", exc_info=True)
        return None

    async def async_remove_snapshots(stale_snapshots):
        """Delete each snapshot in *stale_snapshots* via the supervisor API.

        Failures are logged per snapshot and do not stop the remaining
        deletions.
        """
        # One session reused for all deletions instead of one per snapshot.
        async with aiohttp.ClientSession(raise_for_status=True) as session:
            for snapshot in stale_snapshots:
                slug = snapshot['slug']
                _LOGGER.info('Attempting to remove snapshot: slug=%s', slug)
                try:
                    async with async_timeout.timeout(10):
                        resp = await session.post(
                            hassio_url + 'snapshots/' + slug + "/remove",
                            headers=headers, ssl=verify_ssl)
                        res = await resp.json()
                    if res['result'].lower() == "ok":
                        _LOGGER.info("Deleted snapshot %s", slug)
                    else:
                        # BUG FIX: the old code logged `res.status_code`, but
                        # `res` is a plain dict from resp.json() and has no
                        # such attribute, so the failure branch itself raised
                        # AttributeError. Log the HTTP status and body instead.
                        _LOGGER.warning(
                            "Failed to delete snapshot %s: HTTP %s %s",
                            slug, resp.status, res)
                except aiohttp.ClientError:
                    _LOGGER.error("Client error on calling delete snapshot", exc_info=True)
                except asyncio.TimeoutError:
                    _LOGGER.error("Client timeout error on delete snapshot", exc_info=True)
                except Exception:
                    _LOGGER.error("Unknown exception thrown on calling delete snapshot", exc_info=True)

    async def async_handle_clean_up(call):
        """Handle a `clean_up` service call: delete all but the newest N snapshots."""
        # Allow the service call to override the configured retention count.
        num_to_keep = call.data.get(ATTR_NAME, num_snapshots_to_keep)
        _LOGGER.info('Number of snapshots we are going to keep: %s', str(num_to_keep))
        if num_to_keep == 0:
            _LOGGER.info('Number of snapshots to keep was zero which is default so no snapshots will be removed')
            return
        # BUG FIX: async_get_snapshots returns None on failure; the old code
        # then iterated None and crashed. Fall back to an empty list.
        snapshots = await async_get_snapshots() or []
        _LOGGER.info('Snapshots: %s', snapshots)
        # Normalise naive timestamps to UTC so the sort never compares naive
        # and aware datetimes (which raises TypeError).
        for snapshot in snapshots:
            d = parse(snapshot["date"])
            if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
                _LOGGER.info("Naive DateTime found for snapshot %s, setting to UTC...", snapshot["slug"])
                snapshot["date"] = d.replace(tzinfo=pytz.utc).isoformat()
        # Newest first; everything after the first num_to_keep entries is stale.
        snapshots.sort(key=lambda item: parse(item["date"]), reverse=True)
        stale_snapshots = snapshots[num_to_keep:]
        _LOGGER.info('Stale Snapshots: %s', stale_snapshots)
        await async_remove_snapshots(stale_snapshots)

    hass.services.async_register(DOMAIN, 'clean_up', async_handle_clean_up)
    return True
# automations.yaml — run the clean-up service every night at 03:00.
- alias: Daily snapshot clean up
  initial_state: 'on'
  trigger:
    platform: time
    at: '03:00:00'
  # No conditions: the action runs whenever the trigger fires.
  condition:
  action:
    - service: clean_up_backups_service.clean_up
      # Data is optional if you have defined the number of snapshots to keep in the configuration.yaml.
      data:
        # If this property is passed to the service it will be used regardless of what you have in the configuration.yaml
        number_of_backups_to_keep: 7
# configuration.yaml — component configuration; host and token values are
# pulled from secrets.yaml via the !secret tag.
clean_up_backups_service:
  host: !secret base_url
  token: !secret clean_up_token
  number_of_backups_to_keep: 7
# services.yaml — service description shown in the Home Assistant UI.
clean_up:
  description: Cleans up the snapshots taken by home assistant
  fields:
    number_of_backups_to_keep:
      description: Number of snapshots you wish to keep.
      # Fixed typo: the key was misspelled "examle", so Home Assistant
      # never displayed the example value.
      example: 3
@kvvoff
Copy link

kvvoff commented Dec 31, 2018

I don't understand what host and token mean in configuration.yaml. What needs to be written there?

@miguelgilmartinez
Copy link

miguelgilmartinez commented Jul 29, 2019

I don't understand what host and token mean in configuration.yaml. What needs to be written there?

In your secrets.yaml you write that info.

base_url: The URL at which Home Assistant is reachable on the internet. For example: https://hass-example.duckdns.org:8123.
clean_up_token: Something like  zte9QkPNevgNuen-OA6NZdLQQmSmJ0BcdA_pQxcMB8Y... created from HA profile page and pasted here

@tmonck
Copy link
Author

tmonck commented Sep 2, 2019

This has been made into a repository and HACS integration You can find the repo here

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment