Skip to content

Instantly share code, notes, and snippets.

Last active March 12, 2023 01:36
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save RhetTbull/06617e33fe8645f75260311ab582fb6d to your computer and use it in GitHub Desktop.
Extract your "Saved Stories" articles from the Apple News app on macOS (thanks to @eecue who wrote much of this)
"""Get your "Saved Stories" articles from Apple News.

Thanks to Dave Bullock (@eecue) whose idea this was and who wrote the
extract_info_from_apple_news function.

This script requires the following modules be pip installed:

* bs4
* requests

Save this script to a file and run it with Python 3.9 or later.

For a more robust implementation of this, see the author's follow-up project.
"""
from __future__ import annotations
import io
import pathlib
import plistlib
import requests
from bs4 import BeautifulSoup
def get_reading_list_bplist() -> bytes | None:
    """Get the saved-articles binary plist from Apple News.

    Returns:
        bytes: The saved articles binary plist as a bytes object.
        None: If the reading-list file or a second embedded plist is not found.
    """
    # The saved articles are stored in a binary file called "reading-list"
    # in the Apple News container. The file contains at least two binary
    # plist (bplist) files embedded in it, the second of which contains the
    # saved article IDs. (The first is a binary NSKeyedArchiver archive.)
    #
    # NOTE(review): the container path below was reconstructed -- the original
    # string literals were lost; confirm against an actual macOS install.
    news_container = (
        "~/Library/Containers/"
        + "Data/Library/Application Support/"
        + ""
    )
    reading_list_file = (
        pathlib.Path(news_container, "reading-list").expanduser().absolute()
    )
    try:
        # The reading list is small; read it whole rather than scanning a
        # buffered stream byte-by-byte (also guarantees the file is closed).
        data = reading_list_file.read_bytes()
    except FileNotFoundError:
        return None
    return _second_bplist(data)


def _second_bplist(data: bytes) -> bytes | None:
    """Return *data* from the second 'bplist00' magic marker to the end.

    Returns None if fewer than two markers are present.
    """
    bplist_marker = b"\x62\x70\x6C\x69\x73\x74\x30\x30"  # b"bplist00"
    first = data.find(bplist_marker)
    if first == -1:
        return None
    second = data.find(bplist_marker, first + len(bplist_marker))
    if second == -1:
        return None
    return data[second:]
def get_article_info(reading_list: bytes) -> dict[str, dict[str, str]] | None:
"""Decode the saved article information from the binary plist"""
return plistlib.loads(reading_list, fmt=plistlib.FMT_BINARY)
def extract_info_from_apple_news(news_id: str) -> dict[str, str | None]:
    """Extract the article URL, title, description, image, and author from Apple News.

    Args:
        news_id: The Apple News article ID (the trailing path component of an
            apple.news share URL).

    Returns:
        Dict with keys "url", "title", "description", "image", "author";
        any value not present in the fetched page is None.
    """
    # Construct the Apple News URL from the ID
    apple_news_url = f"https://apple.news/{news_id}"
    # Send a GET request to the Apple News URL and get the response HTML.
    # A timeout keeps a network stall from hanging the whole script.
    response = requests.get(apple_news_url, timeout=30)
    html = response.content.decode("utf-8")
    # Use BeautifulSoup to extract the URL from the redirectToUrlAfterTimeout
    # JavaScript call that forwards the browser to the real article.
    soup = BeautifulSoup(html, "html.parser")
    # The lambda must guard against None: bs4 calls it with None for tags
    # that have no string content, and `in None` raises TypeError.
    if script_tag := soup.find(
        "script", string=lambda t: t is not None and "redirectToUrlAfterTimeout" in t
    ):
        # Take the first double-quoted https URL inside the script text.
        url_start_index = script_tag.text.index('"https://') + 1
        url_end_index = script_tag.text.index('"', url_start_index)
        url = script_tag.text[url_start_index:url_end_index]
    else:
        url = None
    # Extract the og:title, og:description, og:image, and author meta tags
    if title_tag := soup.find("meta", property="og:title"):
        title = title_tag["content"]
    else:
        title = None
    if description_tag := soup.find("meta", property="og:description"):
        description = description_tag["content"]
    else:
        description = None
    if image_tag := soup.find("meta", property="og:image"):
        image = image_tag["content"]
    else:
        image = None
    if author_tag := soup.find("meta", {"name": "Author"}):
        author = author_tag["content"]
    else:
        author = None
    # Return the extracted information as a dictionary
    return {
        "url": url,
        "title": title,
        "description": description,
        "image": image,
        "author": author,
    }
if __name__ == "__main__":
    reading_list = get_reading_list_bplist()
    articles = get_article_info(reading_list)
    if articles is None:
        print("No saved articles found")
    else:
        for article in articles.values():
            # NOTE(review): assumes each saved-article record carries its
            # Apple News ID under "articleID" -- confirm against the actual
            # reading-list plist contents.
            print(extract_info_from_apple_news(article["articleID"]))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment