@kuipumu
Last active August 1, 2023 17:33
Add video titles to a Google Takeout YouTube CSV playlist.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Add video titles to a Google Takeout YouTube CSV playlist.

This script adds video titles to the YouTube videos listed in CSV
playlists exported from Google Takeout (https://takeout.google.com).

Example:
    $ python addtitle2ytcsv.py input_playlist.csv output_playlist.csv

Attributes:
    ENCODING (str): File encoding format.
    TITLE_RE (str): Regex expression used to extract the HTML title element.

"""
import csv
import html
import json
import re
from argparse import ArgumentParser
from pathlib import Path
from urllib.request import Request, urlopen

ENCODING = 'utf-8'
TITLE_RE = r'<title>(.*?)</title>'
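# For example, a watch page whose head contains
#   <title>Some Video Title - YouTube</title>
# matches TITLE_RE with group(1) == 'Some Video Title - YouTube';
# get_yt_video_title() below strips the 10-character ' - YouTube'
# suffix with title[:-10].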


def get_yt_video_title(url: str, timeout: int) -> str:
    """Get YouTube video title from URL.

    This function tries to get the head title from the requested
    document; if the title belongs to an unavailable video it tries to
    fetch it from the Wayback Machine API.

    Args:
        url: Video URL.
        timeout: Timeout in seconds.

    Returns:
        Extracted YouTube video title or empty string.

    """
    try:
        with urlopen(Request(url), timeout=timeout) as response:
            decoded = response.read().decode(ENCODING)
        title = re.search(TITLE_RE, html.unescape(decoded)).group(1)
        # Get the title from the Wayback Machine if the video is unavailable.
        if title in ('YouTube', ' - YouTube'):
            with urlopen(
                Request('http://archive.org/wayback/available?url={0}'.format(url)),
                timeout=timeout,
            ) as response:
                decoded = json.loads(response.read().decode(ENCODING))
            with urlopen(
                Request(decoded['archived_snapshots']['closest']['url']),
                timeout=timeout,
            ) as response:
                decoded = response.read().decode(ENCODING)
            title = re.search(TITLE_RE, html.unescape(decoded)).group(1)
        # Strip the trailing ' - YouTube' suffix (10 characters).
        return title[:-10]
    except Exception:  # pylint: disable=broad-exception-caught
        return ''
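
# Example (hypothetical): from a Python shell, the helper can be called
# directly, e.g.
#   >>> get_yt_video_title('https://www.youtube.com/watch?v=dQw4w9WgXcQ', 15)
# which returns the fetched title, or '' if neither YouTube nor the
# Wayback Machine yields one.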


def main():
    """Script main function."""
    parser = ArgumentParser(
        description='Add video titles to a Google Takeout YouTube CSV playlist.')
    parser.add_argument('input', type=Path, help='Input CSV file path.')
    parser.add_argument('output', type=Path, help='Output CSV file path.')
    parser.add_argument(
        '--timeout', '-t', type=int, default=15, help='Request timeout in seconds.')
    args = parser.parse_args()
    # Read all rows from the input CSV file.
    with open(args.input, 'r', newline='', encoding=ENCODING) as file:
        rows = list(csv.reader(file))
    for row in rows:
        # Skip empty rows and rows whose first column is not an
        # 11-character YouTube video ID (e.g. the header row).
        if not row or len(row[0]) != 11:
            continue
        url = 'https://www.youtube.com/watch?v={0}'.format(row[0].strip())
        row.append(get_yt_video_title(url, args.timeout))
        print(url, 'OK!' if row[-1] else 'N/A')
    # Write each processed row to the output CSV file.
    with open(args.output, 'w', newline='', encoding=ENCODING) as file:
        csv.writer(file).writerows(rows)


if __name__ == '__main__':
    main()
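
For reference, a sketch of the expected input and output, assuming the usual two-column Takeout playlist export (header names, timestamp format, and the exact column layout can vary between Takeout versions); the script appends the fetched title as an extra column and leaves the header row untouched:

input_playlist.csv
    Video ID,Playlist Video Creation Timestamp
    dQw4w9WgXcQ,2023-01-15T10:30:00+00:00

output_playlist.csv (after $ python addtitle2ytcsv.py input_playlist.csv output_playlist.csv)
    Video ID,Playlist Video Creation Timestamp
    dQw4w9WgXcQ,2023-01-15T10:30:00+00:00,<fetched video title>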