@10bn
Created May 29, 2024 19:32
Patreon Downloader Script

# 1. Log in on patreon.com
# 2. Use the Chrome Cookie extractor extension and export the cookies as cookies.json to the script's root folder
# 3. Use the Chrome Link Grabber extension to fill links.txt (one link per line)
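# Expected cookies.json layout (a sketch based on what the loader below reads): a JSON array of
# cookie objects with at least "name" and "value" keys; extra keys from the exporter are ignored.
# The cookie name and value shown here are placeholders, not real Patreon cookie names, e.g.:
#   [{"name": "session_id", "value": "abc123", "domain": ".patreon.com"}]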
import os
import requests
import json
from urllib.parse import urlparse, parse_qs, unquote


# Load cookies from a JSON file
def load_cookies_from_file(file_path):
    with open(file_path, 'r') as f:
        cookies_json = json.load(f)
    cookies = {cookie['name']: cookie['value'] for cookie in cookies_json}
    return cookies

# Get the file extension and main name from the URL response headers or URL parameters
def get_file_extension_and_name(url, cookies):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    response = requests.get(url, cookies=cookies, headers=headers, stream=True)
    response.raise_for_status()
    content_type = response.headers.get('content-type', '').split(';')[0]
    extension = content_type.split('/')[-1] if content_type else 'file'
    content_disposition = response.headers.get('content-disposition', '')
    filename = None
    if 'filename*=' in content_disposition:
        # RFC 5987 form: filename*=UTF-8''percent-encoded-name
        filename = unquote(content_disposition.split('filename*=')[-1].split("''")[-1].strip('"'))
    elif 'filename=' in content_disposition:
        filename = content_disposition.split('filename=')[-1].strip('"')
    else:
        # Fall back to the 'i' query parameter as a file identifier
        parsed_url = urlparse(url)
        query_params = parse_qs(parsed_url.query)
        file_id = query_params.get('i', ['file'])[0]
        filename = f"{file_id}.{extension}"
    response.close()  # only the headers are needed here; the body is downloaded later
    # Heuristic: reduce the filename to its last whitespace-separated token
    main_name = filename.split()[-1]
    return extension, main_name

# Parse subfolder from URL parameters
def parse_subfolder(url):
    parsed_url = urlparse(url)
    query_params = parse_qs(parsed_url.query)
    subfolder = query_params.get('h', [None])[0]
    return subfolder

# Download the file from the URL to the specified path
def download_file(url, cookies, save_path):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    response = requests.get(url, cookies=cookies, headers=headers, stream=True)
    response.raise_for_status()
    with open(save_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)

def main():
    # Load cookies
    cookies = load_cookies_from_file('cookies.json')
    # Load the file with links, one link per line
    with open('links.txt', 'r') as f:
        links = f.read().splitlines()
    # Create a base directory for downloads
    base_dir = 'downloads'
    os.makedirs(base_dir, exist_ok=True)
    # Process each link
    for link in links:
        try:
            subfolder = parse_subfolder(link)
            subfolder_path = os.path.join(base_dir, subfolder) if subfolder else base_dir
            os.makedirs(subfolder_path, exist_ok=True)
            _, filename = get_file_extension_and_name(link, cookies)
            file_path = os.path.join(subfolder_path, filename)
            print(f"Downloading {filename} to {subfolder_path}")
            download_file(link, cookies, file_path)
            print(f"File saved: {file_path}\n")
        except requests.HTTPError as e:
            print(f"Failed to download {link}: HTTP error {e.response.status_code}")
        except requests.RequestException as e:
            print(f"Failed to download {link}: {e}")
        except Exception as e:
            print(f"An error occurred: {e}")


if __name__ == "__main__":
    main()