Pybites (pybites)

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.0.1
    hooks:
      - id: check-yaml
      - id: end-of-file-fixer
      - id: trailing-whitespace
      - id: debug-statements
  - repo: https://gitlab.com/pycqa/flake8

from typing import Protocol


class PybitesSearchProtocol(Protocol):
    def match_content(self, search: str) -> list[str]:
        """Implement in subclass to search Pybites content"""
        ...


class CompleteSearch:
    def match_content(self, search: str) -> list[str]:
        # Implementation of search method (body truncated in the preview)
        ...
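
# A hypothetical class that satisfies the protocol, to show how it is used;
# the class name is my own, and feedparser plus the feed URL are borrowed
# from the feed-import gist further down this page.
import feedparser


class RssTitleSearch:
    def match_content(self, search: str) -> list[str]:
        entries = feedparser.parse("https://pybit.es/feed/").entries
        return [e.title for e in entries if search.lower() in e.title.lower()]


def search_content(searcher: PybitesSearchProtocol, term: str) -> list[str]:
    # structural typing: any object with a compatible match_content() works
    return searcher.match_content(term)
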
# prior step done:
# python manage.py dumpdata auth.User --output=users.json --format=json
import json
from collections import Counter

import plotext as plt


def aggregate_users_by_year_month(filename="users.json"):
    cnt = Counter()
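    # The gist preview stops at the Counter; a sketch of how the aggregation
    # might continue, assuming the default dumpdata layout where each record
    # carries fields.date_joined as an ISO timestamp ("YYYY-MM-DD...").
    with open(filename) as f:
        users = json.load(f)
    for user in users:
        cnt[user["fields"]["date_joined"][:7]] += 1  # count per "YYYY-MM"
    return cnt


def plot_signups(cnt, label="users joined per month"):
    # illustrative plotext usage for the aggregated counts (name is my own)
    months = sorted(cnt)
    plt.bar(months, [cnt[m] for m in months])
    plt.title(label)
    plt.show()
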
import cProfile
from functools import wraps
from pstats import Stats, SortKey
from time import time

def timing(f):
    """A simple timer decorator"""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # standard timing pattern; the report format is illustrative
        start = time()
        result = f(*args, **kwargs)
        print(f"{f.__name__} took {time() - start:.3f} seconds")
        return result
    return wrapper
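
# The cProfile / pstats imports above go unused in the preview; presumably the
# gist also defines a profiling decorator. A hypothetical sketch (name my own):
def profile_it(f):
    """Profile f and print the 10 most expensive calls by cumulative time."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        profiler = cProfile.Profile()
        result = profiler.runcall(f, *args, **kwargs)
        Stats(profiler).sort_stats(SortKey.CUMULATIVE).print_stats(10)
        return result
    return wrapper
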
import feedparser
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError

from blog.models import Article

FEED_URL = "https://pybit.es/feed/"


class Command(BaseCommand):
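    # The preview ends at the class header; a sketch of how handle() might
    # continue. The Article field names (title, link) are assumptions.
    help = "Import new Pybites articles from the RSS feed"

    def handle(self, *args, **options):
        for entry in feedparser.parse(FEED_URL).entries:
            try:
                Article.objects.create(title=entry.title, link=entry.link)
                self.stdout.write(f"Added: {entry.title}")
            except IntegrityError:
                # a unique constraint prevents importing the same article twice
                self.stdout.write(f"Already imported: {entry.title}")
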
>>> def gen():
...     yield from [1, 2, 3]
...
>>> g = gen()
>>> for i in g: print(i)
...
1
2
3
# generator exhausted:
>>> next(g)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
StopIteration

import concurrent.futures
import os
import re
from timeit import timeit

import requests
from tqdm import tqdm

URLS = 'urls'  # file with the URLs to retrieve, one per line
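
# The preview stops at the URLS constant; a sketch of the kind of comparison
# the imports suggest (sequential vs. thread-pool retrieval). Helper names and
# the number of workers are my own choices, not taken from the gist.
def get_urls(filename=URLS):
    with open(filename) as f:
        return [line.strip() for line in f if line.strip()]


def retrieve(url):
    resp = requests.get(url)
    resp.raise_for_status()
    return len(resp.content)


def sequential(urls):
    return [retrieve(url) for url in tqdm(urls)]


def threaded(urls, workers=8):
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
        return list(tqdm(executor.map(retrieve, urls), total=len(urls)))


if __name__ == "__main__":
    urls = get_urls()
    print("sequential:", timeit(lambda: sequential(urls), number=1))
    print("threaded:  ", timeit(lambda: threaded(urls), number=1))
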
"""Script to retrieve new titles from O'Reilly Media (formerly Safari Books Online)"""
from collections import namedtuple
from pathlib import Path
from datetime import datetime, timedelta
from urllib.request import urlretrieve
from xml.etree.ElementTree import parse
RSS_FEED = "https://www.oreilly.com/feeds/recently-added.rss"
NOW = datetime.now()
DT_FMT = "%a, %d %b %Y %H:%M:%S"
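
# The preview ends at the constants; a sketch of how the feed might be fetched
# and filtered. The Book record, local file name, and cutoff are assumptions.
Book = namedtuple("Book", "title published")


def get_recent_titles(days=7, feed=RSS_FEED):
    local_file = Path("recently-added.rss")
    if not local_file.exists():
        urlretrieve(feed, str(local_file))
    tree = parse(str(local_file))
    books = []
    for item in tree.iterfind(".//item"):
        # pubDate ends with a timezone offset; drop it so DT_FMT matches
        pub_date = item.findtext("pubDate").rsplit(" ", 1)[0]
        published = datetime.strptime(pub_date, DT_FMT)
        if NOW - published <= timedelta(days=days):
            books.append(Book(item.findtext("title"), published))
    return books
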
import asyncio
import os

import aiofiles
import aiohttp

# saved links from https://pybit.es/archives in urls
with open('urls') as f:
    URLS = [line.rstrip() for line in f]

async def fetch(session, url):
    # standard aiohttp GET; return the page body as text
    async with session.get(url) as resp:
        return await resp.text()
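
# A sketch of how fetch() might be driven: gather all downloads concurrently
# and write each page with aiofiles. The save/main helpers and the
# file-naming scheme are my own assumptions, not taken from the gist.
async def save(url, html):
    filename = os.path.basename(url.rstrip("/")) or "index"
    async with aiofiles.open(f"{filename}.html", "w") as f:
        await f.write(html)


async def main():
    async with aiohttp.ClientSession() as session:
        pages = await asyncio.gather(*(fetch(session, url) for url in URLS))
    for url, html in zip(URLS, pages):
        await save(url, html)


if __name__ == "__main__":
    asyncio.run(main())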