@thejevans
Last active January 10, 2023 02:24
generate all anagrams to make names, given a last name
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "3701403a-b37d-4584-9f5b-4bff97299a48",
"metadata": {},
"outputs": [],
"source": [
"# only need to run once\n",
"\n",
"import nltk\n",
"nltk.download('brown')\n",
"nltk.download('words')"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "618b6333-85a6-468a-965d-b2900541ce64",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import collections\n",
"import functools\n",
"import itertools\n",
"import json\n",
"\n",
"from nltk import FreqDist\n",
"from nltk.corpus import brown, words\n",
"from numba import njit, prange\n",
"import numpy as np\n",
"from tqdm.notebook import tqdm"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "32f8c4b9-1c29-445a-9101-1294bee217c4",
"metadata": {},
"outputs": [],
"source": [
"# functions\n",
"\n",
"def logging(log):\n",
" def logging_decorator(func):\n",
" @functools.wraps(func)\n",
" def wrapper(*args, **kwargs):\n",
" print(f'{log}...', end='', flush=True)\n",
" result = func(*args, **kwargs)\n",
" print('done.')\n",
" return result\n",
" return wrapper\n",
" return logging_decorator\n",
"\n",
"@logging('finding all anagram sets in list')\n",
"@njit\n",
"def word_to_word_anagrams(word_arr: np.ndarray) -> np.ndarray:\n",
" \"\"\"\n",
" uses bit packing to keep memory usage reasonable.\n",
" \n",
" this finds all words that are anagrams of each other\n",
" \"\"\"\n",
" bitmap = np.array([\n",
" np.uint8(128),\n",
" np.uint8(64),\n",
" np.uint8(32),\n",
" np.uint8(16),\n",
" np.uint8(8),\n",
" np.uint8(4),\n",
" np.uint8(2),\n",
" np.uint8(1),\n",
" ], dtype=np.uint8)\n",
"\n",
" result_arr = np.zeros((len(word_arr), int(np.ceil(len(word_arr) / 8))), dtype=np.uint8)\n",
" \n",
" # without this, it's 10 times slower. no idea why.\n",
" dummy = np.zeros(26, dtype=np.int8)\n",
"\n",
" for i in range(len(word_arr)):\n",
" result_arr[i] = _word_to_word_row(bitmap, i, word_arr, dummy)\n",
"\n",
" return result_arr\n",
"\n",
"@njit(parallel=True)\n",
"def _word_to_word_row(bitmap: np.ndarray, i: int, word_arr: np.ndarray, dummy: np.ndarray) -> np.ndarray:\n",
" row_arr = np.zeros(int(np.ceil(len(word_arr) / 8)), dtype=np.uint8)\n",
"\n",
" for j in prange(len(word_arr)):\n",
" counts = word_arr[i] - word_arr[j] + dummy\n",
"\n",
" if not np.any(counts):\n",
" row_arr[j // 8] += bitmap[j % 8]\n",
"\n",
" return row_arr\n",
"\n",
"@logging('filtering word pairs for potential anagrams')\n",
"@njit\n",
"def snarks_filter(word_arr: np.ndarray, snarks_arr: np.ndarray) -> np.ndarray:\n",
" \"\"\"\n",
" uses bit packing to keep memory usage reasonable.\n",
" \n",
" this reduces the search space to only pairs of words with snarks in them somewhere.\n",
" \"\"\"\n",
" bitmap = np.array([\n",
" np.uint8(128),\n",
" np.uint8(64),\n",
" np.uint8(32),\n",
" np.uint8(16),\n",
" np.uint8(8),\n",
" np.uint8(4),\n",
" np.uint8(2),\n",
" np.uint8(1),\n",
" ], dtype=np.uint8)\n",
"\n",
" result_arr = np.zeros((len(word_arr), int(np.ceil(len(word_arr) / 8))), dtype=np.uint8)\n",
"\n",
" for i in range(len(word_arr)):\n",
" result_arr[i] = _snarks_row(bitmap, i, word_arr, snarks_arr)\n",
"\n",
" return result_arr\n",
"\n",
"@njit(parallel=True)\n",
"def _snarks_row(\n",
" bitmap: np.ndarray, i: int, word_arr: np.ndarray, snarks_arr: np.ndarray) -> np.ndarray:\n",
" row_arr = np.zeros(int(np.ceil(len(word_arr) / 8)), dtype=np.uint8)\n",
"\n",
" for j in prange(i + 1, len(word_arr)):\n",
" counts = word_arr[i] + word_arr[j] - snarks_arr\n",
"\n",
" if not np.any(counts < 0) and np.sum(counts) > 0:\n",
" row_arr[j // 8] += bitmap[j % 8]\n",
"\n",
" return row_arr\n",
"\n",
"@njit(parallel=True)\n",
"def snarks_anagram_row(\n",
" i: int,\n",
" row: np.ndarray,\n",
" snarks_count: np.ndarray,\n",
" word_arr: np.ndarray,\n",
" word_hash_idxs: np.ndarray,\n",
" bitmap: np.ndarray,\n",
") -> np.ndarray:\n",
" \"\"\"\n",
" this does most of the heavy lifting of actually finding valid anagrams\n",
"\n",
" uses the fact that words are sorted by hash to search fewer words\n",
" \"\"\"\n",
" potential_match_idxs = np.nonzero(row)[0]\n",
" row_matches = -1 * np.ones(len(potential_match_idxs), dtype=np.int32)\n",
" remainder_counts = word_arr[i] + word_arr[potential_match_idxs] - snarks_count\n",
"\n",
" for j in prange(len(potential_match_idxs)):\n",
" remainder_hash = 0\n",
" for k in range(8):\n",
" if remainder_counts[j, k] == 0:\n",
" remainder_hash += bitmap[k]\n",
" if remainder_counts[j, k + 8] == 0:\n",
" remainder_hash += bitmap[k]\n",
" if remainder_counts[j, k + 16] == 0:\n",
" remainder_hash += bitmap[k]\n",
" for k in range(2):\n",
" if remainder_counts[j, k + 24] == 0:\n",
" remainder_hash += bitmap[k]\n",
"\n",
" lo = word_hash_idxs[remainder_hash][0]\n",
" hi = word_hash_idxs[remainder_hash][1]\n",
" abs_diff_sums = np.sum(np.abs(word_arr[lo:hi] - remainder_counts[j]), axis=1)\n",
" matches = np.nonzero(abs_diff_sums == 0)[0]\n",
" if len(matches) > 0:\n",
" # since all one-word anagrams are gone, there should only be one match\n",
" row_matches[j] = matches[0] + lo\n",
"\n",
" valid = np.nonzero(row_matches != -1)[0] \n",
" return potential_match_idxs[valid], row_matches[valid]\n",
"\n",
"@logging('loading word list')\n",
"def load_wordlist(corpus) -> list[str]:\n",
" word_list = [s.lower() for s in set(corpus.words())]\n",
" return list(set(word_list))\n",
"\n",
"@logging('filtering word list')\n",
"def filter_wordlist(word_list: list[str]) -> list[str]:\n",
" word_list = [word.replace('-', '') for word in word_list]\n",
" word_list = [word.replace('.', '') for word in word_list]\n",
" return [word for word in word_list if word.isalpha()]\n",
"\n",
"@logging('building word count arrays')\n",
"def build_count_arrs(word_list: list[str], snarks: str) -> tuple[np.ndarray, np.ndarray]:\n",
" word_count_arr = np.zeros((len(word_list), 26), dtype=np.int8)\n",
" snarks_count_arr = np.zeros(26, dtype=np.int8)\n",
"\n",
" for i, word in enumerate(word_list):\n",
" count = collections.Counter(word)\n",
" for char, num in count.items():\n",
" word_count_arr[i, ord(char) - ord('a')] = num\n",
"\n",
" for char, num in collections.Counter(snarks).items():\n",
" snarks_count_arr[ord(char.lower()) - ord('a')] = num\n",
"\n",
" return word_count_arr, snarks_count_arr\n",
"\n",
"@logging('sorting word list by hash')\n",
"def sort_wordlist(word_list: list[str], word_count_arr: np.ndarray) -> list[str]:\n",
" hash_arr = hash_func(word_count_arr)\n",
" return list(zip(*sorted(zip(hash_arr, word_list))))[1]\n",
"\n",
"def hash_func(word_count_arr: np.ndarray):\n",
" return np.sum(\n",
" np.packbits(word_count_arr == 0, bitorder='little', axis=-1),\n",
" axis=-1,\n",
" dtype=np.uint16,\n",
" )\n",
"\n",
"@logging('condensing word list')\n",
"def condense_words_by_anagrams(word_list: list[str], anagram_arr: np.ndarray):\n",
" anagram_map = {word: [] for word in word_list}\n",
" for i, word in enumerate(word_list):\n",
" row = np.unpackbits(anagram_arr[i])\n",
" anagram_idxs = np.nonzero(row)[0]\n",
" anagram_map[word] = [word_list[j] for j in anagram_idxs if word_list[j] != word]\n",
"\n",
" condensed_word_list = []\n",
" for word, anagrams in anagram_map.items():\n",
" for anagram in anagrams:\n",
" if anagram in condensed_word_list:\n",
" break\n",
" else:\n",
" condensed_word_list.append(word)\n",
" \n",
" anagram_map = {word: anagram_map[word] for word in condensed_word_list}\n",
" \n",
" return condensed_word_list, anagram_map\n",
"\n",
"@logging('building word hash index array')\n",
"def hash_idx_arr(word_list: list[str], word_count_arr: np.ndarray) -> np.ndarray:\n",
" hashes = hash_func(word_count_arr)\n",
" word_hash_idxs = np.zeros((np.amax(hashes) + 1, 2), dtype=np.uint32)\n",
"\n",
" for j, hash_val in enumerate(hashes):\n",
" if word_hash_idxs[hash_val, 0] == 0:\n",
" word_hash_idxs[hash_val, 0] = j\n",
" if hash_val > 0:\n",
" word_hash_idxs[hashes[j - 1], 1] = j\n",
"\n",
" word_hash_idxs[np.amax(hashes), 1] = len(word_list)\n",
" return word_hash_idxs\n",
"\n",
"@logging('finding all valid anagrams')\n",
"def find_anagrams(\n",
" word_list: list[str],\n",
" word_hash_idxs: np.ndarray,\n",
" word_count_arr: np.ndarray,\n",
" snarks_count_arr: np.ndarray,\n",
" filter_arr: np.ndarray,\n",
") -> dict[str, list[tuple[str, str]]]:\n",
" bitmap = 2**np.array(np.arange(8), dtype=np.uint8)\n",
"\n",
" pairs = {}\n",
" for i in tqdm(range(len(word_list))):\n",
" row = np.unpackbits(filter_arr[i])\n",
" pairing_idxs, name_idxs = snarks_anagram_row(\n",
" i, row, snarks_count_arr, word_count_arr, word_hash_idxs, bitmap)\n",
"\n",
" for name_idx, pairing_idx in zip(name_idxs, pairing_idxs):\n",
" name = word_list[name_idx]\n",
" a = word_list[pairing_idx]\n",
" b = word_list[i]\n",
" if name not in pairs:\n",
" pairs[name] = []\n",
" pairs[name].append((min(a, b), max(a, b)))\n",
" \n",
" return pairs\n",
"\n",
"@logging('building anagram dictionary')\n",
"def build_anagram_dict(\n",
" snarks: str,\n",
" pairs: dict[str, list[tuple[str, str]]],\n",
" anagram_map: dict[str, list[str]],\n",
") -> dict[str, dict[str, list[str]] | list[str]]:\n",
" anagrams = {}\n",
" for word, pair_list in pairs.items():\n",
" full_pair_list = [*pair_list]\n",
" for a, b in pair_list:\n",
" full_pair_list.extend(\n",
" list(itertools.product(anagram_map[a], anagram_map[b])))\n",
" pair_strs = [f'{i} {j}' for i, j in full_pair_list]\n",
" if len(anagram_map[word]) > 0:\n",
" anagrams[f'{word} {snarks}'] = {\n",
" 'whole name anagrams': pair_strs,\n",
" 'first name anagrams': anagram_map[word],\n",
" }\n",
" else:\n",
" anagrams[f'{word} {snarks}'] = pair_strs\n",
" return anagrams\n",
"\n",
"@logging('saving anagrams to file(s)')\n",
"def save_anagrams(\n",
" anagrams: dict[str, list[str] | dict[str, list[str]]],\n",
" out_dir: str,\n",
" single_outfile: bool,\n",
") -> None:\n",
" first_name_lens = [len(name[:name.find(' ')]) for name in anagrams]\n",
" longest_first_name = max(first_name_lens)\n",
" test_name = next(iter(anagrams))\n",
" snarks_length = len(test_name[test_name.find(' '):])\n",
" out_dicts = [{} for _ in range(longest_first_name)]\n",
" for name, name_anagrams in anagrams.items():\n",
" out_dicts[len(name) - snarks_length - 1][name] = name_anagrams\n",
" for i, out_dict in enumerate(out_dicts):\n",
" out_file = f'{out_dir}/{i + 1:02} char names.json'\n",
" with open(out_file, 'w') as f:\n",
" json.dump(out_dict, f)"
]
},
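{
"cell_type": "code",
"execution_count": null,
"id": "3b1f0d2c-7e4a-4c6b-9d8e-5f0a1b2c3d4e",
"metadata": {},
"outputs": [],
"source": [
"# quick sanity check of the hashing scheme: anagrams share a letter-count\n",
"# row, so hash_func must give them the same bucket key that\n",
"# snarks_anagram_row later searches\n",
"demo_words = ['listen', 'silent', 'snarks']\n",
"demo_counts, _ = build_count_arrs(demo_words, 'snarks')\n",
"demo_hashes = hash_func(demo_counts)\n",
"assert np.array_equal(demo_counts[0], demo_counts[1])\n",
"assert demo_hashes[0] == demo_hashes[1]"
]
},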
{
"cell_type": "code",
"execution_count": 26,
"id": "6ade6baa-4601-478f-aec8-98df3d2ea465",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"loading word list...done.\n",
"filtering word list...done.\n",
"length of uncondensed word list: 234377\n",
"building word count arrays...done.\n",
"sorting word list by hash...done.\n",
"building word count arrays...done.\n",
"finding all anagram sets in list...done.\n",
"condensing word list...done.\n",
"length of condensed word list: 215848\n",
"building word count arrays...done.\n",
"building word hash index array...done.\n",
"filtering word pairs for potential anagrams...done.\n",
"finding all valid anagrams..."
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "879770e5da014c64b7edcac2fe607a79",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/215848 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"done.\n",
"building anagram dictionary...done.\n",
"saving anagrams to file...done.\n"
]
}
],
"source": [
"snarks = 'snarks'\n",
"single_outfile = False\n",
"# 500MB RAM -- 44962 uncondensed -- 41780 condensed -- took 23s to complete find_anagrams() on laptop\n",
"#out_dir, corpus = './snarks_brown', brown\n",
"# 6.7GB RAM -- 234377 uncondensed -- 215848 condensed -- took 1311s to complete find_anagrams() on laptop\n",
"out_dir, corpus = './snarks_words', words\n",
"\n",
"uncondensed_word_list = load_wordlist(corpus)\n",
"uncondensed_word_list = filter_wordlist(uncondensed_word_list)\n",
"\n",
"print(f'length of uncondensed word list: {len(uncondensed_word_list)}')\n",
"\n",
"uncondensed_word_count_arr, _ = build_count_arrs(uncondensed_word_list, snarks)\n",
"uncondensed_word_list = sort_wordlist(uncondensed_word_list, uncondensed_word_count_arr)\n",
"uncondensed_word_count_arr, _ = build_count_arrs(uncondensed_word_list, snarks)\n",
"anagram_arr = word_to_word_anagrams(uncondensed_word_count_arr)\n",
"word_list, anagram_map = condense_words_by_anagrams(uncondensed_word_list, anagram_arr)\n",
"\n",
"del anagram_arr\n",
"del uncondensed_word_list\n",
"del uncondensed_word_count_arr\n",
"\n",
"print(f'length of condensed word list: {len(word_list)}')\n",
"\n",
"word_count_arr, snarks_count_arr = build_count_arrs(word_list, snarks)\n",
"word_hash_idxs = hash_idx_arr(word_list, word_count_arr)\n",
"filter_arr = snarks_filter(word_count_arr, snarks_count_arr)\n",
"pairs = find_anagrams(word_list, word_hash_idxs, word_count_arr, snarks_count_arr, filter_arr)\n",
"anagrams = build_anagram_dict(snarks, pairs, anagram_map)\n",
"save_anagrams(anagrams, out_dir, single_outfile)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
@thejevans
Author

takes a word list and finds all valid anagrams of the form:
word_a + known_word -> word_c + word_d
then writes the output to JSON files, grouped by first-name length
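
for example, 'tie' + 'snarks' uses exactly the same letters as 'stark' + 'sine', so 'tie' would appear as a first-name candidate with the pair ('sine', 'stark')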
