Skip to content

Instantly share code, notes, and snippets.

@transfluxus
Last active December 24, 2017 19:26
Show Gist options
  • Save transfluxus/48ff74d2035f22f96734695a4d5aa0d2 to your computer and use it in GitHub Desktop.
Save transfluxus/48ff74d2035f22f96734695a4d5aa0d2 to your computer and use it in GitHub Desktop.
Exploring multimodel embeddings
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"__Notebook to look into Multimodel embeddings, especially those generated with [MUSE](https://github.com/facebookresearch/MUSE) by facebook research:\n",
"[1] A. Conneau*, G. Lample*, L. Denoyer, MA. Ranzato, H. Jégou, [Word Translation Without Parallel Data](https://arxiv.org/pdf/1710.04087.pdf)\n",
"Some handy functions to translate a word into multiple languages at once__\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from os.path import join, basename\n",
"from os import listdir, remove\n",
"from time import time\n",
"\n",
"import pandas as pd\n",
"from gensim.models import KeyedVectors as KV\n",
"from sklearn.manifold import TSNE\n",
"import matplotlib.pyplot as plt\n",
"\n",
"from IPython.display import display"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"[The Repo by facebook](https://github.com/facebookresearch/MUSE) provides fastText Wikipedia supervised word embeddings for 30 languages, aligned in a single vector space. These files end with '.txt', '.vec' or '.bin'. \n",
"\n",
"The models provided by facebook are in text format. The following function converts them into binary format. By that, they load faster and take less memory."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Folder that holds the downloaded MUSE embedding files ('.txt'/'.vec'/'.bin').\n",
"model_folder = \"./\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def turn_binary(file_path, delete_txt=False):\n",
"    \"\"\"Convert a word2vec text-format embedding file to binary format.\n",
"\n",
"    Binary models load faster and take less memory than text models.\n",
"\n",
"    Parameters\n",
"    ----------\n",
"    file_path : str\n",
"        Path to a text-format ('.txt'/'.vec') word2vec model file.\n",
"    delete_txt : bool, optional\n",
"        If True, delete the original text file after a successful conversion.\n",
"\n",
"    Returns\n",
"    -------\n",
"    KeyedVectors or None\n",
"        The loaded model, or None if the file could not be read as text.\n",
"    \"\"\"\n",
"    try:\n",
"        print('reading %s' % basename(file_path))\n",
"        model = KV.load_word2vec_format(file_path)\n",
"        # Swap the 3-character extension ('txt'/'vec') for 'bin'.\n",
"        new_name = file_path[:-3] + \"bin\"\n",
"        model.save_word2vec_format(new_name, binary=True)\n",
"        print(\"saved to %s\" % new_name)\n",
"        if delete_txt:\n",
"            remove(file_path)\n",
"        return model\n",
"    except UnicodeDecodeError:\n",
"        print(\"Gensim can't open %s. It's probably already binary then (or something else) :)\" % basename(file_path))\n",
"        return None\n",
"\n",
"\n",
"# Convert only text-format models; files already in '.bin' (or anything\n",
"# unrelated in the folder) are skipped instead of triggering a failed read.\n",
"for file in listdir(model_folder):\n",
"    if file.endswith(('.txt', '.vec')):\n",
"        turn_binary(join(model_folder, file))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following class manages all embeddings in one folder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class MultiModelEmbeddings:\n",
"    \"\"\"Manage a folder of aligned word-embedding models, one per language.\n",
"\n",
"    Model files must end in '.txt', '.vec' or '.bin'; the two characters\n",
"    immediately before the extension are taken as the language code\n",
"    (e.g. 'wiki.en.vec' -> 'en').\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, folder_path, auto_load=True):\n",
"        self.folder_path = folder_path\n",
"        self.model_dir = self.check_model_files(folder_path)\n",
"        print(len(self.model_dir), \"languages\")\n",
"        if auto_load:\n",
"            self.load_all()\n",
"\n",
"    def get_lang_code(self, file_name):\n",
"        # The two characters right before the 3-char extension, e.g. 'wiki.en.vec'.\n",
"        return file_name[-6:-4]\n",
"\n",
"    def check_model_files(self, folder_path):\n",
"        \"\"\"Map language code -> {'file': name, 'model': None} for all model files.\"\"\"\n",
"        files = {self.get_lang_code(f): {\n",
"                     \"file\": f,\n",
"                     \"model\": None}\n",
"                 for f in listdir(folder_path) if f[-3:] in ['txt', 'vec', 'bin']}\n",
"        return files\n",
"\n",
"    def load_language(self, lang_code):\n",
"        \"\"\"Load the embedding model for lang_code if not already in memory.\n",
"\n",
"        Returns the model (loaded now or earlier), or None for an unknown\n",
"        language code.\n",
"        \"\"\"\n",
"        if lang_code not in self.model_dir:\n",
"            return None\n",
"        lang = self.model_dir[lang_code]\n",
"        if not lang[\"model\"]:\n",
"            print(\"Loading %s\" % lang_code)\n",
"            binary = lang[\"file\"].endswith('bin')\n",
"            model_path = join(self.folder_path, lang[\"file\"])\n",
"            lang['model'] = KV.load_word2vec_format(model_path, binary=binary)\n",
"        return lang['model']\n",
"\n",
"    def get_model(self, lang_code):\n",
"        \"\"\"Return the model for lang_code, loading it lazily on first use.\"\"\"\n",
"        if not self.model_dir[lang_code][\"model\"]:\n",
"            return self.load_language(lang_code)\n",
"        return self.model_dir[lang_code][\"model\"]\n",
"\n",
"    def add_lang_to_dir(self, path, load=True):\n",
"        \"\"\"Register an additional model file, optionally loading it at once.\"\"\"\n",
"        lang_code = self.get_lang_code(path)\n",
"        if path[-3:] in ['txt', 'vec', 'bin'] and lang_code not in self.model_dir:\n",
"            self.model_dir[lang_code] = {\"file\": path, \"model\": None}\n",
"            if load:\n",
"                self.load_language(lang_code)\n",
"\n",
"    def load_all(self):\n",
"        \"\"\"Load every registered model that is not in memory yet.\"\"\"\n",
"        count = 0\n",
"        start = time()\n",
"        for lang in self.model_dir:\n",
"            if not self.model_dir[lang][\"model\"]:\n",
"                self.load_language(lang)\n",
"                count += 1\n",
"        if count > 0:\n",
"            print(\"loaded %s models in %.2f\" % (count, (time() - start)))\n",
"\n",
"    def prep_result(self, result, only_words):\n",
"        \"\"\"Strip scores from (word, score) pairs and unwrap single-item results.\"\"\"\n",
"        if only_words:\n",
"            result = list(zip(*result))[0]\n",
"        if len(result) == 1:\n",
"            return result[0]\n",
"        return result\n",
"\n",
"    def check_langs(self, *lang_codes):\n",
"        \"\"\"Return True iff every given language code is registered.\"\"\"\n",
"        for lang in lang_codes:\n",
"            if lang not in self.model_dir:\n",
"                print(\"language %s does not exist\" % lang)\n",
"                return False\n",
"        return True\n",
"\n",
"    def translate(self, src_lang, trg_lang, word, topn=1, only_words=True):\n",
"        \"\"\"Translate word from src_lang into one or several target languages.\n",
"\n",
"        trg_lang may be a single code or a list of codes; a list yields a\n",
"        list with one result per target language.\n",
"        \"\"\"\n",
"        if isinstance(trg_lang, list):\n",
"            return [self.translate(src_lang, lang, word, topn, only_words) for lang in trg_lang]\n",
"        if not self.check_langs(src_lang, trg_lang):\n",
"            return []\n",
"        srs_vec = self.get_model(src_lang)[word]\n",
"        result = self.get_model(trg_lang).similar_by_vector(srs_vec, topn=topn)\n",
"        return self.prep_result(result, only_words)\n",
"\n",
"    def similars(self, src_lang, word, topn, only_words=True):\n",
"        \"\"\"Return the topn most similar words to word within src_lang.\"\"\"\n",
"        if not self.check_langs(src_lang):\n",
"            return []\n",
"        result = self.get_model(src_lang).similar_by_word(word, topn=topn)\n",
"        return self.prep_result(result, only_words)\n",
"\n",
"    def compare(self, lang1, lang2, word1, word2, topn=5):\n",
"        \"\"\"Tabulate monolingual neighbours and cross-lingual translations.\"\"\"\n",
"        if not self.check_langs(lang1, lang2):\n",
"            return []\n",
"        L1_1 = self.similars(lang1, word1, topn=topn)\n",
"        L2_2 = self.similars(lang2, word2, topn=topn)\n",
"        L1_2 = self.translate(lang1, lang2, word1, topn=topn)\n",
"        L2_1 = self.translate(lang2, lang1, word2, topn=topn)\n",
"        columns = [\"%s:%s\" % (lang1, word1), \"%s:%s\" % (lang2, word2),\n",
"                   \"%s:%s> %s\" % (lang1, word1, lang2), \"%s:%s> %s\" % (lang2, word2, lang1)]\n",
"        compares = {columns[0]: L1_1, columns[1]: L2_2, columns[2]: L1_2, columns[3]: L2_1}\n",
"        return pd.DataFrame(data=compares, columns=columns)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Build the manager without loading any embeddings up front.\n",
"m_ms = MultiModelEmbeddings(model_folder, auto_load=False)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Load only the three languages used in the examples below.\n",
"for lang_code in ('en', 'de', 'es'):\n",
"    m_ms.load_language(lang_code)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Monolingual neighbours of 'car', then cross-lingual look-ups.\n",
"print(m_ms.similars(\"en\", \"car\", topn=20))\n",
"for src, trg, query in [(\"en\", \"de\", \"car\"), (\"de\", \"en\", \"auto\"), (\"de\", \"en\", \"fahrzeug\")]:\n",
"    print(m_ms.translate(src, trg, query, topn=20))\n",
"\n",
"# Side-by-side table for an English/German word pair.\n",
"display(m_ms.compare('en', 'de', 'summer', 'sommer', 10))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"word = \"peace\"\n",
"topn = 10\n",
"# Restrict the query to languages whose model is currently in memory.\n",
"langs = [code for code, entry in m_ms.model_dir.items() if entry['model']]\n",
"start = time()\n",
"translations = m_ms.translate(\"en\", langs, word, topn=topn)\n",
"trans = dict(zip(langs, translations))\n",
"print(\"query took %.3f secs\" % (time() - start))\n",
"df = pd.DataFrame(trans, index=[word] + list(range(1, topn)))\n",
"df"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "nlp",
"language": "python",
"name": "nlp"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment