Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save xiaoouwang/06b7814600e711ef6643c7759c4acf7f to your computer and use it in GitHub Desktop.
Save xiaoouwang/06b7814600e711ef6643c7759c4acf7f to your computer and use it in GitHub Desktop.
01_play_with_camembert_and_flaubert.ipynb
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:34:05.995835Z",
"start_time": "2021-05-06T14:33:52.212836Z"
},
"trusted": false
},
"cell_type": "code",
"source": "import torch\nimport transformers as ppb\n# Load camembert model/tokenizer\ncamembert_model = ppb.CamembertModel.from_pretrained(\"camembert/camembert-base\")\ncamembert_tokenizer = ppb.CamembertTokenizer.from_pretrained(\"camembert/camembert-base\")\nfrom transformers import FlaubertModel, FlaubertTokenizer\n# Load flaubert model/tokenizer\nflaubert_model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')\nflaubert_tokenizer = FlaubertTokenizer.from_pretrained('flaubert/flaubert_base_cased', do_lowercase=False)",
"execution_count": 1,
"outputs": []
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:34:26.272848Z",
"start_time": "2021-05-06T14:34:26.044121Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# test the tokenizer\nsentences = [\"je suis un chat.\",\"Je suis un chat.\"]\nfor sent in sentences:\n print('Original: ', sent)\n print('Tokenized Camembert: ', camembert_tokenizer.tokenize(sent))\n print('Tokenized Flaubert: ', flaubert_tokenizer.tokenize(sent))\n print('Token IDs Camembert: ', camembert_tokenizer.convert_tokens_to_ids(camembert_tokenizer.tokenize(sent)))\n print('Token IDs Camembert: ', flaubert_tokenizer.convert_tokens_to_ids(flaubert_tokenizer.tokenize(sent)))",
"execution_count": 2,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "Original: je suis un chat.\nTokenized Camembert: ['▁je', '▁suis', '▁un', '▁chat', '.']\nTokenized Flaubert: ['je</w>', 'suis</w>', 'un</w>', 'chat</w>', '.</w>']\nToken IDs Camembert: [50, 146, 23, 1234, 9]\nToken IDs Camembert: [61, 176, 26, 4427, 16]\nOriginal: Je suis un chat.\nTokenized Camembert: ['▁Je', '▁suis', '▁un', '▁chat', '.']\nTokenized Flaubert: ['Je</w>', 'suis</w>', 'un</w>', 'chat</w>', '.</w>']\nToken IDs Camembert: [100, 146, 23, 1234, 9]\nToken IDs Camembert: [107, 176, 26, 4427, 16]\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:34:41.758782Z",
"start_time": "2021-05-06T14:34:41.751683Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# test the encoder\ntest_special = \"Il y a un chat.\"\ninput_ids = camembert_tokenizer.encode_plus(test_special, add_special_tokens=True, max_length = 10, padding = 'max_length')\nprint(input_ids) # 5 is cls, 6 is sep, pad is 1, mask = 0\ninput_ids = flaubert_tokenizer.encode_plus(test_special, add_special_tokens=True, max_length = 10, padding = 'max_length')\nprint(input_ids) # 0 is cls, 1 is sep, pad is 2, mask = 0\nbizarre_word = \"dgfsdgdfgsdfgsd\"\nprint(flaubert_tokenizer(bizarre_word))\nprint(flaubert_tokenizer.tokenize(bizarre_word))\nprint(camembert_tokenizer(bizarre_word))\nprint(camembert_tokenizer.tokenize(bizarre_word))",
"execution_count": 3,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "{'input_ids': [5, 69, 102, 33, 23, 1234, 9, 6, 1, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]}\n{'input_ids': [0, 59, 66, 34, 26, 4427, 16, 1, 2, 2], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]}\n{'input_ids': [0, 14374, 42667, 14374, 358, 688, 23702, 358, 688, 23702, 98, 1], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n['dg', 'fs', 'dg', 'd', 'f', 'gs', 'd', 'f', 'gs', 'd</w>']\n{'input_ids': [5, 18, 383, 362, 10, 16136, 21231, 12014, 21231, 12014, 204, 6], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n['▁d', 'g', 'f', 's', 'dg', 'df', 'gs', 'df', 'gs', 'd']\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:34:44.980779Z",
"start_time": "2021-05-06T14:34:44.977545Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# check tokenizer properties\nprint(camembert_tokenizer,end=\"\\n\\n\\n\")\nprint(flaubert_tokenizer)",
"execution_count": 4,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "PreTrainedTokenizer(name_or_path='camembert-base', vocab_size=32005, model_max_len=512, is_fast=False, padding_side='right', special_tokens={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'sep_token': '</s>', 'pad_token': '<pad>', 'cls_token': '<s>', 'mask_token': AddedToken(\"<mask>\", rstrip=False, lstrip=True, single_word=False, normalized=True), 'additional_special_tokens': ['<s>NOTUSED', '</s>NOTUSED']})\n\n\nPreTrainedTokenizer(name_or_path='flaubert/flaubert_base_cased', vocab_size=68729, model_max_len=512, is_fast=False, padding_side='right', special_tokens={'bos_token': '<s>', 'unk_token': '<unk>', 'sep_token': '</s>', 'pad_token': '<pad>', 'cls_token': '</s>', 'mask_token': '<special1>', 'additional_special_tokens': ['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']})\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:35:04.498955Z",
"start_time": "2021-05-06T14:35:04.411118Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# verify tokenizer's vocab\nprint(\"size of vocab camembert:\",camembert_tokenizer.vocab_size)\ncam_vocab = camembert_tokenizer.get_vocab()\nfor v in [\"<s>\",\"</s>\",\"<pad>\",\"<unk>\",\"chat\"]:\n print(cam_vocab[v],end = \" \")\n\nprint(\"\\nsize of vocab flaubert:\",flaubert_tokenizer.vocab_size)\nflaubert_vocab = flaubert_tokenizer.get_vocab()\nfor v in [\"<s>\",\"</s>\",\"<pad>\",\"<unk>\",\"chat\"]:\n print(flaubert_vocab[v],end = \" \")\n\ntry:\n print(flaubert_vocab[\"béquille\"])\nexcept KeyError:\n print(\"\\nle mot béquille n'existe pas dans le vocab.\")",
"execution_count": 5,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "size of vocab camembert: 32005\n5 6 1 4 8734 \nsize of vocab flaubert: 68729\n0 1 2 3 8830 \nle mot béquille n'existe pas dans le vocab.\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:35:40.055844Z",
"start_time": "2021-05-06T14:35:08.084130Z"
},
"trusted": false
},
"cell_type": "code",
"source": "from transformers import pipeline\nnlp_fill_flaubert = pipeline('fill-mask', model=\"flaubert/flaubert_base_cased\", top_k=10)\nnlp_fill_camembert = pipeline('fill-mask', model=\"camembert/camembert-base\", top_k=10)",
"execution_count": 6,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": "Some weights of FlaubertWithLMHeadModel were not initialized from the model checkpoint at flaubert/flaubert_base_cased and are newly initialized: ['transformer.position_ids']\nYou should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:35:40.449254Z",
"start_time": "2021-05-06T14:35:40.060353Z"
},
"trusted": false
},
"cell_type": "code",
"source": "def fill_line(sent):\n print(\"\\nflaubert\")\n for x in nlp_fill_flaubert(sent.replace(\"<mask>\",\"<special1>\")):\n print(x[\"sequence\"], end = \" \")\n print(\"\\ncamembert\")\n for y in nlp_fill_camembert(sent):\n print(y[\"sequence\"], end = \" \")\n\n# full context\nfill_line(\"Mon père est un <mask> remarquable.\")\n# only left context\nfill_line(\"Le camembert est <mask> !\")",
"execution_count": 7,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "\nflaubert\nMon père est un homme remarquable. Mon père est un animal remarquable. Mon père est un roman remarquable. Mon père est un livre remarquable. Mon père est un pays remarquable. Mon père est un peuple remarquable. Mon père est un oiseau remarquable. Mon père est un enfant remarquable. Mon père est un poème remarquable. Mon père est un mot remarquable. \ncamembert\nMon père est un acteur remarquable. Mon père est un homme remarquable. Mon père est un artiste remarquable. Mon père est un écrivain remarquable. Mon père est un musicien remarquable. Mon père est un personnage remarquable. Mon père est un père remarquable. Mon père est un photographe remarquable. Mon père est un auteur remarquable. Mon père est un professeur remarquable. \nflaubert\nLe camembert est reparti! Le camembert est partie! Le camembert est parties! Le camembert est parti! Le camembert est terminé! Le camembert est lancée! Le camembert est fini! Le camembert est magique! Le camembert est magnifique! Le camembert est tuant! \ncamembert\nLe camembert est arrivé! Le camembert est prêt! Le camembert est mort! Le camembert est disponible! Le camembert est gratuit! Le camembert est cuit! Le camembert est terminé! Le camembert est sorti! Le camembert est introuvable! Le camembert est fini! "
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:35:49.861908Z",
"start_time": "2021-05-06T14:35:49.683421Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# language bias\nsents = [\"Mon père est un <mask>.\",\"Ma mère est une <mask>.\"]\nfor sent in sents:\n fill_line(sent)",
"execution_count": 8,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "\nflaubert\nMon père est un homme. Mon père est un roman. Mon père est un livre. Mon père est un enfant. Mon père est un jeu. Mon père est un mensonge. Mon père est un animal. Mon père est un fait. Mon père est un poème. Mon père est un mot. \ncamembert\nMon père est un homme. Mon père est un enfant. Mon père est un escroc. Mon père est un père. Mon père est un médecin. Mon père est un écrivain. Mon père est un garçon. Mon père est un musicien. Mon père est un avocat. Mon père est un artiste. \nflaubert\nMa mère est une femme. Ma mère est une chose. Ma mère est une erreur. Ma mère est une réalité. Ma mère est une fiction. Ma mère est une fille. Ma mère est une illusion. Ma mère est une ville. Ma mère est une tragédie. Ma mère est une science. \ncamembert\nMa mère est une femme. Ma mère est une prostituée. Ma mère est une fille. Ma mère est une mère. Ma mère est une sorcière. Ma mère est une salope. Ma mère est une pute. Ma mère est une maman. Ma mère est une infirmière. Ma mère est une déesse. "
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:51:46.029966Z",
"start_time": "2021-05-06T14:51:46.023984Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# contextualized embedding, see Sureau\n# camembert\nsent = \"Le célèbre avocat François Sureau et l'avocat Charles ont acheté un avocat. \"\nsent_special = \"<s> \" + sent + \" </s>\"\ncam_tokenized_text = camembert_tokenizer.tokenize(sent_special)\ncam_indexed_tokens = camembert_tokenizer.convert_tokens_to_ids(cam_tokenized_text)\ncam_segments_ids = [1] * len(cam_tokenized_text)\nprint(cam_tokenized_text)\n# Convert inputs to PyTorch tensors\ncam_tokens_tensor = torch.tensor([cam_indexed_tokens])\ncam_segments_tensors = torch.tensor([cam_segments_ids])",
"execution_count": 9,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "['<s>', '▁Le', '▁célèbre', '▁avocat', '▁François', '▁Sur', 'eau', '▁et', '▁l', \"'\", 'avocat', '▁Charles', '▁ont', '▁acheté', '▁un', '▁avocat', '.', '</s>']\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:53:04.182227Z",
"start_time": "2021-05-06T14:53:04.177770Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# flaubert, see Sureau and <l'> compared to <l> of camembert\nflau_tokenized_text = flaubert_tokenizer.tokenize(sent_special)\nflau_indexed_tokens = flaubert_tokenizer.convert_tokens_to_ids(flau_tokenized_text)\nflau_segments_ids = [1] * len(flau_tokenized_text)\nprint(flau_tokenized_text)\n# Convert inputs to PyTorch tensors\nflau_tokens_tensor = torch.tensor([flau_indexed_tokens])\nflau_segments_tensors = torch.tensor([flau_segments_ids])",
"execution_count": 10,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "['<s>', 'Le</w>', 'célèbre</w>', 'avocat</w>', 'François</w>', 'Su', 'reau</w>', 'et</w>', \"l'</w>\", 'avocat</w>', 'Charles</w>', 'ont</w>', 'acheté</w>', 'un</w>', 'avocat</w>', '.</w>', '</s>']\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:41:27.776646Z",
"start_time": "2021-05-06T14:41:20.933861Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# Load camembert model with hidden states\ncamembert_model = ppb.CamembertModel.from_pretrained(\"camembert/camembert-base\",output_hidden_states = True)\n# Load flaubert model idem\nflaubert_model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased', output_hidden_states = True)",
"execution_count": 12,
"outputs": []
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:41:32.318521Z",
"start_time": "2021-05-06T14:41:32.316416Z"
},
"trusted": false
},
"cell_type": "code",
"source": "camembert_model.eval()\nflaubert_model.eval()",
"execution_count": 14,
"outputs": []
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:53:42.399414Z",
"start_time": "2021-05-06T14:53:42.321543Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# torch.no_grad tells PyTorch not to construct the compute graph during this forward pass (since we won’t be running backprop here)–this just reduces memory consumption and speeds things up a little.\n# Run the text through BERT, and collect all of the hidden states produced from all 12 layers.\nwith torch.no_grad():\n cam_outputs = camembert_model(cam_tokens_tensor, cam_segments_tensors)\n cam_hidden_states = cam_outputs[2]\n flau_outputs = flaubert_model(flau_tokens_tensor, flau_segments_tensors)\n flau_hidden_states = flau_outputs[1]",
"execution_count": 15,
"outputs": []
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:53:42.943976Z",
"start_time": "2021-05-06T14:53:42.938808Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# The object has four dimensions, in the following order:\n\n# The layer number (13 layers)\n# The batch number (1 sentence)\n# The word / token number (22 tokens in our sentence)\n# The hidden unit / feature number (768 features)\nfor x in [cam_hidden_states,flau_hidden_states]:\n print(\"the layer number\",len(x))\n print(\"the batch number\",len(x[0]))\n print(\"the number of tokens\",len(x[0][0]))\n print(\"the number of hidden units\",len(x[0][0][0]))",
"execution_count": 16,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "the layer number 13\nthe batch number 1\nthe number of tokens 18\nthe number of hidden units 768\nthe layer number 13\nthe batch number 1\nthe number of tokens 17\nthe number of hidden units 768\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:54:03.209390Z",
"start_time": "2021-05-06T14:54:03.201725Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# convert from [# layers, # batches, # tokens, # features]\n# to [# tokens, # layers, # features]\n\n# camembert\ntoken_embeddings = torch.stack(cam_hidden_states, dim=0)\ntoken_embeddings = torch.squeeze(token_embeddings, dim=1)\ncam_token_embeddings = token_embeddings.permute(1,0,2)\ncam_token_embeddings.size()",
"execution_count": 17,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": "torch.Size([18, 13, 768])"
},
"metadata": {},
"execution_count": 17
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:54:03.479188Z",
"start_time": "2021-05-06T14:54:03.474040Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# flaubert, one token less\ntoken_embeddings = torch.stack(flau_hidden_states, dim=0)\ntoken_embeddings = torch.squeeze(token_embeddings, dim=1)\nflau_token_embeddings = token_embeddings.permute(1,0,2)\nflau_token_embeddings.size()",
"execution_count": 18,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": "torch.Size([17, 13, 768])"
},
"metadata": {},
"execution_count": 18
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:55:05.278779Z",
"start_time": "2021-05-06T14:55:05.274430Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# for each token of our input we have 13 separate vectors/layers each of length 768.\n# which layer/combination?\n\n# sum together the last 4 layers\n\n# camembert\ncam_token_vecs_sum = []\n\n# `token_embeddings` is a [22 x 12 x 768] tensor.\n\n# For each token in the sentence...\nfor token in cam_token_embeddings:\n # Sum the vectors from the last four layers.\n sum_vec = torch.sum(token[-4:], dim=0)\n \n cam_token_vecs_sum.append(sum_vec)\nprint ('Shape is: %d x %d' % (len(cam_token_vecs_sum), len(cam_token_vecs_sum[0])))",
"execution_count": 19,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "Shape is: 18 x 768\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:55:06.617265Z",
"start_time": "2021-05-06T14:55:06.610762Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# flaubert\nflau_token_vecs_sum = []\n\n# For each token in the sentence...\nfor token in flau_token_embeddings:\n # Sum the vectors from the last four layers.\n sum_vec = torch.sum(token[-4:], dim=0)\n \n flau_token_vecs_sum.append(sum_vec)\n \nprint ('Shape is: %d x %d' % (len(flau_token_vecs_sum), len(flau_token_vecs_sum[0])))",
"execution_count": 20,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "Shape is: 17 x 768\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T14:56:29.344790Z",
"start_time": "2021-05-06T14:56:29.339069Z"
},
"trusted": false
},
"cell_type": "code",
"source": "# the three avocats are at 3 10 15 for camembert\nfor i, token_str in enumerate(cam_tokenized_text):\n print(i, token_str, end = \" \")\nprint(\"\\n\")\n# 3 9 14 for flaubert\nfor i, token_str in enumerate(flau_tokenized_text):\n print(i, token_str, end = \" \")",
"execution_count": 21,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "0 <s> 1 ▁Le 2 ▁célèbre 3 ▁avocat 4 ▁François 5 ▁Sur 6 eau 7 ▁et 8 ▁l 9 ' 10 avocat 11 ▁Charles 12 ▁ont 13 ▁acheté 14 ▁un 15 ▁avocat 16 . 17 </s> \n\n0 <s> 1 Le</w> 2 célèbre</w> 3 avocat</w> 4 François</w> 5 Su 6 reau</w> 7 et</w> 8 l'</w> 9 avocat</w> 10 Charles</w> 11 ont</w> 12 acheté</w> 13 un</w> 14 avocat</w> 15 .</w> 16 </s> "
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T15:01:13.484723Z",
"start_time": "2021-05-06T15:01:13.479557Z"
},
"trusted": false
},
"cell_type": "code",
"source": "from scipy.spatial.distance import cosine\n\nprint(\"similar\")\nprint(1 - cosine(cam_token_vecs_sum[3], cam_token_vecs_sum[10]))\nprint(\"different\")\nprint(1 - cosine(cam_token_vecs_sum[3], cam_token_vecs_sum[15]))\nprint(1 - cosine(cam_token_vecs_sum[10], cam_token_vecs_sum[15]))",
"execution_count": 22,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "similar\n0.832181990146637\ndifferent\n0.841330885887146\n0.7700269818305969\n"
}
]
},
{
"metadata": {
"ExecuteTime": {
"end_time": "2021-05-06T15:00:52.874602Z",
"start_time": "2021-05-06T15:00:52.869433Z"
},
"trusted": false
},
"cell_type": "code",
"source": "from scipy.spatial.distance import cosine\n\nprint(\"similar\")\nprint(1 - cosine(flau_token_vecs_sum[3], flau_token_vecs_sum[9]))\nprint(\"different\")\nprint(1 - cosine(flau_token_vecs_sum[3], flau_token_vecs_sum[14]))\nprint(1 - cosine(flau_token_vecs_sum[9], flau_token_vecs_sum[14]))",
"execution_count": 23,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": "similar\n0.7798425555229187\ndifferent\n0.735776424407959\n0.7943295240402222\n"
}
]
}
],
"metadata": {
"_draft": {
"nbviewer_url": "https://gist.github.com/06b7814600e711ef6643c7759c4acf7f"
},
"gist": {
"id": "06b7814600e711ef6643c7759c4acf7f",
"data": {
"description": "01_play_with_camembert_and_flaubert.ipynb",
"public": true
}
},
"kernelspec": {
"name": "base",
"display_name": "Python 3",
"language": "python"
},
"language_info": {
"name": "python",
"version": "3.7.6",
"mimetype": "text/x-python",
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"pygments_lexer": "ipython3",
"nbconvert_exporter": "python",
"file_extension": ".py"
},
"nbTranslate": {
"hotkey": "alt-t",
"sourceLang": "en",
"targetLang": "fr",
"displayLangs": [
"*"
],
"langInMainMenu": true,
"useGoogleTranslate": true
},
"toc": {
"nav_menu": {},
"number_sections": true,
"sideBar": false,
"skip_h1_title": true,
"base_numbering": 1,
"title_cell": "Table des matières",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
},
"varInspector": {
"window_display": false,
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"library": "var_list.py",
"delete_cmd_prefix": "del ",
"delete_cmd_postfix": "",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"library": "var_list.r",
"delete_cmd_prefix": "rm(",
"delete_cmd_postfix": ") ",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
]
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment