Riccardo Di Sipio rdisipio

import tensorflow as tf

def top_k_filtering(logits, top_k=5):
    # a[..., -1, None] keeps the k-th largest logit per row as the threshold
    # (for a 3-D tensor, a[..., 1] is equivalent to a[:, :, 1])
    indices_to_remove = logits < tf.math.top_k(logits, top_k)[0][..., -1, None]
    # indices_to_remove is a boolean tensor, e.g. [True, False, False, ..., True]
    # 1-D indices of the positions to drop and to keep
    idx_remove = tf.where(indices_to_remove)[:, -1]
    idx_keep = tf.where(tf.logical_not(indices_to_remove))[:, -1]
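As shown, the helper stops after computing index lists and returns nothing. A common way to finish top-k filtering (a hedged sketch, not the original gist body) is to push the filtered-out logits to a large negative value so a subsequent softmax assigns them essentially zero probability:

def top_k_filtering_applied(logits, top_k=5):
    # hypothetical completion: same mask as above, applied directly to the logits
    mask = logits < tf.math.top_k(logits, top_k)[0][..., -1, None]
    return tf.where(mask, tf.ones_like(logits) * -1e9, logits)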
@rdisipio
rdisipio / get_arxiv_abstracts.py
Last active January 22, 2020 15:32
Script to download abstracts from the arXiv server
#!/usr/bin/env python
import os, sys
import pickle
import numpy as np
import pandas as pd
import urllib.request
import re
import feedparser
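Only the imports of the script survive in this view. A minimal sketch of how they could be combined to fetch abstracts, assuming the public arXiv Atom API (the get_abstracts helper and its defaults are illustrative, not the original script):

def get_abstracts(category="hep-ph", max_results=100):
    # query the arXiv Atom API for the given category
    url = ("http://export.arxiv.org/api/query?"
           f"search_query=cat:{category}&start=0&max_results={max_results}")
    feed = feedparser.parse(urllib.request.urlopen(url).read())
    rows = []
    for entry in feed.entries:
        # collapse whitespace/newlines inside the abstract text
        abstract = re.sub(r"\s+", " ", entry.summary).strip()
        rows.append({"title": entry.title, "abstract": abstract})
    return pd.DataFrame(rows)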
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

def make_model_classical(n_categories, latent_dim=16, embedding_dim=512):
    text_in = keras.Input(shape=(embedding_dim,), dtype=tf.float64, name='text_in')  # (None, 512)
    x = layers.Dense(latent_dim, activation='tanh', dtype=tf.float64)(text_in)
    x_out = layers.Dense(n_categories, activation='softmax')(x)
    return keras.Model(inputs=text_in, outputs=x_out, name="ClassicalPreprintClassifier")

def make_model_quantum(n_categories, n_qubits=4, n_layers=2, embedding_dim=512):
    # VariationalQuantumCircuit is a custom Keras layer defined elsewhere in the gist
    text_in = keras.Input(shape=(embedding_dim,), dtype=tf.float64, name='text_in')
    x = layers.Dense(n_qubits, activation='tanh', dtype=tf.float64)(text_in)
    x = VariationalQuantumCircuit(
        n_categories=n_categories,
        n_qubits=n_qubits,
        n_layers=n_layers)(x)
    # assumed completion: return the model, mirroring the classical builder
    return keras.Model(inputs=text_in, outputs=x, name="QuantumPreprintClassifier")
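A brief usage sketch (the optimizer, loss, and the X_train/y_train arrays are illustrative assumptions, not part of the gist): both builders return ordinary Keras models, so training follows the usual compile/fit pattern.

model = make_model_classical(n_categories=10)
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
# X_train: (n_samples, 512) sentence embeddings; y_train: integer category labels
model.fit(X_train, y_train, validation_split=0.1, epochs=20, batch_size=32)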
@rdisipio
rdisipio / qlstm_qlayer.py
Created December 17, 2020 22:00
qlstm_qlayer
import torch
import pennylane as qml

# fragment from inside a QLSTM cell: inputs_dim, hidden_dim, seq_length and x
# are attributes/arguments of the enclosing class
n_qubits = 4
n_qlayers = 1  # number of variational layers (assumed; not set in the fragment)
dev = qml.device("default.qubit", wires=n_qubits)

def _circuit(inputs, weights):
    # encode the classical features as rotation angles, then entangle the qubits
    qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
    qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

qlayer = qml.QNode(_circuit, dev, interface="torch")
weight_shapes = {"weights": (n_qlayers, n_qubits)}

concat_size = inputs_dim + hidden_dim
clayer_in = torch.nn.Linear(concat_size, n_qubits)
# one variational quantum circuit per LSTM gate (forget, input, update, output)
VQC = [qml.qnn.TorchLayer(qlayer, weight_shapes) for _ in range(4)]
clayer_out = torch.nn.Linear(n_qubits, hidden_dim)

hidden_seq = []
for t in range(seq_length):
    # get features from the t-th element in seq, for all entries in the batch
    x_t = x[:, t, :]
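The fragment stops at the top of the time loop. A hedged sketch of how the loop body typically continues in a quantum LSTM cell, using the standard LSTM gate equations with the four VQCs standing in for the classical gate matrices (h_t and c_t are assumed to be initialised to zeros before the loop; this is not necessarily the author's exact code):

    v_t = torch.cat((h_t, x_t), dim=1)            # (batch_size, concat_size)
    y_t = clayer_in(v_t)                          # compress to n_qubits features
    f_t = torch.sigmoid(clayer_out(VQC[0](y_t)))  # forget gate
    i_t = torch.sigmoid(clayer_out(VQC[1](y_t)))  # input gate
    g_t = torch.tanh(clayer_out(VQC[2](y_t)))     # candidate cell state
    o_t = torch.sigmoid(clayer_out(VQC[3](y_t)))  # output gate
    c_t = f_t * c_t + i_t * g_t                   # cell state update
    h_t = o_t * torch.tanh(c_t)                   # hidden state update
    hidden_seq.append(h_t.unsqueeze(0))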
@rdisipio
rdisipio / qlstm_pos_tagger.py
Created December 17, 2020 22:41
QLSTM POS Tagger
import torch.nn as nn

class LSTMTagger(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, n_qubits=0):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        if n_qubits > 0:
            # assumed completion: use the quantum LSTM cell from qlstm_qlayer.py above
            self.lstm = QLSTM(embedding_dim, hidden_dim, n_qubits=n_qubits)
        else:
            self.lstm = nn.LSTM(embedding_dim, hidden_dim)
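The class body is truncated here. A hedged sketch of how such a tagger's forward pass usually looks, with F being torch.nn.functional; it assumes a projection self.hidden2tag = nn.Linear(hidden_dim, tagset_size) created in __init__, which is not visible in the fragment:

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds.view(len(sentence), 1, -1))
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        return F.log_softmax(tag_space, dim=1)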
import tensorflow as tf

class MultiHeadAttentionClassical(MultiHeadAttentionBase):
    def __init__(self, embed_dim, num_heads):
        super(MultiHeadAttentionClassical, self).__init__(embed_dim, num_heads)
        self.wq = tf.keras.layers.Dense(embed_dim)
        self.wk = tf.keras.layers.Dense(embed_dim)
        self.wv = tf.keras.layers.Dense(embed_dim)
        self.dense = tf.keras.layers.Dense(embed_dim)

    def apply_dense_layers(self, v, k, q):
        q = self.wq(q)  # (batch_size, seq_len, embed_dim)
        k = self.wk(k)  # (batch_size, seq_len, embed_dim)  (assumed continuation)
        v = self.wv(v)  # (batch_size, seq_len, embed_dim)  (assumed continuation)
        return v, k, q
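apply_dense_layers only produces the projected q, k, v; the attention itself is presumably computed in MultiHeadAttentionBase, which is not shown. For reference, a hedged sketch of the standard scaled dot-product attention such a base class would apply:

def scaled_dot_product_attention(q, k, v):
    # softmax(Q K^T / sqrt(d_k)) V
    matmul_qk = tf.matmul(q, k, transpose_b=True)   # (..., seq_len_q, seq_len_k)
    d_k = tf.cast(tf.shape(k)[-1], matmul_qk.dtype)
    attention_weights = tf.nn.softmax(matmul_qk / tf.math.sqrt(d_k), axis=-1)
    return tf.matmul(attention_weights, v), attention_weights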
import pennylane as qml

class MultiHeadAttentionQuantum(MultiHeadAttentionBase):
    def __init__(self,
                 embed_dim, num_heads,
                 n_qubits, n_qlayers=1, q_device='default.qubit'):
        super(MultiHeadAttentionQuantum, self).__init__(embed_dim, num_heads)
        # TODO: add an intermediate layer to "dress" the quantum circuit
        assert n_qubits == embed_dim, f"Number of qubits ({n_qubits}) does not match embedding dim ({embed_dim})"
        self.dev = qml.device(q_device, wires=n_qubits)
        weight_shapes = {"weights": (n_qlayers, n_qubits)}
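The gist body cuts off after weight_shapes. A possible continuation of __init__ is sketched below, mirroring the four Dense projections of the classical class with PennyLane KerasLayer wrappers around a small variational circuit; the circuit choice and output_dim are assumptions, not the original code.

        # assumed continuation: same circuit shape as in qlstm_qlayer.py above
        def _circuit(inputs, weights):
            qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))
            qml.templates.BasicEntanglerLayers(weights, wires=range(n_qubits))
            return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]

        qnode = qml.QNode(_circuit, self.dev, interface="tf")
        self.wq = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)
        self.wk = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)
        self.wv = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)
        self.dense = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=n_qubits)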
#!/usr/bin/env python

# adjacency-list representation of a small directed graph
graph = {
    'A': ['B', 'C'],
    'B': ['D', 'E'],
    'C': ['F'],
    'D': [],
    'E': [],
    'F': []
}
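The adjacency list above is the usual setup for a graph-traversal demo; the traversal itself is not visible in this excerpt. A hedged sketch of a breadth-first search over it:

from collections import deque

def bfs(graph, start):
    # visit nodes level by level, keeping track of what has been seen
    visited = [start]
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for neighbour in graph[node]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)
    return visited

print(bfs(graph, 'A'))  # ['A', 'B', 'C', 'D', 'E', 'F']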