Skip to content

Instantly share code, notes, and snippets.

View FrancescoSaverioZuppichini's full-sized avatar
🧸
Focusing

Francesco Saverio Zuppichini FrancescoSaverioZuppichini

🧸
Focusing
  • roboflow
  • Lugano, Switzerland
View GitHub Profile
@FrancescoSaverioZuppichini
FrancescoSaverioZuppichini / imagenet1000_clsid_to_human.txt
Created December 16, 2018 09:34 — forked from yrevar/imagenet1000_clsidx_to_labels.txt
text: imagenet 1000 class id to human-readable labels (Fox, E., & Guestrin, C. (n.d.). Coursera Machine Learning Specialization.)
{0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
# NOTE(review): fragment of a custom Keras layer scraped from a gist. The
# original indentation was stripped and the block is cut off mid-statement,
# so it is not runnable as-is; kept byte-identical, comments only.
class Kernel(Layer):
# Store the number of kernel units `r`, then defer to the Keras Layer init.
def __init__(self, r, **kwargs):
self.r = r
super(Kernel, self).__init__(**kwargs)
# Create the trainable weight `mu` of shape (1, r) once the input shape is known.
def build(self, input_shape):
self.mu = self.add_weight(name='mu',
shape=(1, self.r),
initializer='uniform',
# NOTE(review): truncated here — the rest of the add_weight(...) call and the
# remainder of build() are missing from this capture.
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention with a single fused projection for Q, K and V.

    Args:
        emb_size: dimensionality of the token embeddings.
        num_heads: number of parallel attention heads.
        dropout: dropout probability applied to the attention weights.
    """

    def __init__(self, emb_size: int = 768, num_heads: int = 8, dropout: float = 0):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        # One linear layer emits queries, keys and values together
        # (3 * emb_size outputs) instead of three separate projections.
        self.qkv = nn.Linear(emb_size, 3 * emb_size)
        # Dropout on the attention map.
        self.att_drop = nn.Dropout(dropout)
        # Final projection back to the embedding dimension.
        self.projection = nn.Linear(emb_size, emb_size)
class PatchEmbedding(nn.Module):
    """Split an image into non-overlapping patches and embed each one.

    Args:
        in_channels: number of input image channels (3 for RGB).
        patch_size: side length in pixels of each square patch.
        emb_size: dimensionality of the per-patch embedding.
    """

    def __init__(self, in_channels: int = 3, patch_size: int = 16, emb_size: int = 768):
        # Call nn.Module.__init__ before assigning any attribute so module
        # bookkeeping is fully set up (the original assigned patch_size first,
        # which is a known nn.Module footgun).
        super().__init__()
        self.patch_size = patch_size
        self.projection = nn.Sequential(
            # A conv with stride == kernel_size == patch_size is equivalent to a
            # per-patch linear projection, but faster in practice.
            nn.Conv2d(in_channels, emb_size, kernel_size=patch_size, stride=patch_size),
            # (b, e, h, w) -> (b, h*w, e); Rearrange comes from einops (third-party).
            Rearrange('b e (h) (w) -> b (h w) e'),
        )
// Pick one of the two pronoun orderings with a 50/50 coin flip.
function random_gender() {
    const coin = Math.random()
    if (coin > 0.5) {
        return 'He/She'
    }
    return 'She/He'
}
// NOTE(review): truncated fragment — the function body is cut off after reading
// the node text; the rest of fix_bias() (and its closing brace) is missing from
// this capture. Kept byte-identical, comments only.
function fix_bias() {
// Grab the first result span inside the "#tw-target" container — presumably a
// translation-widget output node; verify the selector against the live page.
var biased_node = document.querySelectorAll("#tw-target > div > div > pre > span")[0]
var text = biased_node.innerText
# Demo script: print a greeting followed by five "hey!" lines, twice.
# The original capture had its loop-body indentation stripped (syntactically
# invalid Python); restored here with no change to the intended output.
print("Hello world!")
for _ in range(5):
    print("hey!")

print("Hello world!")
for _ in range(5):
    print("hey!")