import tensorflow as tf
import numpy as np

def attention(inputs, attention_size, time_major=False, return_alphas=False):
    # If the input comes from a bidirectional RNN as a (fw_outputs, bw_outputs)
    # tuple, concatenate the two directions along the feature axis.
    if isinstance(inputs, tuple):
        inputs = tf.concat(inputs, 2)
    # Convert time-major (T, B, D) input to batch-major (B, T, D).
    if time_major:
        inputs = tf.transpose(inputs, [1, 0, 2])
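    # --- The gist preview cuts off here; the rest of this function is a sketch,
    # assuming an additive (Bahdanau-style) attention in TF 1.x. The parameter
    # names w_omega, b_omega, u_omega are illustrative, not from the original. ---
    hidden_size = inputs.shape[2].value  # D: feature size of the RNN outputs

    # Trainable attention parameters
    w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    # Score every time step: (B, T, D) -> (B, T, A) -> (B, T)
    v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)
    vu = tf.tensordot(v, u_omega, axes=1)
    alphas = tf.nn.softmax(vu, name='alphas')

    # Attention-weighted sum of the inputs over time: (B, D)
    output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)

    return (output, alphas) if return_alphas else output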
def contrastive_loss(self, y, d, batch_size):
    # Contrastive loss for a Siamese network: y = 1 for similar pairs, y = 0 for
    # dissimilar pairs, d = distance between the two embeddings, margin = 1.
    tmp = y * tf.square(d)
    # tmp = tf.mul(y, tf.square(d))  # tf.mul is the deprecated pre-1.0 name
    tmp2 = (1 - y) * tf.square(tf.maximum((1 - d), 0))
    return tf.reduce_sum(tmp + tmp2) / batch_size / 2
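A hedged usage sketch for this loss: d is not defined in the preview, so assume it is the Euclidean distance between the two branch outputs of a Siamese network (out1, out2, and y are illustrative names, not from the gist).

# out1, out2: (batch_size, embedding_dim) embeddings from the two branches
# y: (batch_size,) pair labels, 1 = similar, 0 = dissimilar
d = tf.sqrt(tf.reduce_sum(tf.square(out1 - out2), axis=1) + 1e-6)
self.loss = self.contrastive_loss(y, d, batch_size)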
# self.scores = logits
with tf.name_scope("loss"):
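    # Sketch only: the snippet preview stops inside this block. Assuming the usual
    # pattern, the scores above feed a mean softmax cross-entropy loss; input_y is
    # an assumed labels placeholder, not a name from the original gist.
    losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores,
                                                     labels=self.input_y)
    self.loss = tf.reduce_mean(losses)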
5                                          # scalar, shape []
[1., 2., 3., 4.]                           # vector, shape [4]
[[1., 2., 3., 4.], [5., 6., 7., 8.]]       # matrix, shape [2, 4]
[[[1., 2., 3., 4.]], [[5., 6., 7., 8.]]]   # rank-3 tensor, shape [2, 1, 4]
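These shapes are easy to check directly; a small sketch using tf.constant:

import tensorflow as tf

print(tf.constant(5.).shape)                                        # ()
print(tf.constant([1., 2., 3., 4.]).shape)                          # (4,)
print(tf.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]]).shape)      # (2, 4)
print(tf.constant([[[1., 2., 3., 4.]], [[5., 6., 7., 8.]]]).shape)  # (2, 1, 4)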
import torch
print('one dim')
print(torch.rand(1))
# output:
# one dim
# tensor([ 0.3725])
import torch
print('two dim')
print(torch.rand(2,5))
# output:
# two dim
# tensor([[ 0.2495, 0.2948, 0.5486, 0.5077, 0.1657],
# [ 0.4142, 0.8142, 0.8635, 0.8827, 0.7176]])
# torch.rand draws from a uniform distribution on [0, 1)
# (use torch.randn for a normal distribution with mean=0, var=1)
print(torch.rand(1,20,dtype=torch.float))
print(torch.rand(1,20,dtype=torch.double))
# output:
# tensor([[ 0.4976, 0.5590, 0.4242, 0.3130, 0.4160, 0.2188, 0.5643,
#          0.8620, 0.6020, 0.0883, 0.2870, 0.5136, 0.8119, 0.7638,
#          0.3188, 0.1961, 0.3527, 0.9613, 0.2914, 0.3882]])
# tensor([[ 0.9707, 0.9771, 0.0904, 0.0374, 0.7983, 0.4952, 0.2216,
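For samples that actually follow a normal distribution with mean 0 and variance 1, use torch.randn; a short sketch:

import torch

x = torch.randn(1, 20)       # standard normal samples, shape (1, 20)
print(x.mean(), x.std())     # roughly 0 and 1 for large enough tensors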
# A nice property of PyTorch: tensors behave like ordinary Python objects,
# so you can iterate over them directly.
var = torch.rand(5, 2, dtype=torch.double)
# loop over the rows, then over the elements of each row
for i in var:
    for k in i:
        print(k)
# output (first element shown):
# tensor(0.9142, dtype=torch.float64)
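Each element comes back as a 0-dimensional tensor; .item() converts it to a plain Python number, as in this small sketch:

for i in var:
    for k in i:
        print(k.item())   # e.g. 0.9142... as a Python float instead of a tensor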
# torch.Tensor(4, 3) allocates a 4x3 tensor without initializing it,
# so the printed values are whatever happened to be in memory.
tensor = torch.Tensor(4, 3)
print(tensor)
# output:
# tensor([[ 0.0000e+00, 1.0842e-19, 6.0390e+35],
#         [ 2.8586e-42, 4.2039e-45, 0.0000e+00],
#         [ 0.0000e+00, 0.0000e+00, 0.0000e+00],
#         [ 0.0000e+00, 0.0000e+00, 2.7551e-40]])
print(tensor.size())
# output:
# torch.Size([4, 3])
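If you want defined starting values instead of uninitialized memory, the explicit factory functions are the usual choice; a short sketch:

import torch

print(torch.zeros(4, 3))   # all zeros
print(torch.ones(4, 3))    # all ones
print(torch.empty(4, 3))   # uninitialized, like torch.Tensor(4, 3)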
torch.cuda.is_available()
# output
# False

# use of cuda: .cuda() raises an error here because no GPU is available
print(torch.Tensor(1,2).cuda())
# output:
# RuntimeError                              Traceback (most recent call last)
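A common way to avoid this error is to pick the device at runtime and fall back to the CPU; a minimal sketch:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.rand(1, 2).to(device)
print(x.device)   # cpu here, cuda:0 on a machine with a GPU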