
contextual_loss.py
import torch


def contextual_loss(x, y, h=0.5):
    """Computes contextual loss between x and y.

    Args:
        x: features of shape (N, C, H, W).
        y: features of shape (N, C, H, W).
        h: bandwidth parameter for turning distances into similarities.

    Returns:
        cx_loss: contextual loss between x and y (Eq (1) in the paper).
    """
hello_nsml.py
import argparse
import os
import nsml
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    parser.add_argument('--pause', type=int, default=0)
    parser.add_argument('--iteration', type=int, default=0)
google_crawler.py
# pip install icrawler
from icrawler.builtin import GoogleImageCrawler
from datetime import date
google_crawler = GoogleImageCrawler(parser_threads=2, downloader_threads=3,
                                    storage={'root_dir': './husky'})  # directory where images are downloaded
google_crawler.crawl(keyword='Siberian husky', max_num=1000,  # max_num must be 1,000 or less
                     date_min=date(2012, 3, 1), date_max=date(2012, 6, 1))  # change the date range to collect more than 1,000 images
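# Google caps each query at roughly 1,000 results, so a larger set can be
# built by crawling several non-overlapping date windows into the same
# root_dir. A sketch, assuming your icrawler version supports the
# date_min/date_max arguments used above and the file_idx_offset option
# ('auto' continues the file numbering across calls):
for year in range(2013, 2017):
    google_crawler.crawl(keyword='Siberian husky', max_num=1000,
                         date_min=date(year, 1, 1), date_max=date(year, 12, 31),
                         file_idx_offset='auto')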
download_flickr_image.py
# First, install flickrapi:
# pip install flickrapi
import flickrapi
import urllib.request
from PIL import Image

# Flickr API access key and secret
flickr = flickrapi.FlickrAPI('c6a2c45591d4973ff525042472446ca2', '202ffe6f387ce29b', cache=True)
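# The gist preview ends here. A common continuation, shown as a sketch:
# the keyword, the 'url_c' size field, and the 100-image cap are illustrative
# assumptions, not part of the original gist. flickr.walk() pages through
# photos.search results and yields one element per photo.
keyword = 'Siberian husky'
photos = flickr.walk(text=keyword, tag_mode='all', tags=keyword,
                     extras='url_c', per_page=100, sort='relevance')
for i, photo in enumerate(photos):
    if i >= 100:
        break
    url = photo.get('url_c')  # medium-800 URL; may be None for some photos
    if url is not None:
        urllib.request.urlretrieve(url, './flickr_%04d.jpg' % i)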
attn_prob_summation.py
import torch
import numpy as np
# Hyper-parameters
vocab_size = 10
batch_size = 3
seq_length = 4
# Generate random word indices in the range [0, vocab_size)
word_indices = torch.from_numpy(np.random.randint(low=0, high=vocab_size, size=(batch_size, seq_length))).view(-1)
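# The preview stops at the index generation. A sketch of the summation this
# gist is named after: accumulating per-position attention probabilities into
# a single score per vocabulary word (the attention scores here are random
# placeholders).
attn_scores = torch.randn(batch_size * seq_length)
attn_probs = torch.softmax(attn_scores, dim=0)         # probabilities over all positions
attn_per_word = torch.zeros(vocab_size)
attn_per_word.index_add_(0, word_indices, attn_probs)  # positions sharing a word index are summed
print(attn_per_word.sum())                             # ~1.0: total probability is preserved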
make_dataset.py
import os
from PIL import Image
# Attribute file for CelebA
f = open('./list_attr_celeba.txt', 'r')
text = f.readlines()
f.close()

# Number of images (the first line of the attribute file)
num_images = int(text[0])
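# A sketch of how the rest of list_attr_celeba.txt is typically parsed
# (line 0: image count, line 1: the 40 attribute names, remaining lines:
# a filename followed by 1/-1 flags). 'Blond_Hair' and the directory paths
# below are illustrative assumptions.
attr_names = text[1].split()
blond_idx = attr_names.index('Blond_Hair')
os.makedirs('./blond', exist_ok=True)
for line in text[2:]:
    fields = line.split()
    filename, values = fields[0], fields[1:]
    if values[blond_idx] == '1':
        img = Image.open(os.path.join('./images', filename))  # hypothetical image dir
        img.convert('RGB').save(os.path.join('./blond', filename))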
grad.py
"""Toy examples for torch.autograd.grad."""
import torch
from torch.autograd import Variable
# Input
x = Variable(torch.Tensor([1, 2, 3]), requires_grad=True)
# Weights
w1 = Variable(torch.Tensor([2, 3, 4]), requires_grad=True)
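# The preview stops here. A minimal example of torch.autograd.grad itself;
# the dot-product loss below is made up for illustration. (In PyTorch >= 0.4,
# plain tensors with requires_grad=True can replace Variable.)
from torch.autograd import grad

loss = (w1 * x).sum()          # scalar: 2*1 + 3*2 + 4*3 = 20
dx, dw1 = grad(loss, [x, w1])  # returns (dloss/dx, dloss/dw1)
print(dx)   # w1's values: [2., 3., 4.]
print(dw1)  # x's values:  [1., 2., 3.]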