import argparse
import os

# nsml is the NSML-platform SDK (project dependency, not on PyPI/stdlib).
# Guard the import so the script can still be parsed and run outside NSML.
try:
    import nsml
except ImportError:  # NOTE(review): running outside NSML — confirm acceptable
    nsml = None


def build_parser():
    """Return the CLI parser with the standard NSML flags.

    Flags:
        --mode: 'train' or 'test' (default 'train').
        --pause: NSML pause flag (default 0).
        --iteration: resume iteration (default 0).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
    parser.add_argument("--pause", type=int, default=0)
    parser.add_argument("--iteration", type=int, default=0)
    return parser


if __name__ == "__main__":
    # BUG FIX: the original guard body was not indented (SyntaxError).
    parser = build_parser()
# pip install icrawler
from icrawler.builtin import GoogleImageCrawler
from datetime import date

# Download Siberian-husky images from Google Images into ./husky.
google_crawler = GoogleImageCrawler(parser_threads=2, downloader_threads=3,
                                    storage={'root_dir': './husky'})  # directory where images are downloaded
# BUG FIX: the original crawl(...) call was missing its closing parenthesis
# (SyntaxError). Narrow the date window to collect more than 1000 images,
# since max_num should be equal or less than 1000 per crawl.
google_crawler.crawl(keyword='Siberian husky', max_num=1000,
                     date_min=date(2012, 3, 1), date_max=date(2012, 6, 1))
# First, you should install flickrapi
# pip install flickrapi
import flickrapi
import urllib
from PIL import Image
# Flickr api access key
# SECURITY NOTE(review): the API key and secret below are hard-coded in source
# control — move them to environment variables or a config file before sharing.
# cache=True enables flickrapi's local response caching.
flickr=flickrapi.FlickrAPI('c6a2c45591d4973ff525042472446ca2', '202ffe6f387ce29b', cache=True)
import os
import argparse
from time import time
from PIL import Image
def save_if_contain_attr(attrs, idx, file_name, image_path, output_path):
    """Route an image into trainA/trainB by the attribute flag at `idx`.

    `attrs` holds the string flags '1' / '-1' as read from CelebA's
    list_attr_celeba.txt (presumably one row per image — TODO confirm).

    NOTE(review): this definition is TRUNCATED in this chunk — the `elif`
    branch has no body (the next line in the file is an unrelated import),
    so the function is a SyntaxError as-is. The missing remainder (trainB
    path assignment and the actual save) must be recovered.
    """
    if attrs[idx] == '1':
        path = os.path.join(output_path, 'trainA')
    elif attrs[idx] == '-1':
import torch
import numpy as np

# Hyper-parameters for the toy embedding-lookup example.
vocab_size = 10   # number of distinct token ids
batch_size = 3    # sequences per batch
seq_length = 4    # tokens per sequence

# Draw batch_size * seq_length random token ids in [0, vocab_size) and expose
# them as a flat 1-D LongTensor (same draws, in the same order, as sampling a
# (batch_size, seq_length) array and flattening it).
_flat_ids = np.random.randint(low=0, high=vocab_size, size=batch_size * seq_length)
word_indices = torch.from_numpy(_flat_ids)
import os
from PIL import Image
# Attribute annotations: read all lines of CelebA's list_attr_celeba.txt.
# FIX: use a context manager so the file handle is always closed
# (the original opened the file and never closed it).
with open('./list_attr_celeba.txt', 'r') as f:
    text = f.readlines()
# Number of images
"""Toy examples for torch.autograd.grad."""
import torch
from torch.autograd import Variable
# Input
x = Variable(torch.Tensor([1, 2, 3]), requires_grad=True)
# Weights
w1 = Variable(torch.Tensor([2, 3, 4]), requires_grad=True)