import argparse
import os
import shutil
import time
import socket
import multiprocessing
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
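The gist is truncated after its imports, but they are the standard opening of a PyTorch training script. As a purely hypothetical sketch of how such a script usually continues (every flag name here is assumed, not taken from the gist):

# Hypothetical continuation, reusing the imports above; all flags are assumed.
parser = argparse.ArgumentParser(description="PyTorch training script")
parser.add_argument("--epochs", type=int, default=90)
parser.add_argument("--batch-size", type=int, default=256)
parser.add_argument("--lr", type=float, default=0.1)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")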
Step 36/51 : RUN git clone --recursive https://github.com/pytorch/vision && cd vision && git submodule sync && git submodule update --init --recursive && export TORCH_CUDA_ARCH_LIST="3.5 5.2 6.0 6.1 7.0+PTX" && export TORCH_NVCC_FLAGS="-Xfatbin -compress-all" && export CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" && python setup.py clean && python setup.py install
 ---> Running in 2ee1554613dc
Cloning into 'vision'...
No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'
/opt/conda/lib/python3.6/site-packages/torch/cuda/__init__.py:52: UserWarning: CUDA initialization: Found no NVIDIA driver on your system. Please check that you have an NVIDIA GPU and installed a driver from http://www.nvidia.com/Download/index.aspx (Triggered internally at ../c10/cuda/CUDAFunctions.cpp:100.)
  return torch._C._cuda_getDeviceCount() > 0
Building wheel torchvision-0.8.0a0+588f7ae
PNG found: True
libpng version: 1.2.54
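The build runs in a container with no GPU attached, so the "No CUDA runtime is found" warning is expected: TORCH_CUDA_ARCH_LIST bakes the target compute capabilities into the wheel at compile time instead of probing a device. A quick runtime sanity check of the resulting install (a sketch; it only uses public torch/torchvision attributes):

import torch
import torchvision
print(torchvision.__version__)    # e.g. 0.8.0a0+588f7ae, matching the build log
print(torch.cuda.is_available())  # False in the build container, True on a GPU host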
Step 41/56 : RUN cd vision && . /opt/conda/bin/activate && export TORCH_CUDA_ARCH_LIST="3.7;6.1;7.0;7.5" && export FORCE_CUDA=$torchvision_cuda && python setup.py install
 ---> Running in ff2dd49d614a
No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda'
Building wheel torchvision-0.7.0a0+78ed10c
running install
running bdist_egg
running egg_info
creating torchvision.egg-info
writing torchvision.egg-info/PKG-INFO
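Here FORCE_CUDA forces the CUDA extensions to compile even though no runtime is visible during the build. One way to confirm the compiled ops actually work once the image runs on a GPU host is to call a CUDA-backed op such as NMS (a sketch, to be run where torch.cuda.is_available() is True):

import torch
from torchvision.ops import nms  # compiled extension op
boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]], device="cuda")
scores = torch.tensor([0.9, 0.8], device="cuda")
print(nms(boxes, scores, iou_threshold=0.5))  # indices of the kept boxes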
# Kill all processes listed in nvidia-smi (closing parenthesis was missing)
kill -9 $(nvidia-smi | sed -n 's/|\s*[0-9]*\s*\([0-9]*\)\s*.*/\1/p' | sort | uniq | sed '/^$/d')
# Resize params for ffmpeg: 171 width, 128 height, then pad (without stretching)
-vf "scale=iw*min(171/iw\,128/ih):ih*min(171/iw\,128/ih),pad=171:128:(171-iw)/2:(128-ih)/2"
# Stop/remove all docker containers
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
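The same stop-and-remove sweep can be scripted from Python with the Docker SDK, which is handy inside larger cleanup jobs (a sketch, assuming the `docker` package is installed):

import docker  # pip install docker (assumed available)

client = docker.from_env()
for c in client.containers.list(all=True):  # all=True includes stopped containers
    c.stop()
    c.remove()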
import requests
import pandas as pd

LIM = 3000
OFFSET = 1000
all_papers = []
for offs in range(0, LIM, OFFSET):
    papers = requests.get(
        "https://openreview.net/notes?invitation=ICLR.cc%2F2020%2FConference%2F-%2FBlind_Submission&details=replyCount%2C"+
        "original&includeCount=true&offset={0}&limit={1}".format(offs, OFFSET))  # page size must be OFFSET, not LIM, or pages overlap
    rows = [
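The gist is cut off at `rows = [`. The loop itself pages through OpenReview's notes endpoint for ICLR 2020 blind submissions, and the response is JSON; a hedged sketch of how one page is typically unpacked into a DataFrame (the "notes" key and the field names are assumptions about the API response, not taken from the gist):

# Hypothetical unpacking of one response page; field names assumed.
notes = papers.json().get("notes", [])
rows = [{"id": n["id"], "title": n["content"].get("title", "")} for n in notes]
all_papers.extend(rows)
df = pd.DataFrame(all_papers)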
import torch
import torchvision
import torch.utils.data
import random
import numpy as np
from torch.utils.data import TensorDataset

# https://github.com/galatolofederico/pytorch-balanced-batch/blob/master/sampler.py
class BalancedBatchSampler(torch.utils.data.sampler.Sampler):
    def __init__(self, dataset, labels=None):
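This sampler (from the linked repo) oversamples minority classes so each label appears equally often per epoch; the gist stops at the constructor. A hedged usage sketch, assuming only the (dataset, labels) signature shown above:

# Hypothetical usage of the sampler defined above.
X = torch.randn(100, 8)
y = torch.randint(0, 2, (100,))
dataset = TensorDataset(X, y)
loader = torch.utils.data.DataLoader(
    dataset,
    sampler=BalancedBatchSampler(dataset, labels=y),
    batch_size=16,
)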
import argparse
import logging
import os
from os import path
import numpy as np
import pandas as pd
import multiprocessing
import random
from toolz import pipe
from timer import Timer
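Of these imports, toolz's pipe is the one that shapes the code that follows: pipe(x, f, g) evaluates g(f(x)), threading a value through a sequence of steps. A minimal illustration (the cleaning steps are stand-ins):

from toolz import pipe
# pipe(x, f, g) == g(f(x)); both steps here are made up for illustration.
result = pipe("  Some TEXT  ", str.strip, str.lower)
print(result)  # "some text"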
# One bias column vector of length r per layer, drawn from N(0, 1)
biases <- lapply(seq_along(listb), function(idx){
  r <- listb[[idx]]
  matrix(rnorm(n=r), nrow=r, ncol=1)
})
# One r x c weight matrix per layer (c inputs, r outputs), drawn from N(0, 1)
weights <- lapply(seq_along(listb), function(idx){
  c <- listw[[idx]]
  r <- listb[[idx]]
  matrix(rnorm(n=r*c), nrow=r, ncol=c)
})
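The R code draws one r x 1 bias vector and one r x c weight matrix per layer from a standard normal. A NumPy equivalent of the same initialization, assuming a layer-size list such as c(784, 30, 10):

import numpy as np

sizes = [784, 30, 10]  # assumed layer widths (inputs, hidden, outputs)
biases = [np.random.randn(r, 1) for r in sizes[1:]]
weights = [np.random.randn(r, c) for c, r in zip(sizes[:-1], sizes[1:])]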
square_stack_lst_of_matricies <- function(lst)
{
  sqr_size <- sqrt(length(lst))
  # Bind all matrices side by side into one wide matrix
  cols <- do.call(cbind, lst)
  # Split to another dim (use lst, not the global filter_map, so the function is self-contained)
  dim(cols) <- c(dim(lst[[1]])[[1]],
                 dim(lst[[1]])[[1]]*sqr_size,
                 sqr_size)
  # Stack horizontally
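The function tiles n equally sized square matrices into a sqrt(n) x sqrt(n) grid (e.g. for visualising conv filters), but the gist cuts off before the horizontal stack. The complete idea in NumPy, assuming n is a perfect square:

import numpy as np

def square_stack(mats):
    # Tile n equally sized square matrices into a sqrt(n) x sqrt(n) grid.
    n = int(round(np.sqrt(len(mats))))
    assert n * n == len(mats), "needs a perfect-square number of matrices"
    rows = [np.hstack(mats[i * n:(i + 1) * n]) for i in range(n)]
    return np.vstack(rows)

grid = square_stack([np.random.randn(5, 5) for _ in range(16)])
print(grid.shape)  # (20, 20)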
SGD <- function(training_data, epochs, mini_batch_size, lr, C, sizes, num_layers, biases, weights,
                verbose=FALSE, validation_data)
{
  # Every epoch
  for (j in 1:epochs){
    # Stochastic mini-batch (shuffle data)
    training_data <- sample(training_data)
    # Partition set into mini-batches
    mini_batches <- split(training_data,
                          ceiling(seq_along(training_data)/mini_batch_size))
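The SGD driver shuffles the training set once per epoch and partitions it into fixed-size mini-batches; split with ceiling(seq_along(...)/size) is the R idiom for chunking. The same shuffle-and-partition step in Python, assuming training_data is a list of examples:

import random

def minibatches(training_data, mini_batch_size):
    # Shuffle once per epoch, then chunk into fixed-size mini-batches.
    random.shuffle(training_data)
    return [training_data[k:k + mini_batch_size]
            for k in range(0, len(training_data), mini_batch_size)]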