import numpy as np
from ipyparallel import Client

# connect to a running ipyparallel cluster and get a direct view of all engines
rc = Client(profile='default')
dview = rc[:]
dview.block = True
# every package used inside parallel calls must also be imported on the engines
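# A minimal sketch of that rule (assuming the cluster above is running):
# sync_imports() replays the import on every engine, and dview.map then
# applies a function across the engines in parallel.
with dview.sync_imports():
    import numpy

def square(x):
    return x ** 2

print(dview.map(square, range(10)))  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]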
# install the CUDA toolkit and numba, then point numba at libdevice and NVVM
# (these paths are environment-specific; adjust them to your CUDA install)
!apt-get install -y nvidia-cuda-toolkit
!pip3 install numba
import os
os.environ['NUMBAPRO_LIBDEVICE'] = "/usr/lib/nvidia-cuda-toolkit/libdevice"
os.environ['NUMBAPRO_NVVM'] = "/usr/local/cuda-10.0/nvvm/lib64/libnvvm.so"
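# Quick sanity check (a sketch): numba's cuda.detect() lists the visible GPUs,
# confirming that the toolkit paths above resolved correctly.
from numba import cuda
cuda.detect()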
import numpy as np
from sklearn import datasets, preprocessing
import kernelml

num_components = 6
# X: (n_samples, n_features) data matrix, assumed loaded beforehand
# standardize the columns of the dataset
X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
#get the covariance matrix from the dataset
S = np.cov(X.T)
#compute the eigenvalues and eigenvectors of the covariance matrix
eigvals, eigvecs = np.linalg.eig(S)
#sort the eigenvalues in descending order
index = np.argsort(eigvals)[::-1]
#sort the eigenvectors accordingly and keep the top components
eigvals = eigvals[index]
eigvecs = eigvecs[:, index][:, :num_components]
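# A short follow-on sketch (variable names here are my own): project the
# standardized data onto the retained components and report how much of the
# total variance they explain.
X_reduced = X @ eigvecs
explained = eigvals[:num_components] / np.sum(eigvals)
print(explained)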
# install Java 8 and Spark 2.3.3, then set the environment variables findspark needs
!apt-get install -y -qq openjdk-8-jdk-headless > /dev/null
!wget -q http://apache.osuosl.org/spark/spark-2.3.3/spark-2.3.3-bin-hadoop2.7.tgz
!tar xf spark-2.3.3-bin-hadoop2.7.tgz
!pip install -q findspark
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.3.3-bin-hadoop2.7"
# same CUDA/numba setup as above, with libnvvm at its Ubuntu package location
!apt-get install -y nvidia-cuda-toolkit
!pip3 install numba
import os
os.environ['NUMBAPRO_LIBDEVICE'] = "/usr/lib/nvidia-cuda-toolkit/libdevice"
os.environ['NUMBAPRO_NVVM'] = "/usr/lib/x86_64-linux-gnu/libnvvm.so"
# bootstrap RAPIDS on Colab via the rapidsai setup script, then make its
# site-packages and CUDA libraries visible to this session
!wget -nc https://github.com/rapidsai/notebooks-extended/raw/master/utils/rapids-colab.sh
!bash rapids-colab.sh
import sys, os
sys.path.append('/usr/local/lib/python3.6/site-packages/')
os.environ['NUMBAPRO_NVVM'] = '/usr/local/cuda/nvvm/lib64/libnvvm.so'
os.environ['NUMBAPRO_LIBDEVICE'] = '/usr/local/cuda/nvvm/libdevice/'
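# A quick check (a sketch, assuming the script above installed RAPIDS): build
# a small GPU dataframe with cuDF and reduce a column on the device.
import cudf
gdf = cudf.DataFrame({'a': [1, 2, 3]})
print(gdf['a'].sum())  # 6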
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from sklearn import datasets, mixture

np.random.seed(1000)
# load the iris measurements (here `y` is the feature matrix, not the labels)
y = datasets.load_iris().data
names = datasets.load_iris().feature_names
num_clusters = 3
# fit a Gaussian mixture with a tight tolerance and a generous iteration cap
gmm = mixture.GaussianMixture(n_components=num_clusters, max_iter=100000, tol=0.00001).fit(y)
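# Inspecting the fit (a sketch): per-point cluster assignments, plus the
# learned component means and covariances.
labels = gmm.predict(y)
print(gmm.converged_, gmm.means_.shape, gmm.covariances_.shape)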
import numpy as np

# Monte Carlo accumulators; the body of the sampling loop is not included in
# this snippet
psd = 0
same_sign = 0
means = []
covs = []
for i in range(1000000):
    pass  # loop body missing from the snippet
def phaseCorr(a, b):
    # cross-power spectrum of the two arrays
    corr = np.fft.fftn(a) * np.conjugate(np.fft.fftn(b))
    # keep phase only, then invert and center the correlation peak
    pc = corr / np.absolute(corr)
    return np.fft.fftshift(np.fft.ifftn(pc)).real
a = np.array([[0,0,0,0],[0,1,0,0],[0,0,0,0],[0,0,0,0]])
b = np.array([[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,1,0]])
# center of the fftshifted correlation surface; integer division for indexing
center_x = a.shape[0] // 2
center_y = a.shape[1] // 2
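# Using phaseCorr above (a sketch): the correlation peak's offset from the
# center recovers the translation between the two impulses, modulo wrap-around.
pc = phaseCorr(a, b)
peak = np.unravel_index(np.argmax(pc), pc.shape)
shift = (center_x - peak[0], center_y - peak[1])
print(shift)  # (2, 1): b's spike sits 2 rows and 1 column from a's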
import tensorflow as tf  # TensorFlow 1.x graph-mode API

# a single linear unit; X_train is assumed to be an (n_samples, n_features) array
hidden_layer_1 = 1
weights1 = tf.Variable(tf.random_normal((X_train.shape[1], hidden_layer_1), stddev=0.01, dtype='float32'))
b1 = tf.Variable(tf.zeros((1, hidden_layer_1), dtype='float32'))
input_X = tf.placeholder('float32', (None, X_train.shape[1]))
input_y = tf.placeholder('float32', (None, 1))
# linear prediction; note the bias enters as a single scalar, sum(b1 * weights1)
predicted_out = tf.add(tf.matmul(input_X, weights1), tf.reduce_sum(b1 * weights1))
loss = tf.reduce_sum(tf.square(predicted_out - input_y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.00001).minimize(loss)
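# A minimal training loop for the graph above (a sketch; X_train and y_train
# are assumed to be numpy arrays with matching shapes).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(1000):
        _, l = sess.run([optimizer, loss],
                        feed_dict={input_X: X_train, input_y: y_train})
    print('final loss:', l)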