Kotlin Programming Language Cheat Sheet Part 1

Intro

Kotlin is a new programming language for the JVM. It produces Java bytecode, supports Android, and can also be compiled to JavaScript. At the time of writing, the latest version of the language is Kotlin M5.3.

The Kotlin project website is at kotlin.jetbrains.org.

All the code here can be copied and run in the Kotlin online editor.

Let's get started.

art2gd.py: multivariate linear regression with gradient descent

#imports and data loading
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

my_data = pd.read_csv('home.txt',names=["size","bedroom","price"]) #read the data

#we need to normalize the features using mean normalization
my_data = (my_data - my_data.mean())/my_data.std()
my_data.head()
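After mean normalization each column should have mean close to 0 and standard deviation close to 1; a quick check (not part of the original gist):

#sanity check on the normalized features (illustrative, not in the original)
print(my_data.mean()) #values near 0
print(my_data.std())  #values near 1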
#setting the matrices
X = my_data.iloc[:,0:2]
ones = np.ones([X.shape[0],1])
X = np.concatenate((ones,X),axis=1)
y = my_data.iloc[:,2:3].values #.values converts it from pandas.core.frame.DataFrame to numpy.ndarray
theta = np.zeros([1,3])

#set hyper parameters
alpha = 0.01
iters = 1000 #the iteration count was missing from the snippet; 1000 is an assumed value
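X now carries a leading column of ones so that the first entry of theta acts as the intercept; a quick shape check (illustrative, not in the original):

print(X.shape)     #(n_samples, 3): the ones column plus the two features
print(theta.shape) #(1, 3): one parameter per column of X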
#computecost
def computeCost(X,y,theta):
    tobesummed = np.power(((X @ theta.T)-y),2) # @ means matrix multiplication of arrays; to use * instead we would have to convert the arrays to matrices
    return np.sum(tobesummed)/(2 * len(X))
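With theta initialized to all zeros, the prediction X @ theta.T is zero everywhere, so the starting cost reduces to np.sum(y**2)/(2*len(X)); a quick check (not part of the original gist):

initialCost = computeCost(X, y, theta) #cost before any training
print(initialCost) #equals np.sum(y**2)/(2*len(X)) for the zero vector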
#gradient descent
def gradientDescent(X,y,theta,iters,alpha):
    cost = np.zeros(iters)
    for i in range(iters):
        theta = theta - (alpha/len(X)) * np.sum(X * (X @ theta.T - y), axis=0)
        cost[i] = computeCost(X, y, theta)
        # if i % 10 == 0: # just look at cost every ten loops for debugging
        #     print(cost[i])
    return theta,cost

#running the gd and cost function
g, cost = gradientDescent(X,y,theta,iters,alpha)
print(g)
finalCost = computeCost(X,y,g)
print(finalCost)
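One way to sanity-check the learned parameters (not part of the original gist) is the closed-form normal equation, which solves the same least-squares problem directly:

#closed-form solution theta = (X^T X)^(-1) X^T y, for comparison with g
theta_ne = np.linalg.solve(X.T @ X, X.T @ y)
print(theta_ne.ravel()) #should be close to g after enough iterations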
#plot the cost
fig, ax = plt.subplots()
ax.plot(np.arange(iters), cost, 'r')
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.set_title('Error vs. Training Epoch')
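For a small enough alpha, the recorded cost should fall on every iteration; a quick numerical check of convergence (not in the original gist):

print(cost[0], cost[-1]) #cost should drop substantially over training
print(np.all(np.diff(cost) <= 0)) #True if the cost decreased monotonically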
#plot the fitted line against the first feature
plt.scatter(my_data.iloc[:,0], y)
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = g[0][0] + g[0][1]* x_vals #the line equation
plt.plot(x_vals, y_vals, '--')
plt.show()