Mark Saroufim (msaroufim)
🤖 Putting the finishing touches on my robot army
quicksort.py
# Python program for implementation of Quicksort.
# This function takes the last element as pivot, places
# the pivot element at its correct position in the sorted
# array, and places all smaller elements (smaller than the pivot)
# to the left of the pivot and all greater elements to its right.
def partition(arr, low, high):
    i = low - 1          # index of the smaller element
    pivot = arr[high]    # pivot
    for j in range(low, high):
        if arr[j] <= pivot:      # move elements <= pivot to the left side
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]   # put the pivot in place
    return i + 1
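The gist is truncated here; a minimal sketch of the recursive driver that uses partition:

def quickSort(arr, low, high):
    # sort arr[low..high] in place by recursing on both sides of the pivot
    if low < high:
        pi = partition(arr, low, high)
        quickSort(arr, low, pi - 1)
        quickSort(arr, pi + 1, high)

# usage: quickSort(data, 0, len(data) - 1) sorts data in place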
logistic_regression.py
# https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac
import numpy as np

class LogisticRegression:
    def __init__(self, lr=0.01, num_iter=100000, fit_intercept=True, verbose=False):
        self.lr = lr                  # learning rate
        self.num_iter = num_iter      # number of gradient steps
        self.fit_intercept = fit_intercept
        self.verbose = verbose

    def __add_intercept(self, X):
        # prepend a column of ones so the bias is learned as a weight
        intercept = np.ones((X.shape[0], 1))
        return np.concatenate((intercept, X), axis=1)
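    # The gist cuts off here; a minimal sketch of the rest of the class,
    # following the linked post's sigmoid / gradient-descent formulation.
    def __sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def fit(self, X, y):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        self.theta = np.zeros(X.shape[1])
        for _ in range(self.num_iter):
            h = self.__sigmoid(np.dot(X, self.theta))
            gradient = np.dot(X.T, (h - y)) / y.size   # gradient of the log-loss
            self.theta -= self.lr * gradient

    def predict(self, X, threshold=0.5):
        if self.fit_intercept:
            X = self.__add_intercept(X)
        return self.__sigmoid(np.dot(X, self.theta)) >= threshold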
q-learning.py
# https://www.geeksforgeeks.org/q-learning-in-python/
import numpy as np

def createPolicy(Q, epsilon, num_actions):
    # build an epsilon-greedy policy from an action-value table Q
    def policyFunction(state):
        # the policy function can be a neural net instead of a dictionary
        # from state to action, which is helpful when the state space is
        # too big, e.g. continuous domains
        action_probs = np.ones(num_actions, dtype=float) * epsilon / num_actions
        best_action = np.argmax(Q[state])
        action_probs[best_action] += 1.0 - epsilon   # greedy action gets the rest
        return action_probs
    return policyFunction
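For context, a minimal sketch of the tabular Q-learning step this policy would sit inside; env, alpha, and gamma are assumptions here, with env following the classic Gym-style step() convention:

policy = createPolicy(Q, epsilon=0.1, num_actions=num_actions)
action_probs = policy(state)
action = np.random.choice(num_actions, p=action_probs)   # sample epsilon-greedily
next_state, reward, done, _ = env.step(action)
# temporal-difference update toward the best next action
Q[state][action] += alpha * (reward + gamma * np.max(Q[next_state]) - Q[state][action])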
k-means.py
import random

def squared_distance(v, w):
    # helper assumed by classify: squared Euclidean distance
    return sum((v_i - w_i) ** 2 for v_i, w_i in zip(v, w))

class KMeans:
    def __init__(self, k):
        self.k = k            # number of clusters
        self.means = None     # means of the clusters

    def classify(self, input):
        # index of the cluster whose mean is closest to the input
        return min(range(self.k), key=lambda i: squared_distance(input, self.means[i]))

    def train(self, inputs):
        self.means = random.sample(inputs, self.k)   # start with k random points
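        # Sketch of the rest of the loop (Lloyd's algorithm): assign points to
        # the nearest mean, recompute the means, stop when assignments settle.
        # vector_mean is an assumed helper that averages a list of vectors.
        assignments = None
        while True:
            new_assignments = list(map(self.classify, inputs))
            if assignments == new_assignments:   # no point changed cluster
                return
            assignments = new_assignments
            for i in range(self.k):
                i_points = [p for p, a in zip(inputs, assignments) if a == i]
                if i_points:                     # avoid dividing by zero
                    self.means[i] = vector_mean(i_points)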
minimize_stochastic.py
import random

def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
    data = list(zip(x, y))   # zip is a one-shot iterator in Python 3, so materialize it
    theta = theta_0          # initial guess
    alpha = alpha_0          # initial step size
    min_theta, min_value = None, float("inf")
    iterations_with_no_improvement = 0

    # if we ever go 100 iterations with no improvement, stop
    while iterations_with_no_improvement < 100:
        value = sum(target_fn(x_i, y_i, theta) for x_i, y_i in data)
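        # Sketch of the rest of the loop; vector_subtract and scalar_multiply
        # are assumed list-based vector helpers.
        if value < min_value:
            # new minimum: remember it and go back to the original step size
            min_theta, min_value = theta, value
            iterations_with_no_improvement = 0
            alpha = alpha_0
        else:
            # no improvement: count it and shrink the step size
            iterations_with_no_improvement += 1
            alpha *= 0.9
        # take a gradient step for each of the data points, in random order
        for x_i, y_i in random.sample(data, len(data)):
            gradient_i = gradient_fn(x_i, y_i, theta)
            theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
    return min_theta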
gradient_descent.py
import random

def step(v, direction, step_size):
    # move step_size in the given direction from v
    return [v_i + step_size * direction_i for v_i, direction_i in zip(v, direction)]

def sum_of_squares_gradient(v):
    return [2 * v_i for v_i in v]   # gradient of f(v) = sum of squared components

v = [random.uniform(-10, 10) for _ in range(3)]   # pick a random starting point
tolerance = 0.00001
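The gist stops here; a minimal sketch of the descent loop these pieces build toward, with a small Euclidean-distance helper defined inline:

import math

def distance(v, w):
    return math.sqrt(sum((v_i - w_i) ** 2 for v_i, w_i in zip(v, w)))

while True:
    gradient = sum_of_squares_gradient(v)
    next_v = step(v, gradient, -0.01)       # step against the gradient
    if distance(next_v, v) < tolerance:     # stop when we barely move
        break
    v = next_v
# v is now very close to the minimum [0, 0, 0]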
se3.jl
import LinearAlgebra
const linalg = LinearAlgebra

# matrix exponential of an se(3) twist matrix, giving a homogeneous transform
function MatrixExp6(se3mat::Array)
    omgtheta = so3ToVec(se3mat[1:3, 1:3])
    if NearZero(linalg.norm(omgtheta))
        # pure translation: the rotation part is the identity
        return vcat(hcat(linalg.I, se3mat[1:3, 4]), [0 0 0 1])
    else
        θ = AxisAng3(omgtheta)[2]
        omgmat = se3mat[1:3, 1:3] / θ
        return vcat(hcat(MatrixExp3(se3mat[1:3, 1:3]),
                         (linalg.I * θ +
                          (1 - cos(θ)) * omgmat +
                          (θ - sin(θ)) * omgmat * omgmat) *
                         se3mat[1:3, 4] / θ),
                    [0 0 0 1])
    end
end
adjoint.jl
# 6×6 adjoint representation [Ad_T] of a homogeneous transform T
function Adjoint(T::Array)
    R, p = TransToRp(T)
    vcat(hcat(R, zeros(3, 3)), hcat(VecToso3(p) * R, R))
end
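TransToRp isn't in the gist; a minimal sketch of the assumed helper, following the Modern Robotics convention:

function TransToRp(T::Array)
    # split a homogeneous transform into its rotation matrix and position vector
    return T[1:3, 1:3], T[1:3, 4]
end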
vectoso3.jl
# skew-symmetric matrix [ω] of a 3-vector ω, so that [ω] * v == ω × v
function VecToso3(ω::Array)
    [ 0     -ω[3]   ω[2];
      ω[3]   0     -ω[1];
     -ω[2]   ω[1]   0 ]
end
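A quick sanity check of the cross-product identity:

ω, v = [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]
@assert VecToso3(ω) * v ≈ [-3.0, 6.0, -3.0]   # equals cross(ω, v)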
matrixexp3.jl
import LinearAlgebra
const linalg = LinearAlgebra

# matrix exponential of a 3×3 skew-symmetric matrix (Rodrigues' formula)
function MatrixExp3(so3mat::Array)
    omgtheta = so3ToVec(so3mat)
    if NearZero(linalg.norm(omgtheta))
        return linalg.I
    else
        θ = AxisAng3(omgtheta)[2]
        omgmat = so3mat / θ
        return linalg.I + sin(θ) * omgmat + (1 - cos(θ)) * omgmat * omgmat
    end
end
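The Julia snippets above lean on a few helpers that aren't in the gist; minimal sketches following the Modern Robotics conventions (the names match the calls above, the bodies are assumptions):

NearZero(z) = abs(z) < 1e-6   # treat values this small as zero

# recover the 3-vector from a skew-symmetric matrix (inverse of VecToso3)
so3ToVec(so3mat::Array) = [so3mat[3, 2], so3mat[1, 3], so3mat[2, 1]]

# split an exponential-coordinate vector into a unit rotation axis and an angle
function AxisAng3(expc3::Array)
    return expc3 / linalg.norm(expc3), linalg.norm(expc3)
end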