import numpy as np
import matplotlib.pyplot as plt

# Pinball (quantile) loss for a range of quantiles alpha
x = np.arange(-10, 11)
for alpha in np.linspace(0.1, 0.9, 5):
    y = np.where(x > 0, alpha * x, (alpha - 1) * x)
    plt.plot(x, y, label=f'Q = {alpha}')
plt.legend(loc='upper left')
plt.show()
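The curves above are the pinball (quantile) loss: alpha * r for r >= 0 and (alpha - 1) * r otherwise. As a minimal sketch of why it matters (the helper name pinball_loss is ours, not from the gist): minimizing it over a constant prediction recovers the alpha-quantile of the data.

import numpy as np

def pinball_loss(residuals, alpha):
    # Mean alpha-quantile (pinball) loss over a batch of residuals
    r = np.asarray(residuals)
    return np.mean(np.where(r > 0, alpha * r, (alpha - 1) * r))

# Grid-search a constant prediction c; the minimizer sits near the 0.9-quantile
data = np.random.default_rng(0).normal(size=1000)
grid = np.linspace(-3.0, 3.0, 601)
best = grid[np.argmin([pinball_loss(data - c, 0.9) for c in grid])]
print(best, np.quantile(data, 0.9))  # the two values should nearly agree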
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-20, 20, 200)
y = np.abs(x)  # absolute loss, for reference
alpha = 0.2

# Pinball loss at alpha = 0.2 and its smooth log-cosh counterpart
y_Q = np.where(x > 0, alpha * x, (alpha - 1) * x)
y_logcosh = np.where(x > 0, alpha * np.log(np.cosh(x)), (1 - alpha) * np.log(np.cosh(x)))

plt.plot(x, y_Q, label='Q = 0.2')
plt.plot(x, y_logcosh, label='Smooth Q = 0.2 regression using log_cosh')
plt.legend(loc='upper left')
plt.show()
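Why log cosh works as a smooth surrogate: near zero it behaves like x**2 / 2, and for large |x| it approaches |x| - log(2), so the weighted version above tracks the pinball loss up to a constant offset while staying differentiable at zero. A quick numeric check (our addition):

import numpy as np

x_small, x_large = np.array([0.01, 0.1]), np.array([5.0, 20.0])
# Quadratic regime near zero: log(cosh(x)) ~ x**2 / 2
print(np.log(np.cosh(x_small)), x_small**2 / 2)
# Linear regime in the tails: log(cosh(x)) ~ |x| - log(2)
print(np.log(np.cosh(x_large)), np.abs(x_large) - np.log(2))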
import numpy as np
import matplotlib.pyplot as plt

trip_distance = 6.0    # km
trip_duration = 12.0   # minutes
trip_avg_speed = 30.0  # km/h

# trip duration in minutes
def duration(distance, speed):
    return distance / speed * 60.0
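A quick sanity check with the constants above (our addition): the model reproduces the recorded trip.

# 6 km at 30 km/h -> 12.0 minutes, matching trip_duration
print(duration(trip_distance, trip_avg_speed))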
from forward_autodiff import DualFloat

def simple_polynome(a, b):
    return lambda x: x**2 * a + b

def squared_polynome(a, b, c):
    return lambda x: x**2 * a + x * b + c

# Analytic derivative of squared_polynome, for checking the autodiff result
def squared_polynome_check(a, b, c):
    return lambda x: 2 * x * a + b
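The forward_autodiff module itself is not shown in the gist. As a minimal sketch of what its DualFloat could look like (this implementation is our assumption, only meant to make the check runnable), a dual number carries a value and a derivative through arithmetic:

class DualFloat:
    # Dual number: value plus derivative, propagated by the chain rule
    def __init__(self, val, dot=0.0):
        self.val, self.dot = val, dot

    def _wrap(self, other):
        return other if isinstance(other, DualFloat) else DualFloat(other)

    def __add__(self, other):
        other = self._wrap(other)
        return DualFloat(self.val + other.val, self.dot + other.dot)

    __radd__ = __add__

    def __mul__(self, other):
        # Product rule: (u v)' = u' v + u v'
        other = self._wrap(other)
        return DualFloat(self.val * other.val,
                         self.dot * other.val + self.val * other.dot)

    __rmul__ = __mul__

    def __truediv__(self, other):
        # Quotient rule: (u / v)' = (u' v - u v') / v**2
        other = self._wrap(other)
        return DualFloat(self.val / other.val,
                         (self.dot * other.val - self.val * other.dot) / other.val**2)

    def __pow__(self, n):
        # Power rule for a constant exponent n
        return DualFloat(self.val**n, n * self.val**(n - 1) * self.dot)

# Seed the derivative with 1.0 to differentiate with respect to x,
# then compare against the hand-written analytic check
p = squared_polynome(2.0, 3.0, 1.0)
x = DualFloat(4.0, 1.0)
print(p(x).dot)                                    # 19.0
print(squared_polynome_check(2.0, 3.0, 1.0)(4.0))  # 19.0 as well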
import numpy as np
import matplotlib.pyplot as plt
from forward_autodiff import DualFloat

trip_distance = DualFloat(6.0)    # km
trip_duration = DualFloat(12.0)   # minutes
trip_avg_speed = DualFloat(30.0)  # km/h

# trip duration in minutes
def duration(distance, speed):
    return distance / speed * 60.0
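With the DualFloat sketch above, seeding the derivative on the speed input reads off the trip duration's sensitivity to average speed (the two-argument seeding convention is our assumption):

# d(duration)/d(speed) at 30 km/h: d/ds (6/s * 60) = -360/s**2 = -0.4 min per km/h
speed = DualFloat(30.0, 1.0)
result = duration(DualFloat(6.0), speed)
print(result.val)  # 12.0 minutes
print(result.dot)  # -0.4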
import numpy as np

def linear_predictions(weights, inputs):
    # y = weights[0] * inputs[0] + weights[1] * inputs[1]
    # where inputs[0] = 1.0 (bias term)
    return np.dot(inputs, weights) * 60.0  # minutes

v_avg = 30               # km/h
startup_time = 2 / 60.0  # hours
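Those two constants are exactly the "true" weights of the linear model above: the startup time as bias and the inverse speed as slope, both in hours before the conversion to minutes. A quick check (the numbers are our own):

weights = np.array([startup_time, 1.0 / v_avg])  # [hours, hours per km]
inputs = np.array([1.0, 6.0])                    # [bias input, distance in km]
print(linear_predictions(weights, inputs))       # 2 min startup + 12 min driving = 14.0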
import numpy as np

def linear_predictions(weights, inputs):
    # y = weights[0] * inputs[0] + weights[1] * inputs[1]
    # where inputs[0] = 1.0 (bias term)
    return np.dot(inputs, weights) * 60.0  # minutes

def squared_loss(weights, inputs, targets):
    # Training loss is the mean squared error between predictions and targets
    preds = linear_predictions(weights, inputs)
    return np.mean((preds - targets) ** 2)
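With the loss in place, its gradient can come from autodiff instead of being derived by hand. A minimal sketch using jax.grad (jax is imported later in the gist; the synthetic data below is our own) that recovers the true weights by gradient descent:

import jax
import jax.numpy as jnp

def loss(weights, inputs, targets):
    preds = jnp.dot(inputs, weights) * 60.0
    return jnp.mean((preds - targets) ** 2)

# Synthetic trips: a bias column of ones plus distances in km;
# targets generated from the true weights [2/60 h startup, 1/30 h per km]
inputs = jnp.stack([jnp.ones(5), jnp.array([2.0, 4.0, 6.0, 8.0, 10.0])], axis=1)
targets = jnp.dot(inputs, jnp.array([2.0 / 60.0, 1.0 / 30.0])) * 60.0

weights = jnp.zeros(2)
grad_loss = jax.grad(loss)
for _ in range(10_000):
    weights = weights - 1e-6 * grad_loss(weights, inputs, targets)
print(weights)  # approaches [0.0333, 0.0333]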
from functools import reduce
import numpy as np
import jax.numpy as jnp

class LagrangianPolynome:
    def __init__(self, Ts, Xs):
        self.Ts = Ts  # sample times
        self.Xs = Xs  # sample positions the polynomial must pass through
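The gist cuts off after __init__. As a hedged sketch, the evaluation a Lagrange interpolating polynomial needs might look like the following continuation (the method names and the use of reduce are our assumptions, suggested by the imports above):

    # --- sketch continuation, not from the gist ---
    def basis(self, i, t):
        # i-th Lagrange basis polynomial: prod over j != i of (t - Ts[j]) / (Ts[i] - Ts[j])
        factors = [(t - Tj) / (self.Ts[i] - Tj)
                   for j, Tj in enumerate(self.Ts) if j != i]
        return reduce(lambda a, b: a * b, factors, 1.0)

    def __call__(self, t):
        # Interpolated position at time t: sum over i of Xs[i] * basis_i(t)
        return sum(self.Xs[i] * self.basis(i, t) for i in range(len(self.Ts)))

# Three samples lying on x = t**2; the quadratic interpolant recovers 1.5**2 = 2.25
poly = LagrangianPolynome(jnp.array([0.0, 1.0, 2.0]), jnp.array([0.0, 1.0, 4.0]))
print(poly(1.5))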