@KevOrr
Last active September 1, 2015 04:50
#!/usr/bin/env python3
from statistics import mean, stdev

def squared_error_cost(hypothesis, data):
    # Mean squared error, halved by convention so its gradient has no stray factor of 2
    return sum((hypothesis(point[:-1]) - point[-1])**2 for point in data) / (2*len(data))

def linear_optimize_gradient_descent(data, args, alpha=.01, max_iters=1000):
    """Takes a sequence of n-tuples, where n-1 is the number of features of the model, and the last index is the output"""
    # Prepend the constant bias feature x0 = 1 to every point
    data = [(1,) + tuple(point) for point in data]
    # Standardize each input feature column; leave the bias column (stdev 0)
    # and the output column untouched
    feature_scale_parameters = [(0, 1)] + [(mean(col), stdev(col))
                                           for col in list(zip(*data))[1:-1]] + [(0, 1)]
    data = [tuple((x - m)/s for x, (m, s) in zip(point, feature_scale_parameters))
            for point in data]
    args, next_args = list(args), list(args)
    for i in range(max_iters):
        hyp = create_linear_hypothesis(args)
        # Batch gradient descent: update all parameters simultaneously
        for feature in range(len(args)):
            next_args[feature] -= alpha/len(data) * sum(
                (hyp(point[1:-1]) - point[-1])*point[feature] for point in data)
        args = list(next_args)
    # Undo the feature scaling so the returned parameters apply to the raw inputs
    unscaled = [a/s for a, (m, s) in zip(args[1:], feature_scale_parameters[1:-1])]
    intercept = args[0] - sum(a*m for a, (m, s) in zip(unscaled, feature_scale_parameters[1:-1]))
    return [intercept] + unscaled

def create_linear_hypothesis(args):
    # h(x) = args[0] + args[1]*x1 + ... + args[n-1]*x_{n-1}
    return lambda vals: sum(arg*val for (arg, val) in zip(args, (1,) + tuple(vals)))
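
A minimal usage sketch, not part of the original gist: the data points, starting parameters, learning rate, and iteration count below are illustrative assumptions, chosen so the fit should recover roughly y = 2x + 1.

if __name__ == '__main__':
    # Hypothetical example data: noisy samples of y = 2*x + 1
    points = [(0, 1.1), (1, 2.9), (2, 5.2), (3, 6.8), (4, 9.1)]
    # Start from all-zero parameters [bias, slope] and fit with batch gradient descent
    fitted = linear_optimize_gradient_descent(points, [0, 0], alpha=0.1, max_iters=5000)
    hyp = create_linear_hypothesis(fitted)
    print('parameters:', fitted)                     # expect roughly [1, 2]
    print('cost:', squared_error_cost(hyp, points))  # expect a small residual cost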