Skip to content

Instantly share code, notes, and snippets.

@6174
Last active November 13, 2019 08:54
Show Gist options
  • Star 2 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save 6174/bc799919948169d0f840 to your computer and use it in GitHub Desktop.
Save 6174/bc799919948169d0f840 to your computer and use it in GitHub Desktop.
rust linear regression
// ==================================
// linear regression trainer in rust
// @author 6174
// ==================================
// use std::cell::RefCell;
use std::vec::Vec;
/// Entry point: run the validation demo against the pre-trained model.
/// (Enable the training demo by uncommenting the call below.)
fn main() {
    // linear_regression();
    validate();
}
/// Sanity-check a pre-trained model: apply the stored parameters to a
/// few feature-scaled samples and print predicted vs. actual values.
fn validate() {
    // Each row: [bias (= 1.0), feature 1, feature 2], already scaled.
    let samples: Vec<Vec<f64>> = vec![
        vec![1.0, 0.09901, 0.099668],
        vec![1.0, 1.0, 1.0],
        vec![1.0, 0.049505, 0.13289],
        vec![1.0, 0.693069, 0.797342],
    ];
    // Trained model parameters and the expected target values.
    let theta: Vec<f64> = vec![0.302283, 23.009411, -3.390758];
    let targets: Vec<f64> = vec![2.0, 20.1, 1.3, 13.3];
    for (row, target) in samples.iter().zip(targets.iter()) {
        // Hypothesis: dot product of the parameter vector with the row.
        let predicted: f64 = row.iter().zip(theta.iter()).map(|(x, t)| x * t).sum();
        println!("real: {:?}, predict: {:?}", target, predicted);
    }
}
/// Training-pipeline demo: scale the raw training matrix and print it.
/// The gradient-descent call itself is currently commented out, so
/// `v_theta` and `v_y` are set up but unused for now.
fn linear_regression() {
    let v_theta: Vec<f64> = vec![1.0, 1.0, 1.0];
    let v_y: Vec<f64> = vec![2.0, 20.1, 1.3, 13.3];
    // Raw training matrix: 4 samples x 3 columns (column 0 is the bias slot).
    let training_set = vec![
        vec![0.0, 2.0, 3.0],
        vec![0.0, 20.2, 30.1],
        vec![0.0, 1.0, 4.0],
        vec![0.0, 14.0, 24.0],
    ];
    // Scale each column by its maximum before training.
    let featured_scaled_training_set = feature_scaling(&training_set, 3, 4);
    println!("featured_scaled_training_set: {:?}", featured_scaled_training_set);
    // let v_trained_theta:Vec<f64> = gradient_descent(v_theta, &featured_scaled_training_set, &v_y, 10000, 0.003);
}
// Column-wise feature scaling: divide every column by its maximum value
// so each feature falls into a comparable range.
// @param v_x    training matrix, `height` rows x `width` columns
// @param width  number of columns (features)
// @param height number of rows (samples)
// @ret new height x width matrix; a column whose maximum is 0.0 is
//      filled with 1.0 (the convention used for the constant bias slot).
fn feature_scaling(v_x: &Vec<Vec<f64>>, width: usize, height: usize) -> Vec<Vec<f64>> {
    let mut scaled: Vec<Vec<f64>> = vec![vec![0.0; width]; height];
    for col in 0..width {
        // Gather the column into a temporary, then scale by its maximum.
        let column: Vec<f64> = (0..height).map(|row| v_x[row][col]).collect();
        let max = find_max(&column);
        for row in 0..height {
            scaled[row][col] = if max == 0.0 { 1.0 } else { column[row] / max };
        }
    }
    scaled
}
// Largest element of `v`.
// Returns f64::NEG_INFINITY for an empty vector (the identity element
// for max). This replaces the old arbitrary -1000000000.0 sentinel,
// which returned a wrong answer whenever every element was below -1e9.
fn find_max(v: &Vec<f64>) -> f64 {
    v.iter().copied().fold(f64::NEG_INFINITY, f64::max)
}
// gradient descent algorithm
// @param (Vec<f64>) v_theta: initial model theta
// @param (&Vec<Vec<f64>>) v_x: training set variables (first column = bias)
// @param (&Vec<f64>) v_y: training set result
// @param (usize) times: maximum number of iterations (safety bound)
// @param (f64) learning_rate: step size alpha
// @ret (Vec<f64>) trained model theta
//
// Repeat until convergence (cost < 1e-3) or `times` iterations:
//   theta_j = theta_j - alpha/m * partial_derivative(J, theta_j)
fn gradient_descent(v_theta: Vec<f64>, v_x: &Vec<Vec<f64>>, v_y: &Vec<f64>, times: usize, learning_rate: f64) -> Vec<f64> {
    let dimensions = v_theta.len();
    let m_reciprocal = 1.0 / (v_y.len() as f64);
    let mut j_history: Vec<f64> = Vec::new();
    println!("start training: {:?}, {:?}, {:?}", v_theta, times, learning_rate);
    let mut v_theta_current = v_theta;
    // Bounded loop: the previous version used `loop` and ignored `times`,
    // which hung forever whenever the cost never dropped below the
    // threshold (e.g. with a divergent learning rate).
    for _ in 0..times {
        // Simultaneous update: every component is computed from the
        // current theta before any component is replaced.
        let mut v_theta_next = vec![0.0; dimensions];
        for j in 0..dimensions {
            v_theta_next[j] = v_theta_current[j]
                - learning_rate * m_reciprocal * partial_derivative(&v_theta_current, v_x, v_y, j);
        }
        v_theta_current = v_theta_next;
        let cost = compute_cost(&v_theta_current, v_x, v_y);
        j_history.push(cost); // previously declared and printed but never filled
        if cost < 0.001 {
            break;
        }
        println!("cost: {:?}, v_theta_current: {:?}", cost, v_theta_current);
    }
    println!("j_history: {:?}", j_history);
    println!("v_theta: {:?}", v_theta_current);
    v_theta_current
}
// Unaveraged partial derivative of the cost w.r.t. theta[index]:
// sum over samples of (h(x_i) - y_i) * x_i[index].
// The caller is responsible for dividing by m.
fn partial_derivative(v_theta: &Vec<f64>, v_x: &Vec<Vec<f64>>, v_y: &Vec<f64>, index: usize) -> f64 {
    v_x.iter()
        .zip(v_y.iter())
        .map(|(row, &y)| (calculate_hypothesis(v_theta, row) - y) * row[index])
        .sum()
}
// Mean-squared-error cost: sum{(h(x_i) - y_i)^2} / (2m).
fn compute_cost(v_theta: &Vec<f64>, v_x: &Vec<Vec<f64>>, v_y: &Vec<f64>) -> f64 {
    let m = v_y.len();
    // Multiply by the precomputed reciprocal (matches the original's
    // exact floating-point arithmetic: sum * (1 / 2m)).
    let half_m_reciprocal = 1.0 / (2.0 * m as f64);
    let total: f64 = v_x
        .iter()
        .zip(v_y.iter())
        .map(|(row, &y)| {
            let diff = calculate_hypothesis(v_theta, row) - y;
            diff * diff
        })
        .sum();
    total * half_m_reciprocal
}
// Hypothesis value for one sample: h(x) = [theta] . [x] (dot product).
// Indexes v_x by position, so v_x must have at least v_theta.len()
// elements (panics otherwise, like the original).
fn calculate_hypothesis(v_theta: &Vec<f64>, v_x: &Vec<f64>) -> f64 {
    let mut acc: f64 = 0.0;
    for (i, theta) in v_theta.iter().enumerate() {
        acc += theta * v_x[i];
    }
    acc
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment