@PtrMan
Last active May 29, 2020 07:30
rust learn retina motion PROTOTYPE
#![allow(non_snake_case)]
extern crate rand;
use std::default::Default;
use rand::Rng;
use rand::distributions::{Normal, Distribution};
// forward-mode automatic differentiation (dual numbers)
#[derive(Debug, Clone)]
pub struct Ad {
r: f64, // value
d: f64, // derivative
}
pub fn add(a: &Ad,b: &Ad) -> Ad {
Ad{r:a.r+b.r,d:a.d+b.d}
}
pub fn sub(a: &Ad,b: &Ad) -> Ad {
Ad{r:a.r-b.r,d:a.d-b.d}
}
pub fn mul(a: &Ad,b: &Ad) -> Ad {
Ad{r:a.r*b.r,d:a.r*b.d+a.d*b.r}
}
pub fn div(a: &Ad,b: &Ad) -> Ad {
let z=a.d*b.r-a.r*b.d; // quotient rule: (a/b)' = (a'b - ab') / b^2
Ad{r:a.r/b.r,d:z/(b.r*b.r)}
}
pub fn exp(v:&Ad) -> Ad {
let e = v.r.exp();
Ad{r:e,d:v.d*e}
}
pub fn dot(v:&[Ad], w:&[Ad]) -> Ad {
let mut acc = mul(&v[0],&w[0]);
for i in 1..w.len() {
acc = add(&mul(&v[i],&w[i]), &acc);
}
return acc;
}
// ReLU activation function
pub fn reluAct(v:&Ad) -> Ad {
if v.r > 0.0 {
Ad{r:v.r,d:v.d}
}
else {
Ad{r:0.0,d:0.0}
}
}
pub fn sigmoidAct(v:&Ad) -> Ad {
let z=exp(&v);
div(&z, &add(&z, &Ad{r:1.0,d:0.0}))
}
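// Example (sketch, illustrative helper name): forward-mode differentiation of
// f(x) = sigmoid(x*x) at x = 2.0. Seeding d = 1.0 on x makes the d field of
// the result carry df/dx = sigmoid'(x*x) * 2x.
#[allow(dead_code)]
pub fn adExample() {
let x = Ad{r:2.0,d:1.0}; // differentiate with respect to x
let y = sigmoidAct(&mul(&x, &x));
println!("f = {} df/dx = {}", y.r, y.d);
}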
#[derive(Debug, Clone)]
pub struct Neuron {
weights: Vec<Ad>,
bias:Ad,
act: u32, // activation function selector: 0 = ReLU, otherwise sigmoid
}
// calc activation of neuron
pub fn calc(inAct:&[Ad], n:&Neuron) -> Ad {
let act = add(&dot(&inAct, &n.weights), &n.bias);
let act2 = if n.act == 0 {reluAct(&act)} else {sigmoidAct(&act)};
return act2;
}
// experiment with backprop-style weight updates (via forward-mode AD)
pub fn expNn0() {
let mut rng = rand::thread_rng();
let mut n0 = Neuron {
weights: vec![Ad{r:0.5,d:0.0}, Ad{r:0.2,d:0.0}],
bias:Ad{r:0.0,d:0.0},
act:0
};
let mut in2 = vec![Ad{r:0.1,d:0.0}, Ad{r:0.9,d:0.0}, ];
let mut iterations = 350; // number of iterations
let mut lrate = 0.03; // learning rate
for it in 0..iterations {
let mut weightIdx = rng.gen::<usize>() % n0.weights.len();
n0.weights[weightIdx].d = 1.0; // we want to differentiate this one
let mut r = calc(&in2,&n0);
println!("{} {}", r.r,r.d);
let mut error = 0.9 - r.r;
n0.weights[weightIdx].r += r.d*error*lrate; // gradient step: r.d is d(output)/d(weight)
n0.weights[weightIdx].d = 0.0; // reset
}
}
#[derive(Debug, Clone)]
pub struct Map2d<T> {
arr:Vec<T>,
w:i32, // width
h:i32, // height
}
// read value at (y,x); out-of-bounds reads return the type's default value
pub fn readAt<T:Copy+Default>(m:&Map2d<T>, y:i32,x:i32) -> T {
if y<0||x<0||x>=m.w||y>=m.h {
return T::default();
}
m.arr[(y*m.w+x) as usize]
}
// write value v at (y,x); out-of-bounds writes are ignored
pub fn writeAt<T>(m:&mut Map2d<T>, y:i32,x:i32,v:T) -> () {
if y<0||x<0||x>=m.w||y>=m.h {
return;
}
m.arr[(y*m.w+x) as usize] = v;
}
// helper to draw a box
pub fn map2dDrawBox<T:Copy>(m:&mut Map2d<T>, cx:i32,cy:i32,w:i32,h:i32,v:T) {
for iy in 0..h {
for ix in 0..w {
writeAt(m,cy+iy,cx+ix, v);
}
}
}
// count true values
pub fn map2dCountTrue(m:&Map2d<bool>) -> i64 {
let mut c = 0;
for ix in 0..m.w {
for iy in 0..m.h {
if readAt(m,iy,ix) {
c+=1;
}
}
}
c
}
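// Example (sketch, illustrative helper name): draw a box into a boolean mask
// and count the covered cells.
#[allow(dead_code)]
pub fn map2dMaskExample() {
let mut mask = Map2d::<bool> { arr:vec![false; 5*5], w:5, h:5 };
map2dDrawBox(&mut mask, 1,1, 3,2, true); // box 3 wide, 2 high at (1,1)
println!("true cells = {}", map2dCountTrue(&mask)); // 6
}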
// compute L2 distance between maps
pub fn map2dDist2(a:&Map2d<f64>,b:&Map2d<f64>) -> f64 {
let mut acc = 0.0;
for ix in 0..a.w {
for iy in 0..a.h {
acc += (readAt(a,iy,ix)-readAt(b,iy,ix)).powf(2.0);
}
}
acc.sqrt()
}
// compute L2 distance between maps using a mask
pub fn map2dDist2ByMask(a:&Map2d<f64>,b:&Map2d<f64>,mask:&Map2d<bool>) -> f64 {
let mut acc = 0.0;
for ix in 0..a.w {
for iy in 0..a.h {
if readAt(mask,iy,ix) {
acc += (readAt(a,iy,ix)-readAt(b,iy,ix)).powf(2.0);
}
}
}
acc.sqrt()
}
// compute similarity (simple way)
pub fn map2dSim(a:&Map2d<f64>,b:&Map2d<f64>) -> f64 {
// TODO< use a better, more sound formula! >
1.0-map2dDist2(&a,&b)/((a.w*a.h) as f64)
}
// compute similarity (simple way)
pub fn map2dSimByMask(a:&Map2d<f64>,b:&Map2d<f64>,mask:&Map2d<bool>) -> f64 {
// TODO< use a better, more sound formula! >
let n = map2dCountTrue(mask);
1.0-map2dDist2ByMask(&a,&b,&mask)/(n as f64)
}
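// Example (sketch, illustrative helper name): compare two small maps with the
// simple L2-based similarity; identical maps give similarity 1.0.
#[allow(dead_code)]
pub fn map2dSimExample() {
let mut a = Map2d::<f64> { arr:vec![0.0; 5*5], w:5, h:5 };
let b = Map2d::<f64> { arr:vec![0.0; 5*5], w:5, h:5 };
writeAt(&mut a, 2,2, 1.0);
println!("dist = {}", map2dDist2(&a, &b)); // 1.0
println!("sim  = {}", map2dSim(&a, &b)); // 1.0 - 1.0/25.0 = 0.96
}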
// prototype
#[derive(Debug, Clone)]
pub struct Proto {
map:Map2d<f64>,
id:i64,
}
#[derive(Debug, Clone)]
pub struct ProtoClassifier {
protos:Vec<Proto>,
idCounter:i64,
}
// add prototype
pub fn protoPush(c:&mut ProtoClassifier, m:&Map2d<f64>) -> i64 {
let resCategory = c.idCounter;
c.protos.push(Proto{map:m.clone(),id:resCategory});
c.idCounter+=1;
resCategory
}
// search for matching prototype
pub fn protoFindByMask(c:&ProtoClassifier, m:&Map2d<f64>, mask:&Map2d<bool>, foundId: &mut i64, foundMaxSim:&mut f64) {
*foundId = -1;
*foundMaxSim = 0.0;
// search for best matching prototype
for iProto in c.protos.iter() {
let sim = map2dSimByMask(&m, &iProto.map, &mask);
if sim > *foundMaxSim {
*foundMaxSim = sim;
*foundId = iProto.id;
}
}
}
// more advanced comparison based on masks
pub fn protoCalcUncroppedAndCropped(c:&ProtoClassifier, perceptPrimary:&Map2d::<f64>, foundSimUncropped:&mut f64, foundSimCropped:&mut f64) {
// mask for perception
let mut perceptMask = Map2d::<bool> {
arr:vec![true; 7*7],
w:7,
h:7,
};
let mut foundId = -1;
//let mut foundMaxSim = 0.0;
// TODO< use category >
protoFindByMask(&c, &perceptPrimary, &perceptMask, &mut foundId, foundSimUncropped);
// crop left side
{
let h = perceptMask.h;
map2dDrawBox(&mut perceptMask, 0,0, 2,h, false);
}
// compute best proto
// TODO< use category >
protoFindByMask(&c, &perceptPrimary, &perceptMask, &mut foundId, foundSimCropped);
}
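// Example (sketch, illustrative helper name): store one prototype and query it
// again with a full mask; an identical percept yields similarity 1.0.
#[allow(dead_code)]
pub fn protoExample() {
let mut cls = ProtoClassifier {protos:vec![], idCounter:1};
let percept = Map2d::<f64> { arr:vec![1.0; 7*7], w:7, h:7 };
let mask = Map2d::<bool> { arr:vec![true; 7*7], w:7, h:7 };
let id = protoPush(&mut cls, &percept);
let mut foundId = -1;
let mut foundMaxSim = 0.0;
protoFindByMask(&cls, &percept, &mask, &mut foundId, &mut foundMaxSim);
println!("found id = {} (expected {}), sim = {}", foundId, id, foundMaxSim);
}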
#[derive(Clone)]
pub struct Tv {
f: f64, // frequency
c: f64, // confidence
}
// expectation of a truth value
pub fn tvExp(tv:&Tv) -> f64 {
// see https://cis.temple.edu/~pwang/Publication/abduction.pdf
(tv.f-0.5)*tv.c + 0.5
}
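// Example (sketch, illustrative helper name): expectation of a truth value;
// with f = 0.9 and c = 0.5 the expectation is (0.9-0.5)*0.5 + 0.5 = 0.7.
#[allow(dead_code)]
pub fn tvExample() {
let tv = Tv{f:0.9, c:0.5};
println!("exp = {}", tvExp(&tv)); // 0.7
}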
pub fn vecAddScale(a: &[f64], b: &[f64], w:f64) -> Vec<f64> {
let mut res = vec![0.0;a.len()];
for idx in 0..a.len() {
res[idx] = a[idx]+b[idx]*w;
}
res
}
pub fn vecScale(a: &[f64], w:f64) -> Vec<f64> {
let mut res = vec![0.0;a.len()];
for idx in 0..a.len() {
res[idx] = a[idx]*w;
}
res
}
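// Example (sketch, illustrative helper name): fitness-weighted averaging of two
// parameter vectors, as used for the EA update in main().
#[allow(dead_code)]
pub fn vecWeightingExample() {
let a = vec![1.0, 0.0];
let b = vec![0.0, 1.0];
let (scoreA, scoreB) = (3.0, 1.0);
let sum = vecAddScale(&vecScale(&a, scoreA), &b, scoreB);
let avg = vecScale(&sum, 1.0/(scoreA+scoreB));
println!("{:?}", avg); // [0.75, 0.25]
}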
use std::time::Instant;
pub fn main() {
let mut rng = rand::thread_rng();
let now = Instant::now();
let mut worldEpisodes = 1; // number of episodes the world is simulated for
// parameters of the evolved brain
let mut agentBrainParameters:Vec<f64> = vec![0.0;40];
for idx in 0..agentBrainParameters.len() {
agentBrainParameters[idx] = (rng.gen::<f64>() * 2.0 - 1.0) * 0.5;
}
// iterate over world timesteps
for iEpisode in 0..worldEpisodes {
// mutation of EA candidate
let normal = Normal::new(0.0, 0.02); // standard deviation 0.02
let mut param2 = agentBrainParameters.clone();
for idx in 0..agentBrainParameters.len() {
param2[idx] += normal.sample(&mut rng);
}
let mut worldScreen = Map2d {
arr:vec![0.0; 10*10],
w:10,
h:10,
};
// draw to environment
map2dDrawBox(&mut worldScreen, 3,3, 5,5, 1.0);
// prototype based classifier
let mut protocls = ProtoClassifier {protos:vec![],idCounter:1};
{ // store prototype of searched object
let cx = 3;
let cy = 3;
let mut perceptPrimary = Map2d::<f64> {
arr:vec![0.0; 7*7],
w:7,
h:7,
};
{
// position of left top corner
let px = cx-perceptPrimary.w/2;
let py = cy-perceptPrimary.h/2;
for dx in 0..perceptPrimary.w {
for dy in 0..perceptPrimary.h {
let v = readAt(&worldScreen, py+dy,px+dx);
writeAt(&mut perceptPrimary, dy,dx, v);
}
}
}
protoPush(&mut protocls, &perceptPrimary); // add prototype to the set of all prototypes
}
{ // do similarity computation with and without cropping
let mut perceptPrimary = Map2d::<f64> {
arr:vec![0.0; 7*7],
w:7,
h:7,
};
{
let startCx = 3;
let startCy = 3;
let cx = startCx;
let cy = startCy;
// position of left top corner
let px = cx-perceptPrimary.w/2;
let py = cy-perceptPrimary.h/2;
for dx in 0..perceptPrimary.w {
for dy in 0..perceptPrimary.h {
let v = readAt(&worldScreen, py+dy,px+dx);
writeAt(&mut perceptPrimary, dy,dx, v);
}
}
}
let mut foundSimUncropped = 0.0;
let mut foundSimCropped = 0.0;
protoCalcUncroppedAndCropped(&protocls, &perceptPrimary, &mut foundSimUncropped, &mut foundSimCropped);
println!("sim uncropped {}", foundSimUncropped);
println!("sim cropped {}", foundSimCropped);
}
// function to evaluate score by parameters
// /param usedParameters are the parameters of the "brain"-NN
let mut evalScore = |usedParameters: &Vec<f64>| -> f64 {
let mut n0 = Neuron {
weights: vec![Ad{r:0.5,d:0.0};5],
bias:Ad{r:0.0,d:0.0},
act:1 // sigmoid because relu doesn't really work for "decision making"
};
let mut n1 = Neuron {
weights: vec![Ad{r:0.5,d:0.0};5],
bias:Ad{r:0.0,d:0.0},
act:1 // sigmoid because relu doesn't really work for "decision making"
};
let mut n2 = Neuron {
weights: vec![Ad{r:0.5,d:0.0};5],
bias:Ad{r:0.0,d:0.0},
act:1 // sigmoid because relu doesn't really work for "decision making"
};
// transfer parameters as weights etc.
let mut paramIdx=0;
for i in 0..n0.weights.len() {
n0.weights[i] = Ad{r:usedParameters[paramIdx],d:0.0};
paramIdx+=1;
}
n0.bias=Ad{r:usedParameters[paramIdx],d:0.0};
paramIdx+=1;
for i in 0..n1.weights.len() {
n1.weights[i] = Ad{r:usedParameters[paramIdx],d:0.0};
paramIdx+=1;
}
n1.bias=Ad{r:usedParameters[paramIdx],d:0.0};
paramIdx+=1;
for i in 0..n2.weights.len() {
n2.weights[i] = Ad{r:usedParameters[paramIdx],d:0.0};
paramIdx+=1;
}
n2.bias=Ad{r:usedParameters[paramIdx],d:0.0};
paramIdx+=1;
let startCx = 3;
let startCy = 3;
let mut cx = startCx;
let mut cy = startCy;
// simulate retina thing with NN for "decision making"
for retinaStep in 0..5 { // let the retina make decisions for n timesteps
println!("ep={} perceive @ {} {}", iEpisode,cx,cy);
// mask for perception
let mut perceptMask = Map2d::<bool> {
arr:vec![true; 7*7],
w:7,
h:7,
};
// TODO< let the NN control this >
let cropFromLeft = false; // do we want to crop left side?
if cropFromLeft {
let h = perceptMask.h;
map2dDrawBox(&mut perceptMask, 0,0, 2,h, false);
}
let mut perceptPrimary = Map2d::<f64> {
arr:vec![0.0; 7*7],
w:7,
h:7,
};
{
// position of left top corner
let px = cx-perceptPrimary.w/2;
let py = cy-perceptPrimary.h/2;
for dx in 0..perceptPrimary.w {
for dy in 0..perceptPrimary.h {
let v = readAt(&worldScreen, py+dy,px+dx);
writeAt(&mut perceptPrimary, dy,dx, v);
}
}
}
let mut foundId = -1;
let mut foundMaxSim = 0.0;
{
protoFindByMask(&protocls, &perceptPrimary, &perceptMask, &mut foundId, &mut foundMaxSim);
if foundMaxSim < 0.3 {
foundId = protoPush(&mut protocls, &perceptPrimary); // add prototype to all prototypes
}
}
// stimulate the action neurons and use their outputs
let mut stimulusOfNn = vec![Ad{r:0.1,d:0.0};5];
//println!("ID {}", foundId); // DEBUG id of classification
if foundId >= 0 && foundId < stimulusOfNn.len() as i64 { // GUARD against out-of-range class id
stimulusOfNn[foundId as usize] = Ad{r:0.9,d:0.0}; // encode class as stimulus
}
let mut neuronRes0 = vec![];
neuronRes0.push(calc(&stimulusOfNn, &n0));
neuronRes0.push(calc(&stimulusOfNn, &n1));
neuronRes0.push(calc(&stimulusOfNn, &n2));
let dbgActY = false; // debug output values of action neurons
if dbgActY {
for nidx in 0..neuronRes0.len() {
println!("n[{}].res = {}", nidx, neuronRes0[nidx].r);
}
}
// * compute action by argmax
// act = 0: nop
// act = 1: right
// act = 2: left
let mut act = 0; // index of action to execute
let mut actVal = neuronRes0[0].r;
for nidx in 0..neuronRes0.len() {
if neuronRes0[nidx].r > actVal {
actVal = neuronRes0[nidx].r;
act = nidx;
}
}
let dbgAct = false; // debug actions?
// * do action
if act == 1 {
if dbgAct{println!("ACT right")};
cx += 2;
}
else if act == 2 {
if dbgAct{println!("ACT left")};
cx -= 2;
}
}
let mut score:f64 = 7.0 - (((startCx - cx).pow(2)) as f64).sqrt(); // penalize horizontal displacement from the start
score = score.max(0.01);
return score;
};
let score0:f64 = evalScore(&agentBrainParameters);
let score1:f64 = evalScore(&param2);
{ // do weighting by score (fitness-weighted average of the two candidates)
let mut scoreSum = 0.0;
let mut parameterSum = vec![0.0; agentBrainParameters.len()];
scoreSum += score0;
parameterSum = vecAddScale(&parameterSum, &agentBrainParameters, score0);
scoreSum += score1;
parameterSum = vecAddScale(&parameterSum, &param2, score1);
agentBrainParameters = vecScale(&parameterSum,1.0/scoreSum);
}
// greedy override: keep the mutated candidate outright if it scored better
if score1 > score0 {
agentBrainParameters = param2; // new parameters are better
}
}
println!("took {} us", now.elapsed().as_micros());
}
PtrMan commented May 26, 2020

Licensed under MIT license

PtrMan commented May 27, 2020

Now generates different behaviours from run to run!

PtrMan commented May 27, 2020

TODO: add bigger population

PtrMan commented May 29, 2020

DONE: learn class 0 by training and then classify
DONE: use mask to mask partially visible object!
TODO: compare masked with unmasked and take unmasked version if similarity is higher!
TODO: split off genetic algorithm stuff into its own file!
