Skip to content

Instantly share code, notes, and snippets.

@bradfordw
Created December 15, 2009 23:27
Show Gist options
  • Save bradfordw/257405 to your computer and use it in GitHub Desktop.
-module(ann).
%% Logistic sigmoid activation: 1 / (1 + e^-x).
-define(SIG, fun(X) -> 1 / (1 + math:exp(-X)) end).
%% Derivative of the logistic sigmoid: e^-x / (1 + e^-x)^2.
%% Fix: the original denominator was `1 + math:exp(-2 * X)`, i.e. 1 + e^-2x,
%% which is not (1 + e^-x)^2 = 1 + 2e^-x + e^-2x — the gradient was wrong.
-define(SIG_DERIV, fun(X) -> E = math:exp(-X), E / ((1 + E) * (1 + E)) end).
%% Explicit exports instead of -compile(export_all): every function stays
%% reachable (spawn/3 and the ann:... remote calls below need them exported),
%% but the module's surface is now visible at a glance.
-export([dp/2, vector_map/3, feed_fwd/3, perceptron/3, new_input/2,
         get_values/1, get_keys/1, broadcast/2, broadcast_verbose/2,
         connect/2, add_sensitivity/2, calculate_sensitivity/5, run/0]).
%% Dot product of two equal-length numeric lists.
%% Idiom fix: use stdlib lists:zipwith/3 + lists:sum/1 instead of the
%% hand-rolled foldl over vector_map/3; unequal-length lists still raise,
%% as before.
dp(X, Y) ->
    lists:sum(lists:zipwith(fun(A, B) -> A * B end, X, Y)).
%% Map a binary function over two lists element-wise (like lists:zipwith/3).
%% Both lists must have the same length; a length mismatch raises
%% function_clause, as in a plain zip.
vector_map(Fun, Xs, Ys) ->
    vector_map(Fun, Xs, Ys, []).

%% Tail-recursive worker: build the result in an accumulator, reverse once.
vector_map(_Fun, [], [], Acc) ->
    lists:reverse(Acc);
vector_map(Fun, [X | Xs], [Y | Ys], Acc) ->
    vector_map(Fun, Xs, Ys, [Fun(X, Y) | Acc]).
%% Feed-forward step: apply the activation function to the weighted sum
%% (dot product) of the inputs.
feed_fwd(Activation, Weights, Inputs) ->
    Net = dp(Weights, Inputs),
    Activation(Net).
%% The neuron process loop. State carried through the loop:
%%   Weights - list of numbers, one per input connection (same order as Inputs)
%%   Inputs  - proplist [{SenderPid, LastValue}] of input connections
%%   Sens    - output-connection state; [] marks an output node.
%%             NOTE(review): {connect_output, _} stores bare pids here, while
%%             the {learn, _} path treats it as {Pid, Value} pairs — confirm
%%             the intended shape.
%% Fix in this revision: the {pass, _} and {status} clauses previously fell
%% out of the receive without recursing, terminating the process (and losing
%% any queued messages); they now loop like the other clauses.
perceptron(Weights, Inputs, Sens) ->
    receive
        {learn, Backprop} ->
            %% Backprop is a {FromPid, Value} pair from the next layer.
            LearnRate = 0.5,
            % 1. calculate correct sensitivities (Sens)
            NewSensitivity = add_sensitivity(Sens, Backprop),
            io:format("New sensitivity: ~p~n",[NewSensitivity]),
            OutputValue = feed_fwd(?SIG, Weights, get_values(Inputs)),
            DerivedValue = feed_fwd(?SIG_DERIV, Weights, get_values(Inputs)),
            Sensitivity = calculate_sensitivity(Backprop, Inputs, NewSensitivity, OutputValue, DerivedValue),
            io:format("(~p) New Sensitivities: ~p~n", [self(), NewSensitivity]),
            io:format("(~p) Calculated Sensitivities: ~p~n", [self(), Sensitivity]),
            % 2. adjust weights (delta rule: dW_i = LearnRate * Sensitivity * Input_i)
            AdjWeights = lists:map(fun(I) -> LearnRate * Sensitivity * I end, get_values(Inputs)),
            NewWeights = vector_map(fun(W,D) -> W + D end, Weights, AdjWeights),
            io:format("(~p) Adjusted weights: ~p~n",[self(), NewWeights]),
            % 3. broadcast sensitivities and weights to previous layer
            %% NOTE(review): this pairs NewWeights with NewSensitivity, whose
            %% elements are {Pid, Value} tuples — so InPid is a tuple, not a
            %% pid, and `InPid ! ...` would raise badarg. The weights also
            %% correspond to Inputs, not to the output sensitivities. Looks
            %% like this should map over the input pids — confirm intent.
            vector_map(fun(W,InPid) -> InPid ! {learn, {self(), Sensitivity * W}} end, NewWeights, NewSensitivity),
            perceptron(NewWeights, Inputs, NewSensitivity);
        {stimulate, Input} ->
            % 1. add input to inputs to create new inputs.
            NewInput = new_input(Inputs, Input),
            io:format("NewInput: ~p~n",[NewInput]),
            % 2. calculate output of perceptron
            Output = feed_fwd(?SIG, Weights, get_values(NewInput)),
            % 3. stimulate connected perceptrons via OutputPids
            io:format("Sens: ~p~n",[Sens]),
            case Sens =/= [] of
                true ->
                    broadcast(ann:get_keys(Sens), {stimulate, {self(), Output}});
                false -> %training: an output node trains itself toward a hard-coded target of 1
                    io:format("~p outputs: ~p~n",[self(),Output]),
                    self() ! {learn, {self(), 1}}
            end,
            perceptron(Weights, NewInput, Sens);
        {connect_output, RecvPid} ->
            NewSens = [RecvPid | Sens],
            % io:format("~p output connected to ~p: ~p~n",[self(),RecvPid,NewSens]),
            perceptron(Weights, Inputs, NewSens);
        {connect_input, SendPid} ->
            %% New input connections start with value 0.5 and weight 0.5.
            NewInput = [{SendPid, 0.5} | Inputs],
            % io:format("~p input connected to ~p: ~p~n",[self(), SendPid, NewInput]),
            perceptron([0.5 | Weights], NewInput, Sens);
        {pass, Input} ->
            %% NOTE(review): this forwards the bare Input term, but receivers
            %% only match tagged messages such as {stimulate, {Pid, Value}} —
            %% confirm the intended pass protocol.
            broadcast_verbose(ann:get_keys(Sens), Input),
            %% Fix: recurse so the neuron survives a {pass, _} message.
            perceptron(Weights, Inputs, Sens);
        {status} ->
            io:format("Status of Node(~p)~nW: ~p~nI: ~p~nS: ~p~n",[self(), Weights, Inputs, Sens]),
            %% Fix: recurse so {status} does not terminate the node.
            perceptron(Weights, Inputs, Sens)
    end.
%% Replace the stored {Pid, Value} entry for Input's sender pid in the
%% Inputs proplist. If the pid is not present, Inputs is returned unchanged
%% (lists:keyreplace/4 semantics).
new_input(Inputs, Input) ->
    {SenderPid, _Value} = Input,
    lists:keyreplace(SenderPid, 1, Inputs, Input).
%% Extract the value from each {Key, Value} pair, preserving order.
%% Fix: the original foldl appended with `Acc ++ [V]`, rebuilding the
%% accumulator on every step (O(n^2)); lists:map/2 is O(n) and keeps the
%% same crash behavior on non-pair elements.
get_values(Pairs) ->
    lists:map(fun({_Key, Value}) -> Value end, Pairs).
%% Unique keys of a proplist; delegates to proplists:get_keys/1
%% (result order is unspecified).
get_keys(Proplist) ->
    proplists:get_keys(Proplist).
%% Send Msg to every pid in Pids; always returns {ok, done}.
broadcast(Pids, Msg) ->
    lists:foreach(fun(Pid) -> Pid ! Msg end, Pids),
    {ok, done}.
%% Like broadcast/2, but logs each delivery before sending.
%% Always returns {ok, done}.
broadcast_verbose(Pids, Msg) ->
    lists:foreach(
        fun(Pid) ->
            io:format("Stimulating ~p with: ~p~n",[Pid, Msg]),
            Pid ! Msg
        end,
        Pids),
    {ok, done}.
%% Wire two perceptron processes together: tell the sender it has a new
%% output and the receiver it has a new input. Returns the last message
%% sent (the {connect_input, _} tuple), per `!` semantics.
connect(FromPid, ToPid) ->
    FromPid ! {connect_output, ToPid},
    ToPid ! {connect_input, FromPid}.
% adds the propagating sensitivity to the Sensitivities Hash
% adds the propagating sensitivity to the Sensitivities Hash
%% An output node (empty sensitivities list) has nothing to record;
%% otherwise the backpropagated {Pid, Value} replaces the stored entry
%% for that pid. Dispatch on the list shape in the head instead of `case`.
add_sensitivity([], _Backprop) ->
    [];
add_sensitivity(Sensitivities, Backprop) ->
    new_input(Sensitivities, Backprop).
% Calculates the sensitivity of this particular node
% Calculates the sensitivity of this particular node
%% Dispatches on the node's role, inferred from which lists are empty.
%% Input node (has downstream sensitivities but no inputs): nothing to learn.
calculate_sensitivity(_Backprop, Inputs, Sensitivities, _OutputValue, _DerivValue)
  when Sensitivities =/= [], Inputs =:= [] ->
    null;
%% Output node (has inputs but no downstream sensitivities):
%% delta = (target - output) * f'(net), where the target rides in Backprop.
calculate_sensitivity(Backprop, Inputs, Sensitivities, OutputValue, DerivValue)
  when Sensitivities =:= [], Inputs =/= [] ->
    {_FromPid, TrainingValue} = Backprop,
    (TrainingValue - OutputValue) * DerivValue;
%% Hidden node: delta = f'(net) * sum of downstream sensitivities.
calculate_sensitivity(_Backprop, Inputs, Sensitivities, _OutputValue, DerivValue)
  when Sensitivities =/= [], Inputs =/= [] ->
    DerivValue * lists:sum(get_values(Sensitivities)).
%% Demo: build a 2-2-1 network (inputs X1, X2; hidden H1, H2; output O),
%% feed the inputs, and ask every node for its status.
run() ->
    [X1, X2, H1, H2, O] =
        [spawn(ann, perceptron, [[], [], []]) || _ <- lists:seq(1, 5)],
    %% Wire inputs to the hidden layer and the hidden layer to the output.
    lists:foreach(
        fun({From, To}) -> ann:connect(From, To) end,
        [{X1, H1}, {X1, H2}, {X2, H1}, {X2, H2}, {H1, O}, {H2, O}]),
    %% Feed a value into each input node.
    X1 ! {pass, 1.8},
    X2 ! {pass, 1.3},
    %% Dump the state of every node; the final send's return value
    %% ({status}) is the function's result, as in the original.
    lists:foreach(fun(Pid) -> Pid ! {status} end, [X1, X2, H1, H2]),
    O ! {status}.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment