public
Last active

Example of a _very_ simple feed-forward neural network.

  • Download Gist
Neural_Net_demo.rb
Ruby
# A weighted, directed connection from one neuron to another.
# Propagating through a Link adds (weight * activation) onto the
# destination neuron's current activation.
class Link
  # to     - destination Neuron (anything responding to #name and
  #          #activation / #activation=)
  # weight - multiplier applied to each incoming activation
  def initialize(to, weight)
    @to = to
    @weight = weight
  end

  # Forward +activation+ through this link, accumulating the weighted
  # contribution onto the destination neuron. Logs each step.
  def propagate(activation)
    contribution = @weight * activation

    puts " propagating #{activation} * #{@weight} = #{contribution} to #{@to.name}"
    puts " old activation: #{@to.activation}"

    @to.activation += contribution

    puts " new activation: #{@to.activation}"
  end
end
 
# A single neuron: holds an activation level and a list of outgoing
# weighted Links to downstream neurons.
class Neuron
  attr_accessor :activation
  attr_reader :name

  # name - label used in the log output
  def initialize(name)
    @name = name
    @links = []
    @activation = 0.0

    puts "New Node '#{name}'"
  end

  # Connect this neuron to +neuron+ with the given link +weight+.
  def add_link_to(neuron, weight)
    puts "Adding Link from #{@name} to #{neuron.name}, weight #{weight}."
    @links << Link.new(neuron, weight)
  end

  # Apply a hard threshold (> 0.5 => 1.0, otherwise 0.0) to the
  # accumulated activation, then propagate the thresholded value along
  # every outgoing link.
  def activate
    print "Activating Neuron #{@name}: #{@activation} -> "

    # Simple threshold function
    @activation = @activation > 0.5 ? 1.0 : 0.0

    # Was: puts "#{@activation}" — the interpolation was redundant.
    puts @activation

    @links.each { |link| link.propagate(@activation) }
  end
end
 
if $0 == __FILE__
  # Demo: a boring, simple network that computes AND.
  # Two input neurons feed one output neuron; each link weighs 0.26,
  # so the output only crosses the 0.5 threshold when both inputs fire
  # (0.26 + 0.26 = 0.52 > 0.5).

  nodes = [ Neuron.new("A"), Neuron.new("B"), # Input Neurons
            # No Hidden Neurons
            Neuron.new("Result") ]            # Output Neuron

  nodes[0].add_link_to(nodes[2], 0.26) # A -> Result
  nodes[1].add_link_to(nodes[2], 0.26) # B -> Result

  # Exercise every combination of binary inputs.
  # (Was a pair of `for` loops — `for` leaks its loop variable into the
  # enclosing scope in Ruby; `each` is the idiomatic form.)
  [0.0, 1.0].each do |a|
    [0.0, 1.0].each do |b|
      # Reset the network
      nodes.each { |node| node.activation = 0.0 }

      # Feed the input in
      nodes[0].activation = a
      nodes[1].activation = b

      puts "\nComputing #{a} AND #{b}...\n"

      # Evaluate the network by activating each node in order
      nodes.each(&:activate)

      # Extract output from the activation of the output node
      output = nodes[2].activation

      puts "\n#{a} AND #{b} = #{output}"
    end
  end
end
 
 
# Output:
#
# New Node 'A'
# New Node 'B'
# New Node 'Result'
# Adding Link from A to Result, weight 0.26.
# Adding Link from B to Result, weight 0.26.
#
# Computing 0.0 AND 0.0...
# Activating Neuron A: 0.0 -> 0.0
# propagating 0.0 * 0.26 = 0.0 to Result
# old activation: 0.0
# new activation: 0.0
# Activating Neuron B: 0.0 -> 0.0
# propagating 0.0 * 0.26 = 0.0 to Result
# old activation: 0.0
# new activation: 0.0
# Activating Neuron Result: 0.0 -> 0.0
#
# 0.0 AND 0.0 = 0.0
#
# Computing 0.0 AND 1.0...
# Activating Neuron A: 0.0 -> 0.0
# propagating 0.0 * 0.26 = 0.0 to Result
# old activation: 0.0
# new activation: 0.0
# Activating Neuron B: 1.0 -> 1.0
# propagating 1.0 * 0.26 = 0.26 to Result
# old activation: 0.0
# new activation: 0.26
# Activating Neuron Result: 0.26 -> 0.0
#
# 0.0 AND 1.0 = 0.0
#
# Computing 1.0 AND 0.0...
# Activating Neuron A: 1.0 -> 1.0
# propagating 1.0 * 0.26 = 0.26 to Result
# old activation: 0.0
# new activation: 0.26
# Activating Neuron B: 0.0 -> 0.0
# propagating 0.0 * 0.26 = 0.0 to Result
# old activation: 0.26
# new activation: 0.26
# Activating Neuron Result: 0.26 -> 0.0
#
# 1.0 AND 0.0 = 0.0
#
# Computing 1.0 AND 1.0...
# Activating Neuron A: 1.0 -> 1.0
# propagating 1.0 * 0.26 = 0.26 to Result
# old activation: 0.0
# new activation: 0.26
# Activating Neuron B: 1.0 -> 1.0
# propagating 1.0 * 0.26 = 0.26 to Result
# old activation: 0.26
# new activation: 0.52
# Activating Neuron Result: 0.52 -> 1.0
#
# 1.0 AND 1.0 = 1.0

Please sign in to comment on this gist.

Something went wrong with that request. Please try again.