Skip to content

Instantly share code, notes, and snippets.

@badboy
Created December 17, 2014 16:31
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save badboy/b59fb31d94909b6bf968 to your computer and use it in GitHub Desktop.
#!/usr/bin/env ruby
# Minimal stand-in for a cluster node: a Struct wrapping one info hash
# with :host, :port and :name keys.
Node = Struct.new(:info) do
  # Human-readable label, e.g. "127.0.0.1:6379".
  def to_s
    info[:name]
  end

  # Debug representation that reads like a call to the new_node helper.
  def inspect
    format('new_node("%s", %s)', info[:host], info[:port])
  end
end
# Total number of hash slots in a Redis Cluster.
ClusterHashSlots = 16384

# Build a Node describing the instance listening at host:port.
def new_node(host, port)
  info = { host: host, port: port, name: [host, port].join(":") }
  Node.new(info)
end
#require 'pry'; binding.pry
# Simulate redis-trib's cluster layout: pick masters spread across as
# many distinct IPs as possible, split the 16384 hash slots evenly
# among them, then hand out the remaining instances as replicas,
# preferring hosts different from each master's own.
#
# nodes    - Array of Node (see new_node). The per-IP work lists built
#            below are consumed destructively, but the caller's array
#            itself is not mutated.
# replicas - requested number of replicas per master.
#
# Prints the resulting layout to stdout; the return value is not
# meaningful.
def alloc_slots(nodes, replicas)
  nodes_count = nodes.length
  masters_count = nodes.length / (replicas + 1)
  masters = []
  # The first step is to split instances by IP. This is useful as
  # we'll try to allocate master nodes in different physical machines
  # (as much as possible) and to allocate slaves of a given master in
  # different physical machines as well.
  #
  # This code assumes that if the IP is different, then it is more
  # likely that the instance is running in a different physical host
  # or at least a different virtual machine.
  ips = {}
  nodes.each{|n|
    ips[n.info[:host]] = [] if !ips[n.info[:host]]
    ips[n.info[:host]] << n
  }
  # Select master instances: round-robin over the IPs so no IP
  # contributes a second master before every IP contributed one.
  puts "Using #{masters_count} masters:"
  while masters.length < masters_count
    ips.each{|ip,nodes_list|
      next if nodes_list.length == 0
      masters << nodes_list.shift
      puts masters[-1]
      nodes_count -= 1
      break if masters.length == masters_count
    }
  end
  # Alloc slots on masters: divide the slot space evenly; the last
  # master absorbs any rounding remainder so coverage ends exactly at
  # ClusterHashSlots-1.
  slots_per_node = ClusterHashSlots.to_f / masters_count
  first = 0
  cursor = 0.0
  masters.each_with_index{|n,masternum|
    last = (cursor+slots_per_node-1).round
    if last > ClusterHashSlots || masternum == masters.length-1
      last = ClusterHashSlots-1
    end
    last = first if last < first # Min step is 1.
    #n.add_slots first..last   # simulation only: just report the range
    puts "Add slots (#{first}..#{last}) to #{n}"
    first = last+1
    cursor += slots_per_node
  }
  # Select N replicas for every master.
  # We try to split the replicas among all the IPs with spare nodes
  # trying to avoid the host where the master is running, if possible.
  #
  # Note we loop two times. The first loop assigns the requested
  # number of replicas to each master. The second loop assigns any
  # remaining instances as extra replicas to masters. Some masters
  # may end up with more than their requested number of replicas, but
  # all nodes will be used.
  assignment_verbose = false
  [:requested,:unused].each{|assign|
    masters.each{|m|
      # Alternate the scan direction over the IP list each round so
      # replicas spread out instead of always draining the same IP.
      reverse = true
      assigned_replicas = 0
      while assigned_replicas < replicas
        break if nodes_count == 0
        if assignment_verbose
          if assign == :requested
            puts "Requesting total of #{replicas} replicas " \
                 "(#{assigned_replicas} replicas assigned " \
                 "so far with #{nodes_count} total remaining)."
          elsif assign == :unused
            puts "Assigning extra instance to replication " \
                 "role too (#{nodes_count} remaining)."
          end
        end
        cur_ips = reverse ? ips.to_a.reverse : ips.to_a
        cur_ips.each{|ip,nodes_list|
          next if nodes_list.length == 0
          # Skip instances with the same IP as the master if we
          # have some more IPs available.
          next if ip == m.info[:host] && nodes_count > nodes_list.length
          slave = nodes_list.shift
          #slave.set_as_replica(m.info[:name])   # simulation only
          nodes_count -= 1
          assigned_replicas += 1
          puts "Adding replica #{slave} to #{m}"
          break
        }
        reverse = !reverse
      end
    }
  }
end
# Demo topology: two instances on each of three hosts, one replica
# requested per master.
nodes = %w[192.168.1.100 192.168.1.101 192.168.1.102].flat_map do |host|
  [6379, 6380].map { |port| new_node(host, port) }
end
replicas = 1
alloc_slots(nodes, replicas)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment