Benchmarking lbm_kv
defmodule Benchmarks do
  # Create the mnesia-backed lbm_kv table that all benchmarks write to
  def init do
    :ok = :lbm_kv.create(Web.Job)
  end

  def count_entries do
    IO.puts "Web.Job => #{:lbm_kv.match_key(Web.Job, :_) |> elem(1) |> Enum.count}"
  end

  # Time `fun` with :timer.tc and report throughput, given that it
  # performs `num_items` operations
  def measure_throughput(fun, num_items) do
    {us, _} = :timer.tc(fun)
    ops_per_sec = 1_000_000.0 * num_items / us
    IO.puts "#{ops_per_sec} ops/sec (#{num_items} in #{us}us)"
  end

  def serial_write_speed do
    expiration = (Timex.now() |> Timex.to_unix()) + 6000
    measure_throughput(fn ->
      Enum.each(1..1000, &(:lbm_kv.put(Web.Job, "JOB-#{&1}", %{expiration: expiration, status: "authenticating", error_message: "credentials were rejected"})))
    end, 1000)
  end

  def truncate_entries do
    {:atomic, :ok} = :mnesia.delete_table(Web.Job)
    init()
  end

  # Spawn `num_actors` writer processes, each putting a disjoint range of
  # `writes_per_actor` keys, and measure the wall-clock time until all of
  # them have reported back
  def parallel_write_speed(num_actors, writes_per_actor) do
    expiration = (Timex.now() |> Timex.to_unix()) + 6000
    ranges = Enum.map(1..num_actors, &Range.new(writes_per_actor * &1, (writes_per_actor - 1) + writes_per_actor * &1))
    total_writes = num_actors * writes_per_actor
    measure_throughput(fn ->
      report_to = self()
      pids = Enum.map(ranges, fn range ->
        spawn(fn ->
          Enum.each(range, &(:lbm_kv.put(Web.Job, "JOB-#{&1}", %{expiration: expiration, status: "authenticating", error_message: "credentials were rejected"})))
          send report_to, self()
        end)
      end)
      wait_until_receive_all(pids)
    end, total_writes)
  end

  # Block until every spawned writer pid has sent its completion message
  defp wait_until_receive_all([]), do: :done
  defp wait_until_receive_all([h | tail]) do
    receive do
      ^h -> wait_until_receive_all(tail)
    end
  end
end
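
For context, here is how these benchmarks might be driven from an attached iex session. This is a hypothetical run: it assumes lbm_kv and timex are project dependencies, any clustering of nodes has already been set up, and the printed timings are illustrative rather than measured.

iex> Benchmarks.init
:ok
iex> Benchmarks.serial_write_speed
# prints e.g. "909.09 ops/sec (1000 in 1100000us)"
iex> Benchmarks.truncate_entries
:ok
iex> Benchmarks.parallel_write_speed(10, 100)
# prints e.g. "5000.0 ops/sec (1000 in 200000us)"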
mmmries commented Oct 5, 2016

~~I ran this on two t2.medium servers on AWS and got ~700-800 writes/sec in serial and ~11k - 20k writes/sec with the parallel_write_speed test.~~

I realized after the fact that the way I was calculating throughput was wrong. I re-ran the experiment on 3 m3.xlarge nodes (4 cores each, one generation old architecture) and got these results (a quick sanity check of the math follows the list).

  • serial throughput ~900/sec
  • 10 concurrent writers ~5000/sec
  • 20 concurrent writers ~8000/sec
  • 100 concurrent writers ~9200/sec
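
As a sanity check, plugging the serial case back into the formula in `measure_throughput` (the elapsed time here is illustrative, not a measured value):

ops_per_sec = 1_000_000.0 * 1000 / 1_100_000
# => 909.09, i.e. 1000 writes in ~1.1s of wall clock, consistent with the ~900/sec above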
