Skip to content

Instantly share code, notes, and snippets.

@robertov8
Created March 20, 2023 19:46
Show Gist options
Save robertov8/1b5a135e2edaed40ee376c2f16b03a68 to your computer and use it in GitHub Desktop.
Testando abordagens async
Operating System: Linux
CPU Information: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz
Number of Available Cores: 8
Available memory: 15.43 GB
Elixir 1.14.2
Erlang 24.3.4.6
Benchmark suite executing with the following configuration:
warmup: 2 s
time: 5 s
memory time: 0 ns
reduction time: 0 ns
parallel: 1
inputs: none specified
Estimated total run time: 49 s
Benchmarking async ...
Benchmarking async_group ...
Benchmarking async_stream ...
Benchmarking async_stream_group ...
Benchmarking async_stream_group_ordered ...
Benchmarking async_stream_ordered ...
Benchmarking sync ...
Name ips average deviation median 99th %
async_group 1.00 1.00 s ±0.01% 1.00 s 1.00 s
async 1.00 1.00 s ±0.03% 1.00 s 1.00 s
async_stream_group 0.50 2.00 s ±0.00% 2.00 s 2.00 s
async_stream 0.50 2.00 s ±0.01% 2.00 s 2.00 s
async_stream_group_ordered 0.50 2.00 s ±0.00% 2.00 s 2.00 s
async_stream_ordered 0.50 2.00 s ±0.01% 2.00 s 2.00 s
sync 0.0999 10.01 s ±0.00% 10.01 s 10.01 s
Comparison:
async_group 1.00
async 1.00 - 1.00x slower +0.00005 s
async_stream_group 0.50 - 2.00x slower +1.00 s
async_stream 0.50 - 2.00x slower +1.00 s
async_stream_group_ordered 0.50 - 2.00x slower +1.00 s
async_stream_ordered 0.50 - 2.00x slower +1.00 s
sync 0.0999 - 10.00x slower +9.01 s
Extended statistics:
Name minimum maximum sample size mode
async_group 1.00 s 1.00 s 5 None
async 1.00 s 1.00 s 5 None
async_stream_group 2.00 s 2.00 s 3 None
async_stream 2.00 s 2.00 s 3 None
async_stream_group_ordered 2.00 s 2.00 s 3 None
async_stream_ordered 2.00 s 2.00 s 3 None
sync 10.01 s 10.01 s 1 None
# deps, do: [{:benchee, "~> 1.0", only: :dev}]
defmodule Http do
  @moduledoc """
  Stub HTTP client used by the benchmark scenarios: every call sleeps for
  one second to simulate network latency and returns a canned response.
  """

  @doc """
  Simulates an HTTP GET taking one second.

  The argument is the request index supplied by the scenarios; it is
  intentionally ignored, so it is prefixed with `_` to silence the
  "unused variable" compiler warning the original produced.
  """
  def get(_i) do
    Process.sleep(1_000)
    %{body: %{message: "hello_world"}}
  end
end
defmodule RequestsSync do
  @moduledoc """
  Baseline scenario: issues every request sequentially in the calling
  process, so total time is roughly `total` times one request.
  """

  @doc "Performs `total` sequential `Http.get/1` calls, returning results in order."
  def run(total) do
    for request <- 1..total, do: Http.get(request)
  end
end
defmodule RequestsAsync do
  @moduledoc """
  Fully concurrent scenario: starts one `Task.async/3` per request and
  awaits them all at once, so total time is roughly one request.
  """

  @doc """
  Runs `total` concurrent `Http.get/1` calls and returns their results
  in request order (`Task.await_many/1` preserves the task list order).
  """
  def run(total) do
    # Idiom fix: start the pipe chain with the bare value instead of a
    # function call (the original piped out of `Enum.map(...)`).
    1..total
    |> Enum.map(&Task.async(Http, :get, [&1]))
    |> Task.await_many()
  end
end
defmodule RequestsAsyncGroup do
  @moduledoc """
  Bounded-concurrency scenario: requests are processed in chunks of
  `System.schedulers_online/0`; each chunk is started and fully awaited
  before the next chunk begins.
  """

  @group_by System.schedulers_online()

  @doc """
  Runs `total` `Http.get/1` calls with at most `@group_by` in flight at
  a time, returning a flat list of results in request order.

  Bug fix: the original piped `Enum.flat_map/2` (which eagerly started
  EVERY task across all chunks) into a single `Task.await_many/1`, so
  the grouping had no effect — its benchmark time equaled plain async
  (1.00 s) instead of the ~2.00 s the grouped stream variants show.
  Awaiting inside each group restores the intended per-group bound.
  """
  def run(total) do
    1..total
    |> Enum.chunk_every(@group_by)
    |> Enum.flat_map(&run_group/1)
  end

  # Starts one task per element of `group` and blocks until the whole
  # group completes, so the next group only starts afterwards.
  defp run_group(group) do
    group
    |> Enum.map(&Task.async(Http, :get, [&1]))
    |> Task.await_many()
  end
end
defmodule RequestsAsyncStream do
  @moduledoc """
  Scenario using `Task.async_stream/3` with unordered results
  (concurrency is bounded by the scheduler count by default).
  """

  @doc """
  Runs `total` `Http.get/1` calls via `Task.async_stream/3`.

  Note: unlike `RequestsAsync.run/1`, each element of the returned list
  is an `{:ok, result}` tuple, as produced by `async_stream`.
  """
  def run(total) do
    # Idiom fix: start the pipe chain with the bare range instead of
    # piping out of the `Task.async_stream(...)` call.
    1..total
    |> Task.async_stream(&Http.get/1, ordered: false)
    |> Enum.to_list()
  end
end
defmodule RequestsAsyncStreamGroup do
  @moduledoc """
  Scenario that chunks the requests by scheduler count and runs each
  chunk through an unordered `Task.async_stream/3`, draining one chunk
  before starting the next.
  """

  @chunk_size System.schedulers_online()

  @doc """
  Runs `total` `Http.get/1` calls chunk by chunk; returns a flat list
  of `{:ok, result}` tuples.
  """
  def run(total) do
    1..total
    |> Enum.chunk_every(@chunk_size)
    |> Enum.flat_map(&stream_chunk/1)
  end

  # Streams one chunk to completion; `Enum.to_list/1` forces the lazy
  # stream, so the chunk finishes before the caller moves on.
  defp stream_chunk(chunk) do
    chunk
    |> Task.async_stream(&Http.get/1, ordered: false)
    |> Enum.to_list()
  end
end
defmodule RequestsAsyncStreamOrdered do
  @moduledoc """
  Same as `RequestsAsyncStream` but with `ordered: true`, so results
  come back in request order as `{:ok, result}` tuples.
  """

  @doc "Runs `total` `Http.get/1` calls via an ordered `Task.async_stream/3`."
  def run(total) do
    # Idiom fix: start the pipe chain with the bare range instead of
    # piping out of the `Task.async_stream(...)` call.
    1..total
    |> Task.async_stream(&Http.get/1, ordered: true)
    |> Enum.to_list()
  end
end
defmodule RequestsAsyncStreamGroupOrdered do
  @moduledoc """
  Scenario that chunks the requests by scheduler count and runs each
  chunk through an ordered `Task.async_stream/3`, draining one chunk
  before starting the next.
  """

  @chunk_size System.schedulers_online()

  @doc """
  Runs `total` `Http.get/1` calls chunk by chunk; returns a flat list
  of `{:ok, result}` tuples in request order within each chunk.
  """
  def run(total) do
    1..total
    |> Enum.chunk_every(@chunk_size)
    |> Enum.flat_map(&stream_chunk/1)
  end

  # Streams one chunk to completion before the caller starts the next.
  defp stream_chunk(chunk) do
    Enum.to_list(Task.async_stream(chunk, &Http.get/1, ordered: true))
  end
end
# Number of simulated requests issued by every scenario.
total = 10

# One entry per approach; each closure runs the same workload.
scenarios = %{
  "sync" => fn -> RequestsSync.run(total) end,
  "async" => fn -> RequestsAsync.run(total) end,
  "async_group" => fn -> RequestsAsyncGroup.run(total) end,
  "async_stream" => fn -> RequestsAsyncStream.run(total) end,
  "async_stream_ordered" => fn -> RequestsAsyncStreamOrdered.run(total) end,
  "async_stream_group" => fn -> RequestsAsyncStreamGroup.run(total) end,
  "async_stream_group_ordered" => fn -> RequestsAsyncStreamGroupOrdered.run(total) end
}

Benchee.run(scenarios,
  unit_scaling: :smallest,
  formatters: [{Benchee.Formatters.Console, extended_statistics: true}]
)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment