Skip to content

Instantly share code, notes, and snippets.

@andreasnoack
Last active August 11, 2016 23:43
Show Gist options
  • Save andreasnoack/f4871427d527d16bbfdf226b56932016 to your computer and use it in GitHub Desktop.
Compute round trips with different transports in Julia
# Bail out immediately when the script is started with no arguments.
isempty(ARGS) && error("you'll have to tell me what to do")
# Round-trip latency benchmark driver.  ARGS selects the transport:
#   ARGS[1] == "remotecall": use Julia's remotecall machinery
#     ARGS[2]: cluster manager ("slurm" is the only one supported)
#     ARGS[3]: wire transport for the workers ("Julia" native TCP or "MPI")
#     ARGS[4]: (Julia transport only) number of workers per host
#   ARGS[1] == "MPI": raw MPI ring of Send/Recv round trips
#     ARGS[2]: "prealloc" to receive into the existing buffer, anything else
#              to allocate a fresh buffer before each receive
if ARGS[1] == "remotecall"
    # NOTE(review): this length check stays disabled because the "MPI"
    # transport branch below needs only three arguments, while the "Julia"
    # transport branch needs four (it reads ARGS[4]).
    # if length(ARGS) != 4
    # error("when specifying the remotecall option you'd have to supply three argument")
    # end
    if ARGS[2] == "slurm"
        if ARGS[3] == "Julia"
            # Expand the SLURM node list into individual host names.
            hosts = split(readstring(`scontrol show hostname $(ENV["SLURM_NODELIST"])`))
            println("Available hosts")
            for h in hosts
                println(h)
            end
            # Start ARGS[4] workers on each host over Julia's native transport.
            addprocs([(h, parse(Int, ARGS[4])) for h in hosts])#, topology = :all_to_all)
            @everywhere print("Who am I? $(readstring(`hostname`))")
            # Pass `data` along the chain of processes in `p`, popping one pid
            # per hop (pop! takes from the back, so pids are visited in
            # reverse order); returns `data` once the chain is exhausted.
            @everywhere function foo(data, p::Vector)
                if isempty(p)
                    return data
                else
                    pp = pop!(p)
                    return remotecall_fetch(foo, pp, data, p)
                end
            end
            # Warm-up call so compilation is excluded from the timings below.
            foo([1], procs())
            # FIX: was ones(1, 2^20), a 1x2^20 Float64 matrix — now an Int
            # vector, consistent with the MPI branches and the "Int(s)" label.
            for x in ([1], ones(Int, 2^20))
                info("Data: 2^$(Int(log2(length(x)))) Int(s)")
                for i = 1:10
                    @time foo(x, procs())
                end
            end
        elseif ARGS[3] == "MPI"
            using MPI
            # Run the remotecall protocol with MPI as the wire transport.
            mgr = MPI.start_main_loop(MPI.MPI_TRANSPORT_ALL)
            @everywhere print("Who am I? $(readstring(`hostname`))")
            # Same chained round trip as in the "Julia" transport branch above
            # (redefined here because workers only exist after start_main_loop).
            @everywhere function foo(data, p::Vector)
                if isempty(p)
                    return data
                else
                    pp = pop!(p)
                    return remotecall_fetch(foo, pp, data, p)
                end
            end
            # Warm-up call so compilation is excluded from the timings below.
            foo([1], procs())
            for x in ([1], ones(Int, 2^20))
                info("Data: 2^$(Int(log2(length(x)))) Int(s)")
                for i = 1:10
                    @time foo(x, procs())
                end
            end
            MPI.stop_main_loop(mgr)
        else
            error("No such option")
        end
    else
        error("only slurm is supported at this point")
    end
elseif ARGS[1] == "MPI"
    if length(ARGS) != 2
        # FIX: message read "two argument".
        error("MPI only takes two arguments")
    end
    using MPI
    MPI.Init()
    cm = MPI.COMM_WORLD
    rnk = MPI.Comm_rank(cm)
    sz = MPI.Comm_size(cm)
    print("Who am I? Rank $rnk on $(readstring(`hostname`))")
    for x in ([1], ones(Int, 2^20))
        if rnk == 0
            info("Data: 2^$(Int(log2(length(x)))) Int(s)")
        end
        # 11 iterations: the first timing includes compilation, leaving 10
        # clean round trips as in the remotecall branches.
        for i = 1:11
            if rnk == 0
                @time begin
                    # Forward trip: send to rank 1 ...
                    MPI.Send(x, 1, 0, cm)
                    # ... and receive from the last rank, sz - 1
                    # (FIX: comment previously said "rank sz").
                    if ARGS[2] == "prealloc"
                        MPI.Recv!(x, sz - 1, 0, cm)
                    else
                        x = copy(x)
                        MPI.Recv!(x, sz - 1, 0, cm)
                    end
                    # Reverse trip: send to rank sz - 1 ...
                    MPI.Send(x, sz - 1, 0, cm)
                    # ... and receive from rank 1.
                    if ARGS[2] == "prealloc"
                        MPI.Recv!(x, 1, 0, cm)
                    else
                        x = copy(x)
                        MPI.Recv!(x, 1, 0, cm)
                    end
                end
            else
                # Forward pass: receive from the previous rank and pass the
                # buffer on (the last rank, sz - 1, closes the ring to rank 0).
                if ARGS[2] == "prealloc"
                    MPI.Recv!(x, rnk - 1, 0, cm)
                else
                    x = copy(x)
                    MPI.Recv!(x, rnk - 1, 0, cm)
                end
                MPI.Send(x, rnk == sz - 1 ? 0 : rnk + 1, 0, cm)
                # Reverse pass: receive from the next rank and pass the buffer
                # back toward rank 0.
                if ARGS[2] == "prealloc"
                    MPI.Recv!(x, rnk == sz - 1 ? 0 : rnk + 1, 0, cm)
                else
                    x = copy(x)
                    MPI.Recv!(x, rnk == sz - 1 ? 0 : rnk + 1, 0, cm)
                end
                MPI.Send(x, rnk - 1, 0, cm)
            end
        end
    end
    println("Done!")
    MPI.Finalize()
else
    error("transport not implemented")
end
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment