@christophernhill · Created July 18, 2022
Gist example of Julia with system MPI on a cluster for Matin and Sana
#!/bin/bash
#
# ==
# == Typical run command for this script
# ==
# $ cat setup_and_test_julia_mpi.slurm | sbatch -p sched_mit_darwin2 -N 2 --exclusive --time=0-12:00:00
#
#
# == Activate cluster MPI
#
source /etc/profile.d/modules.sh
## module use /home/jahn/software/modulefiles
## module load gcc/6.2.0
## module load jahn/mvapich2/2.3.2_gcc-6.2.0
## module load jahn/hdf5/1.10.6_gcc-6.2.0
## module load jahn/netcdf-c/4.7.3_gcc-6.2.0_hdf-1.10.6
## module load jahn/netcdf-fortran/4.5.2_gcc-6.2.0_hdf-1.10.6
module use /home/jahn/software/modulefiles
module load intel/2020-04
module load impi/2020-04
module load jahn/netcdf-fortran/4.5.3_intel-2020-04
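#
# == Optional sanity check (not in the original script): confirm the cluster
# == MPI launcher is on PATH before building MPI.jl against it.
#
which mpirun || { echo "mpirun not found; check module loads" ; exit 1 ; }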
#
# == Choose working directory (**BEWARE** the optional cleanup below removes
# == the contents of this directory if uncommented!)
#
WDIR=/nobackup1b/users/cnh/julia-test/mpi-julia-temp
mkdir -p ${WDIR}
cd ${WDIR}
wdir=`pwd`
if [ "$wdir" = "$WDIR" ]; then
  ### uncomment to clean up previous contents, but check the path first!
  ### \rm -fr *
  :  # no-op placeholder; bash rejects an if body that contains only comments
fi
#
# == Get a clean Julia install and set MPI.jl to use the cluster MPI rather
# == than the Julia-provided binary artifact.
# == Add the MPI package and build it in single-process mode.
#
wget https://julialang-s3.julialang.org/bin/linux/x64/1.7/julia-1.7.3-linux-x86_64.tar.gz
tar -xzvf julia-1.7.3-linux-x86_64.tar.gz
export JULIA_DEPOT_PATH=`pwd`/.julia
export JULIA_MPI_BINARY="system"
export JULIA_MPI_PATH=`which mpirun | sed 's/\(.*\)\/bin\/mpirun/\1/'`
./julia-1.7.3/bin/julia --project=@. -e 'using Pkg;Pkg.add("MPI");Pkg.build("MPI")'
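#
# == Optional check (an assumption, not in the original script): ask MPI.jl
# == which implementation it was built against, to verify the system MPI was
# == picked up. Skip this if your MPI.jl version lacks identify_implementation().
#
./julia-1.7.3/bin/julia --project=@. -e 'using MPI; println(MPI.identify_implementation())'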
#
# == Simple point to point MPI Send and Recv data transfer example
#
cat > test_julia_mpi_bw.jl <<'EOFA'
using MPI
# Exchange sm/rm between the two ranks nrep times to measure sustained transfer.
function bwtest(sm::Vector{Float64},rm::Vector{Float64},src::Int64,dst::Int64,rank::Int64,comm::MPI.Comm,nrep::Int64)
    for i=1:nrep
        rreq = MPI.Irecv!(rm, src, src+32, comm)
        sreq = MPI.Isend( sm, dst, rank+32, comm)
        stats = MPI.Waitall!([rreq, sreq])
    end
end
MPI.Init(;threadlevel=MPI.THREAD_SINGLE)
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
size = MPI.Comm_size(comm)
if size != 2
    print("size != 2; this test needs exactly two ranks\n")
    exit()
end
dst = mod(rank+1, size)
src = mod(rank-1, size)
N = 4
send_mesg = Array{Float64}(undef, N)
recv_mesg = Array{Float64}(undef, N)
fill!(send_mesg, Float64(rank))
rreq = MPI.Irecv!(recv_mesg, src, src+32, comm)
print("$rank: Sending $rank -> $dst = $send_mesg\n")
sreq = MPI.Isend(send_mesg, dst, rank+32, comm)
stats = MPI.Waitall!([rreq, sreq])
print("$rank: Received $src -> $rank = $recv_mesg\n")
NN=Int(1e6)   # 1e6 Float64 values, i.e. 8 MB per message
nrep=10
sm = Array{Float64}(undef, NN)
fill!(sm, Float64(rank))
rm = Array{Float64}(undef, NN)
# First timing includes JIT compilation; later repetitions reflect steady state.
tt1=@elapsed bwtest(sm,rm,src,dst,rank,comm,nrep)
tt2=@elapsed bwtest(sm,rm,src,dst,rank,comm,nrep)
tt3=@elapsed bwtest(sm,rm,src,dst,rank,comm,nrep)
tt4=@elapsed bwtest(sm,rm,src,dst,rank,comm,nrep)
if rank == Int64(0)
    print("\n")
    print("time = ",tt1,"\n")
    print("time = ",tt2,"\n")
    print("time = ",tt3,"\n")
    print("time = ",tt4,"\n")
    # Bytes moved per bwtest call: nrep exchanges x 2 directions x NN doubles x 8 bytes.
    print("bw = ",(nrep*2*NN*8.)/tt4,"\n")
    print("\n")
end
MPI.Barrier(comm)
EOFA
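#
# == Optional single-process smoke test (an assumption, not in the original
# == script): check that MPI.jl initializes before launching the 2-rank job.
#
./julia-1.7.3/bin/julia --project=@. -e 'using MPI; MPI.Init(); print("nranks = ",MPI.Comm_size(MPI.COMM_WORLD),"\n")'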
#
# == Run Julia in multi-process MPI mode.
# == Note: if the cluster provides pmix/pmi2 support, the job can also be
# == launched through srun (a sketch follows the mpiexec line below).
#
mpiexec -launcher ssh -n 2 ./julia-1.7.3/bin/julia --project=@. test_julia_mpi_bw.jl
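#
# == Alternative launch (a sketch, assuming slurm was built with pmi2 support;
# == check `srun --mpi=list` and adjust accordingly):
#
## srun --mpi=pmi2 -n 2 ./julia-1.7.3/bin/julia --project=@. test_julia_mpi_bw.jl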