Test HNN-core's alternative MPIBackend on HPC
"""
Run HNN-core simulations distributed over varying numbers of cores while clocking the runtime.
Designed to be used on an HPC and called from a master MPI process.
"""
import os.path as op
import tempfile
import timeit
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import hnn_core
from hnn_core import simulate_dipole, jones_2009_model, average_dipoles, JoblibBackend, MPIBackend
from hnn_core.viz import plot_dipole
# load param file
params_fname = '/users/rthorpe/data/rthorpe/hnn_out_2020/param/med_nerve_2020_04_27_2prox_2dist_opt1_smooth.param'
params = hnn_core.read_params(params_fname)
net = hnn_core.jones_2009_model(params, add_drives_from_params=True)
# run 25 trials per simulation while varying the number of cores
# n_procs_list = [1] + list(range(2, 25, 2))
n_procs_list = [22, 23]  # XXX for now, test only two core counts
durations = list()
for n_procs in n_procs_list:
    start = timeit.default_timer()
    with MPIBackend(n_procs=n_procs, mpi_comm_spawn=True):
        dpls = simulate_dipole(net, tstop=170., n_trials=25)
    stop = timeit.default_timer()
    durations.append((stop - start) / 60)
    print(f'n_procs: {n_procs}')
# plot run time vs. # of cores
plt.step(n_procs_list, durations, where='post')
plt.xlabel('# of cores')
plt.ylabel('computation time (min)')
plt.savefig('clock_hnn_core.png', dpi=300)
plt.savefig('clock_hnn_core.eps')
# plot the dipoles from the final simulation of the loop above
scaling_factor = 40
dpls = [dpl.scale(scaling_factor).smooth(20) for dpl in dpls] # scale in place
avg_dpl = average_dipoles(dpls)
fig, axes = plt.subplots(2, 1, sharex=True, sharey=True)
plot_dipole(dpls, ax=axes[0], show=False)
plot_dipole(avg_dpl, ax=axes[1], show=False)
fig.savefig('clock_hnn_core_mn.png', dpi=300)
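JoblibBackend is imported above but never exercised. As a rough point of comparison, here is a minimal sketch of timing a joblib-parallelized run, reusing the parameter file and API calls from the script above; the n_jobs value is an arbitrary choice, not taken from the original gist.

import timeit
import hnn_core
from hnn_core import simulate_dipole, JoblibBackend

params = hnn_core.read_params(params_fname)  # same .param file as above
net_jl = hnn_core.jones_2009_model(params, add_drives_from_params=True)
start = timeit.default_timer()
with JoblibBackend(n_jobs=4):  # joblib distributes work over trials, not over cells
    dpls_jl = simulate_dipole(net_jl, tstop=170., n_trials=25)
stop = timeit.default_timer()
print(f'JoblibBackend, 4 jobs: {(stop - start) / 60:.2f} min')

The SLURM batch script below launches the benchmark script above from a single master MPI rank, which then spawns workers through MPIBackend(mpi_comm_spawn=True).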
#!/bin/bash
#SBATCH -t 4:00:00
#SBATCH --nodes=1
#SBATCH --tasks-per-node=24
##SBATCH --cpus-per-task=24 # leave disabled: --cpus-per-task is for multithreading, not MPI ranks
#SBATCH --mem=4G
#SBATCH --exclusive # might be needed to prevent freezing - investigate further
#SBATCH -A carney-sjones-condo
#SBATCH -J clock_sim
#SBATCH -o slurm.out
#SBATCH -e slurm.err
module load python/3.9.0 mpi/openmpi_4.0.7_gcc_10.2_slurm22 gcc/10.2 cuda/11.7.1
#module load python/3.7.4 mpi/mvapich2-2.3.5_gcc_10.2_slurm22 gcc/10.2
#source ~/envs/hnn_core_env_stable/bin/activate
#source ~/envs/hnn_core_env/bin/activate
#source ~/envs/hnn_core_mpi/bin/activate
source ~/envs/hnn_core_mpi_root/bin/activate
#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/ompi/lib
#export PATH=$PATH:$HOME/ompi/bin
#export OMPI_MCA_pml="ucx"
#export OMPI_MCA_btl="^tcp,vader,openib"
export OMPI_MCA_routed="direct"
#python ~/clock_hnn_core/benchmark_hnn_core_runtime.py
mpiexec -np 1 --oversubscribe python -m mpi4py $HOME/clock_hnn_core/benchmark_hnn_core_runtime.py
deactivate
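The batch script is submitted to SLURM in the usual way. The filename clock_sim.sh below is a guess based on the -J job name; the log filenames come from the -o/-e directives above.

sbatch clock_sim.sh           # submit the job (job name: clock_sim)
squeue -u $USER               # check queue status
tail -f slurm.out slurm.err   # follow stdout/stderr once the job starts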