Skip to content

Instantly share code, notes, and snippets.

@DaisukeMiyamoto
Last active May 9, 2021 15:04
Show Gist options
  • Save DaisukeMiyamoto/c2493c36b929002785e43a3588e7d45a to your computer and use it in GitHub Desktop.
#!/bin/bash
# Install the Intel oneAPI Base and HPC toolkits from Intel's APT repository.
# Based on:
# https://software.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top/installation/install-using-package-managers/apt.html
# Options set here (not in the shebang) so they survive "bash script.sh".
set -xe

# Fetch the Intel repository public key.
cd /tmp
wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
# Add the key to the apt keyring so archives signed with it are trusted.
# NOTE(review): apt-key is deprecated on Ubuntu 22.04+; migrate to a
# /usr/share/keyrings file with a signed-by sources entry when convenient.
sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
# Remove the downloaded key file.
rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB

# Register the oneAPI repository exactly once. (The original registered it
# twice — via tee AND add-apt-repository — creating duplicate source entries.)
echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list

# Refresh package lists so the new repository's packages are visible;
# without this, the install below fails on a freshly configured host.
sudo apt update
sudo apt install -y intel-basekit intel-hpckit
#!/bin/bash
# Build RELION 3.1 in three flavours: CUDA sm_75 (G4dn instances),
# CUDA sm_70 (P3 instances), and a CPU/AVX-512 build with the Intel
# compilers and MKL FFT.
#
# --REQUIREMENTS--
# Common: Intel oneAPI
# Ubuntu:
#   sudo apt install cmake git build-essential mpi-default-bin mpi-default-dev libfftw3-dev libtiff-dev
# CentOS7:
#   sudo yum install cmake git libX11-devel fftw-devel libtiff-devel texlive-latex-bin texlive-cm texlive-dvips ghostscript evince qpdfview fltk-fluid
# Options set here (not in the shebang) so they survive "bash script.sh".
set -xe

TARGET=relion-v31
RELION_TAG=ver3.1
MPI_MODULE=intelmpi
##########################################################
git clone https://github.com/3dem/relion.git "${TARGET}"
cd "${TARGET}"
git checkout "${RELION_TAG}"
module load "${MPI_MODULE}"

# Build for G4dn (CUDA compute capability 7.5)
BUILD_DIR=build-cuda75
mkdir -p "${BUILD_DIR}"   # -p: don't abort under -e if the dir already exists
cd "${BUILD_DIR}"
cmake -DCUDA_ARCH=75 -DCUDA=ON -DCudaTexture=ON -DGUI=OFF -DCMAKE_BUILD_TYPE=Release ..
make -j "$(nproc)"
cd ..

# Build for P3 (CUDA compute capability 7.0)
BUILD_DIR=build-cuda70
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
cmake -DCUDA_ARCH=70 -DCUDA=ON -DCudaTexture=ON -DGUI=OFF -DCMAKE_BUILD_TYPE=Release ..
make -j "$(nproc)"
cd ..

# Build for CPU (AVX-512) with the Intel compilers.
# BUG FIX: the original had "-DCudaTexture=OFF\" with no space before the
# line-continuation backslash; bash joined it with the next line into the
# single malformed argument "-DCudaTexture=OFF-DCMAKE_C_COMPILER=icc".
BUILD_DIR=build-cpu
mkdir -p "${BUILD_DIR}"
cd "${BUILD_DIR}"
CC=mpiicc CXX=mpiicpc cmake -DMKLFFT=ON -DCUDA=OFF -DALTCPU=ON -DCudaTexture=OFF \
  -DCMAKE_C_COMPILER=icc -DCMAKE_CXX_COMPILER=icpc -DMPI_C_COMPILER=mpiicc -DMPI_CXX_COMPILER=mpiicpc \
  -DCMAKE_C_FLAGS="-O3 -ip -g -xCOMMON-AVX512 -restrict " \
  -DCMAKE_CXX_FLAGS="-O3 -ip -g -xCOMMON-AVX512 -restrict " -DGUI=OFF -DCMAKE_BUILD_TYPE=Release ..
make -j "$(nproc)"
cd ..

echo "Relion Path:"
echo "$(pwd)/bin"
#!/bin/bash
# Slurm submission-script template for RELION's "Submit to queue" feature.
# RELION replaces each XXX...XXX placeholder (node count, threads, queue,
# log files, the command line) before handing the script to sbatch, so the
# placeholder tokens and the #SBATCH directive text must stay exactly as-is.
# Lines starting "##SBATCH" / "#--SBATCH" are disabled directives kept so the
# optional extra1..extra6 template fields can be wired in when needed.
#SBATCH --nodes=XXXmpinodesXXX
#--SBATCH --ntasks-per-node=XXXextra1XXX
#SBATCH --cpus-per-task=XXXthreadsXXX
#SBATCH --partition=XXXqueueXXX
#SBATCH --error=XXXerrfileXXX
#SBATCH --output=XXXoutfileXXX
#SBATCH --open-mode=append
##SBATCH XXXextra4XXX
##SBATCH XXXextra5XXX
##SBATCH XXXextra6XXX
##SBATCH --time=XXXextra1XXX
##SBATCH --mem-per-cpu=XXXextra2XXX
##SBATCH --gres=XXXextra3XXX
# Keep Open MPI's TCP transport off the docker/loopback/libvirt interfaces,
# which are not routable between compute nodes.
export OMPI_MCA_btl_tcp_if_exclude="docker0,lo,virbr0"
# Launch the RELION command under srun using the PMIx process-management
# interface; "time" reports the wall-clock runtime in the job log.
time srun --mpi=pmix XXXcommandXXX
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --ntasks-per-node 1
#SBATCH --cpus-per-task 2
#SBATCH --job-name g4dn-x
#SBATCH --output=result-%x.%j.out
#SBATCH --error=result-%x.%j.err
# Slurm job: single-node, non-MPI RELION Class3D benchmark on a GPU build.
# Options set here (after the #SBATCH block, not in the shebang) so they
# survive "bash script.sh" and do not truncate sbatch's directive parsing.
set -xe

# Make the CUDA 7.5 RELION build visible.
export PATH=$PATH:/lustre/cryoem/relion-gpu/build-cuda75/bin

# CPU variant (uncomment to benchmark the ALTCPU build instead):
#COMPUTE_OPTIONS="--j ${SLURM_CPUS_PER_TASK} --cpu --pool 100 --dont_combine_weights_via_disc"
# GPU
COMPUTE_OPTIONS="--j ${SLURM_CPUS_PER_TASK} --gpu --pool 30 --dont_combine_weights_via_disc"

# non-MPI binary
RELION_REFINE="$(command -v relion_refine)"
# MPI variant (uncomment together with matching SBATCH settings):
#RELION_REFINE="mpirun -np ${SLURM_NTASKS} $(command -v relion_refine_mpi)"
##################################################################
# BUG FIX: the original read ${SLRUM_JOB_NAME} (typo), so the result
# directory was always named "_<jobid>" instead of "<jobname>_<jobid>".
RESULT_DIR="${SLURM_JOB_NAME}_${SLURM_JOB_ID}"
mkdir -p "${RESULT_DIR}"   # -p: resubmission must not abort under -e
##################################################################
# Class2D
#RELION_OPTIONS="--i Particles/shiny_2sets.star --ctf --iter 25 --tau2_fudge 2 --particle_diameter 360 --K 200 --zero_mask --oversampling 1 --psi_step 6 --offset_range 5 --offset_step 2 --norm --scale --random_seed 0 --o ${RESULT_DIR}/run"
# Class3D
RELION_OPTIONS="--i Particles/shiny_2sets.star --ref emd_2660.map:mrc --firstiter_cc --ini_high 60 --ctf --ctf_corrected_ref --iter 25 --tau2_fudge 4 --particle_diameter 360 --K 6 --flatten_solvent --zero_mask --oversampling 1 --healpix_order 2 --offset_range 5 --offset_step 2 --sym C1 --norm --scale --random_seed 0 --o ${RESULT_DIR}/run"
module load intelmpi
# RELION_REFINE / *_OPTIONS are deliberately unquoted: each holds multiple
# whitespace-separated arguments that must word-split.
time ${RELION_REFINE} ${RELION_OPTIONS} ${COMPUTE_OPTIONS}
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --ntasks-per-node 5
#SBATCH --cpus-per-task 6
# Slurm job: single-node MPI RELION Class3D run — 5 ranks x 6 threads.
# Options set here (after the #SBATCH block, not in the shebang) so they
# survive "bash script.sh" and do not truncate sbatch's directive parsing.
set -xe

module load intelmpi
# For Open MPI instead of Intel MPI, exclude the virtual interfaces:
#export OMPI_MCA_btl_tcp_if_exclude="docker0,lo,virbr0"

# Make the CUDA 7.0 RELION build visible.
export PATH=$PATH:/lustre/cryoem/relion/build-cuda70/bin/
##################################################################
# CPU-c5.24xlarge (uncomment to run the ALTCPU build instead):
#NTHREADS=48
#COMPUTE_OPTIONS="--j ${NTHREADS} --cpu --pool 100 --dont_combine_weights_via_disc"
# GPU-p3.16xlarge
NTHREADS=6
COMPUTE_OPTIONS="--j ${NTHREADS} --gpu --pool 30 --dont_combine_weights_via_disc"
##################################################################
JOB_NAME=Class3D_GPU1x8
RESULT_DIR="${JOB_NAME}_JOB_${SLURM_JOB_ID}"
mkdir -p "${RESULT_DIR}"   # -p: resubmission must not abort under -e
RELION_REFINE="mpirun -np ${SLURM_NTASKS} $(command -v relion_refine_mpi)"
##################################################################
# Class2D
#RELION_OPTIONS="--i Particles/shiny_2sets.star --ctf --iter 25 --tau2_fudge 2 --particle_diameter 360 --K 200 --zero_mask --oversampling 1 --psi_step 6 --offset_range 5 --offset_step 2 --norm --scale --random_seed 0 --o ${RESULT_DIR}/run"
# Class3D
RELION_OPTIONS="--i Particles/shiny_2sets.star --ref emd_2660.map:mrc --firstiter_cc --ini_high 60 --ctf --ctf_corrected_ref --iter 25 --tau2_fudge 4 --particle_diameter 360 --K 6 --flatten_solvent --zero_mask --oversampling 1 --healpix_order 2 --offset_range 5 --offset_step 2 --sym C1 --norm --scale --random_seed 0 --o ${RESULT_DIR}/run"
# RELION_REFINE / *_OPTIONS are deliberately unquoted: each holds multiple
# whitespace-separated arguments that must word-split.
time ${RELION_REFINE} ${RELION_OPTIONS} ${COMPUTE_OPTIONS}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment