Last active
May 4, 2020 18:24
-
-
Save hungyiwu/01d67fa83c5c2e37a334cc7da3c8acfd to your computer and use it in GitHub Desktop.
SLURM job submission scripts
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#
# Minimal single-task SLURM batch script.
# Usage:     sbatch slurmjob.sh
# Reference: https://slurm.schedmd.com/
#
#SBATCH --job-name=newjob
#SBATCH -n 1                           # Number of cores
#SBATCH -N 1                           # Ensure that all cores are on one machine
#SBATCH -t 0-01:00                     # Runtime in D-HH:MM, minimum of 10 minutes
#SBATCH -p shared,general              # Partitions to submit to, whichever starts faster
#SBATCH --mem-per-cpu=4G               # Memory per core (see also --mem)
#SBATCH -o joboutput_%j.out            # STDOUT file; %j expands to the job ID
#SBATCH -e joberrors_%j.err            # STDERR file; %j expands to the job ID
#SBATCH --mail-type=ALL                # Email notification: BEGIN,END,FAIL,ALL
#SBATCH --mail-user=user@email.domain  # Email address for notifications

# SLURM_JOB_ID is set by the scheduler; default to "unknown" so the
# script also runs cleanly outside SLURM (e.g. a local dry run).
msg="Now running job ${SLURM_JOB_ID:-unknown}"
printf '%s\n' "$msg"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#
# SLURM job-array batch script: each array task runs this script once.
# Usage:     sbatch --array=0-31 slurmjob_array.sh
# Reference: https://slurm.schedmd.com/job_array.html
#
#SBATCH --job-name=newjob
#SBATCH -n 1                           # Number of cores
#SBATCH -N 1                           # Ensure that all cores are on one machine
#SBATCH -t 0-01:00                     # Runtime in D-HH:MM, minimum of 10 minutes
#SBATCH -p shared,general              # Partitions to submit to, whichever starts faster
#SBATCH --mem-per-cpu=4G               # Memory per core (see also --mem)
#SBATCH -o joboutput_%A_%a.out         # STDOUT; %A = array master job ID, %a = array task ID
#SBATCH -e joberrors_%A_%a.err         # STDERR; %A = array master job ID, %a = array task ID
#SBATCH --mail-type=ALL                # Email notification: BEGIN,END,FAIL,ALL
#SBATCH --mail-user=user@email.domain  # Email address for notifications

# SLURM_ARRAY_JOB_ID / SLURM_ARRAY_TASK_ID are set by the scheduler for
# array jobs; default to "unknown" so the script also runs outside SLURM.
msg="Now running array job ${SLURM_ARRAY_JOB_ID:-unknown}, task ${SLURM_ARRAY_TASK_ID:-unknown}"
printf '%s\n' "$msg"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash
#
# SLURM GPU smoke-test job: loads the toolchain, activates a virtualenv,
# and checks whether TensorFlow can see a GPU.
# Usage:     sbatch slurmjob_gpu.sh
# Reference: https://wiki.rc.hms.harvard.edu/display/O2/Using+O2+GPU+resources
#
#SBATCH --job-name=testgpu
#SBATCH -n 1                           # Number of cores
#SBATCH -N 1                           # Ensure that all cores are on one machine
#SBATCH -t 0-00:10                     # Runtime in D-HH:MM, minimum of 10 minutes
#SBATCH -p gpu                         # Partition to submit to
#SBATCH --gres=gpu:teslaK80:1          # Request 1 GPU of type TeslaK80
#SBATCH --mem-per-cpu=4G               # Memory per core (see also --mem)
#SBATCH -o joboutput_%j.out            # STDOUT file; %j expands to the job ID
#SBATCH -e joberrors_%j.err            # STDERR file; %j expands to the job ID
#SBATCH --mail-type=ALL                # Email notification: BEGIN,END,FAIL,ALL
#SBATCH --mail-user=user@email.domain  # Email address for notifications

# Cluster-provided toolchain and project virtualenv (site-specific paths).
module load gcc/6.2.0 python/3.6.0 cuda/10.0
source /path/to/virtualenv/bin/activate

# NOTE(review): tf.test.is_gpu_available() is deprecated in TF 2.x; if the
# virtualenv ships TF 2, prefer tf.config.list_physical_devices('GPU').
gpuAvail=$(python -c "import tensorflow as tf; print(tf.test.is_gpu_available())")
printf 'GPU available? %s\n' "$gpuAvail"

# Show the GPUs actually allocated to this job.
nvidia-smi
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment