# Slurm configuration for the "wmc" cluster (gist by @cuihaoleo, created October 5, 2018).
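#
# cgroup.conf: Slurm cgroup plugin settings (presumably kept as a separate
# file in the original gist). Jobs are confined to their allocated cores and
# GPU devices via cgroups; RAM usage is left unconstrained, but swap usage is
# capped (AllowedSwapSpace=20).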
CgroupAutomount=no
CgroupMountpoint=/sys/fs/cgroup
ConstrainCores=yes
ConstrainDevices=yes
ConstrainRAMSpace=no
ConstrainSwapSpace=yes
AllowedSwapSpace=20
TaskAffinity=no
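#
# gres.conf: GPU generic resource definitions (presumably a separate file in
# the original gist). Each GPU is bound to a local CPU range: the four-GPU
# nodes wmc-slave-g[1-2] and the two-GPU node wmc-slave-g3 split their GPUs
# across CPUs 0-11 and 12-23, while wmc-slave-g5 lists its two GPUs without
# a CPU binding.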
NodeName=wmc-slave-g[1-2] Name=gpu File=/dev/nvidia[0-1] CPUs=0-11
NodeName=wmc-slave-g[1-2] Name=gpu File=/dev/nvidia[2-3] CPUs=12-23
NodeName=wmc-slave-g3 Name=gpu File=/dev/nvidia0 CPUs=0-11
NodeName=wmc-slave-g3 Name=gpu File=/dev/nvidia1 CPUs=12-23
NodeName=wmc-slave-g5 Name=gpu File=/dev/nvidia[0-1]
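#
# slurm.conf: main cluster configuration (presumably a separate file in the
# original gist).
#
#
# CONTROL AND DAEMONS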
ControlMachine=wmc-slave-g1
ControlAddr=10.99.0.10
BackupController=wmc-slave-g4
BackupAddr=10.99.0.40
ProctrackType=proctrack/cgroup
ReturnToService=2
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmdPidFile=/var/run/slurmd.pid
SlurmdSpoolDir=/var/spool/slurmd
SlurmUser=slurm
SlurmdUser=root
StateSaveLocation=/mnt/gv1/slurmctld_state
TaskPlugin=task/affinity,task/cgroup
AuthType=auth/munge
CryptoType=crypto/munge
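# Controller state is saved on shared storage (/mnt/gv1), which the backup
# controller on wmc-slave-g4 must also be able to reach in order to take over.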
#
#
# SCHEDULING
FastSchedule=1
SchedulerType=sched/backfill
SelectType=select/cons_res
SelectTypeParameters=CR_CPU_Memory
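# Backfill scheduling with the consumable-resources plugin: CPUs and memory are
# allocated individually (CR_CPU_Memory) rather than as whole nodes.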
#
#
# LOGGING AND ACCOUNTING
AccountingStorageType=accounting_storage/filetxt
AccountingStorageLoc=/mnt/gv1/slurm_log/jobacct.log
JobCompType=jobcomp/filetxt
JobCompLoc=/mnt/gv1/slurm_log/jobcomp.log
ClusterName=wmc
JobAcctGatherType=jobacct_gather/cgroup
SlurmctldLogFile=/var/log/slurm/slurmctld.log
SlurmdLogFile=/var/log/slurm/slurmd.log
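# Accounting and job completion records are written as plain text files on the
# shared /mnt/gv1 volume; per-job usage is gathered through cgroups.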
#
#
# COMPUTE NODES
DisableRootJobs=yes
GresTypes=gpu
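# Node definitions. wmc-slave-g1 also hosts the controller, which is presumably
# why it reserves 12 CPUs (CPUSpecList=0-5,18-23) and 4 GB of memory
# (MemSpecLimit=4096) for non-job use; the other nodes reserve memory only.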
NodeName=wmc-slave-g1 NodeAddr=10.99.0.10 CPUs=24 RealMemory=63500 Sockets=2 CoresPerSocket=12 ThreadsPerCore=1 CPUSpecList=0-5,18-23 MemSpecLimit=4096 Gres=gpu:4
NodeName=wmc-slave-g2 NodeAddr=10.99.0.20 CPUs=24 RealMemory=63500 Sockets=2 CoresPerSocket=12 ThreadsPerCore=1 MemSpecLimit=1024 Gres=gpu:4
NodeName=wmc-slave-g3 NodeAddr=10.99.0.30 CPUs=24 RealMemory=63500 Sockets=2 CoresPerSocket=12 ThreadsPerCore=1 MemSpecLimit=1024 Gres=gpu:2
NodeName=wmc-slave-g4 NodeAddr=10.99.0.40 CPUs=32 RealMemory=48100 Sockets=4 CoresPerSocket=8 ThreadsPerCore=1 MemSpecLimit=1024
NodeName=wmc-slave-g5 NodeAddr=10.99.0.50 CPUs=4 RealMemory=19900 Sockets=1 CoresPerSocket=4 ThreadsPerCore=1 MemSpecLimit=512 Gres=gpu:2
DefMemPerCPU=1024
PartitionName=cpu1 Nodes=wmc-slave-g[1-3] MaxTime=INFINITE MaxCPUsPerNode=20 MaxMemPerNode=57344 Default=YES
PartitionName=gpu1 Nodes=wmc-slave-g[1-2] MaxTime=INFINITE MaxCPUsPerNode=4
PartitionName=cpu2 Nodes=wmc-slave-g4 MaxTime=INFINITE
PartitionName=gpu2 Nodes=wmc-slave-g3 MaxTime=INFINITE MaxCPUsPerNode=2
PartitionName=gpu3 Nodes=wmc-slave-g5 MaxTime=INFINITE MaxCPUsPerNode=2
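#
# Example usage (illustrative, not part of the original gist; job.sh is a
# placeholder script). A GPU job on this cluster could be launched with, e.g.:
#   srun --partition=gpu1 --gres=gpu:1 --cpus-per-task=4 nvidia-smi
# while a CPU-only batch job would land in the default cpu1 partition:
#   sbatch --ntasks=8 --mem-per-cpu=1024 job.sh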