Deeplearning4j with GPU/CUDA on SLURM
#!/bin/bash
#SBATCH -J bom_with_gpu
#SBATCH --mail-user=<your-email>
#SBATCH --mail-type=ALL
# Please use complete paths:
#SBATCH -e /work/scratch/<username>/job_with_gpu.err.%j
#SBATCH -o /work/scratch/<username>/job_with_gpu.out.%j
#
#SBATCH -t 00:30:00 # as hours:minutes:seconds, or '#SBATCH -t 10' for minutes only
#SBATCH -C nvd
#SBATCH --ntasks=16
#SBATCH --mem-per-cpu=1000
#
# -------------------------------
# Then add your own commands, e.g.:
module load cuda/8.0
cd /home/<username>/Project
java -cp target/Project-0.5-nd4j-cuda-8.0-platform.jar project.TestClass
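Submit the script with sbatch (e.g. sbatch job_with_gpu.sh; the filename is an assumption), and SLURM writes stdout/stderr to the .out/.err files given above. For a quick sanity check that the CUDA backend actually loaded on the compute node, a minimal class along these lines can be run in place of project.TestClass (the class name BackendCheck is hypothetical; it only assumes the nd4j-cuda-8.0-platform dependency from the pom below):

import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;

public class BackendCheck {
    public static void main(String[] args) {
        INDArray x = Nd4j.ones(2, 2); // creating an array forces backend initialization
        // On a GPU node this should print a CUDA executioner class rather than a CPU one
        System.out.println("Executioner: " + Nd4j.getExecutioner().getClass().getName());
        System.out.println("Sum: " + x.sumNumber());
    }
}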
...
<!-- <nd4j.backend>nd4j-native-platform</nd4j.backend> -->
<nd4j.backend>nd4j-cuda-8.0-platform</nd4j.backend> <!-- switch the backend from native to CUDA -->
...
<dependencyManagement>
  <dependencies>
    <dependency>
      <groupId>org.nd4j</groupId>
      <artifactId>nd4j-native-platform</artifactId>
      <version>0.6.0</version>
    </dependency>
    <!-- add the following: -->
    <dependency>
      <groupId>org.nd4j</groupId>
      <artifactId>nd4j-cuda-8.0-platform</artifactId>
      <version>0.6.0</version>
    </dependency>
  </dependencies>
</dependencyManagement>
...
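With this setup (mirroring the layout of the dl4j-examples pom), the project's concrete ND4J dependency can simply reference the property as its artifactId, i.e. <artifactId>${nd4j.backend}</artifactId>, so switching between the native and the CUDA backend becomes a one-line change at the top of the pom.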
import org.nd4j.jita.conf.CudaEnvironment;
import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.buffer.util.DataTypeUtil;

public class TestClass {
    public static void main(String[] args) throws Exception {
        // Use all available GPUs
        CudaEnvironment.getInstance().getConfiguration().allowMultiGPU(true);

        // Use half precision; can speed up training by up to 200%
        DataTypeUtil.setDTypeForContext(DataBuffer.Type.HALF);

        // Allow caching of up to 6 GB on both the device (GPU) and the host
        CudaEnvironment.getInstance().getConfiguration()
                .setMaximumDeviceCacheableLength(1024 * 1024 * 1024L)
                .setMaximumDeviceCache(6L * 1024 * 1024 * 1024L)
                .setMaximumHostCacheableLength(1024 * 1024 * 1024L)
                .setMaximumHostCache(6L * 1024 * 1024 * 1024L);

        // ...
    }
}
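Note that allowMultiGPU(true) only permits ND4J to place work on several devices; actual multi-GPU training in Deeplearning4j goes through ParallelWrapper (org.deeplearning4j.parallelism.ParallelWrapper). A minimal sketch under that assumption, with net and iterator standing in for a MultiLayerNetwork and a DataSetIterator built elsewhere in the project:

import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.parallelism.ParallelWrapper;
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator;

public class MultiGpuTraining {
    public static void trainOnAllGpus(MultiLayerNetwork net, DataSetIterator iterator) {
        ParallelWrapper wrapper = new ParallelWrapper.Builder(net)
                .prefetchBuffer(24)    // minibatches prefetched per worker
                .workers(4)            // commonly one worker per available GPU
                .averagingFrequency(3) // average model parameters every 3 minibatches
                .build();
        wrapper.fit(iterator);         // each worker trains on its own device
    }
}

The worker count here is a placeholder; set it to the number of GPUs the SLURM allocation actually provides.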