Skip to content

Instantly share code, notes, and snippets.

@udibr
Forked from justinfx/py_cudart_memory.py
Last active January 18, 2016 17:45
Show Gist options
  • Save udibr/5a0a659a3bf2971a1c94 to your computer and use it in GitHub Desktop.
Save udibr/5a0a659a3bf2971a1c94 to your computer and use it in GitHub Desktop.
Get memory usage of CUDA GPU, using ctypes and libcudart
import ctypes
# Path to location of libcudart
# Change the path to "cudart<x>_<ver>.dll" to use on Windows, where <x>={32|64} and <ver> is the CUDA version.
# NOTE(review): this .dylib path is macOS-specific; on Linux it is typically
# "/usr/local/cuda/lib64/libcudart.so" — confirm for the target platform.
_CUDA = "/usr/local/cuda/lib/libcudart.dylib"
# Load the CUDA runtime shared library (cdecl calling convention).
cuda = ctypes.cdll.LoadLibrary(_CUDA)
# cudaMemGetInfo(size_t* free, size_t* total) -> cudaError_t status code.
cuda.cudaMemGetInfo.restype = int
cuda.cudaMemGetInfo.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
# cudaGetErrorString(cudaError_t) -> const char* message for the status code.
cuda.cudaGetErrorString.restype = ctypes.c_char_p
cuda.cudaGetErrorString.argtypes = [ctypes.c_int]
def cudaMemGetInfo(mb=False):
    """
    Return (free, total) memory stats for the CUDA GPU.

    Parameters
    ----------
    mb : bool
        If True, return the values in megabytes (floats).
        Default is False: return raw byte counts (ints).

    Returns
    -------
    tuple
        (free, total) memory for the current CUDA device.

    Raises
    ------
    RuntimeError
        If cudaMemGetInfo reports a non-zero CUDA status code.
    """
    free = ctypes.c_size_t()
    total = ctypes.c_size_t()
    # The runtime writes results through the two out-pointers.
    ret = cuda.cudaMemGetInfo(ctypes.byref(free), ctypes.byref(total))
    if ret != 0:
        # Bug fix: the original referenced an undefined name `status` here,
        # so any CUDA error raised NameError instead of the intended
        # RuntimeError. The status code is bound to `ret`.
        err = cuda.cudaGetErrorString(ret)
        raise RuntimeError("CUDA Error (%d): %s" % (ret, err))
    if mb:
        scale = 1024.0**2
        return free.value / scale, total.value / scale
    else:
        return free.value, total.value
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment