Skip to content

Instantly share code, notes, and snippets.

@dribnet
Forked from udibr/py_cudart_memory.py
Created January 18, 2016 17:35
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save dribnet/addbafcbb3286afa3f88 to your computer and use it in GitHub Desktop.
Save dribnet/addbafcbb3286afa3f88 to your computer and use it in GitHub Desktop.
Get memory usage of CUDA GPU, using ctypes and libcudart
import ctypes
import ctypes.util

# Path to libcudart. The hard-coded path covers the default macOS CUDA
# install; if it is absent, fall back to the system loader's search so the
# script also works on Linux (libcudart.so) or non-default install prefixes.
_CUDA = "/usr/local/cuda/lib/libcudart.dylib"
try:
    cuda = ctypes.cdll.LoadLibrary(_CUDA)
except OSError:
    _found = ctypes.util.find_library("cudart")
    if _found is None:
        raise  # re-raise the original load failure — no cudart anywhere
    cuda = ctypes.cdll.LoadLibrary(_found)

# C signature: cudaError_t cudaMemGetInfo(size_t *free, size_t *total)
# cudaError_t is an enum, i.e. a C int.
cuda.cudaMemGetInfo.restype = ctypes.c_int
cuda.cudaMemGetInfo.argtypes = [ctypes.POINTER(ctypes.c_size_t),
                                ctypes.POINTER(ctypes.c_size_t)]
# C signature: const char *cudaGetErrorString(cudaError_t error)
cuda.cudaGetErrorString.restype = ctypes.c_char_p
cuda.cudaGetErrorString.argtypes = [ctypes.c_int]
def cudaMemGetInfo(mb=False):
    """Return ``(free, total)`` memory stats for the CUDA GPU.

    Parameters
    ----------
    mb : bool, optional
        If False (default), return sizes in bytes as ints.
        If True, return sizes in megabytes as floats.

    Returns
    -------
    tuple
        ``(free, total)`` device memory.

    Raises
    ------
    RuntimeError
        If the underlying ``cudaMemGetInfo`` call returns a non-zero
        CUDA error code; the message includes the code and CUDA's
        human-readable description.
    """
    free = ctypes.c_size_t()
    total = ctypes.c_size_t()
    ret = cuda.cudaMemGetInfo(ctypes.byref(free), ctypes.byref(total))
    if ret != 0:
        # c_char_p restype yields bytes under Python 3; decode so the
        # exception message reads "out of memory", not "b'out of memory'".
        err = cuda.cudaGetErrorString(ret)
        if isinstance(err, bytes):
            err = err.decode("utf-8", "replace")
        raise RuntimeError("CUDA Error (%d): %s" % (ret, err))
    if mb:
        scale = 1024.0 ** 2  # bytes per MB; float so division is exact ratio
        return free.value / scale, total.value / scale
    return free.value, total.value
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment