import sys
import ctypes as C

import torch.cuda
import torch.cuda.memory as cumem

GB = 1 << 30

def get_cuda_memory():
    """Return (free, total) device memory in bytes, straight from the CUDA runtime."""
    handle = C.cdll.LoadLibrary("libcudart.so")
    # cudaMemGetInfo(size_t* free, size_t* total): use c_size_t rather than
    # c_long so the out-parameters match the C signature on every platform.
    free, total = C.c_size_t(), C.c_size_t()
    handle.cudaMemGetInfo(C.byref(free), C.byref(total))
    return free.value, total.value
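# Quick sanity check (assumes a CUDA-capable GPU and that libcudart.so is on
# the loader path); the runtime reports raw bytes, hence the division by GB:
#   free_b, total_b = get_cuda_memory()
#   print('free = {:.2f} GB, total = {:.2f} GB'.format(free_b / GB, total_b / GB))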
# sys.settrace callback; `event` is one of 'call', 'line', 'return', 'exception'.
# Only 'call' events are handled, so each Python-level function call produces
# exactly one line of memory statistics.
def trace_calls(frame, event, arg):
    if event != 'call':
        return
    co = frame.f_code
    func_name = co.co_name
    if func_name == 'write':
        # Ignore write() calls from print statements
        return
    func_line_no = frame.f_lineno
    func_filename = co.co_filename
    caller = frame.f_back
    if caller is None:
        # No caller frame (e.g. at the top of a new thread); nothing to report.
        return
    caller_line_no = caller.f_lineno
    caller_filename = caller.f_code.co_filename
    cuda_meminfo = get_cuda_memory()
    cuda_free, cuda_total = cuda_meminfo[0] / GB, cuda_meminfo[1] / GB
    cuda_used = cuda_total - cuda_free
    torch_alloc = cumem.memory_allocated() / GB
    torch_reserved = cumem.memory_reserved() / GB
    # Return cached, unused blocks to the driver so the next raw reading
    # tracks live allocations more closely.
    torch.cuda.empty_cache()
    print('\nCall to {} on line {} of {} from line {} of {}, '
          'RAW CUDA memory (GB): free = {:.4f}, used = {:.4f}, total = {:.4f}, '
          'PyTorch allocator CUDA memory (GB): allocated = {:.4f}, reserved = {:.4f}, '
          'cuda_used - torch_reserved = {:.4f}'.format(
              func_name, func_line_no, func_filename,
              caller_line_no, caller_filename,
              cuda_free, cuda_used, cuda_total,
              torch_alloc, torch_reserved,
              cuda_used - torch_reserved))
    return
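# The gist calls main() below but never defines it; the entry point is meant
# to be supplied by the user. A minimal hypothetical stand-in (an assumption,
# not part of the original gist) that gives the tracer some GPU activity:
def main(argv):
    # A couple of CUDA allocations so the printed allocator stats change.
    x = torch.randn(1024, 1024, device='cuda')
    y = x @ x
    torch.cuda.synchronize()
    return 0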
if __name__ == "__main__":
    # Install the tracer, run the workload, then detach the tracer.
    sys.settrace(trace_calls)
    res = main(sys.argv[1:])
    sys.settrace(None)
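# Usage sketch (hypothetical filename): save this file as, e.g.,
# trace_cuda_mem.py and run `python trace_cuda_mem.py` on a machine with a
# visible GPU; every Python-level call made under main() then prints one line
# comparing raw CUDA memory against the PyTorch caching allocator's view.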