Skip to content

Instantly share code, notes, and snippets.

@Lyken17
Created August 15, 2023 09:31
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save Lyken17/43abdd5e2f6bc8789dcd7d7b66216852 to your computer and use it in GitHub Desktop.
Save Lyken17/43abdd5e2f6bc8789dcd7d7b66216852 to your computer and use it in GitHub Desktop.
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
def scientific_precision(number):
    """Scale a raw byte count to a human-readable binary unit.

    Finds the smallest unit (KB upward) such that the value does not
    exceed the next unit's threshold, i.e. the unit where
    ``number <= 1024 ** (power + 1)``.

    Parameters
    ----------
    number : int | float
        Size in bytes.

    Returns
    -------
    tuple[float, str]
        The scaled value and its unit suffix ("KB" .. "PB").
    """
    suffixes = ["KB", "MB", "GB", "TB", "PB"]
    for power, unit in enumerate(suffixes, start=1):
        # Strictly greater than the next threshold -> climb to the next unit.
        if number > 1024 ** (power + 1):
            continue
        return number / 1024 ** power, unit
    # number exceeds 1024**6 bytes: clamp to the largest unit (PB).
    # (The original reached this via loop-variable leakage; made explicit here.)
    return number / 1024 ** 5, "PB"
def print_gpu_utilization(idx=None, prefix=None):
    """Print and return GPU memory usage (bytes) as reported by NVML.

    Parameters
    ----------
    idx : int | None
        Index of a single GPU to query. When None, usage is summed over
        all GPUs visible to torch.
    prefix : str | None
        Optional label prepended to the printed line.

    Returns
    -------
    int
        Total used GPU memory in bytes.
    """
    # Imported lazily so the module stays importable without pynvml installed.
    from pynvml import nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo, nvmlInit

    nvmlInit()
    used = 0
    if idx is None:
        # Sum memory usage across every GPU torch can see.
        n = torch.cuda.device_count()
        for index in range(n):
            handle = nvmlDeviceGetHandleByIndex(index)
            info = nvmlDeviceGetMemoryInfo(handle)
            used += info.used
    else:
        # Report the requested GPU only.
        # BUG FIX: the original read the undefined name `index` here, so
        # passing an explicit GPU index always raised NameError.
        handle = nvmlDeviceGetHandleByIndex(idx)
        info = nvmlDeviceGetMemoryInfo(handle)
        used += info.used
    sci_used, suff = scientific_precision(used)
    if prefix:
        print(f"[{prefix}] GPU memory occupied: {sci_used:.3f} {suff}.")
    else:
        print(f"GPU memory occupied: {sci_used:.3f} {suff}.")
    return used
# Demo script: measure GPU memory at init, after an inference-only forward
# pass, and after a forward+backward pass.
# NOTE(review): `[module,] * 50` repeats the SAME Conv2d object 50 times, so
# all 50 "layers" share one weight tensor — confirm this is intended for the
# memory experiment rather than a bug.
net = nn.Sequential(*[nn.Conv2d(32, 32, kernel_size=1, bias=True),] * 50)
print("normal nn.conv2d")
print_gpu_utilization(prefix="Torch init")

device = "cuda"
# Input requires grad so backward() propagates all the way to the data,
# forcing activations to be kept even with frozen weights.
data = torch.randn(16, 32, 224, 224).to(device).requires_grad_(True)
net = net.to(device)

# Freeze every parameter; only the input receives gradients.
for n, p in net.named_parameters():
    p.requires_grad_(False)

# Inference-only forward: no activation storage for backward.
with torch.no_grad():
    out = net(data)
print_gpu_utilization(prefix="Forward")

# Training-style forward + backward: activations are saved, so memory grows.
out = net(data)
out.sum().backward()
print_gpu_utilization(prefix="Training")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment