@gregjhogan
Last active March 21, 2024 06:26
gpu burn
import time

import torch

# allow TensorFloat-32 so float32 matmuls use tensor cores on Ampere and newer GPUs
torch.backends.cuda.matmul.allow_tf32 = True


def get_flops(bs, n, t):
    # an n x n matmul does n**2 * (2n - 1) FLOPs:
    # n multiplies and n - 1 adds per output element
    flops = (n ** 2) * (2 * n - 1) / t * bs
    print(f"{flops/1e12:.2f} TFLOP/s")


def burn_pytorch(dt, n, bs, runtime=10):
    a, b = torch.rand(n, n, dtype=dt).cuda(), torch.rand(n, n, dtype=dt).cuda()
    end = time.time() + runtime
    while time.time() < end:
        # CUDA events time the kernels on-device, excluding Python launch overhead
        se = torch.cuda.Event(enable_timing=True)
        ee = torch.cuda.Event(enable_timing=True)
        torch.cuda.synchronize()
        se.record()
        for _ in range(bs):
            torch.matmul(a, b)
        ee.record()
        torch.cuda.synchronize()
        t = se.elapsed_time(ee) / 1000  # milliseconds -> seconds
        get_flops(bs, n, t)


if __name__ == "__main__":
    burn_pytorch(torch.float16, 2048, 100)
    #burn_pytorch(torch.float32, 4096, 100)
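One possible extension, not part of the gist: to load every GPU in a machine at once, launch one burn process per device. The sketch below assumes the gist is saved as gpu_burn.py so burn_pytorch can be imported; the module name, device indexing, and spawn start method are illustrative assumptions.

import torch
import torch.multiprocessing as mp

from gpu_burn import burn_pytorch  # hypothetical module name for the gist above


def burn_device(idx):
    torch.cuda.set_device(idx)  # route the .cuda() calls inside burn_pytorch to this GPU
    burn_pytorch(torch.float16, 2048, 100)


if __name__ == "__main__":
    mp.set_start_method("spawn")  # CUDA contexts require spawn, not fork
    procs = [mp.Process(target=burn_device, args=(i,))
             for i in range(torch.cuda.device_count())]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

While it runs, per-GPU utilization and power draw can be watched with nvidia-smi to confirm every device is actually loaded.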