import gc
import os
from collections import defaultdict

import psutil
import torch


def print_gc_tensors():
    """Enumerate every tensor the Python garbage collector can see and
    summarise tensor counts and memory usage per device."""
    dict_tensorsize_to_count = defaultdict(int)                     # tensor shape -> count (all devices)
    dict_device_to_tensors = defaultdict(lambda: defaultdict(int))  # device -> shape -> count
    dict_device_memory = defaultdict(int)                           # device -> tensor memory in MB
    total_count = 0
    for obj in gc.get_objects():
        try:
            # Catch plain tensors as well as objects (e.g. nn.Parameter) that wrap one in .data
            if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
                size = obj.size()
                device = obj.get_device()  # -1 for CPU tensors in recent PyTorch, else the CUDA device index
                dict_tensorsize_to_count[size] += 1
                dict_device_to_tensors[device][size] += 1
                dict_device_to_tensors[device]['device_total'] += 1
                dict_device_memory[device] += (obj.element_size() * obj.nelement()) / (1024 * 1024)
                total_count += 1
        except Exception:
            # Some gc-tracked objects raise on attribute access; skip them
            pass
    print("______________________________")
    print("Mapping tensor shape to count across all devices.")
    print(dict_tensorsize_to_count)
    print("Total tensor count across all devices: ", total_count)
    devices_list = list(dict_device_to_tensors.keys())
    process = psutil.Process(os.getpid())
    print("\n CPU process memory (GB):", process.memory_info().rss / 1024 ** 3)
    for d in devices_list:
        print("\n Device number: ", d)
        print("Total tensors on device {} are {}".format(d, dict_device_to_tensors[d]['device_total']))
        print(dict_device_to_tensors[d])
        if d >= 0:  # CUDA device: query its allocator, not device 0
            print("GPU information for device", d,
                  '- Allocated:', round(torch.cuda.memory_allocated(d) / 1024 ** 3, 1), 'GB',
                  'Cached:', round(torch.cuda.memory_reserved(d) / 1024 ** 3, 1), 'GB')
    print("Device memory occupied by tensors (MB):", dict_device_memory)
    print("______________________________")