@lapp0
Created January 19, 2024 13:36
import os
import random
import time

import numpy as np
import ray
import torch

# Initialize Ray
ray.init()


@ray.remote
def process_logits(logits):
    # Print the worker PID to show the task runs in a separate process
    print(os.getpid())
    # mock: take logits, return modified logits
    return logits + 0.1


mock_mask = np.random.choice([True, False], size=2**16)


@ray.remote
def process_sequence(seq):
    # mock: take sequence, return mask
    return mock_mask


def benchmark_passing_logits():
    mock_logits_size = (1, 2**16)
    for _ in range(4):
        mock_logits = torch.randn(mock_logits_size)
        start_time = time.time()
        result_id = process_logits.remote(mock_logits)
        result = ray.get(result_id)
        end_time = time.time()
        elapsed_time = end_time - start_time
        print(f"Elapsed time for passing the tensor: {elapsed_time} seconds")


def benchmark_passing_list_int_getting_mask():
    mock_sequence_size = 4096
    for _ in range(4):
        sequence = [random.randint(0, 2**16) for _ in range(mock_sequence_size)]
        # Start the timer
        start_time = time.time()
        result_id = process_sequence.remote(sequence)
        result = ray.get(result_id)
        end_time = time.time()
        elapsed_time = end_time - start_time
        print(f"Elapsed time for passing the sequence: {elapsed_time} seconds")


def benchmark_moving_logits_gpu_to_cpu_to_gpu():
    print(os.getpid())
    mock_logits_size = (1, 2**16)
    for _ in range(4):
        # Create logits on GPU
        mock_logits = torch.randn(mock_logits_size).cuda()
        # Start the timer
        start_time = time.time()
        # Move logits to CPU, process remotely, and move the result back to GPU
        mock_logits_cpu = mock_logits.cpu()
        result_id = process_logits.remote(mock_logits_cpu)
        result = ray.get(result_id)
        mock_logits_gpu_again = result.cuda()
        # End the timer
        end_time = time.time()
        elapsed_time = end_time - start_time
        print(f"Elapsed time for moving logits GPU -> CPU -> GPU: {elapsed_time} seconds")


if __name__ == "__main__":
    benchmark_passing_logits()
    benchmark_passing_list_int_getting_mask()
    benchmark_moving_logits_gpu_to_cpu_to_gpu()
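
As a follow-up to the benchmarks above, the sketch below shows one way to reduce the per-call transfer cost these timings measure: place a large constant array (such as mock_mask) into Ray's shared object store once with ray.put and pass the resulting ObjectRef to the task. This is a minimal, hypothetical sketch, not part of the original gist; apply_mask and mask_ref are illustrative names, and it assumes a local Ray cluster started via ray.init.

import numpy as np
import ray

ray.init(ignore_reinit_error=True)

# Put the mask into the object store once; workers read large numpy
# arrays from shared memory instead of receiving a fresh copy per call.
mask_ref = ray.put(np.random.choice([True, False], size=2**16))


@ray.remote
def apply_mask(mask, seq_len):
    # Ray resolves the top-level ObjectRef argument to the underlying
    # numpy array before invoking the task.
    return mask[:seq_len]


if __name__ == "__main__":
    result = ray.get(apply_mask.remote(mask_ref, 4096))
    print(result.shape)  # (4096,)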