"""
Rough PoC using binary search to find the optimal number of model layers to offload to the GPU, for this LLM and this hardware.
"""
import time


def call_llm(prompt, gpu_layers):
    # TODO fill in the actual call to the LLM here
    # dummy GPU memory limit: pretend the GPU can hold at most 60 layers
    test_best_layers = 60
    if gpu_layers > test_best_layers:
        raise MemoryError("Out of memory!")
    # dummy sleep: the fewer layers offloaded, the slower the response
    time.sleep((test_best_layers + 1 - gpu_layers) * 0.25)
    return "<response>"
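

# Below is a minimal sketch of what a real call_llm could look like, assuming
# llama-cpp-python is installed and "model.gguf" is a placeholder path to a
# local GGUF model. n_gpu_layers is fixed at load time, so each probe reloads
# the model with a different layer count.
def call_llm_llama_cpp(prompt, gpu_layers):
    from llama_cpp import Llama  # deferred import so the dummy PoC runs without it

    llm = Llama(model_path="model.gguf", n_gpu_layers=gpu_layers, verbose=False)
    result = llm(prompt, max_tokens=32)
    return result["choices"][0]["text"]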


def tune_gpu_layers__binary_search(min_gpu_layers, max_gpu_layers, best_gpu_layers, best_time, test_prompt):
    print(f"tune_gpu_layers: min_gpu_layers={min_gpu_layers}, max_gpu_layers={max_gpu_layers}, best_gpu_layers={best_gpu_layers}, best_time={best_time}, test_prompt={test_prompt}")
    if (max_gpu_layers - min_gpu_layers) <= 1:
        # the range has collapsed: return the best layer count found so far
        return best_gpu_layers
    # probe the midpoint of the remaining search range
    current_gpu_layers = round((max_gpu_layers - min_gpu_layers) / 2) + min_gpu_layers
    print(f"current_gpu_layers = {current_gpu_layers}")
    start_time = time.time()
    try:
        call_llm(test_prompt, current_gpu_layers)
    except Exception:
        # out of memory? try fewer layers
        print("out of memory? try fewer layers")
        max_gpu_layers = current_gpu_layers - 1
        return tune_gpu_layers__binary_search(min_gpu_layers, max_gpu_layers, best_gpu_layers, best_time, test_prompt)
    elapsed = time.time() - start_time
    if best_time == -1 or elapsed < best_time:
        best_time = elapsed
        best_gpu_layers = current_gpu_layers
    # the call succeeded, so try more layers, in case we get a better time:
    print("try more layers")
    min_gpu_layers = current_gpu_layers
    return tune_gpu_layers__binary_search(min_gpu_layers, max_gpu_layers, best_gpu_layers, best_time, test_prompt)


min_gpu_layers = 1
max_gpu_layers = 100
best_gpu_layers = 0  # 0 means no working layer count has been found yet
best_time = -1
best_gpu_layers = tune_gpu_layers__binary_search(min_gpu_layers, max_gpu_layers, best_gpu_layers, best_time, "AI is going to")
print(f"Best GPU layers for this model, on this hardware: {best_gpu_layers}")