@oscarknagg · Created January 31, 2022
Ray object store performance
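A benchmark script that stress-tests Ray's shared-memory object store: many concurrent tasks ray.put large pandas dataframes while (optionally, with --do-read) other tasks ray.get them back, printing cumulative write throughput as it goes. Passing --use-disk swaps the object store for uncompressed parquet files under /tmp as a baseline. Assuming the gist is saved as, say, ray_object_store_performance.py (the filename is not part of the gist), a run might look like:

    python ray_object_store_performance.py --num-cpus 16 --do-read

The full script: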
import argparse
import os
import string
import time
import uuid

import pandas as pd
import ray

WRITE_DIR = "/tmp/ray_object_store_performance_test/"
@ray.remote
def write_to_object_store(n=1000, cols=3, use_object_store=True):
    # Build an n-row dataframe with `cols` float columns named a, b, c, ...
    df = pd.DataFrame([
        {char: float(i) for i, char in enumerate(string.ascii_lowercase[:cols])}
    ] * n)
    if use_object_store:
        ref_or_file = ray.put(df)
    else:
        ref_or_file = os.path.join(WRITE_DIR, str(uuid.uuid4()) + ".parquet")
        df.to_parquet(ref_or_file, engine="pyarrow", compression=None)
    return ref_or_file, df.memory_usage(index=True).sum()
@ray.remote
def read_from_object_store(ref_or_file):
    if isinstance(ref_or_file, ray.ObjectRef):
        df = ray.get(ref_or_file)
    else:
        df = pd.read_parquet(ref_or_file)
    return df.memory_usage(index=True).sum()
if __name__ == '__main__':
    """Description from Alec Zorab:
    Simultaneously put/get large (> a few GB) pandas dataframes into the object store,
    with 100s of workers putting and 100s of workers reading.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--use-disk", default=False, action="store_true",
                        help="Write parquet files to disk instead of using the object store")
    parser.add_argument("--num-cpus", type=int, default=2)
    parser.add_argument("--num-tasks", type=int, default=None)
    parser.add_argument("--rows", type=int, default=20_000_000)  # Works out to a few GB per dataframe
    parser.add_argument("--cols", type=int, default=16)
    parser.add_argument("--do-read", default=False, action="store_true")
    args = parser.parse_args()
    args.num_tasks = args.num_tasks or args.num_cpus * 4

    ray.init(num_cpus=args.num_cpus, include_dashboard=True, dashboard_host="0.0.0.0")
    os.makedirs(WRITE_DIR, exist_ok=True)

    # Half the CPUs are reserved for writers (throttled explicitly below);
    # read tasks occupy the remaining scheduler slots.
    num_concurrent_write_tasks = args.num_cpus // 2
    num_concurrent_read_tasks = args.num_cpus - num_concurrent_write_tasks

    bytes_written = 0
    write_results = set()
    to_read = []
    t0 = time.time()
    for _ in range(args.num_tasks):
        # Once enough write tasks are in flight, wait for a batch to finish
        # before submitting the next one.
        if len(write_results) >= num_concurrent_write_tasks:
            ready, _ = ray.wait(list(write_results), num_returns=num_concurrent_write_tasks)
            t = time.time()
            for ref in ready:
                write_results.remove(ref)
            for df_ref, df_bytes in ray.get(ready):
                if args.do_read:
                    # Kick off a read task immediately so reads overlap with writes.
                    to_read.append(read_from_object_store.remote(df_ref))
                bytes_written += df_bytes
            print("{:.3f}GB/s".format(bytes_written / (1e9 * (t - t0))))
        write_results.add(write_to_object_store.remote(
            n=args.rows, cols=args.cols, use_object_store=not args.use_disk))

    # Drain the remaining writes and wait for any in-flight reads to finish.
    for df_ref, df_bytes in ray.get(list(write_results)):
        if args.do_read:
            to_read.append(read_from_object_store.remote(df_ref))
        bytes_written += df_bytes
    if args.do_read:
        ray.get(to_read)
    elapsed = time.time() - t0
    print("Total: {:.2f}GB in {:.1f}s ({:.3f}GB/s)".format(
        bytes_written / 1e9, elapsed, bytes_written / (1e9 * elapsed)))
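For reference, the object-store path above boils down to a ray.put/ray.get round trip of a numpy-backed dataframe. A minimal standalone sketch of that round trip (sizes and any timings are illustrative, not results from the gist):

    import time

    import pandas as pd
    import ray

    ray.init(num_cpus=2)

    # A numeric, numpy-backed dataframe; the size here is arbitrary.
    df = pd.DataFrame({"col": range(10_000_000)}, dtype=float)

    t0 = time.time()
    ref = ray.put(df)       # copy the dataframe into the shared-memory object store
    t1 = time.time()
    df_back = ray.get(ref)  # fetch it back; numpy-backed data can be read zero-copy
    t2 = time.time()

    print("put: {:.3f}s, get: {:.3f}s".format(t1 - t0, t2 - t1))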