Edward Oakes (edoakes)

  • Anyscale
  • Minneapolis, MN
edoakes / monte_carlo_pi.py (last active February 8, 2020 00:26)
Monte Carlo Pi Estimation in Python
import math
import random
import time

def sample(num_samples):
    num_inside = 0
    for _ in range(num_samples):
        x, y = random.uniform(-1, 1), random.uniform(-1, 1)
        # Count samples that land inside the unit circle.
        if math.hypot(x, y) <= 1:
            num_inside += 1
    return num_inside
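The preview stops before the estimate itself. A minimal driver, assuming the sample function above: pi/4 of uniformly drawn points in the square land inside the unit circle, so the hit ratio times four approximates pi.

num_samples = 10_000_000
start = time.time()
num_inside = sample(num_samples)
print("pi ~= {}".format(4 * num_inside / num_samples))
print("finished in {:.2f}s".format(time.time() - start))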
edoakes / parallel_monte_carlo_pi.py (last active February 8, 2020 01:10)
Monte Carlo Pi Estimation in Python - parallel using multiprocessing.Pool
import math
import random
import time

def sample(num_samples):
    num_inside = 0
    for _ in range(num_samples):
        x, y = random.uniform(-1, 1), random.uniform(-1, 1)
        if math.hypot(x, y) <= 1:
            num_inside += 1
    return num_inside
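The preview again shows only the serial sampler. A sketch of the multiprocessing.Pool fan-out the gist title describes (the worker count and the even sample split are assumptions):

from multiprocessing import Pool

if __name__ == "__main__":
    num_samples, num_workers = 10_000_000, 8
    # Each worker draws an equal share of the samples; the hit counts are summed.
    with Pool(num_workers) as pool:
        counts = pool.map(sample, [num_samples // num_workers] * num_workers)
    print("pi ~= {}".format(4 * sum(counts) / num_samples))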
edoakes / ray-cluster.yaml (last active August 11, 2021 13:46)
10-node AWS Ray cluster configuration
cluster_name: monte_carlo_pi

# The number of worker nodes to launch in addition to the head node.
min_workers: 9
max_workers: 9

provider:
    type: aws
    region: us-west-2
    availability_zone: us-west-2a
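Once the cluster is launched (ray up ray-cluster.yaml), the same sampling can be spread across all ten nodes as Ray tasks. A minimal sketch, assuming the sample function from the first gist is defined in the same script:

import ray

ray.init(address="auto")  # Connect to the running cluster from the head node.

@ray.remote
def sample_task(num_samples):
    # Hypothetical remote wrapper around the serial sample() shown earlier.
    return sample(num_samples)

num_samples, num_tasks = 100_000_000, 10
futures = [sample_task.remote(num_samples // num_tasks) for _ in range(num_tasks)]
num_inside = sum(ray.get(futures))
print("pi ~= {}".format(4 * num_inside / num_samples))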
edoakes / monte_carlo_pi.py (last active May 15, 2024 02:14)
Monte Carlo Pi estimation
import argparse
import math
import random
import time

parser = argparse.ArgumentParser(description="Approximate digits of Pi using Monte Carlo simulation.")
parser.add_argument("--num-samples", type=int, default=1000000)
parser.add_argument("--parallel", default=False, action="store_true")
parser.add_argument("--distributed", default=False, action="store_true")
import requests
from ray import serve

serve.init()

# Main concepts

## Endpoints
import ray
from ray import serve
# Connect to the running Ray Serve instance.
ray.init(address='auto', ignore_reinit_error=True)
serve.init()
# Deploy the model.
serve.create_backend("sklearn_backend", SKLearnBackend)
serve.create_endpoint("sentiment_endpoint", backend="sklearn_backend", route="/sentiment")
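SKLearnBackend is referenced but not shown in this preview. Under the legacy (pre-1.0) Serve API used here, a backend is a callable class; a minimal sketch (the load_model helper and the Flask-style request handling are assumptions):

class SKLearnBackend:
    def __init__(self):
        # Hypothetical helper returning a fitted vectorizer/classifier pair.
        self.vectorizer, self.classifier = load_model()

    def __call__(self, flask_request):
        features = self.vectorizer.transform([flask_request.data.decode("utf-8")])
        return self.classifier.predict(features)[0]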
import joblib
import s3fs
import sklearn
from ray import serve

@serve.deployment(route_prefix="/sentiment", name="sentiment-deployment")
class SentimentDeployment:
    def __init__(self):
        # Load the fitted vectorizer from a public S3 bucket.
        fs = s3fs.S3FileSystem(anon=True)
        with fs.open('ray-serve-blog/unigram_vectorizer.joblib', 'rb') as f:
            self.vectorizer = joblib.load(f)
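    # The preview cuts off after the vectorizer load; the deployment also needs
    # a request handler. A sketch under the Serve 1.x API (the handler receives
    # a Starlette request; self.classifier is a hypothetical companion model
    # loaded like the vectorizer above):
    async def __call__(self, request):
        data = await request.body()
        features = self.vectorizer.transform([data.decode("utf-8")])
        return self.classifier.predict(features)[0]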
import ray
from ray import serve
# Connect to the running Ray Serve instance.
ray.init(address='auto', namespace="serve-example", ignore_reinit_error=True)
serve.start(detached=True)
# Deploy the model.
SentimentDeployment.deploy()
import requests
input_text = "Ray Serve eases the pain of model serving"
result = requests.get("http://127.0.0.1:8000/sentiment", data=input_text).text
print("Result for '{}': {}".format(input_text, result))