Edward Oakes (edoakes)

  • Anyscale
  • Minneapolis, MN
import ray
from ray import serve

# Connect to the local running Ray cluster and start Ray Serve on it.
ray.init(address='auto', namespace="serve-example", ignore_reinit_error=True)
serve.start(detached=True)

# Deploy the model (assumes the SentimentDeployment class defined below is in scope).
SentimentDeployment.deploy()
from transformers import pipeline
from ray import serve

@serve.deployment(route_prefix="/sentiment", name="sentiment")
class SentimentDeployment:
    def __init__(self):
        # Load the Hugging Face sentiment-analysis pipeline once per replica.
        self.classifier = pipeline("sentiment-analysis")

    async def __call__(self, request):
        data = await request.body()
        [result] = self.classifier(str(data))
        # Return the predicted label, e.g. "POSITIVE" or "NEGATIVE".
        return result["label"]
import ray
from ray import serve

ray.init(address='auto', namespace="serve-example", ignore_reinit_error=True)  # Connect to the local running Ray cluster.
serve.start(detached=True)  # Start the Ray Serve processes within the Ray cluster.
import requests

input_text = "Ray Serve eases the pain of model serving"
result = requests.get("http://127.0.0.1:8000/sentiment", data=input_text).text
print("Result for '{}': {}".format(input_text, result))
import ray
from ray import serve

# Connect to the running Ray Serve instance.
ray.init(address='auto', namespace="serve-example", ignore_reinit_error=True)
serve.start(detached=True)

# Deploy the model.
SentimentDeployment.deploy()
import joblib
import s3fs
import sklearn
from ray import serve

@serve.deployment(route_prefix="/sentiment", name="sentiment-deployment")
class SentimentDeployment:
    def __init__(self):
        # Download the pre-trained scikit-learn artifacts from a public S3 bucket.
        fs = s3fs.S3FileSystem(anon=True)
        with fs.open('ray-serve-blog/unigram_vectorizer.joblib', 'rb') as f:
            self.vectorizer = joblib.load(f)
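        # NOTE: the gist preview is truncated here. The lines below are a hypothetical
        # continuation -- the artifact name and prediction logic are assumptions,
        # not necessarily the blog's exact code.
        with fs.open('ray-serve-blog/unigram_classifier.joblib', 'rb') as f:
            self.classifier = joblib.load(f)

    async def __call__(self, request):
        data = await request.body()
        # Vectorize the raw request text, then run the classifier on the features.
        features = self.vectorizer.transform([str(data)])
        return str(self.classifier.predict(features)[0])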
import ray
from ray import serve

# Connect to the running Ray Serve instance.
ray.init(address='auto', ignore_reinit_error=True)
serve.init()

# Deploy the model.
serve.create_backend("sklearn_backend", SKLearnBackend)
serve.create_endpoint("sentiment_endpoint", backend="sklearn_backend", route="/sentiment")
import requests

from ray import serve

serve.init()  # Connect to the running Ray Serve instance.
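The preview of this snippet also cuts off; a plausible continuation, assuming it queries the sentiment endpoint over HTTP the same way the newer snippet above does:

input_text = "Ray Serve eases the pain of model serving"
result = requests.get("http://127.0.0.1:8000/sentiment", data=input_text).text
print("Result for '{}': {}".format(input_text, result))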
# Main concepts
## Endpoints
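Only the headings of this document survive in the preview. In the pre-1.0 Serve API used elsewhere on this page, an endpoint names an HTTP route and traffic to it is served by one or more backends. A minimal sketch under that assumption (the names here are illustrative, not taken from the original doc):

from ray import serve

serve.init()

def echo(request):
    # A backend is just a function (or class) that handles requests.
    return "hello world"

serve.create_backend("echo_backend", echo)
# An endpoint exposes the backend at an HTTP route.
serve.create_endpoint("echo_endpoint", backend="echo_backend", route="/echo")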
edoakes / monte_carlo_pi.py
Last active May 10, 2022 11:46
Monte Carlo Pi estimation
import argparse
import time
import random
import math

parser = argparse.ArgumentParser(description="Approximate digits of Pi using Monte Carlo simulation.")
parser.add_argument("--num-samples", type=int, default=1000000)
# Run the sampling in parallel with Ray on a single machine.
parser.add_argument("--parallel", default=False, action="store_true")
# Run the sampling across a multi-node Ray cluster.
parser.add_argument("--distributed", default=False, action="store_true")
edoakes / ray-cluster.yaml
Last active August 11, 2021 13:46
10-node AWS Ray cluster configuration
cluster_name: monte_carlo_pi

# The number of worker nodes to launch in addition to the head node.
min_workers: 9
max_workers: 9

provider:
    type: aws
    region: us-west-2
    availability_zone: us-west-2a
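The preview is truncated; a complete cluster-launcher config would also specify head and worker node settings (instance types, AMI, and so on). Once filled in, a cluster defined this way is typically brought up with the Ray cluster launcher via "ray up ray-cluster.yaml" and torn down with "ray down ray-cluster.yaml".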