PyTorch AI platform deployment

This is an example of how to deploy a PyTorch model on the GCP AI Platform.

Prerequisite

Download a pre-trained model and labels into the models/ directory:

mkdir models/
wget https://download.pytorch.org/models/resnet18-5c106cde.pth -P models/
wget https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json -P models/

Deployment

First, set a few configuration variables. Create a Cloud Storage bucket for the packages and data beforehand (an example command follows the variables).

NAME="TorchDemo"
VERSION_NAME="v1"
PACKAGE_VERSION="0.1"
ROOT_PATH="gs://bucket-name/torch-deploy"
MODEL_PATH="${ROOT_PATH}/models"
PACKAGE_PATH="${ROOT_PATH}/packages"
REGION="asia-northeast1"
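If the bucket does not exist yet, one way to create it with gsutil (bucket-name here is the same placeholder used in ROOT_PATH above):

gsutil mb -l $REGION gs://bucket-name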

Next, create a model:

gcloud ai-platform models create $NAME \
    --regions $REGION \
    --enable-logging

Prepare the package and model data for deployment:

# Package and upload the predictor.
python setup.py sdist
gsutil -m rsync -x "\..*" dist/ $PACKAGE_PATH/

# Upload any model data to the origin path.
gsutil -m rsync -x "\..*" models/ $MODEL_PATH/

Once the package and data are available, deploy the model version with the following:

gcloud beta ai-platform versions create $VERSION_NAME \
    --model $NAME \
    --runtime-version 1.15 \
    --python-version 3.7 \
    --origin $MODEL_PATH \
    --package-uris ${PACKAGE_PATH}/predictor-${PACKAGE_VERSION}.tar.gz \
    --prediction-class predictor.Predictor
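Optionally, confirm that the version finished deploying:

gcloud ai-platform versions describe $VERSION_NAME --model $NAME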

Online prediction

To test that the deployed model works, create input data in a JSON file:

tee instances.json <<EOF
{"image_url": "https://..."}
EOF
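The predictor also accepts base64-encoded image bytes instead of a URL. A sketch, assuming a local sample.jpg and GNU coreutils base64 (on macOS, use base64 -b 0 instead of -w0):

tee instances.json <<EOF
{"image_bytes": "$(base64 -w0 sample.jpg)"}
EOF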

Then make a prediction:

gcloud ai-platform predict \
    --model $NAME \
    --version $VERSION_NAME \
    --json-instances instances.json
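The same request can also be made from Python through the AI Platform REST API; a minimal sketch using google-api-python-client (PROJECT_ID and the image URL are placeholders):

# query_model.py -- hypothetical client-side example, not part of the gist.
# Requires google-api-python-client and application-default credentials
# (gcloud auth application-default login).
from googleapiclient import discovery

service = discovery.build('ml', 'v1')
name = 'projects/PROJECT_ID/models/TorchDemo/versions/v1'
response = service.projects().predict(
    name=name,
    body={'instances': [{'image_url': 'https://...'}]},
).execute()
print(response['predictions'])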

PyTorch installation on AI platform

The PyTorch CPU package is not available via the standard PyPI index. See setup.py below for how to work around this in the installation steps.

# predictor/__init__.py
import os
import requests
import json
from base64 import b64decode
from io import BytesIO
from PIL import Image
import torch
import torchvision.models as models
import torchvision.transforms as transforms


def load_imagenet_labels(path):
    """Fetch ImageNet class labels."""
    with open(path, 'r') as f:
        labels = json.load(f)
    return [labels[str(i)][1] for i in range(len(labels))]


class Predictor(object):
    """Interface for constructing custom predictors."""

    def __init__(self, model, labels):
        self._model = model
        self._transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
        ])
        self._labels = labels
        self._model.eval()

    def predict(self, instances, **kwargs):
        """Performs custom prediction.

        Instances are the decoded values from the request. They have already
        been deserialized from JSON.

        Args:
            instances: A list of prediction input instances.
            **kwargs: A dictionary of keyword args provided as additional
                fields on the predict request body.

        Returns:
            A list of outputs containing the prediction results. This list
            must be JSON serializable.
        """
        with torch.no_grad():
            batch = torch.stack([
                self._transform(self._load_image(x)) for x in instances
            ])
            results = self._model(batch).tolist()
        return [
            self._sort_prediction(
                prediction, kwargs.get('max_items', 10)
            ) for prediction in results
        ]
    def _load_image(self, instance):
        """Load an image from either base64-encoded bytes or a URL."""
        if 'image_bytes' in instance:
            image_bytes = instance['image_bytes']
            # JSON deserialization yields str; normalize bytes input too.
            if isinstance(image_bytes, bytes):
                image_bytes = image_bytes.decode('utf-8')
            if image_bytes.startswith('data:'):
                # Strip a data-URI prefix such as "data:image/jpeg;base64,".
                image_bytes = image_bytes.split(',')[-1]
            return Image.open(BytesIO(b64decode(image_bytes)))
        elif 'image_url' in instance:
            return Image.open(
                BytesIO(requests.get(instance['image_url']).content)
            )
        else:
            raise KeyError('No image_bytes or image_url found')
    def _sort_prediction(self, prediction, max_items):
        """Sort prediction results with labels."""
        result = sorted(
            zip(self._labels, prediction), key=lambda x: x[1], reverse=True
        )
        if max_items:
            return result[:min(max_items, len(result))]
        return result

    @classmethod
    def from_path(cls, model_dir):
        """Creates an instance of Predictor using the given path.

        Loading of the predictor should be done in this method.

        Args:
            model_dir: The local directory that contains the exported model
                file along with any additional files uploaded when creating
                the version resource.

        Returns:
            An instance implementing this Predictor class.
        """
        model = models.resnet18()
        state_dict = torch.load(
            os.path.join(model_dir, 'resnet18-5c106cde.pth')
        )
        model.load_state_dict(state_dict)
        labels = load_imagenet_labels(
            os.path.join(model_dir, 'imagenet_class_index.json')
        )
        return cls(model, labels)
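Before deploying, the predictor can be exercised locally against the files downloaded into models/; a minimal smoke-test sketch (sample.jpg is an assumed local image, any JPEG works):

# local_test.py -- hypothetical local check, run from the repository root.
import base64
from predictor import Predictor

predictor = Predictor.from_path('models')
with open('sample.jpg', 'rb') as f:
    instance = {'image_bytes': base64.b64encode(f.read()).decode('ascii')}
# Prints the top-3 (label, score) pairs, as the deployed version would return.
print(predictor.predict([instance], max_items=3))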
# setup.py
from setuptools import setup, find_packages
from setuptools.command.install import install as _install

INSTALL_REQUIRES = [
    'pillow',
    'requests',
]

CUSTOM_INSTALL_COMMANDS = [
    # Install the CPU-only torch/torchvision wheels from the PyTorch index.
    [
        'python-default', '-m', 'pip', 'install', '--target=/tmp/custom_lib',
        '-b', '/tmp/pip_builds', 'torch==1.4.0+cpu', 'torchvision==0.5.0+cpu',
        '-f', 'https://download.pytorch.org/whl/torch_stable.html'
    ],
]


class Install(_install):
    def run(self):
        import sys
        if sys.platform == 'linux':
            import subprocess
            import logging
            for command in CUSTOM_INSTALL_COMMANDS:
                logging.info('Custom command: ' + ' '.join(command))
                result = subprocess.run(
                    command, check=True, stdout=subprocess.PIPE
                )
                logging.info(result.stdout.decode('utf-8', 'ignore'))
        _install.run(self)


setup(
    name='predictor',
    version='0.1',
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    cmdclass={'install': Install},
)