Skip to content

Instantly share code, notes, and snippets.

@631068264
Created May 16, 2023 02:14
Show Gist options
  • Save 631068264/f43b32aeac3486f77ac2bc583515b18b to your computer and use it in GitHub Desktop.
modelscope + kubeflow
# Stable slim Python base to keep the image small
FROM python:3.9.15-slim-buster
# Disable the pip cache to keep the image small
ENV PIP_NO_CACHE_DIR=1
# NOTE: python:*-slim-buster is Debian, not Ubuntu — the mirror rewrite must
# target deb.debian.org/security.debian.org. (The original sed replaced
# archive.ubuntu.com, which never appears in this image's sources.list, so
# the Aliyun mirror was silently never used.)
RUN sed -i -e 's/deb.debian.org/mirrors.aliyun.com/g' \
        -e 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list && \
    apt-get update && \
    apt-get install -y --no-install-recommends gcc g++ ffmpeg libsm6 libxext6 && \
    rm -rf /var/lib/apt/lists/*
# Install local wheels first; don't add unused packages to requirements.txt
COPY *.whl ./
# Use a domestic PyPI mirror to speed up downloads
RUN python -m pip install --upgrade pip \
    && pip install -i https://pypi.tuna.tsinghua.edu.cn/simple *.whl \
    && rm -rf *.whl
COPY requirements.txt ./
RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt
# Copy source code
COPY model ./model
COPY model.py ./
ENTRYPOINT ["python","-m","model"]
# Serving dependencies for the ModelScope image-classification KServe model.
# Only modelscope is pinned; the rest float — consider pinning for
# reproducible builds.
modelscope==1.0.3
kserve
mmcls
mmcv
torchvision
transformers
fairseq
timm
unicodedata2
zhconv
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import json
import os
import tempfile
from typing import Dict, Any
import kserve
import numpy
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
class CustomJsonEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy scalar and array types.

    NumPy integers/floats become builtin ``int``/``float`` and arrays become
    nested lists; anything else defers to the standard encoder (which raises
    ``TypeError`` for unsupported types).
    """

    def default(self, obj):
        if isinstance(obj, numpy.ndarray):
            return obj.tolist()
        if isinstance(obj, (numpy.integer, numpy.floating)):
            # .item() yields the matching builtin scalar (int or float).
            return obj.item()
        return super().default(obj)
def safe_json(output: Any) -> str:
    """Serialize *output* to compact JSON that is safe to embed in HTML.

    Uses :class:`CustomJsonEncoder` so NumPy values serialize cleanly,
    forbids NaN/Infinity (``allow_nan=False`` raises ``ValueError``), and
    escapes ``</`` so the payload cannot close a surrounding ``<script>``.
    """
    serialized = json.dumps(
        output,
        cls=CustomJsonEncoder,
        ensure_ascii=False,
        allow_nan=False,
        indent=None,
        separators=(",", ":"),
    )
    # Escape "</" to prevent "</script>"-style breakout when inlined in HTML.
    return serialized.replace("</", "<\\/")
class AIModel(kserve.Model):
    """KServe model wrapper around a ModelScope image-classification pipeline."""

    def __init__(self, name: str):
        super().__init__(name)
        self.name = name
        # Load eagerly so the instance is serving-ready after construction.
        self.load()

    def load(self):
        # Builds the pipeline from the local './model' directory
        # (copied into the image by the Dockerfile).
        self.model = pipeline(Tasks.image_classification, model='model')
        self.ready = True

    def predict(self, request: Dict) -> Dict:
        """Run inference on the first base64-encoded image in the request.

        Expects a KServe v1 payload: {"instances": [{"image": {"b64": ...}}]}.
        Returns {"predictions": <JSON string of the pipeline output>}.
        """
        inputs = request["instances"]
        data = inputs[0]["image"]["b64"]
        img = base64.b64decode(data)
        # Close the temp file BEFORE inference so the bytes are flushed to
        # disk — the original called the model inside the `with`, when the
        # write could still be sitting in the unflushed buffer.
        # delete=False keeps the file on disk past the `with` block.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as temp:
            temp.write(img)
        try:
            output = self.model(temp.name)
        finally:
            # Always remove the temp file — the original leaked it whenever
            # inference raised.
            os.remove(temp.name)
        return {"predictions": safe_json(output)}
if __name__ == "__main__":
    # MODEL_NAME must come from the environment; the key cannot change and
    # its value determines the serving URL: /v1/models/{MODEL_NAME}:predict
    model = AIModel(os.environ.get('MODEL_NAME', 'custom-model'))
    # AIModel.__init__ already calls load(); calling it again here rebuilt
    # the ModelScope pipeline a second time and doubled startup cost.
    kserve.ModelServer(workers=1).start([model])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment