"""Flask + Waitress inference server exposing several NLP and vision models:
active/passive voice detection, NER, POS tagging, sentiment analysis,
document classification, image classification (ViT), and object detection (YOLOv5)."""
import base64
import io

import numpy as np
import pandas as pd
import spacy
import torch
from flask import Flask, request, jsonify
from PIL import Image
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import ViTFeatureExtractor, ViTForImageClassification
from waitress import serve

from spacymodels.activeorpassive.model import find_passive_or_active

app = Flask(__name__)

# Local model paths; raw strings keep the Windows backslashes from being
# interpreted as escape sequences.
SPACY_MODEL_DIR = r"D:\MODELS\spacymodels\models"
IMAGE_MODEL_DIR = r"D:\MODELS\image-classification\models"
SENTIMENT_MODEL_DIR = r"D:\MODELS\sentiment-analysis\models"
DOCUMENT_MODEL_DIR = r"D:\MODELS\documentclassification\models"
YOLO_REPO_DIR = r"D:\MODELS\yolov5"
YOLO_MODEL_PATH = r"D:\MODELS\yolov5\yolov5s.pt"

@app.route('/active_or_passive', methods=['POST'])
def predict_active_or_passive():
    text = request.json['sentence']
    response = {}
    response['result'] = find_passive_or_active(text)
    return jsonify(response)

@app.route('/ner', methods=['POST'])
def predict_ner():
    text = request.json['sentence']
    # NOTE: loading the pipeline on every request is slow; caching it once at
    # startup (like the transformers models below) would be cheaper.
    nlp = spacy.load(SPACY_MODEL_DIR)
    doc = nlp(text)
    entities = [(ent.label_, ent.text) for ent in doc.ents]
    df = pd.DataFrame(entities, columns=['label', 'text'])
    return jsonify(df.to_dict(orient='records'))
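
# Example /ner exchange (illustrative only; the entity labels depend on the
# spaCy pipeline loaded from SPACY_MODEL_DIR):
#   request:  {"sentence": "Apple is opening a new office in Chennai."}
#   response: [{"label": "ORG", "text": "Apple"}, {"label": "GPE", "text": "Chennai"}]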

@app.route('/parts_of_speech', methods=['POST'])
def predict_pos():
    text = request.json['sentence']
    nlp = spacy.load(SPACY_MODEL_DIR)
    doc = nlp(text)
    # iterate over the doc's tokens (not doc.ents) to get one POS tag per token
    tokens = [(token.pos_, token.text) for token in doc]
    df = pd.DataFrame(tokens, columns=['label', 'text'])
    return jsonify(df.to_dict(orient='records'))

@app.route('/sentiment-analysis', methods=['POST'])
def predict_sentiment():
    text = request.json["sentence"]
    inputs = sentiment_tokenizer(text, padding=True, truncation=True, return_tensors="pt")
    outputs = sentiment_model(**inputs)
    preds = torch.nn.functional.softmax(outputs.logits, dim=-1)
    index = int(np.argmax(preds.detach().numpy()))
    response = {}
    response["Predicted Class"] = sentiment_model.config.id2label[index]
    response['text'] = text
    response['label'] = index
    return jsonify(response)
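
# Example /sentiment-analysis exchange (illustrative only; the class names and
# indices come from the fine-tuned model's id2label mapping):
#   request:  {"sentence": "The support team was quick and helpful."}
#   response: {"Predicted Class": "positive", "text": "The support team was quick and helpful.", "label": 2}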

@app.route('/documentclassify', methods=['POST'])
def predict_doc():
    text = request.json["sentence"]
    inputs = doc_tokenizer(text, padding=True, truncation=True, return_tensors="pt")
    outputs = doc_model(**inputs)
    preds = torch.nn.functional.softmax(outputs.logits, dim=-1)
    index = int(np.argmax(preds.detach().numpy()))
    response = {}
    response['text'] = text
    response['label'] = index
    return jsonify(response)

@app.route('/image-classify', methods=['POST'])
def predict_image():
    request_body = request.json
    filename = request_body['filename']  # currently unused
    file_encoded_string = request_body['encodedFileString']
    image = Image.open(io.BytesIO(base64.b64decode(file_encoded_string)))
    image = np.array(image, np.uint8)
    if image.shape[-1] == 4:  # drop the alpha channel of RGBA images
        image = image[..., :3]
    inputs = image_feature_extractor(images=image, return_tensors="pt")
    outputs = image_model(**inputs)
    logits = outputs.logits
    # the model predicts one of its configured classes
    # (e.g. the 1000 ImageNet classes for a stock ViT checkpoint)
    predicted_class_idx = logits.argmax(-1).item()
    response = {}
    response["Predicted Class"] = image_model.config.id2label[predicted_class_idx]
    return jsonify(response)
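
# Building the request body on the client side (a minimal sketch; "filename"
# and "encodedFileString" are the JSON fields this endpoint expects):
#   with open("cat.jpg", "rb") as f:
#       body = {"filename": "cat.jpg",
#               "encodedFileString": base64.b64encode(f.read()).decode("utf-8")}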
@app.route("/object-detection", methods=["POST"])
def predict_object_detection():
if request.method == "POST":
request_body = request.json
filename = request_body['filename']
file_encoded_string = request_body['encodedFileString']
image = Image.open(io.BytesIO(base64.b64decode(file_encoded_string)))
image = np.array(image, np.uint8)
if image.shape[-1] == 4:
image = image[..., :3]
img = image
results = model(img, size=640) # reduce size=320 for faster inference
results.render()
for image in results.imgs:
buffered = io.BytesIO()
img_base64 = Image.fromarray(image)
img_base64.save(buffered, format="JPEG")
return base64.b64encode(buffered.getvalue()).decode('utf-8')
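
# Decoding the /object-detection response on the client (a sketch; unlike the
# other endpoints, this one returns a bare base64 JPEG string, not JSON):
#   img_bytes = base64.b64decode(resp.text)
#   Image.open(io.BytesIO(img_bytes)).save("detections.jpg")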

if __name__ == '__main__':
    # load every model once at startup so requests don't pay the loading cost
    # id2label below defines four document classes, so num_labels is set to 4
    # to match (adjust if the saved checkpoint's classification head differs)
    doc_model = AutoModelForSequenceClassification.from_pretrained(DOCUMENT_MODEL_DIR, num_labels=4)
    doc_model.config.id2label = {0: "MEDICAL DOCUMENT", 1: "INVOICE DOCUMENT", 2: "W3-FORM", 3: "BILL"}
    doc_tokenizer = AutoTokenizer.from_pretrained(DOCUMENT_MODEL_DIR)
    # tokenizers take no num_labels argument; the label count lives in the
    # fine-tuned model's own config
    sentiment_tokenizer = AutoTokenizer.from_pretrained(SENTIMENT_MODEL_DIR)
    sentiment_model = AutoModelForSequenceClassification.from_pretrained(SENTIMENT_MODEL_DIR)
    image_feature_extractor = ViTFeatureExtractor.from_pretrained(IMAGE_MODEL_DIR)
    image_model = ViTForImageClassification.from_pretrained(IMAGE_MODEL_DIR)
    model = torch.hub.load(YOLO_REPO_DIR, "custom", path=YOLO_MODEL_PATH, source='local')
    serve(app, port=8081, host='0.0.0.0')
    # app.run(host='0.0.0.0', port=8080, debug=True)
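
# Minimal client sketch for exercising the server (kept as comments so it does
# not run inside the server process; assumes the server is reachable at
# http://localhost:8081 and that the `requests` package is installed; the
# sentence and image path are placeholders):
#
#   import base64
#   import requests
#
#   BASE = "http://localhost:8081"
#
#   r = requests.post(f"{BASE}/sentiment-analysis",
#                     json={"sentence": "The movie was surprisingly good."})
#   print(r.json())
#
#   with open("sample.jpg", "rb") as f:
#       payload = {"filename": "sample.jpg",
#                  "encodedFileString": base64.b64encode(f.read()).decode("utf-8")}
#   r = requests.post(f"{BASE}/image-classify", json=payload)
#   print(r.json())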