Skip to content

Instantly share code, notes, and snippets.

@kishida
Created June 7, 2024 14:09
Show Gist options
  • Save kishida/7332b8212871cb9298671c99251a45a0 to your computer and use it in GitHub Desktop.
日本語CLIPを使う分類 (Image classification using a Japanese CLIP model)
import gradio as gr
import io
import requests
from PIL import Image
import torch
from transformers import AutoImageProcessor, AutoModel, AutoTokenizer
# Hugging Face model id for LINE's Japanese CLIP; trust_remote_code is required
# because this repo ships custom model/tokenizer code.
HF_MODEL_PATH = 'line-corporation/clip-japanese-base'
tokenizer = AutoTokenizer.from_pretrained(HF_MODEL_PATH, trust_remote_code=True)
processor = AutoImageProcessor.from_pretrained(HF_MODEL_PATH, trust_remote_code=True)
model = AutoModel.from_pretrained(HF_MODEL_PATH, trust_remote_code=True)
# NOTE(review): `device` is computed but never used below — the model and
# tensors are never moved to it, so inference runs on CPU regardless.
device = "cuda" if torch.cuda.is_available() else "cpu"
def process_input(image, text):
    """Classify *image* against the newline-separated labels in *text*.

    Encodes the image and each label with the Japanese CLIP model, picks
    the label with the highest image-text similarity, and returns it with
    the suffix "ぽい" ("-ish").

    Args:
        image: PIL image from the Gradio Image component.
        text: Newline-separated candidate labels, one per line.

    Returns:
        str: best-matching label + "ぽい", or a prompt message when no
        non-blank labels were supplied (previously this crashed in the
        tokenizer on an empty batch).
    """
    # Keep only non-blank lines so blank/trailing input cannot produce
    # an empty tokenizer batch.
    labels = [line for line in text.rstrip().splitlines() if line.strip()]
    if not labels:
        return "分類をいれてください"
    image_inputs = processor(image, return_tensors="pt")
    text_inputs = tokenizer(labels)
    with torch.no_grad():
        image_features = model.get_image_features(**image_inputs)
        text_features = model.get_text_features(**text_inputs)
    # Scaled similarity logits, softmaxed over the candidate labels.
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
    # `.item()` on the single image's argmax; renamed from `id`, which
    # shadowed the builtin.
    best = torch.argmax(text_probs, dim=1)[0].item()
    return f"{labels[best]}ぽい"
# Wire up the Gradio UI: an image input and a multi-line textbox of
# candidate labels feed process_input; the result is shown in a textbox.
demo = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Image(type="pil"),
        gr.Textbox(lines=5, placeholder="分類をいれてください", value="犬\n猫\n象"),
    ],
    outputs=gr.Textbox(),
)
demo.launch()
@kishida
Copy link
Author

kishida commented Jun 7, 2024

bandicam.2024-06-07.22-49-59-544.CLIP.mp4

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment