Skip to content

Instantly share code, notes, and snippets.

@kishida
Created October 25, 2023 05:10
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save kishida/979ff57256896e8b9f801ed192b0e6ee to your computer and use it in GitHub Desktop.
llm chat
# based on StableLM chat
# https://huggingface.co/spaces/stabilityai/stablelm-tuned-alpha-chat
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
import time
import numpy as np
from torch.nn import functional as F
import os
from threading import Thread
# Load the tokenizer and model once at startup; chat() reuses them for every request.
# FIX: dropped the pointless f-prefix on placeholder-free strings and corrected
# the "Sucessfully" typo in the log message.
print("Starting to load the model to memory")
model_name = "line-corporation/japanese-large-lm-1.7b-instruction-sft"
#model_name = "line-corporation/japanese-large-lm-3.6b-instruction-sft"
m = AutoModelForCausalLM.from_pretrained(model_name)
# use_fast=False — presumably this model's tokenizer has no fast variant; confirm.
tok = AutoTokenizer.from_pretrained(model_name, use_fast=False)
generator = pipeline('text-generation', model=m, tokenizer=tok)
print("Successfully loaded the model to the memory")
# Default system message shown (hidden) in the UI; empty for this model.
start_message = ""
class StopOnTokens(StoppingCriteria):
    """Stopping criterion: end generation once the last emitted token is a stop id."""

    # Token ids that terminate generation (carried over from the StableLM demo
    # this script is based on — verify they match the current model's vocab).
    _STOP_IDS = (50278, 50279, 50277, 1, 0)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        last_token = input_ids[0][-1]
        return any(last_token == stop_id for stop_id in self._STOP_IDS)
def user(message, history):
    """Record a new user turn.

    Returns an empty string (to clear the message textbox) together with the
    conversation history extended by ``[message, ""]`` — the empty slot is
    filled in later by chat() as the model reply streams in.
    """
    extended = history + [[message, ""]]
    return "", extended
def chat(curr_system_message, history):
    """Stream a model reply for the newest user turn in *history*.

    Runs generation on a background thread and yields *history* repeatedly,
    with the last assistant message growing as tokens arrive, so the Gradio
    chatbot updates live.
    """
    # Stop generation as soon as a stop token is produced.
    stop = StopOnTokens()

    # NOTE: the incoming system message is deliberately ignored — the UI field
    # is empty and non-interactive — so the prompt is built from history alone.
    # BUGFIX: turns were previously concatenated with NO separator (the system
    # message was cleared to "" and then used as the join separator); join the
    # turns with newlines so the model sees one speaker line per row.
    messages = "\n".join(
        "\n".join(("ユーザー: " + user_msg, "システム: " + assistant_msg))
        for user_msg, assistant_msg in history
    )

    # Tokenize the prompt text; add_special_tokens=False keeps it verbatim.
    model_inputs = tok.encode(messages, return_tensors="pt", add_special_tokens=False)

    # Stream decoded tokens back as they are generated, skipping the prompt.
    streamer = TextIteratorStreamer(
        tok, timeout=10., skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=1000,
        temperature=0.7,
        num_beams=1,
        pad_token_id=tok.pad_token_id,
        bos_token_id=tok.bos_token_id,
        eos_token_id=tok.eos_token_id,
        no_repeat_ngram_size=2,
        stopping_criteria=StoppingCriteriaList([stop]),
    )
    # Generate on a worker thread so this generator can yield partial output
    # while tokens are still being produced.
    t = Thread(target=m.generate, args=[model_inputs], kwargs=generate_kwargs)
    t.start()

    # Accumulate streamed text into the last assistant slot, yielding the
    # updated history after every chunk.
    partial_text = ""
    for new_text in streamer:
        partial_text += new_text
        history[-1][1] = partial_text
        yield history
    return partial_text
# Build the Gradio UI: chatbot pane, message box, and Submit/Stop/Clear controls.
with gr.Blocks() as demo:
    gr.Markdown(f"## {model_name} Chat")
    chatbot = gr.Chatbot().style(height=500)
    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(label="Chat Message Box", placeholder="Chat Message Box",
                             show_label=False).style(container=False)
        with gr.Column():
            with gr.Row():
                submit = gr.Button("Submit")
                stop = gr.Button("Stop")
                clear = gr.Button("Clear")
    # Hidden, read-only holder for the (empty) system message passed to chat().
    system_msg = gr.Textbox(
        start_message, label="System Message", interactive=False, visible=False)

    # Pressing Enter in the textbox and clicking Submit behave identically:
    # first record the user turn, then stream the model reply into the chatbot.
    enter_stream = msg.submit(fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
        fn=chat, inputs=[system_msg, chatbot], outputs=[chatbot], queue=True)
    click_stream = submit.click(fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
        fn=chat, inputs=[system_msg, chatbot], outputs=[chatbot], queue=True)
    # Stop cancels any in-flight generation; Clear empties the chat pane.
    stop.click(fn=None, inputs=None, outputs=None,
               cancels=[enter_stream, click_stream], queue=False)
    clear.click(lambda: None, None, [chatbot], queue=False)

demo.queue(max_size=32, concurrency_count=2)
demo.launch()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment