Skip to content

Instantly share code, notes, and snippets.

@kishida
Created October 25, 2023 05:09
Show Gist options
  • Save kishida/46e7d540c64b7b7a9eda3be987bbbc6b to your computer and use it in GitHub Desktop.
Save kishida/46e7d540c64b7b7a9eda3be987bbbc6b to your computer and use it in GitHub Desktop.
ct2 chat
# based on StableLM chat
# https://huggingface.co/spaces/stabilityai/stablelm-tuned-alpha-chat
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList
import ctranslate2
# --- Model setup (runs once at import time) ---
print("Starting to load the model to memory")

# HF tokenizer repo and the matching CTranslate2-converted model directory.
model_name = "line-corporation/japanese-large-lm-3.6b-instruction-sft"
ct2_model = "line-3.6b-sft-ct2"
#ct2_model = "line-3.6b-sft-ct2-fp16"
#model_name = "matsuo-lab/weblab-10b-instruction-sft"
#ct2_model = "weblab-sft-ct2"
print(f"model: {model_name}\nct2: {ct2_model}")

# Generation runs through CTranslate2; only the tokenizer comes from HF.
generator = ctranslate2.Generator(ct2_model)
# use_fast=False: load the slow (sentencepiece-based) tokenizer for this model.
tok = AutoTokenizer.from_pretrained(model_name, use_fast=False)
print("Successfully loaded the model to the memory")

# Default system message shown in the hidden "System Message" textbox.
start_message = ""
class StopOnTokens(StoppingCriteria):
    """Stopping criterion: halt generation when the last emitted token is a stop id."""

    # Model-specific stop/EOS token ids (includes pad=0 and eos=1).
    _STOP_IDS = (50278, 50279, 50277, 1, 0)

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        last_token = input_ids[0][-1]
        return any(last_token == stop_id for stop_id in self._STOP_IDS)
def user(message, history):
    """Record a new user turn.

    Returns an empty string (to clear the input textbox) and the history
    extended with ``[message, ""]`` — the empty reply slot is filled in
    later by the streaming generator.
    """
    extended_history = history + [[message, ""]]
    return "", extended_history
def chat(curr_system_message, history):
    """Stream a model reply for the latest user turn in *history*.

    Builds a prompt from a fixed Japanese system preamble plus the full
    conversation, runs CTranslate2 token streaming, and yields the updated
    history after each decoded token so Gradio renders the reply live.

    Note: the *curr_system_message* argument is deliberately ignored — it
    is immediately overwritten with the hard-coded preamble below.
    """
    # Hard-coded system preamble (shadows the parameter on purpose).
    curr_system_message = """ユーザー: 質問に答えたり要望を実現してください。
システム: わかりました。
"""
    # Prompt = preamble + alternating "user / system" turns from history.
    messages = curr_system_message + \
        "\n".join(["\n".join(["ユーザー: "+item[0], "システム: "+item[1]])
                   for item in history])
    # generate_tokens expects string tokens, so encode then map ids -> tokens.
    model_inputs = tok.encode(messages, add_special_tokens=False)
    model_inputs = tok.convert_ids_to_tokens(model_inputs)
    streamer = generator.generate_tokens(
        model_inputs,
        sampling_temperature=0.8,
        sampling_topk=20,
        max_length=1024,
    )
    # Accumulate decoded text token by token into the last history entry.
    partial_text = ""
    for new_text in streamer:
        if new_text.token_id == tok.eos_token_id:
            continue
        # NOTE(review): tokenizer.decode takes skip_special_tokens, not
        # add_special_tokens — the kwarg below is silently swallowed; a
        # single non-special token id is unaffected either way.
        partial_text += tok.decode(new_text.token_id, add_special_tokens=False)
        history[-1][1] = partial_text
        # Yield the updated conversation history for incremental rendering.
        yield history
    return partial_text
# --- Gradio UI wiring (top-level script) ---
with gr.Blocks() as demo:
    # history = gr.State([])
    gr.Markdown(f"## {model_name}")
    # NOTE(review): .style() is the pre-4.x Gradio component API — assumes an
    # older gradio version; confirm against the pinned dependency.
    chatbot = gr.Chatbot().style(height=500)
    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(label="Chat Message Box", placeholder="Chat Message Box",
                             show_label=False).style(container=False)
        with gr.Column():
            with gr.Row():
                submit = gr.Button("Submit")
                stop = gr.Button("Stop")
                clear = gr.Button("Clear")
    # Hidden, non-interactive holder for the system message passed to chat().
    system_msg = gr.Textbox(
        start_message, label="System Message", interactive=False, visible=False)
    # Enter in the textbox: record the user turn (user), then stream the reply (chat).
    submit_event = msg.submit(fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
        fn=chat, inputs=[system_msg, chatbot], outputs=[chatbot], queue=True)
    # Submit button mirrors the Enter-key chain above.
    submit_click_event = submit.click(fn=user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
        fn=chat, inputs=[system_msg, chatbot], outputs=[chatbot], queue=True)
    # Stop cancels any in-flight generation started by either trigger.
    stop.click(fn=None, inputs=None, outputs=None, cancels=[
        submit_event, submit_click_event], queue=False)
    # Clear empties the chatbot display.
    clear.click(lambda: None, None, [chatbot], queue=False)

# Up to 32 queued requests, 2 handled concurrently; then serve the app.
demo.queue(max_size=32, concurrency_count=2)
demo.launch()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment