# Gist by @rohan-paul, created April 21, 2024 10:32
# Use case
import random
import textwrap


def make_prompt(input_text: str, system_prompt="", max_length=512) -> str:
    """
    Generates text with a large language model from the given input prompt.

    Args:
        input_text (str): The input text for prompting.
        system_prompt (str): The system prompt (currently unused).
        max_length (int): The maximum length of the generated text.

    Returns:
        A string containing the generated text, wrapped to 90 characters per line.
    """
    # Build an instruction-style prompt using the [INST] chat template.
    prompt = f"<s>[INST]{input_text}[/INST]"
    tokenized_input = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
    model_inputs = tokenized_input.to(device)
    output = model.generate(
        **model_inputs,
        max_length=max_length,
        use_cache=True,
        early_stopping=True,
        bos_token_id=model.config.bos_token_id,
        eos_token_id=model.config.eos_token_id,
        pad_token_id=model.config.eos_token_id,
        temperature=0.1,
        do_sample=True,
    )
    # Randomly select one of the generated outputs
    response = random.choice(tokenizer.batch_decode(output))
    # Wrap the response text to a width of 90 characters
    wrapped_response = textwrap.fill(response, width=90)
    print(wrapped_response)
    return wrapped_response


# The code assumes that 'tokenizer', 'model', and 'device' are defined
# and available before calling this function.
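
# A minimal setup sketch for those globals, not part of the original gist: it assumes
# a Mistral-style instruct checkpoint (chosen because the prompt uses the
# <s>[INST]...[/INST] template); the model name below is an illustrative assumption,
# adjust it to whatever model was actually used.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "mistralai/Mistral-7B-Instruct-v0.2"  # assumed checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16
).to(device)

# Example call: prints the response wrapped at 90 characters and returns it.
answer = make_prompt("Explain the difference between supervised and unsupervised learning.")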