watsonx question answering
# Import necessary libraries
import os
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes, DecodingMethods

def get_model(model_type, api_key, project_id, max_tokens, min_tokens, decoding, temperature, top_k, top_p):
    """Build a watsonx.ai foundation model client with the given generation parameters."""
    generate_params = {
        GenParams.MAX_NEW_TOKENS: max_tokens,
        GenParams.MIN_NEW_TOKENS: min_tokens,
        GenParams.DECODING_METHOD: decoding,
        GenParams.TEMPERATURE: temperature,
        GenParams.TOP_K: top_k,
        GenParams.TOP_P: top_p,
    }
    model = Model(
        model_id=model_type,
        params=generate_params,
        credentials={
            "apikey": api_key,
            "url": "https://us-south.ml.cloud.ibm.com"
        },
        project_id=project_id
    )
    return model
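
# A quick sanity-check sketch for get_model, kept as comments so the file still
# runs top-to-bottom. Values are illustrative; generate_text() is the SDK call
# that returns the generated string directly, and SAMPLE decoding is used so
# temperature/top_k/top_p actually take effect:
#
#   model = get_model("meta-llama/llama-3-8b-instruct", api_key, project_id,
#                     max_tokens=100, min_tokens=20,
#                     decoding=DecodingMethods.SAMPLE,
#                     temperature=0.7, top_k=50, top_p=1)
#   print(model.generate_text(prompt="What is watsonx.ai?"))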

def qa(question):
    """Answer a user question with a watsonx.ai foundation model."""
    # Fetch credentials from environment variables
    api_key = os.getenv("WATSONX_API_KEY")
    project_id = os.getenv("PROJECT_ID")
    if not api_key or not project_id:
        raise ValueError("API key and Project ID must be set in environment variables")

    # Specify model and generation parameters
    model_type = "meta-llama/llama-3-8b-instruct"
    max_tokens = 100
    min_tokens = 50
    top_k = 50
    top_p = 1
    decoding = DecodingMethods.GREEDY  # note: greedy decoding ignores temperature, top_k and top_p
    temperature = 0.7

    # Get the watsonx model
    model = get_model(model_type, api_key, project_id, max_tokens, min_tokens, decoding, temperature, top_k, top_p)

    # Build the prompt and print it for review
    complete_prompt = create_prompt(question)
    print("-" * 100)
    print(f"*** Prompt: {complete_prompt} ***")
    print("-" * 100)

    generated_response = model.generate(prompt=complete_prompt)
    response_text = generated_response['results'][0]['generated_text']

    # Remove leading and trailing whitespace
    return response_text.strip()
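
# For reference, model.generate() returns a dict shaped roughly like the
# following (abridged; exact fields vary with the SDK version):
#
#   {
#       "results": [
#           {
#               "generated_text": "...",
#               "generated_token_count": 100,
#               "stop_reason": "max_tokens"
#           }
#       ]
#   }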

def create_prompt(question):
    # Llama 3 instruct chat template: a system turn, a user turn, then an open
    # assistant turn for the model to complete. (Closing the prompt with
    # <|end_of_text|> instead would mark the text as finished and can stop the
    # model from generating at all.)
    prompt = f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>

You are a chatbot assistant that answers users' questions in a comprehensive and informative way, avoiding any informal language or slang.<|eot_id|><|start_header_id|>user<|end_header_id|>

{question}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

"""
    return prompt
qa("How are you")