Ollama example with params
from ollama import Client


def ollama_chat_call(model, prompt, ip='0.0.0.0', port=11434):
    """Send a single-turn chat request to an Ollama server and return the full response."""
    client = Client(host=f'http://{ip}:{port}')  # 11434 is Ollama's default port
    response = client.chat(model=model,
                           messages=[{'role': 'user', 'content': prompt}],
                           # Sampling options: fixed seed for reproducibility, up to
                           # 1000 tokens, repetition penalty, nucleus (top_p) and top-k sampling.
                           options={"seed": 5, "temperature": 0.8, "num_predict": 1000,
                                    "repeat_penalty": 1.2, "top_p": 0.9, "top_k": 40},
                           stream=False)
    return response


def ollama_generate_call(model, prompt, ip='0.0.0.0', port=11434):
    """Send a raw completion request (no chat template) with the same sampling options."""
    client = Client(host=f'http://{ip}:{port}')
    response = client.generate(model=model,
                               prompt=prompt,
                               options={"seed": 5, "temperature": 0.8, "num_predict": 1000,
                                        "repeat_penalty": 1.2, "top_p": 0.9, "top_k": 40},
                               stream=False)
    return response


if __name__ == '__main__':
    model = "llama3:instruct"
    prompt = "What is Llama-3? Can you make a joke about it?"
    response = ollama_chat_call(model, prompt)
    print(response)
    response = ollama_generate_call(model, prompt)
    print(response)
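Both functions above block until the whole completion is ready (stream=False); the generated text then sits under response['message']['content'] for chat and response['response'] for generate. For long generations you may prefer to stream tokens as they arrive. A minimal sketch of a streaming variant, assuming the ollama Python client's stream=True generator interface, where each chat chunk exposes a partial message:

def ollama_chat_stream(model, prompt, ip='0.0.0.0', port=11434):
    # Sketch of a streaming variant (not part of the original gist):
    # with stream=True the client yields incremental chunks instead of
    # one final response object.
    client = Client(host=f'http://{ip}:{port}')
    for chunk in client.chat(model=model,
                             messages=[{'role': 'user', 'content': prompt}],
                             options={"seed": 5, "temperature": 0.8},
                             stream=True):
        # Print each partial message fragment as it arrives.
        print(chunk['message']['content'], end='', flush=True)
    print()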