Ollama example with params
from ollama import Client


def ollama_chat_call(model, prompt, ip='0.0.0.0', port=11434):
    # Chat-style call: the prompt is wrapped in a single user message.
    client = Client(host=f'http://{ip}:{port}')
    response = client.chat(model=model,
                           messages=[{'role': 'user', 'content': prompt}],
                           # sampling options: fixed seed for reproducibility,
                           # num_predict caps the response length in tokens
                           options={"seed": 5, "temperature": 0.8, "num_predict": 1000,
                                    "repeat_penalty": 1.2, "top_p": 0.9, "top_k": 40},
                           stream=False)
    return response


def ollama_generate_call(model, prompt, ip='0.0.0.0', port=11434):
    # Completion-style call: the raw prompt is passed directly.
    client = Client(host=f'http://{ip}:{port}')
    response = client.generate(model=model,
                               prompt=prompt,
                               options={"seed": 5, "temperature": 0.8, "num_predict": 1000,
                                        "repeat_penalty": 1.2, "top_p": 0.9, "top_k": 40},
                               stream=False)
    return response


if __name__ == '__main__':
    model = "llama3:instruct"
    prompt = "What is Llama-3? Can you make a joke about it?"

    response = ollama_chat_call(model, prompt)
    print(response)  # generated text is under response['message']['content']

    response = ollama_generate_call(model, prompt)
    print(response)  # generated text is under response['response']
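
For long replies it can be nicer to print tokens as they arrive instead of waiting for the full response. A minimal sketch of the same chat call with stream=True (ollama_chat_stream is a hypothetical helper name, reusing the server, model, and options from above); each chunk carries the next fragment of the reply:

def ollama_chat_stream(model, prompt, ip='0.0.0.0', port=11434):
    client = Client(host=f'http://{ip}:{port}')
    # with stream=True, client.chat returns an iterator of partial responses
    stream = client.chat(model=model,
                         messages=[{'role': 'user', 'content': prompt}],
                         options={"seed": 5, "temperature": 0.8},
                         stream=True)
    for chunk in stream:
        # each chunk holds an incremental piece of the generated text
        print(chunk['message']['content'], end='', flush=True)
    print()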