Skip to content

Instantly share code, notes, and snippets.

@DurandA
Created December 18, 2023 22:45
Show Gist options
  • Save DurandA/48b0b6de1e80aafb5cab9435e3f652a8 to your computer and use it in GitHub Desktop.
Save DurandA/48b0b6de1e80aafb5cab9435e3f652a8 to your computer and use it in GitHub Desktop.
LangChain Ollama demo
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from pydantic import BaseModel, Field, field_validator
# Instantiate the local Ollama-served model used for the rest of the script.
llm = Ollama(
    model="dolphin-mixtral",
    # Optional: stream tokens to stdout as they are generated.
    # callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)

# Quick smoke test, left disabled:
# llm("Tell me about the history of AI")

# Alias kept so the invocation code below reads generically.
model = llm
# Define your desired data structure.
class Joke(BaseModel):
    """Structured output schema for a joke.

    Attributes:
        setup: The question that sets up the joke; must end with "?".
        punchline: The answer that resolves the joke.
    """

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

    # You can add custom validation logic easily with Pydantic.
    @field_validator("setup")
    def question_ends_with_question_mark(cls, v: str) -> str:
        """Reject any setup that does not end with a question mark.

        Raises:
            ValueError: if ``v`` is empty or lacks a trailing "?".
        """
        # str.endswith handles the empty string safely, unlike the
        # original v[-1] check which raised IndexError on "".
        if not v.endswith("?"):
            raise ValueError("Badly formed question!")
        return v
# A query intended to prompt the language model to populate the data structure.
joke_query = "Tell me a joke."

# Build a parser for the Joke schema and splice its format instructions
# into the prompt as a partial variable.
parser = PydanticOutputParser(pydantic_object=Joke)

prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

# Render the prompt, run the model, then parse the raw text back into a Joke.
rendered = prompt.format_prompt(query=joke_query)
raw_output = model(rendered.to_string())
joke: Joke = parser.parse(raw_output)
print(f'{joke=} ({type(joke)})')
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment