Skip to content

Instantly share code, notes, and snippets.

@TheodoreGalanos
Last active April 12, 2023 23:05
Show Gist options
  • Save TheodoreGalanos/0b84640430cb6883ee57515efb7e0c85 to your computer and use it in GitHub Desktop.
"""
the prompts.json file should contain (adjust for your prompt and use case):
{
"prompts": {
{
"example": {
"input_variables = ["variables", "here"],
"template = ["Prompt details and {variables} {here} according to your use case.\nRationale:"]
}
},
"rci":
{
"review": {
"input_variables =["your", "inputs", "here", "rationale"],
"template = ["Prompt details and {variables} {here} according to your use case.\nRationale: {rationale}.\nReview your previous answer and find problems with the rationale.\nReview:"],
},
"improve" : {
"input_variables": ["same", "as", "before", "rationale", "review"],
"template = ["Prompt details and {variables} {here} according to your use case.\nRationale: {rationale}.\nReview your previous answer and find problems with the rationale.\nReview: {review}.\nBased on the problems you identified in your review, improve your rationale.\nImproved rationale:"]
}
}
}
"""
import json

from langchain import LLMChain, PromptTemplate
from langchain import load_prompt
from langchain.llms import OpenAI
def load_template(json_file: str, prompt_category: str, prompt_name: str):
    """Load one prompt definition from a JSON prompt file.

    The file is expected to map a category key (e.g. ``'prompts'`` or
    ``'rci'``) to a mapping of prompt names, each holding
    ``'input_variables'`` and ``'template'`` entries (see the module
    docstring for the expected layout).

    Parameters:
        json_file: path to the JSON file holding the prompt definitions.
        prompt_category: top-level key to look under (e.g. ``'rci'``).
        prompt_name: name of the prompt within that category.

    Returns:
        A ``(input_variables, template)`` tuple, where ``template`` is
        always a plain string suitable for ``PromptTemplate``.

    Raises:
        KeyError: if the category or prompt name is missing.
        json.JSONDecodeError: if the file is not valid JSON.
        OSError: if the file cannot be opened.
    """
    with open(json_file, 'r', encoding='utf8') as in_file:
        content = json.load(in_file)
    prompt = content[prompt_category][prompt_name]
    template = prompt['template']
    # Some prompt files wrap the template string in a list (as the example
    # in the module docstring did); PromptTemplate needs a single string,
    # so join list entries into one.
    if isinstance(template, list):
        template = '\n'.join(template)
    return prompt['input_variables'], template
# Instantiate a model.
# NOTE(review): `...` is a placeholder — replace with real OpenAI kwargs
# (model name, temperature, API key, ...) before running.
llm = OpenAI(...)

# First pass: render the 'example' prompt and ask the model for an answer
# plus its rationale.
prompt_variables, prompt_template = load_template('prompts.json', 'prompts', 'example')
input_chain = LLMChain(
    prompt=PromptTemplate(
        input_variables=prompt_variables,
        template=prompt_template,
    ),
    llm=llm,
)
response = input_chain.run({
    'variables': "some text",
    'here': "some text",
})
# Second pass (RCI "review" step): feed the first answer back in and ask
# the model to critique its own rationale.
review_variables, review_template = load_template('prompts.json', 'rci', 'review')
review_chain = LLMChain(
    prompt=PromptTemplate(
        input_variables=review_variables,
        template=review_template,
    ),
    llm=llm,
)
review = review_chain.run({
    'variables': "some text",
    'here': "some text",
    'rationale': response,
})
# Third pass (RCI "improve" step): give the model both its original
# rationale and its own review, and ask for an improved answer.
improve_variables, improve_template = load_template('prompts.json', 'rci', 'improve')
improve_chain = LLMChain(
    prompt=PromptTemplate(
        input_variables=improve_variables,
        template=improve_template,
    ),
    llm=llm,
)
improved_response = improve_chain.run({
    'variables': "some text",
    'here': "some text",
    'rationale': response,
    'review': review,
})
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment