Skip to content

Instantly share code, notes, and snippets.

@thoraxe
Created October 13, 2023 16:17
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save thoraxe/97e223cdaa3f3b9a0166485331de1d59 to your computer and use it in GitHub Desktop.
.error_wrappers.ValidationError: 1 validation error for ConversationChain
__root__
Got unexpected prompt input variables. The prompt expects ['input', 'original_query'], but got ['history'] as inputs from memory, and input as the normal input key. (type=value_error)
⌁63% θ63° [thoraxe:~/Red_Hat/ … /fastapi-lightspeed-service] [fastapi-ols-39] main(+20/-11)* 1 ±
⌁63% θ64° [thoraxe:~/Red_Hat/ … /fastapi-lightspeed-service] [fastapi-ols-39] main(+20/-11)* 1 ±
⌁63% θ64° [thoraxe:~/Red_Hat/ … /fastapi-lightspeed-service] [fastapi-ols-39] main(+20/-11)* 1 ±
⌁63% θ64° [thoraxe:~/Red_Hat/ … /fastapi-lightspeed-service] [fastapi-ols-39] main(+20/-11)* 1 ± python routertest.py
> Entering new MultiPromptChain chain...
> Entering new LLMRouterChain chain...
/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/llm.py:280: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.
warnings.warn(
> Entering new LLMChain chain...
Prompt after formatting:
Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{
"destination": string \ name of the prompt to use or "DEFAULT"
"next_inputs": string \ a potentially modified version of the original input
}
```
REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any modifications are needed.
<< CANDIDATE PROMPTS >>
yaml-generator: Used for queries related to generating YAML files and objects for kubernetes and openshift
general-question: Good for general questions and answers
<< INPUT >>
1. Create a ClusterAutoscaler YAML that specifies the maximum size of the cluster.
<< OUTPUT (must include ```json at the start of the response) >>
<< OUTPUT (must end with ```) >>
> Finished chain.
> Finished chain.
yaml-generator: {'input': 'Create a ClusterAutoscaler YAML with a maximum size specified'}Traceback (most recent call last):
File "/home/thoraxe/Red_Hat/openshift/llamaindex-experiments/fastapi-lightspeed-service/routertest.py", line 130, in <module>
chain.run(
File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/base.py", line 506, in run
return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/base.py", line 306, in __call__
raise e
File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/base.py", line 300, in __call__
self._call(inputs, run_manager=run_manager)
File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/router/base.py", line 99, in _call
return self.destination_chains[route.destination](
File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/base.py", line 282, in __call__
inputs = self.prep_inputs(inputs)
File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/base.py", line 439, in prep_inputs
self._validate_inputs(inputs)
File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/langchain/chains/base.py", line 191, in _validate_inputs
raise ValueError(f"Missing some input keys: {missing_keys}")
ValueError: Missing some input keys: {'original_query'}
import logging, sys, os
# logging.basicConfig(stream=sys.stdout, format='%(asctime)s [%(name)s] %(levelname)s: %(message)s', level=logging.INFO)
from langchain.chains.router import MultiPromptChain
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
import os
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from llama_index.llms import LangChainLLM
from llama_index import ServiceContext
from genai.extensions.langchain import LangChainInterface
from genai.schemas import GenerateParams
from genai.credentials import Credentials
# BAM service credentials are read from the environment; both resolve to
# None when the variables are unset.
bam_key = os.environ.get("BAM_API_KEY")
bam_endpoint = os.environ.get("BAM_URL")
creds = Credentials(bam_key, api_endpoint=bam_endpoint)

# Generation parameters: greedy decoding with a bounded output length.
params = {
    GenParams.DECODING_METHOD: "greedy",
    GenParams.MIN_NEW_TOKENS: 1,
    GenParams.MAX_NEW_TOKENS: 256,
}

# The user's original question, and the routed sub-task derived from it.
original_query = "How do I configure my cluster for autoscaling up to ten nodes?"
task = "1. Create a ClusterAutoscaler YAML that specifies the maximum size of the cluster."
# Destination prompt for the "yaml-generator" route.  Declares two template
# variables: {original_query} (the user's original question, used as
# background context) and {input} (the routed, possibly rewritten sub-task).
yaml_generator_template = """
Background context question from a user:
{original_query}
Using the background context, please complete the following Kubernetes and OpenShift YAML generation task:
{input}
"""
# Destination prompt for the "general-question" route.  Same two template
# variables as above; constrains the assistant to Kubernetes/OpenShift topics.
general_question_template = """
Instructions:
- You are a helpful assistant.
- You are an expert in Kubernetes and OpenShift.
- Respond to questions about topics other than Kubernetes and OpenShift with: "I can only answer questions about Kubernetes and OpenShift"
- Refuse to participate in anything that could harm a human.
- Your job is to look at the following description and provide a response.
- Base your answer on the provided task and query and not on prior knowledge.
Background Context:
{original_query}
Question:
{input}
Response:
"""
# Router metadata: each entry's name/description pair is rendered into the
# router prompt (see MULTI_PROMPT_ROUTER_TEMPLATE below) so the routing LLM
# can pick a destination; prompt_template is the template for that route.
prompt_infos = [
    {
        "name": "yaml-generator",
        "description": "Used for queries related to generating YAML files and objects for kubernetes and openshift",
        "prompt_template": yaml_generator_template,
    },
    {
        "name": "general-question",
        "description": "Good for general questions and answers",
        "prompt_template": general_question_template,
    },
]
# LLM used by the router chain to pick a destination.
openai_predictor = OpenAI()
# Destination LLMs served through the BAM/genai LangChain interface.
# NOTE(review): "xxx" / "yyy" look like redacted placeholder model ids —
# substitute real model names before running.
granite_instruct_predictor = LangChainInterface(
    model="xxx", params=params, credentials=creds, verbose=True
)
star_coder_predictor = LangChainInterface(
    model="yyy", params=params, credentials=creds, verbose=True
)
# Build the destination chains the router can dispatch to.
#
# Bug fix: MultiPromptChain forwards only the router's "next_inputs" (keyed
# as "input") to the chosen destination chain, so a destination prompt that
# also declares "original_query" as an input variable fails with
#   ValueError: Missing some input keys: {'original_query'}
# (see the traceback pasted above).  Pre-binding original_query via
# partial_variables leaves "input" as the prompt's only required key.
destination_chains = {}

# Which LLM serves each destination route.
_destination_llms = {
    "yaml-generator": star_coder_predictor,
    "general-question": granite_instruct_predictor,
}

for info in prompt_infos:
    prompt = PromptTemplate(
        template=info["prompt_template"],
        input_variables=["input"],
        # Bind the background context now so the router only needs "input".
        partial_variables={"original_query": original_query},
    )
    destination_chains[info["name"]] = LLMChain(
        llm=_destination_llms[info["name"]], prompt=prompt, verbose=True
    )

# Fallback chain used when the router answers "DEFAULT".
default_chain = ConversationChain(
    llm=granite_instruct_predictor, output_key="text", verbose=True
)
# Render one "name: description" line per destination into the stock
# multi-prompt router template, then assemble the routing chain.
routable_destinations = "\n".join(
    f"{info['name']}: {info['description']}" for info in prompt_infos
)
router_prompt = PromptTemplate(
    template=MULTI_PROMPT_ROUTER_TEMPLATE.format(
        destinations=routable_destinations
    ),
    input_variables=["input"],
    output_parser=RouterOutputParser(),
)
# Top-level chain: the router LLM picks a destination, which then answers.
chain = MultiPromptChain(
    router_chain=LLMRouterChain(
        llm_chain=LLMChain(llm=openai_predictor, prompt=router_prompt, verbose=True),
        verbose=True,
    ),
    destination_chains=destination_chains,
    default_chain=default_chain,
    verbose=True,
)
# Kick off the routed chain with the sub-task; the router selects a
# destination prompt and the matching LLM produces the answer.
response = chain.run(
    input="1. Create a ClusterAutoscaler YAML that specifies the maximum size of the cluster.",
    original_query=original_query,
)
print(response)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment