@thoraxe
Created October 10, 2023 17:59
Traceback (most recent call last):
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/uvicorn/protocols/http/h11_impl.py", line 408, in run_asgi
    result = await app( # type: ignore[func-returns-value]
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/uvicorn/middleware/proxy_headers.py", line 84, in __call__
    return await self.app(scope, receive, send)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/fastapi/applications.py", line 292, in __call__
    await super().__call__(scope, receive, send)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/applications.py", line 122, in __call__
    await self.middleware_stack(scope, receive, send)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/middleware/errors.py", line 184, in __call__
    raise exc
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/middleware/errors.py", line 162, in __call__
    await self.app(scope, receive, _send)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/middleware/exceptions.py", line 79, in __call__
    raise exc
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/middleware/exceptions.py", line 68, in __call__
    await self.app(scope, receive, sender)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/fastapi/middleware/asyncexitstack.py", line 20, in __call__
    raise e
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/fastapi/middleware/asyncexitstack.py", line 17, in __call__
    await self.app(scope, receive, send)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/routing.py", line 718, in __call__
    await route.handle(scope, receive, send)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/routing.py", line 276, in handle
    await self.app(scope, receive, send)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/routing.py", line 66, in app
    response = await func(request)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/fastapi/routing.py", line 273, in app
    raw_response = await run_endpoint_function(
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/fastapi/routing.py", line 192, in run_endpoint_function
    return await run_in_threadpool(dependant.call, **values)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/starlette/concurrency.py", line 41, in run_in_threadpool
    return await anyio.to_thread.run_sync(func, *args)
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/anyio/to_thread.py", line 33, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
    return await future
  File "/home/thoraxe/.pyenv/versions/3.9.16/envs/fastapi-ols-39/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 807, in run
    result = context.run(func, *args)
  File "/home/thoraxe/Red_Hat/openshift/llamaindex-experiments/fastapi-lightspeed-service/ols.py", line 65, in ols_request
    processed = task_processor.process_tasks(conversation, rag_model, task_list, llm_request.query)
TypeError: process_tasks() takes 4 positional arguments but 5 were given
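
The TypeError on the last line is the whole story: process_tasks is defined with four parameters (conversation, model, tasklist, original_query) but is called on a TaskProcessor instance, so Python passes the instance as an implicit first positional argument and five arguments arrive. A minimal repro of the same failure, with illustrative names that are not from the service:

class Greeter:
    # `self` is missing, so the method declares only one parameter...
    def greet(name):
        return "hello " + name

g = Greeter()
# ...but calling it on an instance passes `g` implicitly, so two positional
# arguments arrive and Python 3.9 raises:
# TypeError: greet() takes 1 positional argument but 2 were given
g.greet("world")
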
# Call site in ols.py (line 65 in the traceback above):
task_processor = TaskProcessor()
processed = task_processor.process_tasks(conversation, rag_model, task_list, llm_request.query)
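
A minimal sketch of the likely fix, assuming process_tasks is meant to be an instance method: add self as the first parameter and leave the call site above unchanged. (Marking the method as a @staticmethod instead would also reconcile the signature, if it never needs instance state.)

class TaskProcessor:
    def process_tasks(self, conversation, model, tasklist, original_query):
        ...  # body unchanged from the module below

The full module follows.
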
# task processor module shared in the gist
import logging
import sys
from string import Template

from model_context import get_watsonx_predictor


class TaskProcessor:
    # NOTE: `self` is missing from this signature; the caller invokes it as an
    # instance method, which is what raises the TypeError in the traceback above.
    def process_tasks(conversation, model, tasklist, original_query):
        prompt_instructions = Template(
            """Instructions:
- You are a helpful assistant.
- You are an expert in Kubernetes and OpenShift.
- Respond to questions about topics other than Kubernetes and OpenShift with: "I can only answer questions about Kubernetes and OpenShift"
- Refuse to participate in anything that could harm a human.
- Your job is to look at the following description and provide a response.
- Base your answer on the provided task and query and not on prior knowledge.
TASK:
${task}
QUERY:
${query}
Question:
Does the above query contain enough information about the task? Provide a yes or no answer with explanation.
Response:
"""
        )

        logging.info(conversation + " Beginning task processing")

        # iterate over the tasks and figure out if we should abort and request more information
        # build a list of tasks we can act on later
        to_do_stuff = list()

        for task in tasklist:
            logging.info(conversation + " task: " + task)

            # determine if we have enough information to answer the task
            task_query = prompt_instructions.substitute(task=task, query=original_query)
            logging.info(conversation + " task query: " + task_query)
            logging.info(conversation + " using model: " + model)

            bare_llm = get_watsonx_predictor(model=model, min_new_tokens=5)
            response = str(bare_llm(task_query))
            logging.info(conversation + " task response: " + response)

            # strip <|endoftext|> from the response
            clean_response = response.split("<|endoftext|>")[0]

            # check if the response was a yes or no answer
            # TODO: need to handle when this fails to return an integer
            response_status = int(determine_yes_no(conversation, model, clean_response))
            logging.info(conversation + " response status: " + str(response_status))

            if response_status == 0:
                logging.info(
                    conversation
                    + " Aborting task processing for no response - need details"
                )
                resolution_request = str(
                    "In trying to answer your question, we were unable to determine how to proceed."
                    " The step we failed on was the following:\n "
                    + task
                    + " The failure message was:\n "
                    + clean_response
                    + " Please try rephrasing your request to include information that might help complete the task."
                )
                logging.info(
                    conversation + " resolution request: " + resolution_request
                )
                return [response_status, resolution_request]
            elif response_status == 1:
                # we have enough information for the task, so put the task into a list for later
                to_do_stuff.append(task)
                logging.info(conversation + " Continuing task processing")
            else:
                logging.info(conversation + " Unknown response status")
                return [response_status, "Unknown error occurred"]

        return [1, to_do_stuff]


# helper used by process_tasks above (called without an instance)
def determine_yes_no(conversation, model, string):
    prompt_instructions = Template(
        """Instructions:
- determine if a statement is a yes or a no
- return a 1 if the statement is a yes statement
- return a 0 if the statement is a no statement
- return a 9 if you cannot determine if the statement is a yes or no
Examples:
Statement: Yes, that sounds good.
Response: 1
Statement: No, I don't think that is wise.
Response: 0
Statement: Apples are red.
Response: 9
Statement: ${statement}
Response:
"""
    )

    logging.info(conversation + " using model: " + model)
    logging.info(conversation + " determining yes/no: " + string)

    query = prompt_instructions.substitute(statement=string)
    logging.info(conversation + " yes/no query: " + query)

    bare_llm = get_watsonx_predictor(model=model)
    response = str(bare_llm(query))
    clean_response = response.split("<|endoftext|>")[0]
    logging.info(conversation + " yes/no response: " + clean_response)

    return clean_response
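
One way to handle the TODO above about int() failing: determine_yes_no returns raw model text, so int() raises ValueError whenever the model answers with anything other than a bare digit. A small defensive-parsing sketch; the helper name and the fallback value of 9 (the prompt's "cannot determine" code) are assumptions, not part of the gist:

import re

def parse_yes_no(raw_response: str) -> int:
    """Return the first 0/1/9 found in the model output, or 9 if none is present."""
    match = re.search(r"[019]", raw_response)
    return int(match.group()) if match else 9

# in process_tasks, instead of int(determine_yes_no(...)):
# response_status = parse_yes_no(determine_yes_no(conversation, model, clean_response))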