-
-
Save mberman84/584b470962c15930340ff49ae4e28a02 to your computer and use it in GitHub Desktop.
# AutoGen two-agent demo: a UserProxyAgent drives an AssistantAgent ("CTO")
# through two sequential coding tasks, executing the generated code in ./web.
import autogen

# LLM endpoint configuration; replace 'API_KEY' with a real OpenAI key.
config_list = [
    {
        'model': 'gpt-4',
        'api_key': 'API_KEY'
    }
]

llm_config = {
    # NOTE: "request_timeout" was renamed to "timeout" in newer pyautogen
    # releases (the original key raises an error there).
    "timeout": 600,
    "seed": 42,                 # cache seed for reproducible completions
    "config_list": config_list,
    "temperature": 0            # deterministic sampling
}

# Assistant agent that authors the code.
assistant = autogen.AssistantAgent(
    name="CTO",
    llm_config=llm_config,
    system_message="Chief technical officer of a tech company"
)

# Proxy agent: never asks a human, auto-replies up to 10 times, executes
# code in ./web, and stops when a message ends with "TERMINATE".
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web"},
    llm_config=llm_config,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet."""
)

task = """
Write python code to output numbers 1 to 100, and then store the code in a file
"""
user_proxy.initiate_chat(
    assistant,
    message=task
)

# The second task continues the same conversation, so the assistant still
# knows which file it created in the first task.
task2 = """
Change the code in the file you just created to instead output numbers 1 to 200
"""
user_proxy.initiate_chat(
    assistant,
    message=task2
)
- autogen.AssistantAgent error fix:
pip uninstall autogen pyautogen
pip install pyautogen - request_timeout issue:
"timeout": 600 - Infinite loop issue:
in the user_proxy set the max_consecutive_auto_reply=0 - Stopping a session
ctrl + c
For me it seems that whenever there is a ''' in the LLM response, autogen stops with "Process finished with exit code 0"
Example LLM Output:
`CTO (to user_proxy):
def get_numbers():
return list(range(1, 201))
with open("generated.py", "w") as f:
f.write('''print(*get_numbers())''')
Process finished with exit code 0`
Here is the complete code. I am using a Text Generation Web UI local model (Mistral 7B Instruct Q8 GGUF), not RunPod. Be sure to enable the openai and gallery extensions, plus the api and listen options, under the Sessions tab in Text Generation Web UI.
# Local-model AutoGen demo: the same two-task flow, pointed at a
# text-generation-web-ui OpenAI-compatible endpoint instead of the OpenAI API.
import autogen

config_list = [{"base_url": "http://localhost:5000/v1", "api_key": "NULL"}]

llm_config = {"timeout": 600, "seed": 42, "config_list": config_list}

# Flag kept from the original script; nothing below reads it — presumably a
# leftover toggle for a MemGPT variant of this demo (TODO: confirm/remove).
USE_MEMGPT = True

# Assistant agent that authors the code.
assistant = autogen.AssistantAgent(
    name="CTO",
    system_message="Chief technical officer of a tech company",
    llm_config=llm_config,
)

# Proxy agent: never asks a human, never auto-replies (0), executes generated
# code in ./web, and treats a trailing "TERMINATE" as the stop signal.
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=0,
    is_termination_msg=lambda msg: msg.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web"},
    llm_config=llm_config,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""",
)

task = """
Write python code to output numbers 1 to 100, and then store the code in a file
"""
task2 = """
Change the code in the file you just created to instead output numbers 1 to 200
"""

user_proxy.initiate_chat(assistant, message=task)
user_proxy.initiate_chat(assistant, message=task2)
I'm getting stuck on the following error:
raise self._make_status_error_from_response(err.response) from None
openai.RateLimitError: Error code: 429 - {'error': {'message': 'Rate limit reached for gpt-4 in organization org-ehVxQx3I1TFqEcMydXbUkFUz on tokens per min (TPM): Limit 10000, Used 9343, Requested 1103. Please try again in 2.676s. Visit https://platform.openai.com/account/rate-limits to learn more.', 'type': 'tokens', 'param': None, 'code': 'rate_limit_exceeded'}}
What can I do about the rate-limiting error?
Found the solution here
Change...
to...