# AutoGen + Ollama Instructions
1. # create a new .py file with the code found below
2. # install ollama
3. # install the model you want, e.g. "ollama run mistral"
4. conda create -n autogen python=3.11
5. conda activate autogen
6. which python
7. python -m pip install pyautogen
8. ollama run mistral
9. ollama run codellama
10. # open a new terminal
11. conda activate autogen
12. python -m pip install litellm
13. litellm --model ollama/mistral
14. # open a new terminal
15. conda activate autogen
16. litellm --model ollama/codellama  # second proxy; see the note on ports after this list
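
Note on ports: each `litellm` instance prints the local endpoint it binds when it starts (for example `http://0.0.0.0:8000`). The two proxies must listen on different ports, and the `base_url` values in the code below must match what your instances actually report. If they collide, recent litellm releases let you pin the port explicitly, e.g. `litellm --model ollama/codellama --port 25257` (the port number here is just an illustration taken from the code below).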
### Code used:

```python
import autogen

# One config list per model; each points at one of the litellm proxies started
# above. The ports must match what the two litellm instances reported.
config_list_mistral = [
    {
        'base_url': "http://0.0.0.0:8000",
        'api_key': "NULL",
    }
]
config_list_codellama = [
    {
        'base_url': "http://0.0.0.0:25257",
        'api_key': "NULL",
    }
]

llm_config_mistral = {
    "config_list": config_list_mistral,
}
llm_config_codellama = {
    "config_list": config_list_codellama,
}

# The coder agent generates code with codellama.
coder = autogen.AssistantAgent(
    name="Coder",
    llm_config=llm_config_codellama,
)

# The user proxy executes the generated code (in ./web) and, using mistral,
# decides whether the task is done.
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web"},
    llm_config=llm_config_mistral,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""",
)

task = """
Write a python script to output numbers 1 to 100 and then the user_proxy agent should run the script
"""

user_proxy.initiate_chat(coder, message=task)
```
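
Before starting the agents, it can help to confirm that each litellm proxy actually answers requests. Below is a minimal smoke test, not part of the original gist; it assumes the mistral proxy is on port 8000 (adjust to whatever your instance reported) and that the `requests` package is installed.

```python
# Smoke test for the litellm proxy in front of Ollama (assumption: port 8000).
# litellm exposes an OpenAI-style /chat/completions endpoint.
import requests

resp = requests.post(
    "http://0.0.0.0:8000/chat/completions",
    json={
        "model": "ollama/mistral",  # the model this proxy was started with
        "messages": [{"role": "user", "content": "Reply with one word: pong"}],
    },
    timeout=60,
)
print(resp.json()["choices"][0]["message"]["content"])
```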
```python
import autogen

# Direct access to Ollama since 0.1.24, compatible with the OpenAI /chat/completions API
BASE_URL = "http://localhost:11434/v1"

config_list_mistral = [
    {
        'base_url': BASE_URL,
        'api_key': "fakekey",
        'model': "mistral:latest",
    }
]
config_list_codellama = [
    {
        'base_url': BASE_URL,
        'api_key': "fakekey",
        'model': "codellama:7b-code-q4_K_M",
    }
]

llm_config_mistral = {
    "config_list": config_list_mistral,
}
llm_config_codellama = {
    "config_list": config_list_codellama,
}

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web", "use_docker": False},
    llm_config=llm_config_mistral,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""",
)

coder = autogen.AssistantAgent(
    name="Coder",
    llm_config=llm_config_codellama,
)

task = """
Write a python script that lists the numbers from 1 to 100
"""

user_proxy.initiate_chat(coder, message=task)
```
I found this code online that seems to be correct and directly addresses my needs. Thank you so much to @leolivier for sharing it.
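
The snippet above skips litellm entirely: since version 0.1.24, Ollama exposes an OpenAI-compatible `/v1` endpoint itself, so both models share one `base_url` and are selected per agent via the `model` field. As a quick sanity check outside AutoGen, the endpoint can be queried with the `openai` client directly; this is a sketch assuming `openai>=1.0` is installed and `mistral:latest` has been pulled.

```python
# Sketch: query Ollama's OpenAI-compatible endpoint without AutoGen or litellm.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="fakekey")  # Ollama ignores the key
reply = client.chat.completions.create(
    model="mistral:latest",
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(reply.choices[0].message.content)
```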
```python
import autogen

config_list = [
    {
        'base_url': "http://0.0.0.0:4000",
        'api_key': "NULL",
    }
]

llm_config = {
    'config_list': config_list,
}

assistant = autogen.AssistantAgent(
    name="Assistant",
    llm_config=llm_config,
)

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="TERMINATE",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web", "use_docker": False},
    llm_config=llm_config,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet.""",
)

task = """
Tell me a joke
"""

user_proxy.initiate_chat(assistant, message=task)
```
This is my single-assistant-plus-proxy version of the code, and I get the error below; could anyone help?
TypeError: Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given
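
This TypeError comes from the openai v1 client when no model name reaches it, which matches the config above: unlike the direct-Ollama snippet earlier in the thread, the `config_list` entry has no `model` key, and recent pyautogen versions require one. A sketch of the likely fix (untested; the model string is an assumption and should match whatever your litellm instance serves):

```python
# Hypothetical fix: add a "model" entry so a model name is passed through.
config_list = [
    {
        'base_url': "http://0.0.0.0:4000",
        'api_key': "NULL",
        'model': "ollama/mistral",  # assumption: litellm was started with this model
    }
]
```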