@mberman84
Created November 29, 2023 15:31
AutoGen + Ollama Instructions
1. # create a new .py file with the code found below
2. # install ollama
3. # install the model you want, e.g. "ollama run mistral"
4. conda create -n autogen python=3.11
5. conda activate autogen
6. which python
7. python -m pip install pyautogen
8. ollama run mistral
9. ollama run codellama
10. # open a new terminal
11. conda activate autogen
12. python -m pip install litellm
13. litellm --model ollama/mistral
14. # open a new terminal
15. conda activate autogen
16. litellm --model ollama/codellama
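
Once both litellm proxies are running, you can sanity-check one of them before wiring up AutoGen. This is a minimal sketch, assuming the mistral proxy ended up on port 8000 (litellm prints the actual port when it starts), that you have the openai Python package installed, and that your litellm version accepts (or ignores) the model name shown here:

# Quick sanity check of a litellm proxy before pointing AutoGen at it.
# Assumes the proxy is listening on http://0.0.0.0:8000 (check litellm's startup output).
from openai import OpenAI

client = OpenAI(base_url="http://0.0.0.0:8000", api_key="NULL")
resp = client.chat.completions.create(
    model="ollama/mistral",  # litellm routes requests on this port to the local Ollama model
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(resp.choices[0].message.content)

If this prints a reply, the proxy is reachable and the AutoGen config below should work against the same URL.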
### Code used:
import autogen

# One litellm proxy per model: the base_url decides which model answers,
# so the local endpoints need no real api_key.
config_list_mistral = [
    {
        'base_url': "http://0.0.0.0:8000",
        'api_key': "NULL"
    }
]

config_list_codellama = [
    {
        'base_url': "http://0.0.0.0:25257",
        'api_key': "NULL"
    }
]

llm_config_mistral = {
    "config_list": config_list_mistral,
}

llm_config_codellama = {
    "config_list": config_list_codellama,
}

# The coder agent talks to the codellama proxy.
coder = autogen.AssistantAgent(
    name="Coder",
    llm_config=llm_config_codellama
)

# The user proxy executes code in ./web and stops when it sees TERMINATE.
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web"},
    llm_config=llm_config_mistral,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet."""
)

task = """
Write a python script to output numbers 1 to 100 and then the user_proxy agent should run the script
"""

user_proxy.initiate_chat(coder, message=task)
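
The is_termination_msg lambda above is what lets user_proxy stop the loop once a reply ends with TERMINATE. A standalone check of that predicate, with made-up message dicts purely for illustration:

# Standalone check of the termination predicate used by user_proxy above.
is_termination = lambda x: x.get("content", "").rstrip().endswith("TERMINATE")

print(is_termination({"content": "Task solved. TERMINATE"}))        # True  -> chat ends
print(is_termination({"content": "CONTINUE: script not run yet"}))  # False -> chat continues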
@leolivier

@Tedfulk are you sure you're using the same version of Ollama on your Mac and your Ubuntu? I'm using it on WSL Ubuntu, so not real Ubuntu, but I don't think that would change this behavior...

@ZacharyJSmit

ZacharyJSmit commented Feb 21, 2024

Here's the groupchat version of the script

import autogen
from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json, GroupChat, GroupChatManager
import chromadb
import os
import json
from autogen.retrieve_utils import TEXT_FORMATS

######################################################################
config_list_mistral = [
    {
        "base_url": "http://localhost:35666/v1",
        "api_key": "sk-111111111111",        
        "model": "TheBloke/Llama-2-7B-32K-Instruct-GGUF"
    }
]

config_list_codellama = [
    {
        "base_url": "http://localhost:8000/v1",
        "api_key": "sk-111111111111",        
        "model": "TheBloke/Llama-2-7B-32K-Instruct-GGUF"
    }
]
######################################################################
llm_config_mistral={
    "config_list": config_list_mistral,
}

llm_config_codellama={
    "config_list": config_list_codellama,
}
######################################################################
assistant = autogen.AssistantAgent(
    name="Assistant",
    llm_config=llm_config_mistral,
    # code_execution=False # Disable code execution entirely
    code_execution_config={"work_dir":"coding", "use_docker":False}
)

coder = autogen.AssistantAgent(
    name="Coder",
    llm_config=llm_config_codellama,
    # code_execution=False # Disable code execution entirely
    code_execution_config={"work_dir":"coding", "use_docker":False}
)

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    #human_input_mode="TERMINATE",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "coding", "use_docker":False},
    llm_config=llm_config_mistral,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet."""
)

task="""
Write a python script to output numbers 1 to 100 and then the user_proxy agent should run the script
"""

#task="""
#Write a script to output numbers 1 to X where X is a random number generated by the user_proxy agent
#"""

#user_proxy.initiate_chat(coder, message=task)  # Simple chat with coder

groupchat = autogen.GroupChat(agents=[user_proxy, coder, assistant], messages=[], max_round=12)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config_mistral)
user_proxy.initiate_chat(manager, message=task)
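
By default the GroupChatManager asks the LLM to pick the next speaker each round, which small local models sometimes handle poorly. Newer pyautogen releases accept a speaker_selection_method argument on GroupChat; a sketch of deterministic turn-taking, assuming your installed version supports it:

# Round-robin turn order instead of LLM-driven speaker selection
# (assumes your pyautogen version supports speaker_selection_method).
groupchat = autogen.GroupChat(
    agents=[user_proxy, coder, assistant],
    messages=[],
    max_round=12,
    speaker_selection_method="round_robin",
)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config_mistral)
user_proxy.initiate_chat(manager, message=task)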

@qwickset

self.send(self.generate_init_message(**context), recipient, silent=silent)
File "/root/miniconda3/lib/python3.11/site-packages/autogen/agentchat/conversable_agent.py", line 354, in send
recipient.receive(message, self, request_reply, silent)
File "/root/miniconda3/lib/python3.11/site-packages/autogen/agentchat/conversable_agent.py", line 489, in receive
self.send(reply, sender, silent=silent)

@N0kay You're supposed to put a model parameter in the config list, followed with the name.

Example:

config_list_dolphinmixtral = [
    {
        'base_url': "http://0.0.0.0:7577",
        'api_key': "NULL",
        'model': "ollama/dolphin-mixtral"
    }
]

Since the URL actually determines which model answers, this value can even be "NULL", just like the api_key: the client expects it to be present, but it isn't used.

@JaafarElou

import autogen

config_list = [
    {
        'base_url': "http://0.0.0.0:4000",
        'api_key': "NULL"
    }
]

llm_config = {
    'config_list': config_list,
}

assistant = autogen.AssistantAgent(
    name="Assistant",
    llm_config=llm_config
)

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="TERMINATE",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web", "use_docker": False},
    llm_config=llm_config,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet."""
)

task = """
Tell me a joke
"""

user_proxy.initiate_chat(assistant, message=task)

I'm running this as a single assistant plus user proxy, and I get the error below. Any help would be appreciated:
TypeError: Missing required arguments; Expected either ('messages' and 'model') or ('messages', 'model' and 'stream') arguments to be given
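
That TypeError usually means the OpenAI client never received a model name. As @qwickset points out above, adding a model entry to the config list should resolve it; "ollama/mistral" below is just an example, so match it to whatever you passed to litellm:

# Same config as above, but with the 'model' field the client requires.
# "ollama/mistral" is an example value; use the model you launched litellm with.
config_list = [
    {
        'base_url': "http://0.0.0.0:4000",
        'api_key': "NULL",
        'model': "ollama/mistral",
    }
]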

@shrijayan

import autogen

# direct access to Ollama since 0.1.24, compatible with OpenAI /chat/completions
BASE_URL="http://localhost:11434/v1"

config_list_mistral = [
    {
        'base_url': BASE_URL,
        'api_key': "fakekey",
        'model': "mistral:latest",
    }
]

config_list_codellama = [
    {
        'base_url': BASE_URL,
        'api_key': "fakekey",
        'model': "codellama:7b-code-q4_K_M",
    }
]

llm_config_mistral={
    "config_list": config_list_mistral,
}

llm_config_codellama={
    "config_list": config_list_codellama,
}

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={"work_dir": "web", "use_docker": False},
    llm_config=llm_config_mistral,
    system_message="""Reply TERMINATE if the task has been solved at full satisfaction.
Otherwise, reply CONTINUE, or the reason why the task is not solved yet."""
)

coder = autogen.AssistantAgent(
    name="Coder",
    llm_config=llm_config_codellama
)

task="""
Write a python script that lists the number from 1 to 100
"""

user_proxy.initiate_chat(coder, message=task)

I found this code online that seems to be correct and directly addresses my needs. Thank you so much to @leolivier for sharing it.
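
If you go this route and use Ollama's built-in OpenAI-compatible endpoint instead of litellm, you can verify the endpoint independently of AutoGen. A minimal sketch, assuming Ollama 0.1.24+ is running locally and mistral:latest has already been pulled:

# Verify Ollama's OpenAI-compatible endpoint (Ollama 0.1.24+), no litellm needed.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:11434/v1", api_key="fakekey")  # key is required by the client but ignored by Ollama
resp = client.chat.completions.create(
    model="mistral:latest",  # must be a model you've already pulled with `ollama run`/`ollama pull`
    messages=[{"role": "user", "content": "Reply with the single word: ready"}],
)
print(resp.choices[0].message.content)

If this responds, the same base_url, api_key, and model values will work in the config lists above.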
