mirror of https://github.com/microsoft/autogen.git
Rename Responsive -> Conversable (#1202)
* responsive -> conversable
* preview
* rename
* register reply
* rename and version
* bump version to 2.1.0
* notebook
* bug fix
parent 599731cb22
commit 4886cb5689
@@ -1,12 +1,12 @@
 from .agent import Agent
-from .responsive_agent import ResponsiveAgent
+from .conversable_agent import ConversableAgent
 from .assistant_agent import AssistantAgent
 from .user_proxy_agent import UserProxyAgent
 from .groupchat import GroupChat, GroupChatManager

 __all__ = [
     "Agent",
-    "ResponsiveAgent",
+    "ConversableAgent",
     "AssistantAgent",
     "UserProxyAgent",
     "GroupChat",
@@ -1,11 +1,11 @@
-from .responsive_agent import ResponsiveAgent
+from .conversable_agent import ConversableAgent
 from typing import Callable, Dict, Optional, Union


-class AssistantAgent(ResponsiveAgent):
+class AssistantAgent(ConversableAgent):
     """(In preview) Assistant agent, designed to solve a task with LLM.

-    AssistantAgent is a subclass of ResponsiveAgent configured with a default system message.
+    AssistantAgent is a subclass of ConversableAgent configured with a default system message.
     The default system message is designed to solve a task with LLM,
     including suggesting python code blocks and debugging.
     `human_input_mode` is default to "NEVER"
@@ -52,7 +52,7 @@ Reply "TERMINATE" in the end when everything is done.
                 default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
                 The limit only plays a role when human_input_mode is not "ALWAYS".
             **kwargs (dict): Please refer to other kwargs in
-                [ResponsiveAgent](responsive_agent#__init__).
+                [ConversableAgent](conversable_agent#__init__).
         """
         super().__init__(
             name,
@@ -165,7 +165,7 @@ class MathUserProxyAgent(UserProxyAgent):
             default_auto_reply=default_auto_reply,
             **kwargs,
         )
-        self.register_auto_reply([Agent, None], MathUserProxyAgent._generate_math_reply, 1)
+        self.register_reply([Agent, None], MathUserProxyAgent._generate_math_reply, 1)
         # fixed var
         self._max_invalid_q_per_step = max_invalid_q_per_step

@@ -16,7 +16,7 @@ class RetrieveAssistantAgent(AssistantAgent):

     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.register_auto_reply(Agent, RetrieveAssistantAgent._generate_retrieve_assistant_reply)
+        self.register_reply(Agent, RetrieveAssistantAgent._generate_retrieve_assistant_reply)

     def _generate_retrieve_assistant_reply(
         self,
@@ -148,7 +148,7 @@ class RetrieveUserProxyAgent(UserProxyAgent):
         self._ipython = get_ipython()
         self._doc_idx = -1  # the index of the current used doc
         self._results = {}  # the results of the current query
-        self.register_auto_reply(Agent, RetrieveUserProxyAgent._generate_retrieve_user_reply)
+        self.register_reply(Agent, RetrieveUserProxyAgent._generate_retrieve_user_reply)

     @staticmethod
     def get_max_tokens(model="gpt-3.5-turbo"):
@@ -21,11 +21,11 @@ except ImportError:
         return x


-class ResponsiveAgent(Agent):
-    """(Experimental) A class for generic responsive agents which can be configured as assistant or user proxy.
+class ConversableAgent(Agent):
+    """(In preview) A class for generic conversable agents which can be configured as assistant or user proxy.

     After receiving each message, the agent will send a reply to the sender unless the msg is a termination msg.
-    For example, AssistantAgent and UserProxyAgent are subclasses of ResponsiveAgent,
+    For example, AssistantAgent and UserProxyAgent are subclasses of this class,
     configured with different default settings.

     To modify auto reply, override `generate_reply` method.
@@ -119,12 +119,12 @@ class ResponsiveAgent(Agent):
         self._default_auto_reply = default_auto_reply
         self._reply_func_list = []
         self.reply_at_receive = defaultdict(bool)
-        self.register_auto_reply([Agent, None], ResponsiveAgent.generate_oai_reply)
-        self.register_auto_reply([Agent, None], ResponsiveAgent.generate_code_execution_reply)
-        self.register_auto_reply([Agent, None], ResponsiveAgent.generate_function_call_reply)
-        self.register_auto_reply([Agent, None], ResponsiveAgent.check_termination_and_human_reply)
+        self.register_reply([Agent, None], ConversableAgent.generate_oai_reply)
+        self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply)
+        self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply)
+        self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)

-    def register_auto_reply(
+    def register_reply(
         self,
         trigger: Union[Type[Agent], str, Agent, Callable[[Agent], bool], List],
         reply_func: Callable,
@@ -151,7 +151,7 @@ class ResponsiveAgent(Agent):
                 The function takes a recipient agent, a list of messages, a sender agent and a config as input and returns a reply message.
                 ```python
                 def reply_func(
-                    recipient: ResponsiveAgent,
+                    recipient: ConversableAgent,
                     messages: Optional[List[Dict]] = None,
                     sender: Optional[Agent] = None,
                     config: Optional[Any] = None,
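For orientation, a minimal sketch of registering a custom reply function under its new name, following the signature documented in this hunk (the agent name and the reply logic are illustrative, not part of this commit):

```python
from flaml.autogen.agentchat import Agent, ConversableAgent

def count_reply(recipient, messages=None, sender=None, config=None):
    # Returning final=True stops later reply functions from being consulted.
    return True, f"received {len(messages or [])} message(s)"

# Illustrative agent; llm_config=False disables the default LLM-based reply.
agent = ConversableAgent("counter", llm_config=False, human_input_mode="NEVER")
agent.register_reply([Agent, None], count_reply)
```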
@@ -499,7 +499,7 @@ class ResponsiveAgent(Agent):

     def initiate_chat(
         self,
-        recipient: "ResponsiveAgent",
+        recipient: "ConversableAgent",
         clear_history: Optional[bool] = True,
         silent: Optional[bool] = False,
         **context,
@@ -522,7 +522,7 @@ class ResponsiveAgent(Agent):

     async def a_initiate_chat(
         self,
-        recipient: "ResponsiveAgent",
+        recipient: "ConversableAgent",
         clear_history: Optional[bool] = True,
         silent: Optional[bool] = False,
         **context,
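For context, a hedged sketch of a two-agent exchange using the renamed entry points; it mirrors the patterns in the tests below, and llm_config=False keeps it runnable without model access:

```python
from flaml import autogen

assistant = autogen.ConversableAgent(
    "assistant",
    max_consecutive_auto_reply=1,
    human_input_mode="NEVER",
    llm_config=False,
    default_auto_reply="Got it.",
)
user = autogen.ConversableAgent(
    "user",
    max_consecutive_auto_reply=0,
    human_input_mode="NEVER",
    llm_config=False,
)
# initiate_chat clears prior history by default (clear_history=True).
user.initiate_chat(assistant, message="hello")
print(user.last_message(assistant)["content"])  # -> "Got it."
```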
@@ -610,8 +610,8 @@ class ResponsiveAgent(Agent):
             return False, None
         if messages is None:
            messages = self._oai_messages[sender]
-        last_n_messages = min(len(messages), code_execution_config.pop("last_n_messages", 1))
-        for i in range(last_n_messages):
+        last_n_messages = code_execution_config.pop("last_n_messages", 1)
+        for i in range(min(len(messages), last_n_messages)):
             message = messages[-(i + 1)]
             code_blocks = extract_code(message["content"])
             if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
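This hunk carries the bug fix named in the commit message: the configured `last_n_messages` is popped from the config first, and the clamp against the current history length moves to where the iteration happens. A standalone sketch of the fixed scan (the message list is made up for illustration):

```python
# Walk at most `last_n_messages` of the most recent messages, but never
# more than the history actually contains.
messages = [{"content": "hi"}, {"content": "print(1)"}]
code_execution_config = {"last_n_messages": 5}

last_n_messages = code_execution_config.pop("last_n_messages", 1)
for i in range(min(len(messages), last_n_messages)):
    message = messages[-(i + 1)]  # newest first
    # ... extract_code(message["content"]) and execute any code blocks ...
```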
@@ -2,7 +2,7 @@ from dataclasses import dataclass
 import sys
 from typing import Dict, List, Optional, Union
 from .agent import Agent
-from .responsive_agent import ResponsiveAgent
+from .conversable_agent import ConversableAgent


 @dataclass
@@ -39,7 +39,7 @@ class GroupChat:
 Read the following conversation.
 Then select the next role from {self.agent_names} to play. Only return the role."""

-    def select_speaker(self, last_speaker: Agent, selector: ResponsiveAgent):
+    def select_speaker(self, last_speaker: Agent, selector: ConversableAgent):
         """Select the next speaker."""
         selector.update_system_message(self.select_speaker_msg())
         final, name = selector.generate_oai_reply(
@@ -63,7 +63,7 @@ Then select the next role from {self.agent_names} to play. Only return the role.
         return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])


-class GroupChatManager(ResponsiveAgent):
+class GroupChatManager(ConversableAgent):
     """(In preview) A chat manager agent that can manage a group chat of multiple agents."""

     def __init__(
@@ -84,7 +84,7 @@ class GroupChatManager(ResponsiveAgent):
             system_message=system_message,
             **kwargs,
         )
-        self.register_auto_reply(Agent, GroupChatManager.run_chat, config=groupchat, reset_config=GroupChat.reset)
+        self.register_reply(Agent, GroupChatManager.run_chat, config=groupchat, reset_config=GroupChat.reset)
        # self._random = random.Random(seed)

     def run_chat(
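This registration is also what lets any other agent act as a group-chat manager, which the test_plugin change further down exercises; condensed here as a sketch (agent names and replies are illustrative):

```python
from flaml import autogen

agent1 = autogen.ConversableAgent(
    "alice", max_consecutive_auto_reply=2, human_input_mode="NEVER",
    llm_config=False, default_auto_reply="This is alice speaking.",
)
agent2 = autogen.ConversableAgent(
    "bob", max_consecutive_auto_reply=2, human_input_mode="NEVER",
    llm_config=False, default_auto_reply="This is bob speaking.",
)
groupchat = autogen.GroupChat(agents=[agent1, agent2], messages=[], max_round=2)
# Any ConversableAgent can run the group chat by registering run_chat.
manager = autogen.ConversableAgent(name="deputy_manager", llm_config=False)
manager.register_reply(
    autogen.Agent,
    reply_func=autogen.GroupChatManager.run_chat,
    config=groupchat,
)
agent1.initiate_chat(manager, message="hello")
```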
@@ -1,14 +1,14 @@
-from .responsive_agent import ResponsiveAgent
+from .conversable_agent import ConversableAgent
 from typing import Callable, Dict, Optional, Union


-class UserProxyAgent(ResponsiveAgent):
+class UserProxyAgent(ConversableAgent):
     """(In preview) A proxy agent for the user, that can execute code and provide feedback to the other agents.

-    UserProxyAgent is a subclass of ResponsiveAgent configured with `human_input_mode` to ALWAYS
+    UserProxyAgent is a subclass of ConversableAgent configured with `human_input_mode` to ALWAYS
     and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.
     Code execution is enabled by default. LLM-based auto reply is disabled by default.
-    To modify auto reply, register a method with (`register_auto_reply`)[responsive_agent#register_auto_reply].
+    To modify auto reply, register a method with (`register_reply`)[conversable_agent#register_reply].
     To modify the way to get human input, override `get_human_input` method.
     To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
     `run_code`, and `execute_function` methods respectively.
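A brief sketch of the defaults this docstring describes; the constructor arguments shown, including the working directory option, are assumptions for illustration:

```python
from flaml import autogen

# Defaults per the docstring: human_input_mode="ALWAYS", llm_config=False,
# and code execution enabled. The overrides below just make the sketch
# non-interactive.
user_proxy = autogen.UserProxyAgent(
    "user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1,
    code_execution_config={"work_dir": "coding"},  # assumed option name
)
```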
@@ -1 +1 @@
-__version__ = "2.0.3"
+__version__ = "2.1.0"
@@ -35,7 +35,7 @@
 "outputs": [],
 "source": [
 "%%capture --no-stderr\n",
-"# %pip install flaml[autogen]~=2.0.0\n",
+"# %pip install flaml[autogen]~=2.1.0\n",
 "%pip install chess -U"
 ]
 },
@@ -158,7 +158,7 @@
 " llm_config={\"temperature\": 0.0, \"config_list\": config_list_gpt4},\n",
 " max_consecutive_auto_reply=10,\n",
 " )\n",
-" self.register_auto_reply(autogen.ResponsiveAgent, BoardAgent._generate_board_reply)\n",
+" self.register_reply(autogen.ConversableAgent, BoardAgent._generate_board_reply)\n",
 " self.board = board\n",
 " self.correct_move_messages = defaultdict(list)\n",
 "\n",
@@ -226,8 +226,8 @@
 " max_consecutive_auto_reply=max_turns,\n",
 " **kwargs,\n",
 " )\n",
-" self.register_auto_reply(BoardAgent, ChessPlayerAgent._generate_reply_for_board, config=board_agent.board)\n",
-" self.register_auto_reply(ChessPlayerAgent, ChessPlayerAgent._generate_reply_for_player, config=board_agent)\n",
+" self.register_reply(BoardAgent, ChessPlayerAgent._generate_reply_for_board, config=board_agent.board)\n",
+" self.register_reply(ChessPlayerAgent, ChessPlayerAgent._generate_reply_for_player, config=board_agent)\n",
 " self.update_max_consecutive_auto_reply(board_agent.max_consecutive_auto_reply(), board_agent)\n",
 "\n",
 " def _generate_reply_for_board(\n",
@@ -262,7 +262,7 @@
 " return True, None\n",
 " # converse with the board until a legal move is made or max allowed retries.\n",
 " # change silent to False to see that conversation.\n",
-" self.initiate_chat(board_agent, clear_history=False, message=message, silent=True)\n",
+" self.initiate_chat(board_agent, clear_history=False, message=message, silent=self.human_input_mode == \"NEVER\")\n",
 " # last message sent by the board agent\n",
 " last_message = self._oai_messages[board_agent][-1]\n",
 " if last_message[\"role\"] == \"assistant\":\n",
@@ -45,7 +45,7 @@
 },
 "outputs": [],
 "source": [
-"# %pip install flaml[autogen]~=2.0.0"
+"# %pip install flaml[autogen]~=2.1.0"
 ]
 },
 {
@@ -258,7 +258,7 @@
 " )\n",
 " return False, None\n",
 "\n",
-"user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, config={\"news_stream\": data})"
+"user_proxy.register_reply(autogen.AssistantAgent, add_data_reply, 1, config={\"news_stream\": data})"
 ]
 },
 {
@@ -98,7 +98,7 @@ async def test_stream():
         )
         return False, None

-    user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, config={"news_stream": data})
+    user_proxy.register_reply(autogen.AssistantAgent, add_data_reply, 1, config={"news_stream": data})

     await user_proxy.a_initiate_chat(
         assistant,
@@ -1,48 +1,48 @@
 import pytest
-from flaml.autogen.agentchat import ResponsiveAgent
+from flaml.autogen.agentchat import ConversableAgent


 def test_trigger():
-    agent = ResponsiveAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
-    agent1 = ResponsiveAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
-    agent.register_auto_reply(agent1, lambda recipient, messages, sender, config: (True, "hello"))
+    agent = ConversableAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
+    agent1 = ConversableAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
+    agent.register_reply(agent1, lambda recipient, messages, sender, config: (True, "hello"))
     agent1.initiate_chat(agent, message="hi")
     assert agent1.last_message(agent)["content"] == "hello"
-    agent.register_auto_reply("a1", lambda recipient, messages, sender, config: (True, "hello a1"))
+    agent.register_reply("a1", lambda recipient, messages, sender, config: (True, "hello a1"))
     agent1.initiate_chat(agent, message="hi")
     assert agent1.last_message(agent)["content"] == "hello a1"
-    agent.register_auto_reply(
-        ResponsiveAgent, lambda recipient, messages, sender, config: (True, "hello responsive agent")
+    agent.register_reply(
+        ConversableAgent, lambda recipient, messages, sender, config: (True, "hello conversable agent")
     )
     agent1.initiate_chat(agent, message="hi")
-    assert agent1.last_message(agent)["content"] == "hello responsive agent"
-    agent.register_auto_reply(
+    assert agent1.last_message(agent)["content"] == "hello conversable agent"
+    agent.register_reply(
         lambda sender: sender.name.startswith("a"), lambda recipient, messages, sender, config: (True, "hello a")
     )
     agent1.initiate_chat(agent, message="hi")
     assert agent1.last_message(agent)["content"] == "hello a"
-    agent.register_auto_reply(
+    agent.register_reply(
         lambda sender: sender.name.startswith("b"), lambda recipient, messages, sender, config: (True, "hello b")
     )
     agent1.initiate_chat(agent, message="hi")
     assert agent1.last_message(agent)["content"] == "hello a"
-    agent.register_auto_reply(
+    agent.register_reply(
         ["agent2", agent1], lambda recipient, messages, sender, config: (True, "hello agent2 or agent1")
     )
     agent1.initiate_chat(agent, message="hi")
     assert agent1.last_message(agent)["content"] == "hello agent2 or agent1"
-    agent.register_auto_reply(
+    agent.register_reply(
         ["agent2", "agent3"], lambda recipient, messages, sender, config: (True, "hello agent2 or agent3")
     )
     agent1.initiate_chat(agent, message="hi")
     assert agent1.last_message(agent)["content"] == "hello agent2 or agent1"
-    pytest.raises(ValueError, agent.register_auto_reply, 1, lambda recipient, messages, sender, config: (True, "hi"))
+    pytest.raises(ValueError, agent.register_reply, 1, lambda recipient, messages, sender, config: (True, "hi"))
     pytest.raises(ValueError, agent._match_trigger, 1, agent1)


 def test_context():
-    agent = ResponsiveAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
-    agent1 = ResponsiveAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
+    agent = ConversableAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
+    agent1 = ConversableAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
     agent1.send(
         {
             "content": "hello {name}",
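As test_trigger shows, the trigger passed to register_reply may be an agent instance, an agent name string, an agent class, a callable on the sender, or a list mixing these, and a later registration takes precedence over an earlier one. A compact sketch of two of those forms (agents illustrative):

```python
from flaml.autogen.agentchat import ConversableAgent

a0 = ConversableAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
a1 = ConversableAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")

# Instance-based trigger, then a name-based one; the later registration
# wins when a1 initiates the chat.
a0.register_reply(a1, lambda recipient, messages, sender, config: (True, "hi instance"))
a0.register_reply("a1", lambda recipient, messages, sender, config: (True, "hi name"))
a1.initiate_chat(a0, message="hi")
assert a1.last_message(a0)["content"] == "hi name"
```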
@@ -77,8 +77,8 @@ def test_context():


 def test_max_consecutive_auto_reply():
-    agent = ResponsiveAgent("a0", max_consecutive_auto_reply=2, llm_config=False, human_input_mode="NEVER")
-    agent1 = ResponsiveAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
+    agent = ConversableAgent("a0", max_consecutive_auto_reply=2, llm_config=False, human_input_mode="NEVER")
+    agent1 = ConversableAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
     assert agent.max_consecutive_auto_reply() == agent.max_consecutive_auto_reply(agent1) == 2
     agent.update_max_consecutive_auto_reply(1)
     assert agent.max_consecutive_auto_reply() == agent.max_consecutive_auto_reply(agent1) == 1
@@ -105,9 +105,9 @@ def test_max_consecutive_auto_reply():
     assert agent1.reply_at_receive[agent] is False and agent.reply_at_receive[agent1] is True


-def test_responsive_agent():
-    dummy_agent_1 = ResponsiveAgent(name="dummy_agent_1", human_input_mode="ALWAYS")
-    dummy_agent_2 = ResponsiveAgent(name="dummy_agent_2", human_input_mode="TERMINATE")
+def test_conversable_agent():
+    dummy_agent_1 = ConversableAgent(name="dummy_agent_1", human_input_mode="ALWAYS")
+    dummy_agent_2 = ConversableAgent(name="dummy_agent_2", human_input_mode="TERMINATE")

     # monkeypatch.setattr(sys, "stdin", StringIO("exit"))
     dummy_agent_1.receive("hello", dummy_agent_2)  # receive a str
@@ -159,7 +159,7 @@ def test_generate_reply():
         given_num = 10
         return num_to_be_added + given_num

-    dummy_agent_2 = ResponsiveAgent(name="user_proxy", human_input_mode="TERMINATE", function_map={"add_num": add_num})
+    dummy_agent_2 = ConversableAgent(name="user_proxy", human_input_mode="TERMINATE", function_map={"add_num": add_num})
     messsages = [{"function_call": {"name": "add_num", "arguments": '{ "num_to_be_added": 5 }'}, "role": "assistant"}]

     # when sender is None, messages is provided
@@ -168,7 +168,7 @@ def test_generate_reply():
     ), "generate_reply not working when sender is None"

     # when sender is provided, messages is None
-    dummy_agent_1 = ResponsiveAgent(name="dummy_agent_1", human_input_mode="ALWAYS")
+    dummy_agent_1 = ConversableAgent(name="dummy_agent_1", human_input_mode="ALWAYS")
     dummy_agent_2._oai_messages[dummy_agent_1] = messsages
     assert (
         dummy_agent_2.generate_reply(messages=None, sender=dummy_agent_1)["content"] == "15"
@@ -179,4 +179,4 @@ if __name__ == "__main__":
     test_trigger()
     # test_context()
     # test_max_consecutive_auto_reply()
-    # test_responsive_agent(pytest.monkeypatch)
+    # test_conversable_agent(pytest.monkeypatch)
@@ -2,14 +2,14 @@ from flaml import autogen


 def test_chat_manager():
-    agent1 = autogen.ResponsiveAgent(
+    agent1 = autogen.ConversableAgent(
         "alice",
         max_consecutive_auto_reply=2,
         human_input_mode="NEVER",
         llm_config=False,
         default_auto_reply="This is alice sepaking.",
     )
-    agent2 = autogen.ResponsiveAgent(
+    agent2 = autogen.ConversableAgent(
         "bob",
         max_consecutive_auto_reply=2,
         human_input_mode="NEVER",
@@ -33,14 +33,14 @@ def test_chat_manager():

 def test_plugin():
     # Give another Agent class ability to manage group chat
-    agent1 = autogen.ResponsiveAgent(
+    agent1 = autogen.ConversableAgent(
         "alice",
         max_consecutive_auto_reply=2,
         human_input_mode="NEVER",
         llm_config=False,
         default_auto_reply="This is alice sepaking.",
     )
-    agent2 = autogen.ResponsiveAgent(
+    agent2 = autogen.ConversableAgent(
         "bob",
         max_consecutive_auto_reply=2,
         human_input_mode="NEVER",
@@ -48,8 +48,8 @@ def test_plugin():
         default_auto_reply="This is bob speaking.",
     )
     groupchat = autogen.GroupChat(agents=[agent1, agent2], messages=[], max_round=2)
-    group_chat_manager = autogen.ResponsiveAgent(name="deputy_manager", llm_config=False)
-    group_chat_manager.register_auto_reply(
+    group_chat_manager = autogen.ConversableAgent(name="deputy_manager", llm_config=False)
+    group_chat_manager.register_reply(
         autogen.Agent,
         reply_func=autogen.GroupChatManager.run_chat,
         config=groupchat,
@@ -1,3 +1,3 @@
-# AutoGen: Enabling Next-Gen GPT-X Applications
+# AutoGen for Large Language Models

 Please refer to https://microsoft.github.io/autogen/.