mirror of https://github.com/microsoft/autogen.git
Adding `register_nested_chats` to make nested chat easier (#1753)
* add util
* add notebook
* SoM notebook
* doc
* update notebook
* SoM
* optiguide
* rename
* add implementation
* update notebook
* update notebook
* update notebook
* summary method
* initiate_chats enhancements
* callable summary_method
* summary method
* summary method default
* docstr
* add timeout to silent pip install test
* consolidate_chat_info
* a_initiate_chat
* AssertionError tests
* AssertionError test
* update tests
* update test
* remove redundant file
* kwargs
* update notebook
* update notebook
* nested
* update SoM
* update notebook
* max_turns
* add notebook
* update notebook
* add notebooks
* update notebook and test
* add links to the example page
* annotations
* documentation
* default values
* Expanded details about the purpose of each cell
* update default value
* default value for reply_func_from_nested_chats
* default reply func name
* update notebook
* update optiGuide
* upload fig
* optiGuide notebook
* doc
* update notebook
* update notebook
* notebook format
* update optiguide notebook
* raise value error
* update notebook
* Improve language

---------

Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
Co-authored-by: signalprime <15487280+signalprime@users.noreply.github.com>
Co-authored-by: gagb <gagb@users.noreply.github.com>
This commit is contained in:
parent
d797d267e4
commit
c6f6707f4d
@@ -6,6 +6,7 @@ import json
import logging
import re
from collections import defaultdict
from functools import partial
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union
import warnings
from openai import BadRequestError
@@ -325,6 +326,80 @@ class ConversableAgent(LLMAgent):
        if ignore_async_in_sync_chat and inspect.iscoroutinefunction(reply_func):
            self._ignore_async_func_in_sync_chat_list.append(reply_func)

    @staticmethod
    def _summary_from_nested_chats(
        chat_queue: List[Dict[str, Any]], recipient: Agent, messages: Union[str, Callable], sender: Agent, config: Any
    ) -> Tuple[bool, str]:
        """A simple chat reply function.
        This function initiates one or a sequence of chats between the "recipient" and the agents in the
        chat_queue.

        It extracts and returns a summary from the nested chat based on the "summary_method" in each chat in chat_queue.

        Returns:
            Tuple[bool, str]: A tuple where the first element indicates the completion of the chat, and the second element contains the summary of the last chat if any chats were initiated.
        """
        last_msg = messages[-1].get("content")
        chat_to_run = []
        for i, c in enumerate(chat_queue):
            current_c = c.copy()
            message = current_c.get("message")
            # If message is not provided in chat_queue, we by default use the last message from the original chat history as the first message in this nested chat (for the first chat in the chat queue).
            # NOTE: This setting is prone to change.
            if message is None and i == 0:
                message = last_msg
            if callable(message):
                message = message(recipient, messages, sender, config)
            # We only run a chat that has a valid message. NOTE: This is prone to change depending on applications.
            if message:
                current_c["message"] = message
                chat_to_run.append(current_c)
        if not chat_to_run:
            return True, None
        res = recipient.initiate_chats(chat_to_run)
        return True, res[-1].summary

    def register_nested_chats(
        self,
        chat_queue: List[Dict[str, Any]],
        trigger: Union[Type[Agent], str, Agent, Callable[[Agent], bool], List] = [Agent, None],
        reply_func_from_nested_chats: Union[str, Callable] = "summary_from_nested_chats",
        position: int = 2,
        **kwargs,
    ) -> None:
        """Register a nested chat reply function.

        Args:
            chat_queue (list): a list of chat objects to be initiated.
            trigger (Agent class, str, Agent instance, callable, or list): Defaults to [Agent, None]. Refer to `register_reply` for details.
            reply_func_from_nested_chats (Callable, str): the reply function for the nested chat.
                The function takes a chat_queue for the nested chat, a recipient agent, a list of messages, a sender agent, and a config as input and returns a reply message.
                Defaults to "summary_from_nested_chats", which corresponds to a built-in reply function that gets a summary from the nested chat_queue.
                ```python
                def reply_func_from_nested_chats(
                    chat_queue: List[Dict],
                    recipient: ConversableAgent,
                    messages: Optional[List[Dict]] = None,
                    sender: Optional[Agent] = None,
                    config: Optional[Any] = None,
                ) -> Tuple[bool, Union[str, Dict, None]]:
                ```
            position (int): Refer to `register_reply` for details. Defaults to 2. It means we first check the termination and human reply, then check the registered nested chat reply.
            kwargs: Refer to `register_reply` for details.
        """
        if reply_func_from_nested_chats == "summary_from_nested_chats":
            reply_func_from_nested_chats = self._summary_from_nested_chats
        if not callable(reply_func_from_nested_chats):
            raise ValueError("reply_func_from_nested_chats must be a callable")
        reply_func = partial(reply_func_from_nested_chats, chat_queue)
        self.register_reply(
            trigger,
            reply_func,
            position,
            kwargs.get("config"),
            kwargs.get("reset_config"),
            ignore_async_in_sync_chat=kwargs.get("ignore_async_in_sync_chat"),
        )

    @property
    def system_message(self) -> str:
        """Return the system message."""
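For orientation, here is a minimal usage sketch of the `register_nested_chats` API added above, assuming an `OAI_CONFIG_LIST` file is available; the agent names, task text, and callable message are hypothetical illustrations, not part of this diff:

```python
import autogen

# Hypothetical configuration; assumes an OAI_CONFIG_LIST file is present.
config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST")
llm_config = {"config_list": config_list}

assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)
writer = autogen.AssistantAgent(name="writer", llm_config=llm_config)
critic = autogen.AssistantAgent(name="critic", llm_config=llm_config)
user = autogen.UserProxyAgent(
    name="user",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=1,
    code_execution_config=False,
)


def critique_message(recipient, messages, sender, config):
    # Callable messages are resolved with the outer chat's context; summaries of
    # earlier chats in the queue are carried over automatically by initiate_chats.
    return f"Review the writer's response to this task:\n\n{messages[-1].get('content', '')}"


nested_chat_queue = [
    # No "message" key: the last message of the outer chat seeds this first nested chat.
    {"recipient": writer, "summary_method": "last_msg", "max_turns": 1},
    {"recipient": critic, "message": critique_message, "summary_method": "last_msg", "max_turns": 1},
]

# When `assistant` is triggered by `user`, the nested chats run and the summary of the
# last one is returned as assistant's reply (position 2: after termination/human-reply checks).
assistant.register_nested_chats(nested_chat_queue, trigger=user)

user.initiate_chat(assistant, message="Write a two-sentence note about nested chats.")
```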
@@ -2,12 +2,16 @@ from autogen import AssistantAgent, UserProxyAgent
from autogen import GroupChat, GroupChatManager
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
import pytest
from conftest import skip_openai
import sys
import os
import autogen
from typing import Literal
from typing_extensions import Annotated
from autogen import initiate_chats

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai  # noqa: E402


def test_chat_messages_for_summary():
    assistant = UserProxyAgent(name="assistant", human_input_mode="NEVER")
@@ -402,7 +406,6 @@ def test_chats_exceptions():
            },
        ]
    )

    with pytest.raises(
        AssertionError,
        match="llm client must be set in either the recipient or sender when summary_method is reflection_with_llm.",
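The assertion above documents a requirement of the chat summarization path: with `summary_method="reflection_with_llm"`, an LLM client must be configured on either the recipient or the sender. A minimal sketch of a configuration that satisfies it (agent names and the task text are illustrative, not taken from this diff):

```python
import autogen

config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST")

# The recipient carries the llm_config, so "reflection_with_llm" has a client to use
# even though the sender has none.
sender = autogen.UserProxyAgent(
    name="sender",
    human_input_mode="NEVER",
    llm_config=False,  # no LLM client on the sender side
    code_execution_config=False,
)
recipient = autogen.AssistantAgent(
    name="recipient",
    llm_config={"config_list": config_list},  # LLM client lives here
)

result = sender.initiate_chat(
    recipient,
    message="Summarize why nested chats are useful.",
    max_turns=1,
    summary_method="reflection_with_llm",
)
print(result.summary)
```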
@@ -0,0 +1,118 @@
import pytest
import sys
import os
import autogen

sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from conftest import skip_openai  # noqa: E402


@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
def test_nested():
    config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST")
    llm_config = {"config_list": config_list}

    tasks = [
        """What's Microsoft's Stock price today?""",
        """Make a pleasant joke about it.""",
    ]

    inner_assistant = autogen.AssistantAgent(
        "Inner-assistant",
        llm_config=llm_config,
        is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
    )

    inner_code_interpreter = autogen.UserProxyAgent(
        "Inner-code-interpreter",
        human_input_mode="NEVER",
        code_execution_config={
            "work_dir": "coding",
            "use_docker": False,
        },
        default_auto_reply="",
        is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
    )

    groupchat = autogen.GroupChat(
        agents=[inner_assistant, inner_code_interpreter],
        messages=[],
        speaker_selection_method="round_robin",  # With two agents, this is equivalent to a 1:1 conversation.
        allow_repeat_speaker=False,
        max_round=8,
    )

    manager = autogen.GroupChatManager(
        groupchat=groupchat,
        is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
        llm_config=llm_config,
        code_execution_config={
            "work_dir": "coding",
            "use_docker": False,
        },
    )

    assistant = autogen.AssistantAgent(
        name="Assistant",
        llm_config={"config_list": config_list},
        # is_termination_msg=lambda x: x.get("content", "") == "",
    )

    user = autogen.UserProxyAgent(
        name="User",
        human_input_mode="NEVER",
        is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
        code_execution_config={
            "last_n_messages": 1,
            "work_dir": "tasks",
            "use_docker": False,
        },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
    )

    writer = autogen.AssistantAgent(
        name="Writer",
        llm_config={"config_list": config_list},
        system_message="""
        You are a professional writer, known for
        your insightful and engaging articles.
        You transform complex concepts into compelling narratives.
        Reply "TERMINATE" in the end when everything is done.
        """,
    )

    reviewer = autogen.AssistantAgent(
        name="Reviewer",
        llm_config={"config_list": config_list},
        system_message="""
        You are a compliance reviewer, known for your thoroughness and commitment to standards.
        Your task is to scrutinize content for any harmful elements or regulatory violations, ensuring
        all materials align with required guidelines.
        You must review carefully, identify potential issues, and maintain the integrity of the organization.
        Your role demands fairness, a deep understanding of regulations, and a focus on protecting against
        harm while upholding a culture of responsibility.
        You also help make revisions to ensure the content is accurate, clear, and compliant.
        Reply "TERMINATE" in the end when everything is done.
        """,
    )

    def writing_message(recipient, messages, sender, config):
        return f"Polish the content to make an engaging and nicely formatted blog post. \n\n {recipient.chat_messages_for_summary(sender)[-1]['content']}"

    nested_chat_queue = [
        {"recipient": manager, "summary_method": "reflection_with_llm"},
        {"recipient": writer, "message": writing_message, "summary_method": "last_msg", "max_turns": 1},
        {
            "recipient": reviewer,
            "message": "Review the content provided.",
            "summary_method": "last_msg",
            "max_turns": 1,
        },
    ]
    assistant.register_nested_chats(
        nested_chat_queue,
    )
    user.initiate_chats([{"recipient": assistant, "message": tasks[0]}, {"recipient": assistant, "message": tasks[1]}])


if __name__ == "__main__":
    test_nested()
@@ -22,10 +22,15 @@ Links to notebook examples:
   - Automated Task Solving with Coding & Planning Agents - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_planning.ipynb)
   - Automated Task Solving with transition paths specified in a graph - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_graph_modelling_language_using_select_speaker.ipynb)
   - Running a group chat as an inner-monologue via the SocietyOfMindAgent - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_society_of_mind.ipynb)

1. **Sequential Multi-Agent Chats**
   - Solving Multiple Tasks in a Sequence of Chats Initiated by a Single Agent - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_multi_task_chats.ipynb)
   - Async-solving Multiple Tasks in a Sequence of Chats Initiated by a Single Agent - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_multi_task_async_chats.ipynb)
   - Solving Multiple Tasks in a Sequence of Chats Initiated by Different Agents - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchats.ipynb)
   - Solving Multiple Tasks in a Sequence of Chats Initiated by Different Agents - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchats_sequential_chats.ipynb)

1. **Nested Chats**
   - Solving Complex Tasks with Nested Chats - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_nestedchat.ipynb)
   - OptiGuide for Solving a Supply Chain Optimization Problem with Nested Chats with a Coding Agent and a Safeguard Agent - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_nestedchat_optiguide.ipynb)

1. **Applications**