mirror of https://github.com/microsoft/autogen.git
Resuming a GroupChat (#2627)
* Initial resume * Cleaned up resume function * Further updating resuming group chat * Added async resume_chat and documentation * Added test cases, refined group chat function parameters * compiled documentation * Added tests to main * Removed mdx file. * Revert "Merge remote-tracking branch 'origin/main' into groupchatresume" This reverts commit8f709308f0
, reversing changes made to8bfcb2bff1
. * Refactored resume to remove initiate_chat * fix git history * fix history * Added clean-up of objects, _groupchat references, and messages_to_string updated * Added termination-based resumption in notebook, added test cases and improved robustness on resuming messages parameter --------- Co-authored-by: Eric Zhu <ekzhu@users.noreply.github.com>
This commit is contained in:
parent
e509549a01
commit
a86c474339
|
@ -1,3 +1,5 @@
|
|||
import copy
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
|
@ -12,6 +14,7 @@ from ..graph_utils import check_graph_validity, invert_disallowed_to_allowed
|
|||
from ..io.base import IOStream
|
||||
from ..runtime_logging import log_new_agent, logging_enabled
|
||||
from .agent import Agent
|
||||
from .chat import ChatResult
|
||||
from .conversable_agent import ConversableAgent
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
@ -1116,6 +1119,290 @@ class GroupChatManager(ConversableAgent):
|
|||
a.previous_cache = None
|
||||
return True, None
|
||||
|
||||
def resume(
|
||||
self,
|
||||
messages: Union[List[Dict], str],
|
||||
remove_termination_string: str = None,
|
||||
silent: Optional[bool] = False,
|
||||
) -> Tuple[ConversableAgent, Dict]:
|
||||
"""Resumes a group chat using the previous messages as a starting point. Requires the agents, group chat, and group chat manager to be established
|
||||
as per the original group chat.
|
||||
|
||||
Args:
|
||||
- messages Union[List[Dict], str]: The content of the previous chat's messages, either as a Json string or a list of message dictionaries.
|
||||
- remove_termination_string str: Remove the provided string from the last message to prevent immediate termination
|
||||
- silent (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
|
||||
|
||||
Returns:
|
||||
- Tuple[ConversableAgent, Dict]: A tuple containing the last agent who spoke and their message
|
||||
"""
|
||||
|
||||
# Convert messages from string to messages list, if needed
|
||||
if isinstance(messages, str):
|
||||
messages = self.messages_from_string(messages)
|
||||
elif isinstance(messages, list) and all(isinstance(item, dict) for item in messages):
|
||||
messages = copy.deepcopy(messages)
|
||||
else:
|
||||
raise Exception("Messages is not of type str or List[Dict]")
|
||||
|
||||
# Clean up the objects, ensuring there are no messages in the agents and group chat
|
||||
|
||||
# Clear agent message history
|
||||
for agent in self._groupchat.agents:
|
||||
if isinstance(agent, ConversableAgent):
|
||||
agent.clear_history()
|
||||
|
||||
# Clear Manager message history
|
||||
self.clear_history()
|
||||
|
||||
# Clear GroupChat messages
|
||||
self._groupchat.reset()
|
||||
|
||||
# Validation of message and agents
|
||||
|
||||
try:
|
||||
self._valid_resume_messages(messages)
|
||||
except:
|
||||
raise
|
||||
|
||||
# Load the messages into the group chat
|
||||
for i, message in enumerate(messages):
|
||||
|
||||
if "name" in message:
|
||||
message_speaker_agent = self._groupchat.agent_by_name(message["name"])
|
||||
else:
|
||||
# If there's no name, assign the group chat manager (this is an indication the ChatResult messages was used instead of groupchat.messages as state)
|
||||
message_speaker_agent = self
|
||||
message["name"] = self.name
|
||||
|
||||
# If it wasn't an agent speaking, it may be the manager
|
||||
if not message_speaker_agent and message["name"] == self.name:
|
||||
message_speaker_agent = self
|
||||
|
||||
# Add previous messages to each agent (except their own messages and the last message, as we'll kick off the conversation with it)
|
||||
if i != len(messages) - 1:
|
||||
for agent in self._groupchat.agents:
|
||||
if agent.name != message["name"]:
|
||||
self.send(message, self._groupchat.agent_by_name(agent.name), request_reply=False, silent=True)
|
||||
|
||||
# Add previous message to the new groupchat, if it's an admin message the name may not match so add the message directly
|
||||
if message_speaker_agent:
|
||||
self._groupchat.append(message, message_speaker_agent)
|
||||
else:
|
||||
self._groupchat.messages.append(message)
|
||||
|
||||
# Last speaker agent
|
||||
last_speaker_name = message["name"]
|
||||
|
||||
# Last message to check for termination (we could avoid this by ignoring termination check for resume in the future)
|
||||
last_message = message
|
||||
|
||||
# Get last speaker as an agent
|
||||
previous_last_agent = self._groupchat.agent_by_name(name=last_speaker_name)
|
||||
|
||||
# If we didn't match a last speaker agent, we check that it's the group chat's admin name and assign the manager, if so
|
||||
if not previous_last_agent and (
|
||||
last_speaker_name == self._groupchat.admin_name or last_speaker_name == self.name
|
||||
):
|
||||
previous_last_agent = self
|
||||
|
||||
# Termination removal and check
|
||||
self._process_resume_termination(remove_termination_string, messages)
|
||||
|
||||
if not silent:
|
||||
iostream = IOStream.get_default()
|
||||
iostream.print(
|
||||
f"Prepared group chat with {len(messages)} messages, the last speaker is",
|
||||
colored(last_speaker_name, "yellow"),
|
||||
flush=True,
|
||||
)
|
||||
|
||||
# Update group chat settings for resuming
|
||||
self._groupchat.send_introductions = False
|
||||
|
||||
return previous_last_agent, last_message
|
||||
|
||||
async def a_resume(
|
||||
self,
|
||||
messages: Union[List[Dict], str],
|
||||
remove_termination_string: str = None,
|
||||
silent: Optional[bool] = False,
|
||||
) -> Tuple[ConversableAgent, Dict]:
|
||||
"""Resumes a group chat using the previous messages as a starting point, asynchronously. Requires the agents, group chat, and group chat manager to be established
|
||||
as per the original group chat.
|
||||
|
||||
Args:
|
||||
- messages Union[List[Dict], str]: The content of the previous chat's messages, either as a Json string or a list of message dictionaries.
|
||||
- remove_termination_string str: Remove the provided string from the last message to prevent immediate termination
|
||||
- silent (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
|
||||
|
||||
Returns:
|
||||
- Tuple[ConversableAgent, Dict]: A tuple containing the last agent who spoke and their message
|
||||
"""
|
||||
|
||||
# Convert messages from string to messages list, if needed
|
||||
if isinstance(messages, str):
|
||||
messages = self.messages_from_string(messages)
|
||||
elif isinstance(messages, list) and all(isinstance(item, dict) for item in messages):
|
||||
messages = copy.deepcopy(messages)
|
||||
else:
|
||||
raise Exception("Messages is not of type str or List[Dict]")
|
||||
|
||||
# Clean up the objects, ensuring there are no messages in the agents and group chat
|
||||
|
||||
# Clear agent message history
|
||||
for agent in self._groupchat.agents:
|
||||
if isinstance(agent, ConversableAgent):
|
||||
agent.clear_history()
|
||||
|
||||
# Clear Manager message history
|
||||
self.clear_history()
|
||||
|
||||
# Clear GroupChat messages
|
||||
self._groupchat.reset()
|
||||
|
||||
# Validation of message and agents
|
||||
|
||||
try:
|
||||
self._valid_resume_messages(messages)
|
||||
except:
|
||||
raise
|
||||
|
||||
# Load the messages into the group chat
|
||||
for i, message in enumerate(messages):
|
||||
|
||||
if "name" in message:
|
||||
message_speaker_agent = self._groupchat.agent_by_name(message["name"])
|
||||
else:
|
||||
# If there's no name, assign the group chat manager (this is an indication the ChatResult messages was used instead of groupchat.messages as state)
|
||||
message_speaker_agent = self
|
||||
message["name"] = self.name
|
||||
|
||||
# If it wasn't an agent speaking, it may be the manager
|
||||
if not message_speaker_agent and message["name"] == self.name:
|
||||
message_speaker_agent = self
|
||||
|
||||
# Add previous messages to each agent (except their own messages and the last message, as we'll kick off the conversation with it)
|
||||
if i != len(messages) - 1:
|
||||
for agent in self._groupchat.agents:
|
||||
if agent.name != message["name"]:
|
||||
await self.a_send(
|
||||
message, self._groupchat.agent_by_name(agent.name), request_reply=False, silent=True
|
||||
)
|
||||
|
||||
# Add previous message to the new groupchat, if it's an admin message the name may not match so add the message directly
|
||||
if message_speaker_agent:
|
||||
self._groupchat.append(message, message_speaker_agent)
|
||||
else:
|
||||
self._groupchat.messages.append(message)
|
||||
|
||||
# Last speaker agent
|
||||
last_speaker_name = message["name"]
|
||||
|
||||
# Last message to check for termination (we could avoid this by ignoring termination check for resume in the future)
|
||||
last_message = message
|
||||
|
||||
# Get last speaker as an agent
|
||||
previous_last_agent = self._groupchat.agent_by_name(name=last_speaker_name)
|
||||
|
||||
# If we didn't match a last speaker agent, we check that it's the group chat's admin name and assign the manager, if so
|
||||
if not previous_last_agent and (
|
||||
last_speaker_name == self._groupchat.admin_name or last_speaker_name == self.name
|
||||
):
|
||||
previous_last_agent = self
|
||||
|
||||
# Termination removal and check
|
||||
self._process_resume_termination(remove_termination_string, messages)
|
||||
|
||||
if not silent:
|
||||
iostream = IOStream.get_default()
|
||||
iostream.print(
|
||||
f"Prepared group chat with {len(messages)} messages, the last speaker is",
|
||||
colored(last_speaker_name, "yellow"),
|
||||
flush=True,
|
||||
)
|
||||
|
||||
# Update group chat settings for resuming
|
||||
self._groupchat.send_introductions = False
|
||||
|
||||
return previous_last_agent, last_message
|
||||
|
||||
def _valid_resume_messages(self, messages: List[Dict]):
|
||||
"""Validates the messages used for resuming
|
||||
|
||||
args:
|
||||
messages (List[Dict]): list of messages to resume with
|
||||
|
||||
returns:
|
||||
- bool: Whether they are valid for resuming
|
||||
"""
|
||||
# Must have messages to start with, otherwise they should run run_chat
|
||||
if not messages:
|
||||
raise Exception(
|
||||
"Cannot resume group chat as no messages were provided. Use GroupChatManager.run_chat or ConversableAgent.initiate_chat to start a new chat."
|
||||
)
|
||||
|
||||
# Check that all agents in the chat messages exist in the group chat
|
||||
for message in messages:
|
||||
if message.get("name"):
|
||||
if (
|
||||
not self._groupchat.agent_by_name(message["name"])
|
||||
and not message["name"] == self._groupchat.admin_name # ignore group chat's name
|
||||
and not message["name"] == self.name # ignore group chat manager's name
|
||||
):
|
||||
raise Exception(f"Agent name in message doesn't exist as agent in group chat: {message['name']}")
|
||||
|
||||
def _process_resume_termination(self, remove_termination_string: str, messages: List[Dict]):
|
||||
"""Removes termination string, if required, and checks if termination may occur.
|
||||
|
||||
args:
|
||||
remove_termination_string (str): termination string to remove from the last message
|
||||
|
||||
returns:
|
||||
None
|
||||
"""
|
||||
|
||||
last_message = messages[-1]
|
||||
|
||||
# Replace any given termination string in the last message
|
||||
if remove_termination_string:
|
||||
if messages[-1].get("content") and remove_termination_string in messages[-1]["content"]:
|
||||
messages[-1]["content"] = messages[-1]["content"].replace(remove_termination_string, "")
|
||||
|
||||
# Check if the last message meets termination (if it has one)
|
||||
if self._is_termination_msg:
|
||||
if self._is_termination_msg(last_message):
|
||||
logger.warning("WARNING: Last message meets termination criteria and this may terminate the chat.")
|
||||
|
||||
def messages_from_string(self, message_string: str) -> List[Dict]:
|
||||
"""Reads the saved state of messages in Json format for resume and returns as a messages list
|
||||
|
||||
args:
|
||||
- message_string: Json string, the saved state
|
||||
|
||||
returns:
|
||||
- List[Dict]: List of messages
|
||||
"""
|
||||
try:
|
||||
state = json.loads(message_string)
|
||||
except json.JSONDecodeError:
|
||||
raise Exception("Messages string is not a valid JSON string")
|
||||
|
||||
return state
|
||||
|
||||
def messages_to_string(self, messages: List[Dict]) -> str:
|
||||
"""Converts the provided messages into a Json string that can be used for resuming the chat.
|
||||
The state is made up of a list of messages
|
||||
|
||||
args:
|
||||
- messages (List[Dict]): set of messages to convert to a string
|
||||
|
||||
returns:
|
||||
- str: Json representation of the messages which can be persisted for resuming later
|
||||
"""
|
||||
|
||||
return json.dumps(messages)
|
||||
|
||||
def _raise_exception_on_async_reply_functions(self) -> None:
|
||||
"""Raise an exception if any async reply functions are registered.
|
||||
|
||||
|
|
|
@ -1,14 +1,16 @@
|
|||
#!/usr/bin/env python3 -m pytest
|
||||
|
||||
import builtins
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional
|
||||
from unittest import mock
|
||||
|
||||
import pytest
|
||||
|
||||
import autogen
|
||||
from autogen import Agent, GroupChat
|
||||
from autogen import Agent, AssistantAgent, GroupChat, GroupChatManager
|
||||
from autogen.exception_utils import AgentNameConflict, UndefinedNextAgent
|
||||
|
||||
|
||||
|
@ -1766,6 +1768,204 @@ def test_select_speaker_auto_messages():
|
|||
)
|
||||
|
||||
|
||||
def test_manager_messages_to_string():
|
||||
"""In this test we test the conversion of messages to a JSON string"""
|
||||
messages = [
|
||||
{
|
||||
"content": "You are an expert at finding the next speaker.",
|
||||
"role": "system",
|
||||
},
|
||||
{
|
||||
"content": "Let's get this meeting started. First the Product_Manager will create 3 new product ideas.",
|
||||
"name": "Chairperson",
|
||||
"role": "assistant",
|
||||
},
|
||||
]
|
||||
|
||||
groupchat = GroupChat(messages=messages, agents=[])
|
||||
manager = GroupChatManager(groupchat)
|
||||
|
||||
# Convert the messages List[Dict] to a JSON string
|
||||
converted_string = manager.messages_to_string(messages)
|
||||
|
||||
# The conversion should match the original messages
|
||||
assert json.loads(converted_string) == messages
|
||||
|
||||
|
||||
def test_manager_messages_from_string():
|
||||
"""In this test we test the conversion of a JSON string of messages to a messages List[Dict]"""
|
||||
messages_str = r"""[{"content": "You are an expert at finding the next speaker.", "role": "system"}, {"content": "Let's get this meeting started. First the Product_Manager will create 3 new product ideas.", "name": "Chairperson", "role": "assistant"}]"""
|
||||
|
||||
groupchat = GroupChat(messages=[], agents=[])
|
||||
manager = GroupChatManager(groupchat)
|
||||
|
||||
# Convert the messages List[Dict] to a JSON string
|
||||
messages = manager.messages_from_string(messages_str)
|
||||
|
||||
# The conversion should match the original messages
|
||||
assert messages_str == json.dumps(messages)
|
||||
|
||||
|
||||
def test_manager_resume_functions():
|
||||
"""Tests functions within the resume chat functionality"""
|
||||
|
||||
# Setup
|
||||
coder = AssistantAgent(name="Coder", llm_config=None)
|
||||
groupchat = GroupChat(messages=[], agents=[coder])
|
||||
manager = GroupChatManager(groupchat)
|
||||
|
||||
# Tests that messages are indeed passed in
|
||||
with pytest.raises(Exception):
|
||||
manager._valid_resume_messages(messages=[])
|
||||
|
||||
# Tests that the messages passed in match the agents of the group chat
|
||||
messages = [
|
||||
{
|
||||
"content": "You are an expert at finding the next speaker.",
|
||||
"role": "system",
|
||||
},
|
||||
{
|
||||
"content": "Let's get this meeting started. First the Product_Manager will create 3 new product ideas.",
|
||||
"name": "Chairperson",
|
||||
"role": "assistant",
|
||||
},
|
||||
]
|
||||
|
||||
# Chairperson does not exist as an agent
|
||||
with pytest.raises(Exception):
|
||||
manager._valid_resume_messages(messages)
|
||||
|
||||
messages = [
|
||||
{
|
||||
"content": "You are an expert at finding the next speaker.",
|
||||
"role": "system",
|
||||
},
|
||||
{
|
||||
"content": "Let's get this meeting started. First the Product_Manager will create 3 new product ideas.",
|
||||
"name": "Coder",
|
||||
"role": "assistant",
|
||||
},
|
||||
]
|
||||
|
||||
# Coder does exist as an agent, no error
|
||||
manager._valid_resume_messages(messages)
|
||||
|
||||
# Tests termination message replacement
|
||||
final_msg = (
|
||||
"Let's get this meeting started. First the Product_Manager will create 3 new product ideas. TERMINATE this."
|
||||
)
|
||||
messages = [
|
||||
{
|
||||
"content": "You are an expert at finding the next speaker.",
|
||||
"role": "system",
|
||||
},
|
||||
{
|
||||
"content": final_msg,
|
||||
"name": "Coder",
|
||||
"role": "assistant",
|
||||
},
|
||||
]
|
||||
|
||||
manager._process_resume_termination(remove_termination_string="TERMINATE", messages=messages)
|
||||
|
||||
# TERMINATE should be removed
|
||||
assert messages[-1]["content"] == final_msg.replace("TERMINATE", "")
|
||||
|
||||
# Check if the termination string doesn't exist there's no replacing of content
|
||||
final_msg = (
|
||||
"Let's get this meeting started. First the Product_Manager will create 3 new product ideas. TERMINATE this."
|
||||
)
|
||||
messages = [
|
||||
{
|
||||
"content": "You are an expert at finding the next speaker.",
|
||||
"role": "system",
|
||||
},
|
||||
{
|
||||
"content": final_msg,
|
||||
"name": "Coder",
|
||||
"role": "assistant",
|
||||
},
|
||||
]
|
||||
|
||||
manager._process_resume_termination(remove_termination_string="THE-END", messages=messages)
|
||||
|
||||
# It should not be changed
|
||||
assert messages[-1]["content"] == final_msg
|
||||
|
||||
# Test that it warns that the termination condition would match
|
||||
manager._is_termination_msg = lambda x: x.get("content", "").find("TERMINATE") >= 0
|
||||
|
||||
# Attach a handler to the logger so we can check the log output
|
||||
log_stream = io.StringIO()
|
||||
handler = logging.StreamHandler(log_stream)
|
||||
logger = logging.getLogger() # Get the root logger
|
||||
logger.addHandler(handler)
|
||||
|
||||
# We should get a warning that TERMINATE is still in the messages
|
||||
manager._process_resume_termination(remove_termination_string="THE-END", messages=messages)
|
||||
|
||||
# Get the logged output and check that the warning was provided.
|
||||
log_output = log_stream.getvalue()
|
||||
|
||||
assert "WARNING: Last message meets termination criteria and this may terminate the chat." in log_output
|
||||
|
||||
|
||||
def test_manager_resume_returns():
|
||||
"""Tests the return resume chat functionality"""
|
||||
|
||||
# Test the return agent and message is correct
|
||||
coder = AssistantAgent(name="Coder", llm_config=None)
|
||||
groupchat = GroupChat(messages=[], agents=[coder])
|
||||
manager = GroupChatManager(groupchat)
|
||||
messages = [
|
||||
{
|
||||
"content": "You are an expert at coding.",
|
||||
"role": "system",
|
||||
},
|
||||
{
|
||||
"content": "Let's get coding, should I use Python?",
|
||||
"name": "Coder",
|
||||
"role": "assistant",
|
||||
},
|
||||
]
|
||||
|
||||
return_agent, return_message = manager.resume(messages=messages)
|
||||
|
||||
assert return_agent == coder
|
||||
assert return_message == messages[-1]
|
||||
|
||||
# Test when no agent provided, the manager will be returned
|
||||
messages = [{"content": "You are an expert at coding.", "role": "system", "name": "chat_manager"}]
|
||||
|
||||
return_agent, return_message = manager.resume(messages=messages)
|
||||
|
||||
assert return_agent == manager
|
||||
assert return_message == messages[-1]
|
||||
|
||||
|
||||
def test_manager_resume_messages():
|
||||
"""Tests that the messages passed into resume are the correct format"""
|
||||
|
||||
coder = AssistantAgent(name="Coder", llm_config=None)
|
||||
groupchat = GroupChat(messages=[], agents=[coder])
|
||||
manager = GroupChatManager(groupchat)
|
||||
messages = 1
|
||||
|
||||
# Only acceptable messages types are JSON str and List[Dict]
|
||||
|
||||
# Try a number
|
||||
with pytest.raises(Exception):
|
||||
return_agent, return_message = manager.resume(messages=messages)
|
||||
|
||||
# Try an empty string
|
||||
with pytest.raises(Exception):
|
||||
return_agent, return_message = manager.resume(messages="")
|
||||
|
||||
# Try a message starter string, which isn't valid
|
||||
with pytest.raises(Exception):
|
||||
return_agent, return_message = manager.resume(messages="Let's get this conversation started.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# test_func_call_groupchat()
|
||||
# test_broadcast()
|
||||
|
@ -1784,7 +1984,12 @@ if __name__ == "__main__":
|
|||
# test_role_for_select_speaker_messages()
|
||||
# test_select_speaker_message_and_prompt_templates()
|
||||
# test_speaker_selection_agent_name_match()
|
||||
test_speaker_selection_auto_process_result()
|
||||
test_speaker_selection_validate_speaker_name()
|
||||
test_select_speaker_auto_messages()
|
||||
# test_speaker_selection_auto_process_result()
|
||||
# test_speaker_selection_validate_speaker_name()
|
||||
# test_select_speaker_auto_messages()
|
||||
test_manager_messages_to_string()
|
||||
test_manager_messages_from_string()
|
||||
test_manager_resume_functions()
|
||||
test_manager_resume_returns()
|
||||
test_manager_resume_messages()
|
||||
# pass
|
||||
|
|
|
@ -0,0 +1,760 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Resuming a GroupChat\n",
|
||||
"\n",
|
||||
"In GroupChat, we can resume a previous group chat by passing the messages from that conversation to the GroupChatManager's `resume` function (or `a_resume` for asynchronous workflows). This prepares the GroupChat, GroupChatManager, and group chat's agents for resuming. An agent's `initiate_chat` can then be called to resume the chat.\n",
|
||||
"\n",
|
||||
"The `resume` function returns the last agent in the messages as well as the last message itself. These can be used to run the `initiate_chat`.\n",
|
||||
"\n",
|
||||
"To resume, the agents, GroupChat, and GroupChatManager objects must exist and match the original group chat.\n",
|
||||
"\n",
|
||||
"The messages passed into the `resume` function can be passed in as a JSON string or a `List[Dict]` of messages, typically from the ChatResult's `chat_history` of the previous conversation or the GroupChat's `messages` property. Use the GroupChatManager's `messages_to_string` function to retrieve a JSON string that can be used for resuming:\n",
|
||||
"\n",
|
||||
"```text\n",
|
||||
"# Save chat messages for resuming later on using the chat history\n",
|
||||
"messages_json = mygroupchatmanager.messages_to_string(previous_chat_result.chat_history)\n",
|
||||
"\n",
|
||||
"# Alternatively you can use the GroupChat's messages property\n",
|
||||
"messages_json = mygroupchatmanager.messages_to_string(mygroupchatmanager.groupchat.messages)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"\n",
|
||||
"An example of the JSON string:\n",
|
||||
"```json\n",
|
||||
"[{\"content\": \"Find the latest paper about gpt-4 on arxiv and find its potential applications in software.\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Plan:\\n1. **Engineer**: Search for the latest paper on GPT-4 on arXiv.\\n2. **Scientist**: Read the paper and summarize the key findings and potential applications of GPT-4.\\n3. **Engineer**: Identify potential software applications where GPT-4 can be utilized based on the scientist's summary.\\n4. **Scientist**: Provide insights on the feasibility and impact of implementing GPT-4 in the identified software applications.\\n5. **Engineer**: Develop a prototype or proof of concept to demonstrate how GPT-4 can be integrated into the selected software application.\\n6. **Scientist**: Evaluate the prototype, provide feedback, and suggest any improvements or modifications.\\n7. **Engineer**: Make necessary revisions based on the scientist's feedback and finalize the integration of GPT-4 into the software application.\\n8. **Admin**: Review the final software application with GPT-4 integration and approve for further development or implementation.\\n\\nFeedback from admin and critic is needed for further refinement of the plan.\", \"role\": \"user\", \"name\": \"Planner\"}, {\"content\": \"Agree\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Great! Let's proceed with the plan outlined earlier. I will start by searching for the latest paper on GPT-4 on arXiv. Once I find the paper, the scientist will summarize the key findings and potential applications of GPT-4. We will then proceed with the rest of the steps as outlined. I will keep you updated on our progress.\", \"role\": \"user\", \"name\": \"Planner\"}]\n",
|
||||
"```\n",
|
||||
"\n",
|
||||
"When preparing for resuming, the messages will be validated against the groupchat's agents to make sure that the messages can be assigned to them. Messages will be allocated to the agents and then the last speaker and message will be returned for use in `initiate_chat`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Continuing a terminated conversation\n",
|
||||
"If the previous group chat terminated and the resuming group chat has the same termination condition (such as if the message contains \"TERMINATE\") then the conversation will terminate when resuming as the terminate check occurs with the message passed in to `initiate_chat`.\n",
|
||||
"\n",
|
||||
"If the termination condition is based on a string within the message, you can pass in that string in the `remove_termination_string` parameter of the `resume` function and it will be removed. If the termination condition is more complicated, you will need to adjust the messages accordingly before calling `resume`.\n",
|
||||
"\n",
|
||||
"The `resume` function will then check if the last message provided still meets the termination condition and warns you, if so."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example of resuming a GroupChat\n",
|
||||
"\n",
|
||||
"Start with the LLM config. This can differ from the original group chat."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/usr/local/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
|
||||
" from .autonotebook import tqdm as notebook_tqdm\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import autogen\n",
|
||||
"\n",
|
||||
"# Put your api key in the environment variable OPENAI_API_KEY\n",
|
||||
"config_list = [\n",
|
||||
" {\n",
|
||||
" \"model\": \"gpt-4-0125-preview\",\n",
|
||||
" \"api_key\": os.environ[\"OPENAI_API_KEY\"],\n",
|
||||
" }\n",
|
||||
"]\n",
|
||||
"\n",
|
||||
"gpt4_config = {\n",
|
||||
" \"cache_seed\": 42, # change the cache_seed for different trials\n",
|
||||
" \"temperature\": 0,\n",
|
||||
" \"config_list\": config_list,\n",
|
||||
" \"timeout\": 120,\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create the group chat objects, they should have the same `name` as the original group chat."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Create Agents, GroupChat, and GroupChatManager in line with the original group chat\n",
|
||||
"\n",
|
||||
"planner = autogen.AssistantAgent(\n",
|
||||
" name=\"Planner\",\n",
|
||||
" system_message=\"\"\"Planner. Suggest a plan. Revise the plan based on feedback from admin and critic, until admin approval.\n",
|
||||
"The plan may involve an engineer who can write code and a scientist who doesn't write code.\n",
|
||||
"Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.\n",
|
||||
"\"\"\",\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"user_proxy = autogen.UserProxyAgent(\n",
|
||||
" name=\"Admin\",\n",
|
||||
" system_message=\"A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin.\",\n",
|
||||
" code_execution_config=False,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"engineer = autogen.AssistantAgent(\n",
|
||||
" name=\"Engineer\",\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
" system_message=\"\"\"Engineer. You follow an approved plan. You write python/shell code to solve tasks. Wrap the code in a code block that specifies the script type. The user can't modify your code. So do not suggest incomplete code which requires others to modify. Don't use a code block if it's not intended to be executed by the executor.\n",
|
||||
"Don't include multiple code blocks in one response. Do not ask others to copy and paste the result. Check the execution result returned by the executor.\n",
|
||||
"If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.\n",
|
||||
"\"\"\",\n",
|
||||
")\n",
|
||||
"scientist = autogen.AssistantAgent(\n",
|
||||
" name=\"Scientist\",\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
" system_message=\"\"\"Scientist. You follow an approved plan. You are able to categorize papers after seeing their abstracts printed. You don't write code.\"\"\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"executor = autogen.UserProxyAgent(\n",
|
||||
" name=\"Executor\",\n",
|
||||
" system_message=\"Executor. Execute the code written by the engineer and report the result.\",\n",
|
||||
" human_input_mode=\"NEVER\",\n",
|
||||
" code_execution_config={\n",
|
||||
" \"last_n_messages\": 3,\n",
|
||||
" \"work_dir\": \"paper\",\n",
|
||||
" \"use_docker\": False,\n",
|
||||
" }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"groupchat = autogen.GroupChat(\n",
|
||||
" agents=[user_proxy, engineer, scientist, planner, executor],\n",
|
||||
" messages=[],\n",
|
||||
" max_round=10,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt4_config)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load the previous messages (from a JSON string or messages `List[Dict]`)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Messages in a JSON string\n",
|
||||
"previous_state = r\"\"\"[{\"content\": \"Find the latest paper about gpt-4 on arxiv and find its potential applications in software.\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Plan:\\n1. **Engineer**: Search for the latest paper on GPT-4 on arXiv.\\n2. **Scientist**: Read the paper and summarize the key findings and potential applications of GPT-4.\\n3. **Engineer**: Identify potential software applications where GPT-4 can be utilized based on the scientist's summary.\\n4. **Scientist**: Provide insights on the feasibility and impact of implementing GPT-4 in the identified software applications.\\n5. **Engineer**: Develop a prototype or proof of concept to demonstrate how GPT-4 can be integrated into the selected software application.\\n6. **Scientist**: Evaluate the prototype, provide feedback, and suggest any improvements or modifications.\\n7. **Engineer**: Make necessary revisions based on the scientist's feedback and finalize the integration of GPT-4 into the software application.\\n8. **Admin**: Review the final software application with GPT-4 integration and approve for further development or implementation.\\n\\nFeedback from admin and critic is needed for further refinement of the plan.\", \"role\": \"user\", \"name\": \"Planner\"}, {\"content\": \"Agree\", \"role\": \"user\", \"name\": \"Admin\"}, {\"content\": \"Great! Let's proceed with the plan outlined earlier. I will start by searching for the latest paper on GPT-4 on arXiv. Once I find the paper, the scientist will summarize the key findings and potential applications of GPT-4. We will then proceed with the rest of the steps as outlined. I will keep you updated on our progress.\", \"role\": \"user\", \"name\": \"Planner\"}]\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Resume the group chat using the last agent and last message."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prepared group chat with 4 messages, the last speaker is \u001b[33mPlanner\u001b[0m\n",
|
||||
"\u001b[33mPlanner\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Great! Let's proceed with the plan outlined earlier. I will start by searching for the latest paper on GPT-4 on arXiv. Once I find the paper, the scientist will summarize the key findings and potential applications of GPT-4. We will then proceed with the rest of the steps as outlined. I will keep you updated on our progress.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mEngineer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"```python\n",
|
||||
"import requests\n",
|
||||
"from bs4 import BeautifulSoup\n",
|
||||
"\n",
|
||||
"# Define the URL for the arXiv search\n",
|
||||
"url = \"https://arxiv.org/search/?query=GPT-4&searchtype=all&source=header\"\n",
|
||||
"\n",
|
||||
"# Send a GET request to the URL\n",
|
||||
"response = requests.get(url)\n",
|
||||
"\n",
|
||||
"# Parse the HTML content of the page\n",
|
||||
"soup = BeautifulSoup(response.content, 'html.parser')\n",
|
||||
"\n",
|
||||
"# Find the first paper related to GPT-4\n",
|
||||
"paper = soup.find('li', class_='arxiv-result')\n",
|
||||
"if paper:\n",
|
||||
" title = paper.find('p', class_='title').text.strip()\n",
|
||||
" authors = paper.find('p', class_='authors').text.strip()\n",
|
||||
" abstract = paper.find('p', class_='abstract').text.strip().replace('\\n', ' ')\n",
|
||||
" link = paper.find('p', class_='list-title').find('a')['href']\n",
|
||||
" print(f\"Title: {title}\\nAuthors: {authors}\\nAbstract: {abstract}\\nLink: {link}\")\n",
|
||||
"else:\n",
|
||||
" print(\"No GPT-4 papers found on arXiv.\")\n",
|
||||
"```\n",
|
||||
"This script searches for the latest paper on GPT-4 on arXiv, extracts the title, authors, abstract, and link to the paper, and prints this information.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[31m\n",
|
||||
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
|
||||
"\u001b[33mExecutor\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"exitcode: 0 (execution succeeded)\n",
|
||||
"Code output: \n",
|
||||
"Title: Smurfs: Leveraging Multiple Proficiency Agents with Context-Efficiency for Tool Planning\n",
|
||||
"Authors: Authors:\n",
|
||||
"Junzhi Chen, \n",
|
||||
" \n",
|
||||
" Juhao Liang, \n",
|
||||
" \n",
|
||||
" Benyou Wang\n",
|
||||
"Abstract: Abstract: …scenarios. Notably, Smurfs outmatches the ChatGPT-ReACT in the ToolBench I2 and I3 benchmark with a remarkable 84.4% win rate, surpassing the highest recorded performance of a GPT-4 model at 73.5%. Furthermore, through comprehensive ablation studies, we dissect the contribution of the core components of the multi-agent… ▽ More The emergence of large language models (LLMs) has opened up unprecedented possibilities for automating complex tasks that are often comparable to human performance. Despite their capabilities, LLMs still encounter difficulties in completing tasks that require high levels of accuracy and complexity due to their inherent limitations in handling multifaceted problems single-handedly. This paper introduces \"Smurfs\", a cutting-edge multi-agent framework designed to revolutionize the application of LLMs. By transforming a conventional LLM into a synergistic multi-agent ensemble, Smurfs enhances task decomposition and execution without necessitating extra training. This is achieved through innovative prompting strategies that allocate distinct roles within the model, thereby facilitating collaboration among specialized agents. The framework gives access to external tools to efficiently solve complex tasks. Our empirical investigation, featuring the mistral-7b-instruct model as a case study, showcases Smurfs' superior capability in intricate tool utilization scenarios. Notably, Smurfs outmatches the ChatGPT-ReACT in the ToolBench I2 and I3 benchmark with a remarkable 84.4% win rate, surpassing the highest recorded performance of a GPT-4 model at 73.5%. Furthermore, through comprehensive ablation studies, we dissect the contribution of the core components of the multi-agent framework to its overall efficacy. This not only verifies the effectiveness of the framework, but also sets a route for future exploration of multi-agent LLM systems. △ Less\n",
|
||||
"Link: https://arxiv.org/abs/2405.05955\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mScientist\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Based on the abstract of the paper titled \"Smurfs: Leveraging Multiple Proficiency Agents with Context-Efficiency for Tool Planning,\" the key findings and potential applications of GPT-4 can be summarized as follows:\n",
|
||||
"\n",
|
||||
"### Key Findings:\n",
|
||||
"- The paper introduces \"Smurfs,\" a multi-agent framework that enhances the capabilities of large language models (LLMs) like GPT-4 by transforming them into a synergistic multi-agent ensemble. This approach allows for better task decomposition and execution without additional training.\n",
|
||||
"- Smurfs utilize innovative prompting strategies to allocate distinct roles within the model, facilitating collaboration among specialized agents and giving access to external tools for solving complex tasks.\n",
|
||||
"- In the ToolBench I2 and I3 benchmark, Smurfs outperformed ChatGPT-ReACT with an 84.4% win rate, surpassing the highest recorded performance of a GPT-4 model at 73.5%.\n",
|
||||
"- Comprehensive ablation studies were conducted to understand the contribution of the core components of the multi-agent framework to its overall efficacy.\n",
|
||||
"\n",
|
||||
"### Potential Applications in Software:\n",
|
||||
"- **Tool Planning and Automation**: Smurfs can be applied to software that requires complex tool planning and automation, enhancing the software's ability to perform tasks that involve multiple steps or require the use of external tools.\n",
|
||||
"- **Collaborative Systems**: The multi-agent ensemble approach can be utilized in developing collaborative systems where different components or agents work together to complete tasks more efficiently than a single agent could.\n",
|
||||
"- **Enhanced Problem-Solving**: Software that involves complex problem-solving can benefit from Smurfs by leveraging the specialized capabilities of different agents within the ensemble, leading to more accurate and efficient solutions.\n",
|
||||
"- **Task Decomposition**: Applications that require breaking down complex tasks into simpler sub-tasks can use the Smurfs framework to improve task decomposition and execution, potentially leading to better performance and outcomes.\n",
|
||||
"\n",
|
||||
"The integration of GPT-4 with the Smurfs framework presents a novel approach to enhancing the capabilities of LLMs in software applications, particularly in areas that require complex task planning, execution, and problem-solving.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mEngineer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Given the scientist's summary on the potential applications of GPT-4 as enhanced by the Smurfs framework, we can identify several software applications where GPT-4 can be utilized effectively:\n",
|
||||
"\n",
|
||||
"1. **Project Management Tools**: Integration of GPT-4 with Smurfs can revolutionize project management software by automating complex planning tasks, optimizing resource allocation, and providing actionable insights for project execution.\n",
|
||||
"\n",
|
||||
"2. **Code Generation and Software Development**: Leveraging GPT-4 in IDEs (Integrated Development Environments) or other software development tools can enhance code generation capabilities, provide context-aware suggestions, and automate debugging processes.\n",
|
||||
"\n",
|
||||
"3. **Customer Support and Chatbots**: GPT-4 can be used to power advanced customer support chatbots that understand complex queries, provide accurate information, and automate problem-solving for customer issues.\n",
|
||||
"\n",
|
||||
"4. **Educational Platforms**: In educational software, GPT-4 can personalize learning experiences, automate content generation, and provide interactive tutoring services.\n",
|
||||
"\n",
|
||||
"5. **Healthcare Applications**: GPT-4 can assist in healthcare applications by analyzing medical data, providing diagnostic support, and offering personalized healthcare advice.\n",
|
||||
"\n",
|
||||
"6. **Creative Writing and Content Generation**: Software tools for creative writing and content generation can benefit from GPT-4's capabilities to produce original content, assist in storytelling, and generate ideas.\n",
|
||||
"\n",
|
||||
"7. **Business Intelligence and Analytics**: GPT-4 can enhance business intelligence software by automating data analysis, generating reports, and providing insights based on large datasets.\n",
|
||||
"\n",
|
||||
"8. **Security and Threat Analysis**: In cybersecurity applications, GPT-4 can be used to analyze threats, automate security protocols, and provide recommendations for threat mitigation.\n",
|
||||
"\n",
|
||||
"These applications demonstrate the versatility and potential impact of integrating GPT-4 into various software solutions, offering opportunities for automation, enhanced efficiency, and improved user experiences across different domains.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mAdmin\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Approve\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mScientist\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"With the approval from the admin, the plan to explore and integrate GPT-4 into various software applications, leveraging its enhanced capabilities through the Smurfs framework, is set to proceed. This initiative promises to bring significant advancements in automation, efficiency, and user experience across a wide range of software applications, from project management tools to healthcare applications. The next steps involve detailed planning and execution for the development of prototypes or proof of concepts for selected applications, followed by evaluation, feedback, and finalization of GPT-4 integration into these software solutions.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mEngineer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Given the approval and the insights provided, the next steps involve detailed planning for the development and integration of GPT-4 into selected software applications. This process will include:\n",
|
||||
"\n",
|
||||
"1. **Selection of Specific Applications**: Based on the potential applications identified, select one or more specific software applications for prototype development. This selection will be based on factors such as feasibility, potential impact, and available resources.\n",
|
||||
"\n",
|
||||
"2. **Prototype Development**: Develop a prototype or proof of concept for the selected application(s). This will involve designing the architecture, integrating GPT-4 with the Smurfs framework, and implementing the necessary functionalities to demonstrate the application of GPT-4 in the software.\n",
|
||||
"\n",
|
||||
"3. **Evaluation and Feedback**: Once the prototype is developed, it will be evaluated to assess its performance, usability, and effectiveness in leveraging GPT-4's capabilities. Feedback will be gathered from potential users and stakeholders to identify areas for improvement.\n",
|
||||
"\n",
|
||||
"4. **Revisions and Finalization**: Based on the feedback received, necessary revisions and improvements will be made to the prototype. This step may involve refining the integration of GPT-4, optimizing the software's performance, and enhancing user experience.\n",
|
||||
"\n",
|
||||
"5. **Implementation and Deployment**: After finalizing the prototype, the next step will involve planning for the full-scale implementation and deployment of the software application with GPT-4 integration. This will include addressing any scalability, security, and maintenance considerations.\n",
|
||||
"\n",
|
||||
"6. **Continuous Improvement**: Post-deployment, it will be important to monitor the software's performance and user feedback continuously. This will enable ongoing improvements and updates to ensure that the software remains effective and relevant.\n",
|
||||
"\n",
|
||||
"This structured approach will ensure that the integration of GPT-4 into software applications is carried out effectively, leading to innovative solutions that harness the full potential of GPT-4 and the Smurfs framework.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mAdmin\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Approve\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mEngineer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"With the final approval from the admin, the project to integrate GPT-4 into selected software applications, leveraging its capabilities through the Smurfs framework, is officially set to move forward. This marks the beginning of an innovative journey towards developing advanced software solutions that can automate complex tasks, enhance efficiency, and improve user experiences across various domains. The focus will now shift to the execution phase, where detailed planning, development, and iterative improvements will bring these concepts to life. This initiative promises to showcase the transformative potential of GPT-4 in the software industry, setting new benchmarks for what is possible with artificial intelligence.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Prepare the group chat for resuming\n",
|
||||
"last_agent, last_message = manager.resume(messages=previous_state)\n",
|
||||
"\n",
|
||||
"# Resume the chat using the last agent and message\n",
|
||||
"result = last_agent.initiate_chat(recipient=manager, message=last_message, clear_history=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"#1, Admin: Find the latest paper about gpt-4 on arxiv and find its potential applications i ...\n",
|
||||
"#2, Planner: Plan: 1. **Engineer**: Search for the latest paper on GPT-4 on arXiv. 2. **Scien ...\n",
|
||||
"#3, Admin: Agree \n",
|
||||
"#4, Planner: Great! Let's proceed with the plan outlined earlier. I will start by searching f ...\n",
|
||||
"#5, Engineer: ```python import requests from bs4 import BeautifulSoup # Define the URL for th ...\n",
|
||||
"#6, Executor: exitcode: 0 (execution succeeded) Code output: Title: Smurfs: Leveraging Multip ...\n",
|
||||
"#7, Scientist: Based on the abstract of the paper titled \"Smurfs: Leveraging Multiple Proficien ...\n",
|
||||
"#8, Engineer: Given the scientist's summary on the potential applications of GPT-4 as enhanced ...\n",
|
||||
"#9, Admin: Approve \n",
|
||||
"#10, Scientist: With the approval from the admin, the plan to explore and integrate GPT-4 into v ...\n",
|
||||
"#11, Engineer: Given the approval and the insights provided, the next steps involve detailed pl ...\n",
|
||||
"#12, Admin: Approve \n",
|
||||
"#13, Engineer: With the final approval from the admin, the project to integrate GPT-4 into sele ...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Output the final chat history showing the original 4 messages and resumed messages\n",
|
||||
"for i, message in enumerate(groupchat.messages):\n",
|
||||
" print(\n",
|
||||
" f\"#{i + 1}, {message['name']}: {message['content'][:80]}\".replace(\"\\n\", \" \"),\n",
|
||||
" f\"{'...' if len(message['content']) > 80 else ''}\".replace(\"\\n\", \" \"),\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example of resuming a terminated GroupChat\n",
|
||||
"\n",
|
||||
"This example shows how to resume a group chat that was terminated with a termination string.\n",
|
||||
"\n",
|
||||
"The termination string is `TERMINATE`"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 86,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Previous state with TERMINATE in the last message\n",
|
||||
"previous_state = r\"\"\"[{\"content\": \"Let's get this meeting started. We'll have a set order of speakers. First the Product_Manager will create 3 new product ideas. Then the Chief_Marketing_Officer will speak and talk about the idea they have chosen to move forward with. Then the Digital_Marketer will create 3 marketing strategies for that idea. We MUST stick to the speaking order which is Product_Manager first, Chief_Marketing_Officer second, Digital_Marketer third, and finally the Chief_Marketing_Officer will speak and end the meeting.\", \"role\": \"user\", \"name\": \"Chairperson\"}, {\"content\": \"Sounds like a plan! Let's get started. As the Product_Manager, I'd like to present three new product ideas for our luxury car models: 1. 'EcoLux': A hybrid electric-gasoline vehicle that combines the best of both worlds, offering exceptional fuel efficiency and reduced carbon footprint. 2. 'AeroSport': A high-performance sports car with advanced aerodynamics, designed to deliver unparalleled speed and agility on the track or on the open road. 3. 'SmartDrive': An intelligent driver-assistance system that uses AI-powered sensors and cameras to anticipate and respond to potential hazards, ensuring a safer driving experience. Now it's your turn, Chief_Marketing_Officer! Which of these ideas do you think has the most potential?\", \"role\": \"user\", \"name\": \"Chief_Marketing_Officer\"}, {\"content\": \"Thank you for presenting those innovative product ideas, Product_Manager. After careful consideration, I believe 'EcoLux' has the most potential. With the growing concern about climate change and environmental sustainability, a hybrid electric-gasoline vehicle that offers exceptional fuel efficiency and reduced carbon footprint could be a game-changer in the luxury car market. Additionally, it aligns with our company's commitment to innovation and responsibility. Now it's your turn, Digital_Marketer! 
Can you come up with three marketing strategies for 'EcoLux'?\", \"role\": \"user\", \"name\": \"Product_Manager\"}, {\"content\": \"Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three marketing strategies: 1. 'Green Revolution' Campaign: Highlighting the eco-friendly features of EcoLux through a series of social media ads and influencer partnerships. We can partner with eco-conscious influencers to showcase how EcoLux is not only a luxury car but also an environmentally responsible choice. 2. 'Fuel for Thought' Content Series: Creating a content series that explores the intersection of technology, sustainability, and luxury. This could include blog posts, videos, and podcasts that delve into the innovative features of EcoLux and its impact on the environment. 3. 'EcoLux Experience' Event Marketing: Hosting exclusive events and test drives for potential customers to experience the performance and eco-friendliness of EcoLux firsthand. These events can be held at upscale locations and feature interactive exhibits, product demonstrations, and networking opportunities. These strategies will help position EcoLux as a leader in the luxury electric-vehicle market while appealing to environmentally conscious consumers who value innovation and sustainability. TERMINATE\", \"role\": \"user\", \"name\": \"Digital_Marketer\"}]\"\"\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Create the group chat objects, they should have the same `name` as the original group chat."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 87,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"user_proxy = autogen.UserProxyAgent(\n",
|
||||
" name=\"Chairperson\",\n",
|
||||
" system_message=\"The chairperson for the meeting.\",\n",
|
||||
" code_execution_config={},\n",
|
||||
" human_input_mode=\"TERMINATE\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"cmo = autogen.AssistantAgent(\n",
|
||||
" name=\"Chief_Marketing_Officer\",\n",
|
||||
" # system_message is used in the select speaker message\n",
|
||||
" description=\"The head of the marketing department working with the product manager and digital marketer to execute a strong marketing campaign for your car company.\",\n",
|
||||
" # description is used to prompt the LLM as this agent\n",
|
||||
" system_message=\"You, Jane titled Chief_Marketing_Officer, or CMO, are the head of the marketing department and your objective is to guide your team to producing and marketing unique ideas for your luxury car models. Don't include your name at the start of your response or speak for any other team member, let them come up with their own ideas and strategies, speak just for yourself as the head of marketing. When yourself, the Product_Manager, and the Digital_Marketer have spoken and the meeting is finished, say TERMINATE to conclude the meeting.\",\n",
|
||||
" is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"pm = autogen.AssistantAgent(\n",
|
||||
" name=\"Product_Manager\",\n",
|
||||
" # system_message is used in the select speaker message\n",
|
||||
" description=\"Product head for the luxury model cars product line in the car company. Always coming up with new product enhancements for the cars.\",\n",
|
||||
" # description is used to prompt the LLM as this agent\n",
|
||||
" system_message=\"You, Alice titled Product_Manager, are always coming up with new product enhancements for the luxury car models you look after. Review the meeting so far and respond with the answer to your current task. Don't include your name at the start of your response and don't speak for anyone else, leave the Chairperson to pick the next person to speak.\",\n",
|
||||
" is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"digital = autogen.AssistantAgent(\n",
|
||||
" name=\"Digital_Marketer\",\n",
|
||||
" # system_message is used in the select speaker message\n",
|
||||
" description=\"A seasoned digital marketer who comes up with online marketing strategies that highlight the key features of the luxury car models.\",\n",
|
||||
" # description is used to prompt the LLM as this agent\n",
|
||||
" system_message=\"You, Elizabeth titled Digital_Marketer, are a senior online marketing specialist who comes up with marketing strategies that highlight the key features of the luxury car models. Review the meeting so far and respond with the answer to your current task. Don't include your name at the start of your response and don't speak for anyone else, leave the Chairperson to pick the next person to speak.\",\n",
|
||||
" is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\"),\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Customised message, this is always the first message in the context\n",
|
||||
"my_speaker_select_msg = \"\"\"You are a chairperson for a marketing meeting for this car manufacturer where multiple members of the team will speak.\n",
|
||||
"The job roles of the team at the meeting, and their responsibilities, are:\n",
|
||||
"{roles}\"\"\"\n",
|
||||
"\n",
|
||||
"# Customised prompt, this is always the last message in the context\n",
|
||||
"my_speaker_select_prompt = \"\"\"Read the above conversation.\n",
|
||||
"Then select ONLY THE NAME of the next job role from {agentlist} to speak. Do not explain why.\"\"\"\n",
|
||||
"\n",
|
||||
"groupchat = autogen.GroupChat(\n",
|
||||
" agents=[user_proxy, cmo, pm, digital],\n",
|
||||
" messages=[],\n",
|
||||
" max_round=10,\n",
|
||||
" select_speaker_message_template=my_speaker_select_msg,\n",
|
||||
" select_speaker_prompt_template=my_speaker_select_prompt,\n",
|
||||
" max_retries_for_selecting_speaker=2, # New\n",
|
||||
" select_speaker_auto_verbose=False, # New\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"manager = autogen.GroupChatManager(\n",
|
||||
" groupchat=groupchat,\n",
|
||||
" llm_config=gpt4_config,\n",
|
||||
" is_termination_msg=lambda x: \"TERMINATE\" in x.get(\"content\", \"\"),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Prepare the resumption of the group chat without removing the termination condition. A warning will show. Then attempting to resume the chat will terminate immediately."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 88,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING: Last message meets termination criteria and this may terminate the chat. Set ignore_initial_termination_check=False to avoid checking termination at the start of the chat.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prepared group chat with 4 messages, the last speaker is \u001b[33mDigital_Marketer\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Prepare the group chat for resuming WITHOUT removing the TERMINATE message\n",
|
||||
"last_agent, last_message = manager.resume(messages=previous_state)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 89,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mDigital_Marketer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three marketing strategies: 1. 'Green Revolution' Campaign: Highlighting the eco-friendly features of EcoLux through a series of social media ads and influencer partnerships. We can partner with eco-conscious influencers to showcase how EcoLux is not only a luxury car but also an environmentally responsible choice. 2. 'Fuel for Thought' Content Series: Creating a content series that explores the intersection of technology, sustainability, and luxury. This could include blog posts, videos, and podcasts that delve into the innovative features of EcoLux and its impact on the environment. 3. 'EcoLux Experience' Event Marketing: Hosting exclusive events and test drives for potential customers to experience the performance and eco-friendliness of EcoLux firsthand. These events can be held at upscale locations and feature interactive exhibits, product demonstrations, and networking opportunities. These strategies will help position EcoLux as a leader in the luxury electric-vehicle market while appealing to environmentally conscious consumers who value innovation and sustainability. TERMINATE\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Resume and it will terminate immediately\n",
|
||||
"result = last_agent.initiate_chat(recipient=manager, message=last_message, clear_history=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"This time, we will remove the termination message, by using the `remove_termination_string` parameter, and then resume."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 90,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prepared group chat with 4 messages, the last speaker is \u001b[33mDigital_Marketer\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Prepare the group chat for resuming WITH removal of TERMINATE message\n",
|
||||
"last_agent, last_message = manager.resume(messages=previous_state, remove_termination_string=\"TERMINATE\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 91,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mDigital_Marketer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three marketing strategies: 1. 'Green Revolution' Campaign: Highlighting the eco-friendly features of EcoLux through a series of social media ads and influencer partnerships. We can partner with eco-conscious influencers to showcase how EcoLux is not only a luxury car but also an environmentally responsible choice. 2. 'Fuel for Thought' Content Series: Creating a content series that explores the intersection of technology, sustainability, and luxury. This could include blog posts, videos, and podcasts that delve into the innovative features of EcoLux and its impact on the environment. 3. 'EcoLux Experience' Event Marketing: Hosting exclusive events and test drives for potential customers to experience the performance and eco-friendliness of EcoLux firsthand. These events can be held at upscale locations and feature interactive exhibits, product demonstrations, and networking opportunities. These strategies will help position EcoLux as a leader in the luxury electric-vehicle market while appealing to environmentally conscious consumers who value innovation and sustainability. \n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mChief_Marketing_Officer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Thank you, Digital_Marketer, for those comprehensive and innovative marketing strategies. Each strategy you've outlined aligns perfectly with our vision for EcoLux, emphasizing its eco-friendly features, technological innovation, and luxury appeal. The 'Green Revolution' Campaign will leverage the power of social media and influencers to reach our target audience effectively. The 'Fuel for Thought' Content Series will educate and engage potential customers on the importance of sustainability in the luxury automotive sector. Lastly, the 'EcoLux Experience' Event Marketing will provide an immersive experience that showcases the unique value proposition of EcoLux. \n",
|
||||
"\n",
|
||||
"I believe these strategies will collectively create a strong market presence for EcoLux, appealing to both luxury car enthusiasts and environmentally conscious consumers. Let's proceed with these strategies and ensure that every touchpoint communicates EcoLux's commitment to luxury, innovation, and sustainability. \n",
|
||||
"\n",
|
||||
"TERMINATE\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Resume the chat using the last agent and message\n",
|
||||
"result = last_agent.initiate_chat(recipient=manager, message=last_message, clear_history=False)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can see that the conversation continued, the Chief_Marketing_Officer spoke and they terminated the conversation."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 92,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"#1, Chairperson: Let's get this meeting started. We'll have a set order of speakers. First the Pr ...\n",
|
||||
"#2, Chief_Marketing_Officer: Sounds like a plan! Let's get started. As the Product_Manager, I'd like to present ...\n",
|
||||
"#3, Product_Manager: Thank you for presenting those innovative product ideas, Product_Manager. After ...\n",
|
||||
"#4, Digital_Marketer: Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three ...\n",
|
||||
"#5, Chief_Marketing_Officer: Thank you, Digital_Marketer, for those comprehensive and innovative marketing st ...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Output the final chat history showing the original 4 messages and the resumed message\n",
|
||||
"for i, message in enumerate(groupchat.messages):\n",
|
||||
" print(\n",
|
||||
" f\"#{i + 1}, {message['name']}: {message['content'][:80]}\".replace(\"\\n\", \" \"),\n",
|
||||
" f\"{'...' if len(message['content']) > 80 else ''}\".replace(\"\\n\", \" \"),\n",
|
||||
" )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Example of resuming a terminated GroupChat with a new message and agent\n",
|
||||
"\n",
|
||||
"Rather than continuing a group chat by using the last message, we can resume a group chat using a new message.\n",
|
||||
"\n",
|
||||
"**IMPORTANT**: To remain in a group chat, use the GroupChatManager to initiate the chat, otherwise you can continue with an agent-to-agent conversation by using another agent to initiate the chat.\n",
|
||||
"\n",
|
||||
"We'll continue with the previous example by using the messages from that conversation and resuming it with a new conversation in the agent 'meeting'.\n",
|
||||
"\n",
|
||||
"We start by preparing the group chat by using the messages from the previous chat."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 93,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING: Last message meets termination criteria and this may terminate the chat. Set ignore_initial_termination_check=False to avoid checking termination at the start of the chat.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Prepared group chat with 5 messages, the last speaker is \u001b[33mChief_Marketing_Officer\u001b[0m\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Prepare the group chat for resuming using the previous messages. We don't need to remove the TERMINATE string as we aren't using the last message for resuming.\n",
|
||||
"last_agent, last_message = manager.resume(messages=groupchat.messages)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Let's continue the meeting with a new topic."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 94,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\u001b[33mchat_manager\u001b[0m (to Chief_Marketing_Officer):\n",
|
||||
"\n",
|
||||
"Team, let's now think of a name for the next vehicle that embodies that idea. Chief_Marketing_Officer and Product_manager can you both suggest one and then we can conclude.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mChief_Marketing_Officer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Given the focus on sustainability and luxury, I suggest the name \"VerdeVogue\" for our next vehicle. \"Verde\" reflects the green, eco-friendly aspect of the car, while \"Vogue\" emphasizes its stylish and trendsetting nature in the luxury market. This name encapsulates the essence of combining environmental responsibility with high-end design and performance. \n",
|
||||
"\n",
|
||||
"Now, I'd like to hear the Product_Manager's suggestion.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mProduct_Manager\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"For our next vehicle, I propose the name \"EcoPrestige.\" This name highlights the vehicle's eco-friendly nature and its luxurious, prestigious status in the market. \"Eco\" emphasizes our commitment to sustainability and environmental responsibility, while \"Prestige\" conveys the car's high-end quality, sophistication, and the elite status it offers to its owners. This name perfectly blends our goals of offering a sustainable luxury vehicle that doesn't compromise on performance or style.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n",
|
||||
"\u001b[33mChief_Marketing_Officer\u001b[0m (to chat_manager):\n",
|
||||
"\n",
|
||||
"Thank you, Product_Manager, for your suggestion. Both \"VerdeVogue\" and \"EcoPrestige\" capture the essence of our new vehicle's eco-friendly luxury. As we move forward, we'll consider these names carefully to ensure our branding aligns perfectly with our product's unique value proposition and market positioning. \n",
|
||||
"\n",
|
||||
"This concludes our meeting. Thank you, everyone, for your valuable contributions. TERMINATE.\n",
|
||||
"\n",
|
||||
"--------------------------------------------------------------------------------\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Resume the chat using a different agent and message\n",
|
||||
"result = manager.initiate_chat(\n",
|
||||
" recipient=cmo,\n",
|
||||
" message=\"Team, let's now think of a name for the next vehicle that embodies that idea. Chief_Marketing_Officer and Product_manager can you both suggest one and then we can conclude.\",\n",
|
||||
" clear_history=False,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 95,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"#1, Chairperson: Let's get this meeting started. We'll have a set order of speakers. First the Pr ...\n",
|
||||
"#2, Chief_Marketing_Officer: Sounds like a plan! Let's get started. As the Product_Manager, I'd like to present ...\n",
|
||||
"#3, Product_Manager: Thank you for presenting those innovative product ideas, Product_Manager. After ...\n",
|
||||
"#4, Digital_Marketer: Thank you, Chief_Marketing_Officer! For 'EcoLux', I propose the following three ...\n",
|
||||
"#5, Chief_Marketing_Officer: Given the focus on sustainability and luxury, I suggest the name \"VerdeVogue\" for ...\n",
|
||||
"#6, Product_Manager: For our next vehicle, I propose the name \"EcoPrestige.\" This name highlights the ...\n",
|
||||
"#7, Chief_Marketing_Officer: Thank you, Product_Manager, for your suggestion. Both \"VerdeVogue\" and \"EcoPrest ...\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Output the final chat history showing the original 4 messages and the messages from the resumed conversation\n",
|
||||
"for i, message in enumerate(groupchat.messages):\n",
|
||||
" print(\n",
|
||||
" f\"#{i + 1}, {message['name']}: {message['content'][:80]}\".replace(\"\\n\", \" \"),\n",
|
||||
" f\"{'...' if len(message['content']) > 80 else ''}\".replace(\"\\n\", \" \"),\n",
|
||||
" )"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"front_matter": {
|
||||
"description": "Resuming a GroupChat",
|
||||
"tags": [
|
||||
"orchestration",
|
||||
"group chat"
|
||||
]
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "autogen",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
Loading…
Reference in New Issue