Improve auto reply registration (#1170)

* Improve auto reply registration

* object key

* fix test error

* bug fix in math user proxy agent

* allow send/receive without reply

* reset -> stop
Chi Wang 2023-08-04 07:26:58 -07:00, committed by GitHub
parent 45b7d908e4
commit 2208dfb79e
10 changed files with 896 additions and 321 deletions
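
In short: `register_auto_reply` gains a `position` argument, `send`/`receive` gain a `request_reply` flag, and a per-sender `reply_at_receive` switch (with `stop_reply_at_receive`) replaces the old reset-based control. A minimal sketch of the new surface under this revision of `flaml.autogen` (the agent names and `echo_reply` are illustrative, not part of the commit):

    from flaml.autogen.agentchat import Agent, ResponsiveAgent

    a = ResponsiveAgent(name="a", human_input_mode="NEVER", llm_config=False,
                        max_consecutive_auto_reply=1)
    b = ResponsiveAgent(name="b", human_input_mode="NEVER", llm_config=False,
                        max_consecutive_auto_reply=1)

    # reply functions return (final, reply); final=False falls through to the next one
    def echo_reply(messages=None, sender=None):
        return True, f"echo: {messages[-1]['content']}"

    # position 1 keeps the termination/human-input check (position 0) first
    a.register_auto_reply(Agent, echo_reply, 1)

    a.send("for your information", b, request_reply=False)  # delivered, no reply triggered
    print(a.generate_reply(messages=[{"role": "user", "content": "hi"}], sender=b))  # echo: hi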


@@ -2,7 +2,7 @@ from .agent import Agent
from .responsive_agent import ResponsiveAgent
from .assistant_agent import AssistantAgent
from .user_proxy_agent import UserProxyAgent
-from .groupchat import GroupChatManager, GroupChatParticipant
+from .groupchat import GroupChatManager

__all__ = [
    "Agent",
@@ -10,5 +10,4 @@ __all__ = [
    "AssistantAgent",
    "UserProxyAgent",
    "GroupChatManager",
-    "GroupChatParticipant",
]


@@ -24,10 +24,10 @@ class Agent:
        """Get the name of the agent."""
        return self._name

-    def send(self, message: Union[Dict, str], recipient: "Agent"):
+    def send(self, message: Union[Dict, str], recipient: "Agent", request_reply: Optional[bool] = None):
        """(Abstract method) Send a message to another agent."""

-    def receive(self, message: Union[Dict, str], sender: "Agent"):
+    def receive(self, message: Union[Dict, str], sender: "Agent", request_reply: Optional[bool] = None):
        """(Abstract method) Receive a message from another agent."""

    def reset(self):


@@ -165,7 +165,7 @@ class MathUserProxyAgent(UserProxyAgent):
            default_auto_reply=default_auto_reply,
            **kwargs,
        )
-        self.register_auto_reply(Agent, self._generate_math_reply)
+        self.register_auto_reply(Agent, self._generate_math_reply, 1)

        # fixed var
        self._max_invalid_q_per_step = max_invalid_q_per_step
@@ -283,7 +283,7 @@ class MathUserProxyAgent(UserProxyAgent):
    ):
        """Generate an auto reply."""
        if messages is None:
-            messages = self._oai_messages[sender.name]
+            messages = self._oai_messages[sender]
        message = messages[-1]
        message = message.get("content", "")
        code_blocks = extract_code(message)
@@ -313,7 +313,7 @@ class MathUserProxyAgent(UserProxyAgent):
        reply = reply.strip()

        if self.last_reply == reply:
-            return reply + "\nYour query or result is same from the last, please try a new approach."
+            return True, reply + "\nYour query or result is same from the last, please try a new approach."

        self.last_reply = reply

        if not all_success:
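
Two things to note in the hunks above: the math reply function is now registered at position 1 so the termination/human-input check stays first, and reply functions now return a `(final, reply)` tuple instead of a bare reply. A sketch of the contract (the function and its trigger condition are illustrative):

    def my_reply(messages=None, sender=None):
        # final=True: use this reply and stop checking further reply functions
        # final=False: fall through to the next registered reply function
        if messages and "query" in messages[-1].get("content", ""):
            return True, "Handling the query here."
        return False, None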


@@ -1,5 +1,5 @@
import sys
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Dict, List, Optional, Union

from .agent import Agent
from .responsive_agent import ResponsiveAgent
@@ -7,19 +7,16 @@ from .responsive_agent import ResponsiveAgent
class GroupChatManager(ResponsiveAgent):
    """(WIP) A chat manager agent that can manage a group chat of multiple agents."""

-    agents: List["GroupChatParticipant"]
+    agents: List[Agent]
    max_round: int

    def _participant_roles(self):
        return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])

    def _select_speaker_msg(self):
-        return {
-            "role": "system",
-            "content": f"""You are in a role play game. The following roles are available:
+        return f"""You are in a role play game. The following roles are available:
{self._participant_roles()}. Read the following conversation.
-Then select the next role from {self._agent_names} to play. Only return the role.""",
-        }
+Then select the next role from {self._agent_names} to play. Only return the role."""

    def __init__(
        self,
@@ -28,6 +25,7 @@ Then select the next role from {self._agent_names} to play. Only return the role
        # unlimited consecutive auto reply by default
        max_consecutive_auto_reply: Optional[int] = sys.maxsize,
        human_input_mode: Optional[str] = "NEVER",
+        system_message: Optional[str] = "Group chat manager.",
        # seed: Optional[int] = 4,
        **kwargs,
    ):
@@ -37,11 +35,9 @@ Then select the next role from {self._agent_names} to play. Only return the role
            human_input_mode=human_input_mode,
            **kwargs,
        )
-        self.register_auto_reply(GroupChatParticipant, self._generate_reply_for_participant)
+        self.register_auto_reply(Agent, self._generate_reply_for_participant)
        self.max_round = max_round
        self._agent_names = []
-        self._next_speaker = None
-        self._round = 0
        self._messages = []
        # self._random = random.Random(seed)
@@ -50,94 +46,43 @@ Then select the next role from {self._agent_names} to play. Only return the role
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
    ) -> Union[str, Dict, None]:
+        self._agent_names = [agent.name for agent in self.agents]
        if messages is None:
-            messages = self._oai_messages[sender.name]
+            messages = self._oai_messages[sender]
        message = messages[-1]
-        # set the name to sender's name if the role is not function
-        if message["role"] != "function":
-            message["name"] = sender.name
-        self._messages.append(message)
-        self._next_speaker = None
-        # broadcast the message to all agents except the sender
-        for agent in self.agents:
-            if agent != sender:
-                self.send(message, agent)
-        if self._round == 0:
-            self._agent_names = [agent.name for agent in self.agents]
-        self._round += 1
-        if self._round >= self.max_round:
-            return True, None
-        # speaker selection msg from an agent
-        self._next_speaker = self._select_speaker(sender)
-        self._next_speaker.send(self._next_speaker.generate_reply(sender=self), self)
+        speaker = sender
+        for i in range(self.max_round):
+            # set the name to speaker's name if the role is not function
+            if message["role"] != "function":
+                message["name"] = speaker.name
+            self._messages.append(message)
+            # broadcast the message to all agents except the speaker
+            for agent in self.agents:
+                if agent != speaker:
+                    self.send(message, agent, request_reply=False)
+            if i != self.max_round - 1:
+                # speaker selection msg from an agent
+                speaker = self._select_speaker(speaker)
+                speaker.send(speaker.generate_reply(sender=self), self, request_reply=False)
+                message = self.last_message(speaker)
        return True, None
-    @property
-    def next_speaker(self):
-        """Return the next speaker."""
-        return self._next_speaker

-    def _select_speaker(self, last_speaker: "GroupChatParticipant"):
+    def _select_speaker(self, last_speaker: Agent):
        """Select the next speaker."""
-        final, name = self._generate_oai_reply([self._select_speaker_msg()] + self._messages)
+        self.update_system_message(self._select_speaker_msg())
+        final, name = self._generate_oai_reply(self._messages)
        if not final:
            # i = self._random.randint(0, len(self._agent_names) - 1)  # randomly pick an id
-            name = self._agent_names[(self._agent_names.index(last_speaker.name) + 1) % len(self._agent_names)]
-        return self.agent_by_name(name)
+            return self.agents[(self._agent_names.index(last_speaker.name) + 1) % len(self._agent_names)]
+        try:
+            return self.agent_by_name(name)
+        except ValueError:
+            return self.agents[(self._agent_names.index(last_speaker.name) + 1) % len(self._agent_names)]

-    def agent_by_name(self, name: str) -> "GroupChatParticipant":
+    def agent_by_name(self, name: str) -> Agent:
        """Find the next speaker based on the message."""
        return self.agents[self._agent_names.index(name)]

    def reset(self):
        super().reset()
-        self._round = 0
        self._messages.clear()
-        self._next_speaker = None
-class GroupChatParticipant(ResponsiveAgent):
-    """(WIP) A group chat participant agent that can participate in a group chat."""
-
-    group_chat_manager: GroupChatManager
-
-    def __init__(
-        self,
-        name,
-        group_chat_manager=None,
-        **kwargs,
-    ):
-        super().__init__(
-            name=name,
-            **kwargs,
-        )
-        self.register_auto_reply(GroupChatManager, self._generate_reply_for_chat_manager)
-        self.group_chat_manager = group_chat_manager
-
-    def _generate_reply_for_chat_manager(
-        self,
-        messages: Optional[List[Dict]] = None,
-        sender: Optional[Agent] = None,
-    ) -> Tuple[bool, Union[str, Dict, None]]:
-        """Generate reply for the chat manager."""
-        return self.group_chat_manager.next_speaker != self, None
-
-    # def _speaker_selection(self, instruction):
-    #     """Select the next speaker."""
-    #     if self.llm_config is False:
-    #         if self.human_input_mode == "NEVER":
-    #             return self.name
-    #         else:
-    #             return self.get_human_input(instruction["content"])
-    #     sender = self.chat_manager.room
-    #     roles_msg = {
-    #         "content": f"""The following roles are available:
-    # {self._participant_roles()}""",
-    #         "role": "system",
-    #     }
-    #     old_system_msg = self.system_message
-    #     self.update_system_message(instruction["content"])
-    #     reply = self._generate_oai_reply([roles_msg] + self.chat_messages[sender.name])
-    #     self.update_system_message(old_system_msg)
-    #     return reply
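
With `GroupChatParticipant` gone, any `Agent` can join a group chat; the manager now runs the whole conversation inside `_generate_reply_for_participant`, broadcasting with `request_reply=False` so only the selected speaker replies. A minimal sketch of driving it, mirroring the updated test later in this commit (with `llm_config=False`, speaker selection falls back to round robin):

    from flaml import autogen

    manager = autogen.GroupChatManager(max_round=2, llm_config=False)
    alice = autogen.ResponsiveAgent(
        "alice", max_consecutive_auto_reply=2, human_input_mode="NEVER",
        llm_config=False, default_auto_reply="This is alice speaking.",
    )
    bob = autogen.ResponsiveAgent(
        "bob", max_consecutive_auto_reply=2, human_input_mode="NEVER",
        llm_config=False, default_auto_reply="This is bob speaking.",
    )
    manager.agents = [alice, bob]
    alice.initiate_chat(manager, message="hello")  # runs max_round rounds, then stops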


@@ -109,22 +109,27 @@ class ResponsiveAgent(Agent):
        self._function_map = {} if function_map is None else function_map
        self._default_auto_reply = default_auto_reply
        self._class_specific_reply = []
+        self.reply_at_receive = defaultdict(bool)
        self.register_auto_reply(Agent, self._generate_oai_reply)
        self.register_auto_reply(Agent, self._generate_code_execution_reply)
        self.register_auto_reply(Agent, self._generate_function_call_reply)
        self.register_auto_reply(Agent, self._check_termination_and_human_reply)

-    def register_auto_reply(self, class_type, reply_func: Callable):
+    def register_auto_reply(self, class_type, reply_func: Callable, position: int = 0):
        """Register a class-specific reply function.

        The class-specific reply function will be called when the sender is an instance of the class_type.
-        The function registered later will be checked earlier.
+        The function registered later will be checked earlier by default.
+        To change the order, set the position to a positive integer.

        Args:
            class_type (Class): the class type.
            reply_func (Callable): the reply function.
+            position (int): the position of the reply function in the reply function list.
        """
-        self._class_specific_reply.append((class_type, reply_func))
+        self._class_specific_reply.insert(position, (class_type, reply_func))

    @property
    def system_message(self):
        """Return the system message."""
        return self._oai_system_message[0]["content"]
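
Because registration is a plain `list.insert`, position 0 means checked first, and each default registration in `__init__` pushes the earlier ones back. The resulting order, and the effect of a positional registration, in a toy sketch:

    checks = []
    for f in ["_generate_oai_reply", "_generate_code_execution_reply",
              "_generate_function_call_reply", "_check_termination_and_human_reply"]:
        checks.insert(0, f)  # same order as ResponsiveAgent.__init__ above
    checks.insert(1, "_generate_math_reply")  # position 1, as MathUserProxyAgent uses
    print(checks)
    # ['_check_termination_and_human_reply', '_generate_math_reply',
    #  '_generate_function_call_reply', '_generate_code_execution_reply',
    #  '_generate_oai_reply']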
@@ -149,13 +154,11 @@ class ResponsiveAgent(Agent):
            for k in self._max_consecutive_auto_reply_dict:
                self._max_consecutive_auto_reply_dict[k] = value
        else:
-            self._max_consecutive_auto_reply_dict[sender.name] = value
+            self._max_consecutive_auto_reply_dict[sender] = value

    def max_consecutive_auto_reply(self, sender: Optional[Agent] = None) -> int:
        """The maximum number of consecutive auto replies."""
-        return (
-            self._max_consecutive_auto_reply if sender is None else self._max_consecutive_auto_reply_dict[sender.name]
-        )
+        return self._max_consecutive_auto_reply if sender is None else self._max_consecutive_auto_reply_dict[sender]

    @property
    def chat_messages(self) -> Dict[str, List[Dict]]:
@@ -181,7 +184,7 @@ class ResponsiveAgent(Agent):
            for conversation in self._oai_messages.values():
                return conversation[-1]
            raise ValueError("More than one conversation is found. Please specify the sender to get the last message.")
-        return self._oai_messages[agent.name][-1]
+        return self._oai_messages[agent][-1]

    @property
    def use_docker(self) -> Union[bool, str, None]:
@@ -200,7 +203,7 @@ class ResponsiveAgent(Agent):
        else:
            return message

-    def _append_oai_message(self, message: Union[Dict, str], role, conversation_id) -> bool:
+    def _append_oai_message(self, message: Union[Dict, str], role, conversation_id: Agent) -> bool:
        """Append a message to the ChatCompletion conversation.

        If the message received is a string, it will be put in the "content" field of the new dictionary.
@@ -210,7 +213,7 @@ class ResponsiveAgent(Agent):
        Args:
            message (dict or str): message to be appended to the ChatCompletion conversation.
            role (str): role of the message, can be "assistant" or "function".
-            conversation_id (str): id of the conversation, should be the name of the recipient or sender.
+            conversation_id (Agent): id of the conversation, should be the recipient or sender.

        Returns:
            bool: whether the message is appended to the ChatCompletion conversation.
@@ -225,7 +228,7 @@ class ResponsiveAgent(Agent):
        self._oai_messages[conversation_id].append(oai_message)
        return True

-    def send(self, message: Union[Dict, str], recipient: Agent):
+    def send(self, message: Union[Dict, str], recipient: Agent, request_reply: Optional[bool] = None) -> bool:
        """Send a message to another agent.

        Args:
@@ -252,15 +255,16 @@ class ResponsiveAgent(Agent):
                So effectively, this provides a way for an agent to send a "link" and modify
                the content of the "link" later.
            recipient (Agent): the recipient of the message.
+            request_reply (bool or None): whether to request a reply from the recipient.

        Raises:
            ValueError: if the message can't be converted into a valid ChatCompletion message.
        """
        # When the agent composes and sends the message, the role of the message is "assistant"
        # unless it's "function".
-        valid = self._append_oai_message(message, "assistant", recipient.name)
+        valid = self._append_oai_message(message, "assistant", recipient)
        if valid:
-            recipient.receive(message, self)
+            recipient.receive(message, self, request_reply)
        else:
            raise ValueError(
                "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
@@ -296,7 +300,7 @@ class ResponsiveAgent(Agent):
            print(colored("*" * len(func_print), "green"), flush=True)
        print("\n", "-" * 80, flush=True, sep="")

-    def receive(self, message: Union[Dict, str], sender: Agent):
+    def receive(self, message: Union[Dict, str], sender: Agent, request_reply: Optional[bool] = None):
        """Receive a message from another agent.

        Once a message is received, this function sends a reply to the sender or stops.
@@ -312,18 +316,22 @@ class ResponsiveAgent(Agent):
                5. "context" (dict): the context of the message, which will be passed to
                    [autogen.Completion.create](../oai/Completion#create).
            sender: sender of an Agent instance.
+            request_reply (bool or None): whether a reply is requested from the sender.
+                If None, the value is determined by `self.reply_at_receive[sender]`.

        Raises:
            ValueError: if the message can't be converted into a valid ChatCompletion message.
        """
        message = self._message_to_dict(message)
        # When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
-        valid = self._append_oai_message(message, "user", sender.name)
+        valid = self._append_oai_message(message, "user", sender)
        if not valid:
            raise ValueError(
                "Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
            )
        self._print_received_message(message, sender)
+        if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
+            return
        reply = self.generate_reply(sender=sender)
        if reply is not None:
            self.send(reply, sender)
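
The resulting semantics: `request_reply=False` delivers a message silently, `request_reply=True` forces one auto reply, and `None` defers to `reply_at_receive`, which is False until `initiate_chat` sets it and `stop_reply_at_receive` clears it again (the "reset -> stop" item in the commit message). A hedged sketch under this revision's defaults:

    from flaml.autogen.agentchat import ResponsiveAgent

    a = ResponsiveAgent(name="a", human_input_mode="NEVER", llm_config=False,
                        default_auto_reply="ok", max_consecutive_auto_reply=1)
    b = ResponsiveAgent(name="b", human_input_mode="NEVER", llm_config=False,
                        default_auto_reply="ok", max_consecutive_auto_reply=1)

    a.send("fyi", b, request_reply=False)            # logged on both sides, no reply
    a.send("fyi again", b)                           # None + reply_at_receive False -> still silent
    a.send("please respond", b, request_reply=True)  # b auto-replies once ("ok")

    a.initiate_chat(b, message="hello")  # sets reply_at_receive True in both directions
    a.stop_reply_at_receive(b)           # a now ignores b's messages again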
@@ -343,6 +351,7 @@ class ResponsiveAgent(Agent):
        """
        self.reset_consecutive_auto_reply_counter(recipient)
        recipient.reset_consecutive_auto_reply_counter(self)
+        self.reply_at_receive[recipient] = recipient.reply_at_receive[self] = True
        if clear_history:
            self.clear_history(recipient)
            recipient.clear_history(self)
@@ -352,13 +361,21 @@ class ResponsiveAgent(Agent):
        """Reset the agent."""
        self.clear_history()
        self.reset_consecutive_auto_reply_counter()
+        self.stop_reply_at_receive()

+    def stop_reply_at_receive(self, sender: Optional[Agent] = None):
+        """Reset the reply_at_receive of the sender."""
+        if sender is None:
+            self.reply_at_receive.clear()
+        else:
+            self.reply_at_receive[sender] = False

    def reset_consecutive_auto_reply_counter(self, sender: Optional[Agent] = None):
        """Reset the consecutive_auto_reply_counter of the sender."""
        if sender is None:
            self._consecutive_auto_reply_counter.clear()
        else:
-            self._consecutive_auto_reply_counter[sender.name] = 0
+            self._consecutive_auto_reply_counter[sender] = 0

    def clear_history(self, agent: Optional[Agent] = None):
        """Clear the chat history of the agent.
@@ -369,7 +386,7 @@ class ResponsiveAgent(Agent):
        if agent is None:
            self._oai_messages.clear()
        else:
-            self._oai_messages[agent.name].clear()
+            self._oai_messages[agent].clear()

    def _generate_oai_reply(
        self,
@@ -379,7 +396,7 @@ class ResponsiveAgent(Agent):
        if self.llm_config is False:
            return False, None
        if messages is None:
-            messages = self._oai_messages[sender.name]
+            messages = self._oai_messages[sender]

        # TODO: #1143 handle token limit exceeded error
        response = oai.ChatCompletion.create(
@@ -387,13 +404,48 @@ class ResponsiveAgent(Agent):
        )
        return True, oai.ChatCompletion.extract_text_or_function_call(response)[0]

+    def _generate_code_execution_reply(
+        self,
+        messages: Optional[List[Dict]] = None,
+        sender: Optional[Agent] = None,
+    ):
+        if self._code_execution_config is False:
+            return False, None
+        if messages is None:
+            messages = self._oai_messages[sender]
+        message = messages[-1]
+        code_blocks = extract_code(message["content"])
+        if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
+            # no code block is found, lang should be `UNKNOWN`
+            return False, None
+        # code_blocks, _ = find_code(messages, sys_msg=self._oai_system_message, **self.llm_config)
+        # if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
+        #     return code_blocks[0][1]
+        # try to execute the code
+        exitcode, logs = self.execute_code_blocks(code_blocks)
+        exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
+        return True, f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"

+    def _generate_function_call_reply(
+        self,
+        messages: Optional[List[Dict]] = None,
+        sender: Optional[Agent] = None,
+    ):
+        if messages is None:
+            messages = self._oai_messages[sender]
+        message = messages[-1]
+        if "function_call" in message:
+            _, func_return = self.execute_function(message["function_call"])
+            return True, func_return
+        return False, None

    def _check_termination_and_human_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
    ) -> Tuple[bool, Union[str, Dict, None]]:
        if messages is None:
-            messages = self._oai_messages[sender.name]
+            messages = self._oai_messages[sender]
        message = messages[-1]
        reply = ""
        no_human_input_msg = ""
@@ -405,7 +457,7 @@ class ResponsiveAgent(Agent):
            # if the human input is empty, and the message is a termination message, then we will terminate the conversation
            reply = reply if reply or not self._is_termination_msg(message) else "exit"
        else:
-            if self._consecutive_auto_reply_counter[sender.name] >= self._max_consecutive_auto_reply_dict[sender.name]:
+            if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
                if self.human_input_mode == "NEVER":
                    reply = "exit"
                else:
@@ -438,84 +490,58 @@ class ResponsiveAgent(Agent):
        # stop the conversation
        if reply == "exit":
            # reset the consecutive_auto_reply_counter
-            self._consecutive_auto_reply_counter[sender.name] = 0
+            self._consecutive_auto_reply_counter[sender] = 0
            return True, None

        # send the human reply
-        if reply or self._max_consecutive_auto_reply_dict[sender.name] == 0:
+        if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
            # reset the consecutive_auto_reply_counter
-            self._consecutive_auto_reply_counter[sender.name] = 0
+            self._consecutive_auto_reply_counter[sender] = 0
            return True, reply

        # increment the consecutive_auto_reply_counter
-        self._consecutive_auto_reply_counter[sender.name] += 1
+        self._consecutive_auto_reply_counter[sender] += 1
        if self.human_input_mode != "NEVER":
            print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)

        return False, None

-    def _generate_function_call_reply(
-        self,
-        messages: Optional[List[Dict]] = None,
-        sender: Optional[Agent] = None,
-    ):
-        if messages is None:
-            messages = self._oai_messages[sender.name]
-        message = messages[-1]
-        if "function_call" in message:
-            _, func_return = self.execute_function(message["function_call"])
-            return True, func_return
-        return False, None
-
-    def _generate_code_execution_reply(
-        self,
-        messages: Optional[List[Dict]] = None,
-        sender: Optional[Agent] = None,
-    ):
-        if self._code_execution_config is False:
-            return False, None
-        if messages is None:
-            messages = self._oai_messages[sender.name]
-        message = messages[-1]
-        code_blocks = extract_code(message["content"])
-        if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
-            # no code block is found, lang should be `UNKNOWN`
-            return False, None
-        # code_blocks, _ = find_code(messages, sys_msg=self._oai_system_message, **self.llm_config)
-        # if len(code_blocks) == 1 and code_blocks[0][0] == UNKNOWN:
-        #     return code_blocks[0][1]
-        # try to execute the code
-        exitcode, logs = self.execute_code_blocks(code_blocks)
-        exitcode2str = "execution succeeded" if exitcode == 0 else "execution failed"
-        return True, f"exitcode: {exitcode} ({exitcode2str})\nCode output: {logs}"
-
    def generate_reply(
        self,
        messages: Optional[List[Dict]] = None,
        sender: Optional[Agent] = None,
+        exclude: Optional[List[Callable]] = None,
    ) -> Union[str, Dict, None]:
-        """Reply based on the conversation history.
+        """Reply based on the conversation history and the sender.

-        First, execute function or code and return the result.
-        AI replies are generated only when no code execution is performed.
-        Subclasses can override this method to customize the reply.
        Either messages or sender must be provided.
+        Use registered class-specific reply functions to generate replies.
+        By default, the following functions are checked in order:
+        1. _check_termination_and_human_reply
+        2. _generate_function_call_reply
+        3. _generate_code_execution_reply
+        4. _generate_oai_reply
+        Every function returns a tuple (final, reply).
+        When a function returns final=False, the next function will be checked.
+        So by default, termination and human reply will be checked first.
+        If not terminating and human reply is skipped, execute function or code and return the result.
+        AI replies are generated only when no code execution is performed.

        Args:
            messages: a list of messages in the conversation history.
-            default_reply (str or dict): default reply.
            sender: sender of an Agent instance.
+            exclude: a list of functions to exclude.

        Returns:
            str or dict or None: reply. None if no reply is generated.
        """
        assert messages is not None or sender is not None, "Either messages or sender must be provided."
-        final, reply = self._check_termination_and_human_reply(sender=sender)
-        if final:
-            return reply
        if sender is not None:
-            for class_specifc_reply in self._class_specific_reply[-1::-1]:
-                if isinstance(sender, class_specifc_reply[0]):
+            for class_specifc_reply in self._class_specific_reply:
+                if isinstance(sender, class_specifc_reply[0]) and (
+                    not exclude or class_specifc_reply[1] not in exclude
+                ):
                    final, reply = class_specifc_reply[1](messages, sender)
                    if final:
                        return reply
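
The new `exclude` argument lets a single call skip specific registered functions while the rest are checked in their usual order. A hedged sketch under this revision's defaults (the docker-free `code_execution_config` is an assumption added for local runnability):

    from flaml.autogen.agentchat import ResponsiveAgent

    a = ResponsiveAgent(name="a", human_input_mode="NEVER", llm_config=False,
                        default_auto_reply="fallback",
                        code_execution_config={"use_docker": False})  # assumption: run code locally
    b = ResponsiveAgent(name="b", human_input_mode="NEVER", llm_config=False)

    msg = [{"role": "user", "content": "```python\nprint('hi')\n```"}]
    print(a.generate_reply(messages=msg, sender=b))  # executes the block: exit code + output
    print(a.generate_reply(messages=msg, sender=b,
                           exclude=[a._generate_code_execution_reply]))  # falls through to "fallback"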

File diff suppressed because one or more lines are too long


@@ -1,6 +1,7 @@
{
"cells": [
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -8,6 +9,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -34,6 +36,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -71,6 +74,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -106,6 +110,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -114,44 +119,31 @@
},
{
"cell_type": "code",
-"execution_count": 5,
+"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
-"import sys\n",
-"\n",
"llm_config = {\"config_list\": config_list_gpt4}\n",
"group_chat_manager = autogen.GroupChatManager(max_round=4, llm_config=llm_config)\n",
-"human = autogen.GroupChatParticipant(\n",
+"human = autogen.UserProxyAgent(\n",
" name=\"Human\",\n",
" system_message=\"A human admin.\",\n",
" human_input_mode=\"ALWAYS\",\n",
" llm_config=False,\n",
-" group_chat_manager=group_chat_manager,\n",
")\n",
-"alice = autogen.GroupChatParticipant(\n",
+"alice = autogen.AssistantAgent(\n",
" name=\"Alice\",\n",
-" system_message=autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE,\n",
-" max_consecutive_auto_reply=sys.maxsize,\n",
-" human_input_mode=\"NEVER\",\n",
" llm_config=llm_config,\n",
-" code_execution_config=False,\n",
-" group_chat_manager=group_chat_manager,\n",
")\n",
-"bob = autogen.GroupChatParticipant(\n",
+"bob = autogen.AssistantAgent(\n",
" name=\"Bob\",\n",
" system_message=\"Code reviewer. Prevent code execution if unsafe or not well documented. Suggest changes. Otherwise, approve and return the final code to execute.\",\n",
-" max_consecutive_auto_reply=sys.maxsize,\n",
-" human_input_mode=\"NEVER\",\n",
" llm_config=llm_config,\n",
-" code_execution_config=False,\n",
-" group_chat_manager=group_chat_manager,\n",
")\n",
"\n",
"group_chat_manager.agents = [human, alice, bob]"
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -160,7 +152,7 @@
},
{
"cell_type": "code",
-"execution_count": 6,
+"execution_count": 4,
"metadata": {},
"outputs": [
{
@@ -181,13 +173,7 @@
"\n",
"find a latest paper about generative agents\n",
"\n",
-"--------------------------------------------------------------------------------\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
+"--------------------------------------------------------------------------------\n",
"\u001b[33mAlice\u001b[0m (to chat_manager):\n",
"\n",
"As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
@@ -256,10 +242,6 @@
"Alternatively, databases like PubMed or arXiv.org provide free access to a large number of scientific papers - you might want to check them out for latest research papers on your topic of interest.\n",
"\n",
"--------------------------------------------------------------------------------\n",
-"\u001b[31m\n",
-">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
-"\u001b[31m\n",
-">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
"\u001b[33mchat_manager\u001b[0m (to Bob):\n",
"\n",
"As an AI, I am unable to browse or search the web, download or read a file directly. But I can provide you with a Python script to scrape Google Scholar for the latest papers on generative agents.\n",
@@ -388,10 +370,6 @@
"Always use this script carefully because web-scraping isn't always reliable or legal on all web pages. Always ensure you have express permission or that the website's terms and conditions don't forbid this kind of usage.\n",
"\n",
"--------------------------------------------------------------------------------\n",
-"\u001b[31m\n",
-">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n",
-"\u001b[31m\n",
-">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
"\u001b[33mchat_manager\u001b[0m (to Alice):\n",
"\n",
"Your code as it stands can throw an exception and result in an error if the HTTP request fails or if no search results are found. Also, the use of 'beautifulsoup4' and 'requests' should be well-documented.\n",
@@ -476,7 +454,7 @@
}
],
"source": [
-"human.send(\"find a latest paper about generative agents\", group_chat_manager)"
+"human.initiate_chat(group_chat_manager, message=\"find a latest paper about generative agents\")"
]
}
],


@@ -3,31 +3,29 @@ from flaml import autogen

def test_chat_manager():
    group_chat_manager = autogen.GroupChatManager(max_round=2, llm_config=False)
-    agent1 = autogen.GroupChatParticipant(
+    agent1 = autogen.ResponsiveAgent(
        "alice",
        max_consecutive_auto_reply=2,
        human_input_mode="NEVER",
        llm_config=False,
        default_auto_reply="This is alice sepaking.",
-        group_chat_manager=group_chat_manager,
    )
-    agent2 = autogen.GroupChatParticipant(
+    agent2 = autogen.ResponsiveAgent(
        "bob",
        max_consecutive_auto_reply=2,
        human_input_mode="NEVER",
        llm_config=False,
        default_auto_reply="This is bob speaking.",
-        group_chat_manager=group_chat_manager,
    )
    group_chat_manager.agents = [agent1, agent2]
-    agent1.send("start", group_chat_manager)
+    agent1.initiate_chat(group_chat_manager, message="hello")
-    assert len(agent1.chat_messages[group_chat_manager.name]) == 2
+    assert len(agent1.chat_messages[group_chat_manager]) == 2
    group_chat_manager.reset()
    agent1.reset()
    agent2.reset()
-    agent2.send("start", group_chat_manager)
+    agent2.initiate_chat(group_chat_manager, message="hello")
if __name__ == "__main__":


@@ -1,5 +1,3 @@
-import sys
-from io import StringIO
import pytest
from flaml.autogen.agentchat import ResponsiveAgent
@@ -48,30 +46,34 @@ def test_max_consecutive_auto_reply():
    assert agent.max_consecutive_auto_reply() == agent.max_consecutive_auto_reply(agent1) == 1
    agent1.initiate_chat(agent, message="hello")
-    assert agent._consecutive_auto_reply_counter[agent1.name] == 1
+    assert agent._consecutive_auto_reply_counter[agent1] == 1
    agent1.initiate_chat(agent, message="hello again")
    # with auto reply because the counter is reset
    assert agent1.last_message(agent)["role"] == "user"
-    assert len(agent1.chat_messages[agent.name]) == 2
-    assert len(agent.chat_messages[agent1.name]) == 2
+    assert len(agent1.chat_messages[agent]) == 2
+    assert len(agent.chat_messages[agent1]) == 2
-    assert agent._consecutive_auto_reply_counter[agent1.name] == 1
+    assert agent._consecutive_auto_reply_counter[agent1] == 1
    agent1.send(message="bye", recipient=agent)
    # no auto reply
    assert agent1.last_message(agent)["role"] == "assistant"
    agent1.initiate_chat(agent, clear_history=False, message="hi")
-    assert len(agent1.chat_messages[agent.name]) > 2
-    assert len(agent.chat_messages[agent1.name]) > 2
+    assert len(agent1.chat_messages[agent]) > 2
+    assert len(agent.chat_messages[agent1]) > 2
+
+    assert agent1.reply_at_receive[agent] == agent.reply_at_receive[agent1] is True
+    agent1.stop_reply_at_receive(agent)
+    assert agent1.reply_at_receive[agent] is False and agent.reply_at_receive[agent1] is True

-def test_responsive_agent(monkeypatch):
+def test_responsive_agent():
    dummy_agent_1 = ResponsiveAgent(name="dummy_agent_1", human_input_mode="ALWAYS")
    dummy_agent_2 = ResponsiveAgent(name="dummy_agent_2", human_input_mode="TERMINATE")

-    monkeypatch.setattr(sys, "stdin", StringIO("exit"))
+    # monkeypatch.setattr(sys, "stdin", StringIO("exit"))
    dummy_agent_1.receive("hello", dummy_agent_2)  # receive a str
-    monkeypatch.setattr(sys, "stdin", StringIO("TERMINATE\n\n"))
+    # monkeypatch.setattr(sys, "stdin", StringIO("TERMINATE\n\n"))
    dummy_agent_1.receive(
        {
            "content": "hello {name}",
@@ -81,18 +83,18 @@ def test_responsive_agent(monkeypatch):
        },
        dummy_agent_2,
    )  # receive a dict
-    assert "context" in dummy_agent_1.chat_messages["dummy_agent_2"][-2]
+    assert "context" in dummy_agent_1.chat_messages[dummy_agent_2][-1]

    # receive dict without openai fields to be printed, such as "content", 'function_call'. There should be no error raised.
-    pre_len = len(dummy_agent_1.chat_messages["dummy_agent_2"])
+    pre_len = len(dummy_agent_1.chat_messages[dummy_agent_2])
    with pytest.raises(ValueError):
        dummy_agent_1.receive({"message": "hello"}, dummy_agent_2)
    assert pre_len == len(
-        dummy_agent_1.chat_messages["dummy_agent_2"]
+        dummy_agent_1.chat_messages[dummy_agent_2]
    ), "When the message is not an valid openai message, it should not be appended to the oai conversation."

-    monkeypatch.setattr(sys, "stdin", StringIO("exit"))
+    # monkeypatch.setattr(sys, "stdin", StringIO("exit"))
    dummy_agent_1.send("TERMINATE", dummy_agent_2)  # send a str
-    monkeypatch.setattr(sys, "stdin", StringIO("exit"))
+    # monkeypatch.setattr(sys, "stdin", StringIO("exit"))
    dummy_agent_1.send(
        {
            "content": "TERMINATE",
@@ -101,17 +103,17 @@ def test_responsive_agent(monkeypatch):
    )  # send a dict

    # send dict with no openai fields
-    pre_len = len(dummy_agent_1.chat_messages["dummy_agent_2"])
+    pre_len = len(dummy_agent_1.chat_messages[dummy_agent_2])
    with pytest.raises(ValueError):
        dummy_agent_1.send({"message": "hello"}, dummy_agent_2)
    assert pre_len == len(
-        dummy_agent_1.chat_messages["dummy_agent_2"]
+        dummy_agent_1.chat_messages[dummy_agent_2]
    ), "When the message is not a valid openai message, it should not be appended to the oai conversation."

    # update system message
    dummy_agent_1.update_system_message("new system message")
-    assert dummy_agent_1._oai_system_message[0]["content"] == "new system message"
+    assert dummy_agent_1.system_message == "new system message"
if __name__ == "__main__":


@@ -156,6 +156,8 @@ user_proxy.initiate_chat(
* [Automated Chess Game Playing & Chitchatting by GPT-4 Agents](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_chess.ipynb)
+* [Automated Task Solving by Group Chat](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_groupchat.ipynb)

## Enhanced Inference

One can use [`flaml.autogen.Completion.create`](/docs/reference/autogen/oai/completion#create) to perform inference.
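
For example, a minimal call might look like the following sketch (the config file name and prompt are placeholders; `config_list_from_json` and `extract_text` are assumed to be available in this revision):

    from flaml import autogen

    config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")  # placeholder config source
    response = autogen.Completion.create(
        config_list=config_list,
        prompt="Solve the equation 2x + 3 = 7.",
    )
    print(autogen.Completion.extract_text(response)[0])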