Fix for bugs that appeared when using function calls with the clear history functionality (#1531)

* resolved errors that occurred when using function calling together with clear history

* check whether nr_of_messages_to_preserve was provided

* code formatting

* test added, dict signature improved

* Test updated

Co-authored-by: Chi Wang <wang.chi@microsoft.com>

* test improved

* added comment about preserving an additional message

* improved comment about clear history being called from a tool response

* created test for clear history called from a tool response

* code formatting

* added 'USER INTERRUPTED' as internal content of tool response

* added separate variable 'nr_messages_to_preserve_internal' (see the sketch after the changed-files summary below)

---------

Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Grigorij Dudnik 2024-03-02 19:03:52 +01:00 committed by GitHub
parent d60464374d
commit f2e42326e1
3 changed files with 127 additions and 8 deletions
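The rule this commit adds can be summarized with a short standalone sketch. This is a hypothetical helper, not the code in the diffs below; only the "tool_responses" key and the preserve-count adjustment follow the patch, the rest is illustrative.

def messages_to_keep(history: list[dict], nr_messages_to_preserve: int) -> list[dict]:
    # Trim to the last N messages, but keep one extra message if the cut would
    # otherwise start on a tool response, so its preceding tool call survives.
    # Assumes 0 < nr_messages_to_preserve <= len(history).
    nr_internal = nr_messages_to_preserve
    first_msg_to_save = history[-nr_internal]
    if "tool_responses" in first_msg_to_save:
        # a tool response without its tool call makes the next OpenAI request fail
        nr_internal += 1
    return history[-nr_internal:]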

View File

@@ -1126,8 +1126,18 @@ class ConversableAgent(LLMAgent):
if recipient is None:
if nr_messages_to_preserve:
for key in self._oai_messages:
nr_messages_to_preserve_internal = nr_messages_to_preserve
# if breaking history between a function call and its response, save the function call
# message additionally, otherwise OpenAI will return an error
first_msg_to_save = self._oai_messages[key][-nr_messages_to_preserve_internal]
if "tool_responses" in first_msg_to_save:
nr_messages_to_preserve_internal += 1
print(
f"Preserving one more message for {self.name} to not divide history between tool call and "
f"tool response."
)
# Remove messages from history except last `nr_messages_to_preserve` messages.
self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve:]
self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve_internal:]
else:
self._oai_messages.clear()
else:
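To illustrate the effect of this hunk, here is a hypothetical four-message history (the message shapes follow the tests further below; the values are made up):

history = [
    {"role": "user", "content": "hello"},
    {"role": "assistant", "content": None, "tool_calls": [
        {"id": "call_1", "type": "function", "function": {"name": "test_tool", "arguments": ""}}]},
    {"role": "tool", "content": "42",
     "tool_responses": [{"tool_call_id": "call_1", "role": "tool", "content": "42"}]},
    {"role": "user", "content": "thanks"},
]
# clear_history(nr_messages_to_preserve=2) would keep history[-2:], whose first
# message is the tool response; with this patch the internal count is bumped to 3
# so the matching tool call message is preserved as well.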

View File

@@ -596,9 +596,11 @@ class GroupChatManager(ConversableAgent):
if (
groupchat.enable_clear_history
and isinstance(reply, dict)
and reply["content"]
and "CLEAR HISTORY" in reply["content"].upper()
):
reply["content"] = self.clear_agents_history(reply["content"], groupchat)
reply["content"] = self.clear_agents_history(reply, groupchat)
# The speaker sends the message without requesting a reply
speaker.send(reply, self, request_reply=False)
message = self.last_message(speaker)
@@ -684,7 +686,7 @@ class GroupChatManager(ConversableAgent):
for agent in self._groupchat.agents:
agent._raise_exception_on_async_reply_functions()
def clear_agents_history(self, reply: str, groupchat: GroupChat) -> str:
def clear_agents_history(self, reply: dict, groupchat: GroupChat) -> str:
"""Clears history of messages for all agents or selected one. Can preserve selected number of last messages.
This function is called when the user manually provides the "clear history" phrase in their reply.
When "clear history" is provided, the history of messages for all agents is cleared.
@@ -696,23 +698,27 @@ class GroupChatManager(ConversableAgent):
Phrase "clear history" and optional arguments are cut out from the reply before it passed to the chat.
Args:
reply (str): Admin reply to analyse.
reply (dict): reply message dict to analyze.
groupchat (GroupChat): GroupChat object.
"""
reply_content = reply["content"]
# Split the reply into words
words = reply.split()
words = reply_content.split()
# Find the position of "clear" to determine where to start processing
clear_word_index = next(i for i in reversed(range(len(words))) if words[i].upper() == "CLEAR")
# Extract potential agent name and steps
words_to_check = words[clear_word_index + 2 : clear_word_index + 4]
nr_messages_to_preserve = None
nr_messages_to_preserve_provided = False
agent_to_memory_clear = None
for word in words_to_check:
if word.isdigit():
nr_messages_to_preserve = int(word)
nr_messages_to_preserve_provided = True
elif word[:-1].isdigit(): # for the case when number of messages is followed by dot or other sign
nr_messages_to_preserve = int(word[:-1])
nr_messages_to_preserve_provided = True
else:
for agent in groupchat.agents:
if agent.name == word:
@@ -721,6 +727,12 @@ class GroupChatManager(ConversableAgent):
elif agent.name == word[:-1]: # for the case when agent name is followed by dot or other sign
agent_to_memory_clear = agent
break
# preserve last tool call message if clear history called inside of tool response
if "tool_responses" in reply and not nr_messages_to_preserve:
nr_messages_to_preserve = 1
logger.warning(
"The last tool call message will be saved to prevent errors caused by tool response without tool call."
)
# clear history
if agent_to_memory_clear:
if nr_messages_to_preserve:
@@ -746,7 +758,7 @@ class GroupChatManager(ConversableAgent):
agent.clear_history(nr_messages_to_preserve=nr_messages_to_preserve)
# Reconstruct the reply without the "clear history" command and parameters
skip_words_number = 2 + int(bool(agent_to_memory_clear)) + int(bool(nr_messages_to_preserve))
reply = " ".join(words[:clear_word_index] + words[clear_word_index + skip_words_number :])
skip_words_number = 2 + int(bool(agent_to_memory_clear)) + int(nr_messages_to_preserve_provided)
reply_content = " ".join(words[:clear_word_index] + words[clear_word_index + skip_words_number :])
return reply
return reply_content
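A rough sketch of how the command is parsed, using the same split/index arithmetic as the hunk above (standalone and simplified; the example string and variable values are illustrative):

words = "clear history alice 1. How you doing?".split()
clear_word_index = next(i for i in reversed(range(len(words))) if words[i].upper() == "CLEAR")
words_to_check = words[clear_word_index + 2 : clear_word_index + 4]  # ["alice", "1."]
agent_name = words_to_check[0]                                       # agent whose history to clear
nr_messages_to_preserve = int(words_to_check[1].rstrip("."))         # trailing punctuation tolerated -> 1
skip_words_number = 2 + 1 + 1                                        # "clear history" + agent name + number
reply_content = " ".join(words[:clear_word_index] + words[clear_word_index + skip_words_number :])
assert reply_content == "How you doing?"

When the command arrives inside a tool response and no number is given, the patch falls back to nr_messages_to_preserve = 1 so the last tool call message stays paired with its response.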

View File

@@ -779,6 +779,103 @@ def test_clear_agents_history():
{"content": "How you doing?", "name": "sam", "role": "user"},
]
# test that the tool_call message is saved when clear history would otherwise remove it, leaving only the tool_response message
agent1.reset()
agent2.reset()
agent3.reset()
# we want to broadcast the message only in the preparation.
groupchat = autogen.GroupChat(agents=[agent1, agent2, agent3], messages=[], max_round=1, enable_clear_history=True)
group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)
# We want to trigger the broadcast of group chat manager, which requires `request_reply` to be set to True.
agent1.send("dummy message", group_chat_manager, request_reply=True)
agent1.send(
{
"content": None,
"role": "assistant",
"function_call": None,
"tool_calls": [
{"id": "call_test_id", "function": {"arguments": "", "name": "test_tool"}, "type": "function"}
],
},
group_chat_manager,
request_reply=True,
)
agent1.send(
{
"role": "tool",
"tool_responses": [{"tool_call_id": "call_emulated", "role": "tool", "content": "example tool response"}],
"content": "example tool response",
},
group_chat_manager,
request_reply=True,
)
# increase max_round to 3
groupchat.max_round = 3
group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)
with mock.patch.object(builtins, "input", lambda _: "clear history alice 1. How you doing?"):
agent1.initiate_chat(group_chat_manager, message="hello", clear_history=False)
agent1_history = list(agent1._oai_messages.values())[0]
assert agent1_history == [
{
"tool_calls": [
{"id": "call_test_id", "function": {"arguments": "", "name": "test_tool"}, "type": "function"},
],
"content": None,
"role": "assistant",
},
{
"content": "example tool response",
"tool_responses": [{"tool_call_id": "call_emulated", "role": "tool", "content": "example tool response"}],
"role": "tool",
},
]
# testing clear history called from tool response
agent1.reset()
agent2.reset()
agent3.reset()
agent2 = autogen.ConversableAgent(
"bob",
max_consecutive_auto_reply=10,
human_input_mode="NEVER",
llm_config=False,
default_auto_reply={
"role": "tool",
"tool_responses": [{"tool_call_id": "call_emulated", "role": "tool", "content": "USER INTERRUPTED"}],
"content": "Clear history. How you doing?",
},
)
groupchat = autogen.GroupChat(agents=[agent1, agent2, agent3], messages=[], max_round=1, enable_clear_history=True)
group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)
agent1.send("dummy message", group_chat_manager, request_reply=True)
agent1.send(
{
"content": None,
"role": "assistant",
"function_call": None,
"tool_calls": [
{"id": "call_test_id", "function": {"arguments": "", "name": "test_tool"}, "type": "function"}
],
},
group_chat_manager,
request_reply=True,
)
groupchat.max_round = 2
group_chat_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)
agent1.initiate_chat(group_chat_manager, message="hello")
agent1_history = list(agent1._oai_messages.values())[0]
assert agent1_history == [
{
"tool_calls": [
{"id": "call_test_id", "function": {"arguments": "", "name": "test_tool"}, "type": "function"},
],
"content": None,
"role": "assistant",
},
]
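As a usage note, a minimal sketch of triggering the feature from a group chat admin; the agent names and the reply text are illustrative, and only constructor arguments already shown in the tests above are assumed:

import autogen

alice = autogen.ConversableAgent("alice", human_input_mode="NEVER", llm_config=False)
bob = autogen.ConversableAgent("bob", human_input_mode="NEVER", llm_config=False)
admin = autogen.UserProxyAgent("admin", human_input_mode="ALWAYS", code_execution_config=False)

groupchat = autogen.GroupChat(agents=[alice, bob, admin], messages=[], max_round=10, enable_clear_history=True)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=False)
# An admin reply of "clear history alice 3. Please continue." clears alice's history
# down to her last 3 messages (one more if that cut would split a tool call from its
# response) and forwards "Please continue." to the chat.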
def test_get_agent_by_name():
def agent(name: str) -> autogen.ConversableAgent: