Autobuild Function calling (#3238)

* [Fix] Precommit issues

* [Fix] checks

* [Fix] iterating through list_of_functions

* [Fix] pre-commit checks

* Update test/agentchat/contrib/test_agent_builder.py

Co-authored-by: Chi Wang <4250911+sonichi@users.noreply.github.com>

---------

Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: Ryan Sweet <rysweet@microsoft.com>
Co-authored-by: Chi Wang <4250911+sonichi@users.noreply.github.com>
Krishna Shedbalkar 2024-10-12 06:39:12 +05:30 committed by GitHub
parent 11ef58b98e
commit 4f060e5efa
3 changed files with 671 additions and 19 deletions
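
What this PR does, in short: `AgentBuilder.build()` now accepts a `list_of_functions`; for each entry the builder model picks the most suitable generated agent, the function is registered with that agent as caller and the user proxy as executor, and the chosen agent's system message is updated to mention the tool. A minimal usage sketch (the task string and the callable are illustrative; the entry schema follows the new tests below):

```python
import autogen
from autogen.agentchat.contrib.agent_builder import AgentBuilder


def ask_ossinsight(question: str) -> str:
    # Illustrative stand-in for a real data API call.
    return "The repository microsoft/autogen has 123,456 stars on GitHub."


builder = AgentBuilder(
    config_file_or_env="OAI_CONFIG_LIST", builder_model="gpt-4", agent_model="gpt-4"
)

# Each entry carries the function's name, a description, and the callable itself.
list_of_functions = [
    {
        "name": "ossinsight_data_api",
        "description": "Retrieves structured data about GitHub repositories.",
        "function": ask_ossinsight,
    }
]

agent_list, agent_configs = builder.build(
    building_task="How many stars does microsoft/autogen have on GitHub?",
    default_llm_config={"temperature": 0},
    list_of_functions=list_of_functions,  # new parameter added by this PR
)
```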

autogen/agentchat/contrib/agent_builder.py

@@ -172,6 +172,26 @@ Match roles in the role set to each expert in expert set.
```
"""
AGENT_FUNCTION_MAP_PROMPT = """Consider the following function.
Function Name: {function_name}
Function Description: {function_description}
The agent details are given in the format: {format_agent_details}
Which one of the following agents should be able to execute this function, preferably an agent with a programming background?
{agent_details}
Hint:
# Only respond with the name of the agent that is most suited to execute the function and nothing else.
"""
UPDATED_AGENT_SYSTEM_MESSAGE = """
{agent_system_message}
You have access to execute the function: {function_name}.
With the following description: {function_description}
"""
def __init__(
self,
config_file_or_env: Optional[str] = "OAI_CONFIG_LIST",
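
For intuition, here is roughly what the builder model is asked when mapping a function to an agent. This sketch assumes `AGENT_FUNCTION_MAP_PROMPT` is a class-level attribute (as the `self.` access further down suggests); the function and agent details are made up, and the model is expected to answer with a bare agent name:

```python
import json

from autogen.agentchat.contrib.agent_builder import AgentBuilder

# Illustrative agent details; _build_agents collects these from the generated configs.
agent_details = [
    {"name": "DataAnalyst_Expert", "description": "Analyzes data and writes Python code."},
]

prompt = AgentBuilder.AGENT_FUNCTION_MAP_PROMPT.format(
    function_name="ossinsight_data_api",
    function_description="Retrieves structured data about GitHub repositories.",
    format_agent_details='[{"name": "agent_name", "description": "agent description"}, ...]',
    agent_details=json.dumps(agent_details),
)
# Expected reply: the bare string "DataAnalyst_Expert". That name then keys into
# agent_procs_assign to select the caller agent for function registration.
```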
@@ -358,6 +378,7 @@ Match roles in the role set to each expert in expert set.
self,
building_task: str,
default_llm_config: Dict,
list_of_functions: Optional[List[Dict]] = None,
coding: Optional[bool] = None,
code_execution_config: Optional[Dict] = None,
use_oai_assistant: Optional[bool] = False,
@@ -373,6 +394,7 @@ Match roles in the role set to each expert in expert set.
coding: use to identify if the user proxy (a code interpreter) should be added.
code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...).
default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...).
list_of_functions: list of functions to be associated with agents.
use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
user_proxy: user proxy's class that can be used to replace the default user proxy.
@@ -480,8 +502,9 @@ Match roles in the role set to each expert in expert set.
"code_execution_config": code_execution_config,
}
)
_config_check(self.cached_configs)
-return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
+return self._build_agents(use_oai_assistant, list_of_functions, user_proxy=user_proxy, **kwargs)
def build_from_library(
self,
@@ -653,13 +676,18 @@ Match roles in the role set to each expert in expert set.
return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
def _build_agents(
-self, use_oai_assistant: Optional[bool] = False, user_proxy: Optional[autogen.ConversableAgent] = None, **kwargs
+self,
+use_oai_assistant: Optional[bool] = False,
+list_of_functions: Optional[List[Dict]] = None,
+user_proxy: Optional[autogen.ConversableAgent] = None,
+**kwargs,
) -> Tuple[List[autogen.ConversableAgent], Dict]:
"""
Build agents with generated configs.
Args:
use_oai_assistant: use OpenAI assistant api instead of self-constructed agent.
list_of_functions: list of functions to be associated with agents.
user_proxy: user proxy's class that can be used to replace the default user proxy.
Returns:
@@ -695,6 +723,53 @@ Match roles in the role set to each expert in expert set.
)
agent_list = agent_list + [user_proxy]
agent_details = []
for agent in agent_list[:-1]:
agent_details.append({"name": agent.name, "description": agent.description})
if list_of_functions:
for func in list_of_functions:
resp = (
self.builder_model.create(
messages=[
{
"role": "user",
"content": self.AGENT_FUNCTION_MAP_PROMPT.format(
function_name=func["name"],
function_description=func["description"],
format_agent_details='[{"name": "agent_name", "description": "agent description"}, ...]',
agent_details=str(json.dumps(agent_details)),
),
}
]
)
.choices[0]
.message.content
)
autogen.agentchat.register_function(
func["function"],
caller=self.agent_procs_assign[resp][0],
executor=agent_list[0],
name=func["name"],
description=func["description"],
)
agents_current_system_message = [
agent["system_message"] for agent in agent_configs if agent["name"] == resp
][0]
self.agent_procs_assign[resp][0].update_system_message(
self.UPDATED_AGENT_SYSTEM_MESSAGE.format(
agent_system_message=agents_current_system_message,
function_name=func["name"],
function_description=func["description"],
)
)
print(f"Function {func['name']} is registered to agent {resp}.")
return agent_list, self.cached_configs.copy()
def save(self, filepath: Optional[str] = None) -> str:
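
The registration above relies on autogen's caller/executor split: the selected expert only advertises the tool schema to the LLM, while the user proxy (`agent_list[0]` after the build) actually runs the call. A standalone sketch of the same mechanism, with made-up agent names:

```python
import autogen

llm_config = {"config_list": autogen.config_list_from_json("OAI_CONFIG_LIST")}

expert = autogen.AssistantAgent("DataAnalyst_Expert", llm_config=llm_config)
user_proxy = autogen.UserProxyAgent(
    "User_console_and_code_interpreter", human_input_mode="NEVER", code_execution_config=False
)


def ask_ossinsight(question: str) -> str:
    # Illustrative stand-in for a real data API call.
    return "The repository microsoft/autogen has 123,456 stars on GitHub."


autogen.agentchat.register_function(
    ask_ossinsight,
    caller=expert,        # the tool schema is added to this agent's llm_config
    executor=user_proxy,  # this agent executes the call and returns the result
    name="ossinsight_data_api",
    description="Retrieves structured data about GitHub repositories.",
)
```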

File diff suppressed because one or more lines are too long

test/agentchat/contrib/test_agent_builder.py

@@ -3,15 +3,18 @@
import json
import os
import sys
from unittest.mock import MagicMock, patch
import pytest
import autogen
from autogen.agentchat.contrib.agent_builder import AgentBuilder
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
from conftest import reason, skip_openai # noqa: E402
-from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST  # noqa: E402
+from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST  # noqa: E402 # noqa: E402
try:
import chromadb
@@ -22,6 +25,7 @@ else:
skip = False
here = os.path.abspath(os.path.dirname(__file__))
llm_config = {"temperature": 0}
def _config_check(config):
@@ -37,10 +41,27 @@ def _config_check(config):
assert agent_config.get("system_message", None) is not None
-@pytest.mark.skipif(
-skip_openai,
-reason=reason,
-)
# Function that initializes a group chat with the agents and starts an execution task.
def start_task(execution_task: str, agent_list: list):
group_chat = autogen.GroupChat(agents=agent_list, messages=[], max_round=12)
manager = autogen.GroupChatManager(
groupchat=group_chat,
llm_config={"config_list": autogen.config_list_from_json(f"{KEY_LOC}/{OAI_CONFIG_LIST}"), **llm_config},
)
agent_list[0].initiate_chat(manager, message=execution_task)
ask_ossinsight_mock = MagicMock()
# Stub function used to test function calling; records each call via ask_ossinsight_mock.
def ask_ossinsight(question: str) -> str:
ask_ossinsight_mock(question)
return "The repository microsoft/autogen has 123,456 stars on GitHub."
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_build():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -69,6 +90,99 @@ def test_build():
assert len(agent_config["agent_configs"]) <= builder.max_agents
@pytest.mark.skipif(skip_openai or skip, reason=reason + "OR dependency not installed")
def test_build_assistant_with_function_calling():
list_of_functions = [
{
"name": "ossinsight_data_api",
"description": "This is an API endpoint allowing users (analysts) to input question about GitHub in text format to retrieve the related and structured data.",
"function": ask_ossinsight,
}
]
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST, config_file_location=KEY_LOC, builder_model="gpt-4", agent_model="gpt-4"
)
building_task = "How many stars microsoft/autogen has on GitHub?"
agent_list, agent_config = builder.build(
building_task=building_task,
default_llm_config={"temperature": 0},
code_execution_config={
"last_n_messages": 2,
"work_dir": f"{here}/test_agent_scripts",
"timeout": 60,
"use_docker": "python:3",
},
list_of_functions=list_of_functions,
)
_config_check(agent_config)
# check number of agents
assert len(agent_config["agent_configs"]) <= builder.max_agents
# Mock the 'ask_ossinsight' function in the current module using a context manager.
with patch(f"{__name__}.ask_ossinsight") as mocked_function:
# Execute 'start_task' which should trigger 'ask_ossinsight' due to the given execution task.
start_task(
execution_task="How many stars microsoft/autogen has on GitHub?",
agent_list=agent_list,
)
# Verify that 'ask_ossinsight' was called during the task execution.
mocked_function.assert_called()
@pytest.mark.skipif(
skip_openai,
reason="requested to skip",
)
def test_build_gpt_assistant_with_function_calling():
list_of_functions = [
{
"name": "ossinsight_data_api",
"description": "This is an API endpoint allowing users (analysts) to input question about GitHub in text format to retrieve the related and structured data.",
"function": ask_ossinsight,
}
]
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST, config_file_location=KEY_LOC, builder_model="gpt-4", agent_model="gpt-4"
)
building_task = "Determine number of stars of GitHub repositories"
agent_list, agent_config = builder.build(
building_task=building_task,
default_llm_config={"temperature": 0},
code_execution_config={
"last_n_messages": 2,
"work_dir": f"{here}/test_agent_scripts",
"timeout": 60,
"use_docker": "python:3",
},
list_of_functions=list_of_functions,
use_oai_assistant=True,
)
_config_check(agent_config)
# check number of agents
assert len(agent_config["agent_configs"]) <= builder.max_agents
# Mock the 'ask_ossinsight' function in the current module using a context manager.
with patch(f"{__name__}.ask_ossinsight") as mocked_function:
# Execute 'start_task' which should trigger 'ask_ossinsight' due to the given execution task.
start_task(
execution_task="How many stars microsoft/autogen has on GitHub?",
agent_list=agent_list,
)
# Verify that 'ask_ossinsight' was called during the task execution.
mocked_function.assert_called()
@pytest.mark.skipif(
skip_openai or skip,
reason=reason + "OR dependency not installed",
@@ -122,10 +236,7 @@ def test_build_from_library():
assert len(agent_config["agent_configs"]) <= builder.max_agents
-@pytest.mark.skipif(
-skip_openai,
-reason=reason,
-)
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_save():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -159,10 +270,7 @@ def test_save():
_config_check(saved_configs)
-@pytest.mark.skipif(
-skip_openai,
-reason=reason,
-)
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_load():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -188,10 +296,7 @@ def test_load():
_config_check(loaded_agent_configs)
-@pytest.mark.skipif(
-skip_openai,
-reason=reason,
-)
+@pytest.mark.skipif(skip_openai, reason=reason)
def test_clear_agent():
builder = AgentBuilder(
config_file_or_env=OAI_CONFIG_LIST,
@@ -218,6 +323,8 @@ def test_clear_agent():
if __name__ == "__main__":
test_build()
test_build_assistant_with_function_calling()
test_build_gpt_assistant_with_function_calling()
test_build_from_library()
test_save()
test_load()
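
A note on the mocking pattern used in both new tests: `patch(f"{__name__}.ask_ossinsight")` swaps out the module-level attribute only for the duration of the `with` block, and `assert_called()` checks for at least one call (use `assert_called_once()` to require exactly one). A minimal self-contained illustration of the same pattern:

```python
from unittest.mock import patch


def greet(name: str) -> str:
    return f"hello {name}"


def caller() -> str:
    # Resolves `greet` through the module globals at call time,
    # so a patch on this module's attribute is visible here.
    return greet("autogen")


with patch(f"{__name__}.greet") as mocked:
    mocked.return_value = "patched"
    assert caller() == "patched"
    mocked.assert_called()  # passes for one or more calls
```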