mirror of https://github.com/microsoft/autogen.git
improve test speed (#2406)
* improve test speed
* speed up test
* speed up test
This commit is contained in:
parent 297904f210
commit d307818dd9
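The diff below applies three recurring edits across the agentchat tests: it filters OAI_CONFIG_LIST entries by tags so cheaper models (mostly gpt-3.5-turbo) replace the GPT-4 variants, it tightens conversation caps (max_consecutive_auto_reply, max_round, max_turns), and it swaps each file's local try/except OpenAI import guard for the shared skip_openai/reason flags from conftest. A minimal sketch of the selection pattern the tests now rely on (KEY_LOC and the tag names are taken from the diff; entries in your own OAI_CONFIG_LIST must carry matching "tags" fields for the filters to match anything):

import autogen

# Load only the config entries tagged "gpt-3.5-turbo" from the JSON config file.
config_list = autogen.config_list_from_json(
    "OAI_CONFIG_LIST",
    file_location="notebook",  # KEY_LOC in these tests
    filter_dict={"tags": ["gpt-3.5-turbo"]},
)

# Narrow an already-loaded list further, e.g. to tool-capable entries.
config_list_tool = autogen.filter_config(config_list, {"tags": ["tool"]})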
@@ -39,7 +39,7 @@ if not skip_openai:
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         filter_dict={
-            "model": ["gpt-4", "gpt-4-0314", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "model": ["gpt-3.5-turbo"],
         },
         file_location=KEY_LOC,
     )
@@ -85,15 +85,14 @@ def test_agent_usage():
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
     )
     assistant = AssistantAgent(
         "assistant",
         system_message="You are a helpful assistant.",
         llm_config={
-            "timeout": 600,
             "cache_seed": None,
             "config_list": config_list,
-            "model": "gpt-3.5-turbo-0613",
         },
     )
 
@@ -104,7 +103,6 @@ def test_agent_usage():
         code_execution_config=False,
         llm_config={
             "config_list": config_list,
-            "model": "gpt-3.5-turbo-0613",
         },
         # In the system message the "user" always refers to the other agent.
         system_message="You ask a user for help. You check the answer from the user and provide feedback.",
@@ -140,5 +138,5 @@ def test_agent_usage():
 
 
 if __name__ == "__main__":
-    test_gathering()
+    # test_gathering()
     test_agent_usage()
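Tag filtering only matches entries that declare tags. A hypothetical OAI_CONFIG_LIST entry compatible with the filters above (shown as a Python dict; the file itself holds a JSON list, and the api_key is a placeholder):

entry = {
    "model": "gpt-3.5-turbo",
    "api_key": "sk-...",  # placeholder, not a real key
    "tags": ["gpt-3.5-turbo"],  # matched by filter_dict={"tags": ["gpt-3.5-turbo"]}
}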
@@ -9,14 +9,7 @@ import autogen
 from autogen.agentchat import AssistantAgent, UserProxyAgent
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
-
-try:
-    from openai import OpenAI
-except ImportError:
-    skip = True
-else:
-    skip = False or skip_openai
+from conftest import reason, skip_openai  # noqa: E402
 
 KEY_LOC = "notebook"
 OAI_CONFIG_LIST = "OAI_CONFIG_LIST"
@@ -24,8 +17,8 @@ here = os.path.abspath(os.path.dirname(__file__))
 
 
 @pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip,
-    reason="do not run on MacOS or windows OR openai not installed OR requested to skip",
+    sys.platform in ["darwin", "win32"] or skip_openai,
+    reason="do not run on MacOS or windows OR " + reason,
 )
 def test_ai_user_proxy_agent():
     conversations = {}
@@ -34,6 +27,7 @@ def test_ai_user_proxy_agent():
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
     )
     assistant = AssistantAgent(
         "assistant",
@@ -67,7 +61,7 @@ def test_ai_user_proxy_agent():
     print("Result summary:", res.summary)
 
 
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5):
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
@@ -111,9 +105,13 @@ If "Thank you" or "You\'re welcome" are said in the conversation, then say TERMINATE
     assert not isinstance(user.use_docker, bool)  # None or str
 
 
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
-def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=10):
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, file_location=KEY_LOC)
+@pytest.mark.skipif(skip_openai, reason=reason)
+def test_create_execute_script(human_input_mode="NEVER", max_consecutive_auto_reply=3):
+    config_list = autogen.config_list_from_json(
+        OAI_CONFIG_LIST,
+        file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
+    )
     conversations = {}
     # autogen.ChatCompletion.start_logging(conversations)
     llm_config = {
@@ -160,13 +158,13 @@ print('Hello world!')
     # autogen.ChatCompletion.stop_logging()
 
 
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
-def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
+@pytest.mark.skipif(skip_openai, reason=reason)
+def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=2):
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "tags": ["gpt-4", "gpt-4-32k"],
         },
     )
     hard_questions = [
@@ -207,4 +205,5 @@ if __name__ == "__main__":
     # when GPT-4, i.e., the DEFAULT_MODEL, is used, conversation in the following test
     # should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
     # although the max_consecutive_auto_reply is set to 10.
-    test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10)
+    test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=2)
+    # test_ai_user_proxy_agent()
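The import change above recurs in several files: the local try/except guard around the openai import is dropped in favor of two names defined once in the suite's conftest.py. Roughly what those names amount to (a sketch under assumptions; in the real conftest, skip_openai is wired to a pytest command-line option rather than hard-coded):

# conftest.py (sketch, not the actual file)
skip_openai = False  # set True when the test run asks to skip OpenAI-backed tests
reason = "requested to skip openai tests"  # message reused by the skipif decorators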
@@ -10,14 +10,7 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
 import autogen
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
-
-try:
-    from openai import OpenAI
-except ImportError:
-    skip = True
-else:
-    skip = False or skip_openai
+from conftest import reason, skip_openai  # noqa: E402
 
 
 def get_market_news(ind, ind_upper):
@@ -61,24 +54,15 @@ def get_market_news(ind, ind_upper):
     return feeds_summary
 
 
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_async_groupchat():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
-
-    llm_config = {
-        "timeout": 600,
-        "cache_seed": 41,
-        "config_list": config_list,
-        "temperature": 0,
-    }
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})
 
     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
         name="assistant",
         llm_config={
-            "timeout": 600,
-            "cache_seed": 41,
             "config_list": config_list,
             "temperature": 0,
         },
@@ -93,20 +77,21 @@ async def test_async_groupchat():
         default_auto_reply=None,
     )
 
-    groupchat = autogen.GroupChat(agents=[user_proxy, assistant], messages=[], max_round=12)
+    groupchat = autogen.GroupChat(
+        agents=[user_proxy, assistant], messages=[], max_round=3, speaker_selection_method="round_robin"
+    )
     manager = autogen.GroupChatManager(
         groupchat=groupchat,
-        llm_config=llm_config,
         is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
     )
-    await user_proxy.a_initiate_chat(manager, message="""Have a short conversation with the assistant.""")
+    await user_proxy.a_initiate_chat(manager, message="""223434*3422=?.""")
     assert len(user_proxy.chat_messages) > 0
 
 
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_stream():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})
     data = asyncio.Future()
 
     async def add_stock_price_data():
@@ -167,9 +152,10 @@ async def test_stream():
     while not data_task.done() and not data_task.cancelled():
         reply = await user_proxy.a_generate_reply(sender=assistant)
         if reply is not None:
-            res = await user_proxy.a_send(reply, assistant)
-            print("Chat summary and cost:", res.summary, res.cost)
+            await user_proxy.a_send(reply, assistant)
+            # print("Chat summary and cost:", res.summary, res.cost)
 
 
 if __name__ == "__main__":
-    asyncio.run(test_stream())
+    # asyncio.run(test_stream())
    asyncio.run(test_async_groupchat())
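test_async_groupchat now avoids two kinds of LLM calls at once: round_robin speaker selection means the GroupChat never asks a model to pick the next speaker, and the manager no longer carries an llm_config, so it never generates replies of its own. Condensed from the hunk above (agent construction elided; runs inside an async test function):

groupchat = autogen.GroupChat(
    agents=[user_proxy, assistant],
    messages=[],
    max_round=3,  # hard cap on rounds
    speaker_selection_method="round_robin",  # deterministic, no LLM call
)
manager = autogen.GroupChatManager(
    groupchat=groupchat,
    is_termination_msg=lambda x: "TERMINATE" in x.get("content", ""),
)
await user_proxy.a_initiate_chat(manager, message="223434*3422=?.")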
@@ -17,9 +17,10 @@ from conftest import skip_openai  # noqa: E402
 @pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
 @pytest.mark.asyncio
 async def test_async_chats():
-    config_list = autogen.config_list_from_json(
+    config_list_35 = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
     )
 
     financial_tasks = [
@@ -32,16 +33,16 @@ async def test_async_chats():
 
     financial_assistant_1 = AssistantAgent(
         name="Financial_assistant_1",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="You are a knowledgeable AI Assistant. Reply TERMINATE when everything is done.",
     )
     financial_assistant_2 = AssistantAgent(
         name="Financial_assistant_2",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         system_message="""
         You are a professional writer, known for
@@ -11,26 +11,19 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
 import autogen
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
-
-try:
-    from openai import OpenAI
-except ImportError:
-    skip = True
-else:
-    skip = False or skip_openai
+from conftest import reason, skip_openai  # noqa: E402
 
 
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_async_get_human_input():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})
 
     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
         name="assistant",
         max_consecutive_auto_reply=2,
-        llm_config={"seed": 41, "config_list": config_list, "temperature": 0},
+        llm_config={"config_list": config_list, "temperature": 0},
     )
 
     user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="ALWAYS", code_execution_config=False)
@@ -48,10 +41,10 @@ async def test_async_get_human_input():
     print("Human input:", res.human_input)
 
 
-@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+@pytest.mark.skipif(skip_openai, reason=reason)
 @pytest.mark.asyncio
 async def test_async_max_turn():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})
 
     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
@@ -79,5 +72,5 @@ async def test_async_max_turn():
 
 
 if __name__ == "__main__":
-    asyncio.run(test_async_get_human_input())
+    # asyncio.run(test_async_get_human_input())
     asyncio.run(test_async_max_turn())
@@ -9,10 +9,10 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
 from typing_extensions import Annotated
 
 import autogen
-from autogen import AssistantAgent, GroupChat, GroupChatManager, UserProxyAgent, initiate_chats
+from autogen import AssistantAgent, GroupChat, GroupChatManager, UserProxyAgent, filter_config, initiate_chats
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import skip_openai  # noqa: E402
+from conftest import reason, skip_openai  # noqa: E402
 
 config_list = (
     []
@@ -23,6 +23,18 @@ config_list = (
     )
 )
 
+config_list_35 = (
+    []
+    if skip_openai
+    else autogen.config_list_from_json(
+        OAI_CONFIG_LIST,
+        file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
+    )
+)
+
+config_list_tool = filter_config(config_list_35, {"tags": ["tool"]})
+
 
 def test_chat_messages_for_summary():
     assistant = UserProxyAgent(name="assistant", human_input_mode="NEVER", code_execution_config={"use_docker": False})
@@ -45,7 +57,7 @@ def test_chat_messages_for_summary():
     assert len(messages) == 2
 
 
-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_group():
     financial_tasks = [
         """What are the full names of NVDA and TESLA.""",
@@ -68,12 +80,12 @@ def test_chats_group():
 
     financial_assistant = AssistantAgent(
         name="Financial_assistant",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
 
     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="""
         You are a professional writer, known for
         your insightful and engaging articles.
@@ -87,7 +99,7 @@ def test_chats_group():
         system_message="""Critic. Double check plan, claims, code from other agents and provide feedback. Check whether the plan includes adding verifiable info such as source URL.
 Reply "TERMINATE" in the end when everything is done.
 """,
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
 
     groupchat_1 = GroupChat(agents=[user_proxy, financial_assistant, critic], messages=[], max_round=3)
@@ -97,7 +109,7 @@ def test_chats_group():
     manager_1 = GroupChatManager(
         groupchat=groupchat_1,
         name="Research_manager",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         code_execution_config={
             "last_n_messages": 1,
             "work_dir": "groupchat",
@@ -108,7 +120,7 @@ def test_chats_group():
     manager_2 = GroupChatManager(
         groupchat=groupchat_2,
         name="Writing_manager",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         code_execution_config={
             "last_n_messages": 1,
             "work_dir": "groupchat",
@@ -154,7 +166,7 @@ def test_chats_group():
     print(all_res[1].summary)
 
 
-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats():
     import random
 
@@ -182,17 +194,17 @@ def test_chats():
     func = Function()
     financial_assistant_1 = AssistantAgent(
         name="Financial_assistant_1",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         function_map={"get_random_number": func.get_random_number},
     )
     financial_assistant_2 = AssistantAgent(
         name="Financial_assistant_2",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         function_map={"get_random_number": func.get_random_number},
     )
     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         system_message="""
         You are a professional writer, known for
@@ -284,7 +296,7 @@ def test_chats():
     # print(blogpost.summary, insights_and_blogpost)
 
 
-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_general():
     financial_tasks = [
         """What are the full names of NVDA and TESLA.""",
@@ -296,15 +308,15 @@ def test_chats_general():
 
     financial_assistant_1 = AssistantAgent(
         name="Financial_assistant_1",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
     financial_assistant_2 = AssistantAgent(
         name="Financial_assistant_2",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
     )
     writer = AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
         system_message="""
         You are a professional writer, known for
@@ -388,7 +400,7 @@ def test_chats_general():
     # print(blogpost.summary, insights_and_blogpost)
 
 
-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_exceptions():
     financial_tasks = [
         """What are the full names of NVDA and TESLA.""",
@@ -472,10 +484,10 @@ def test_chats_exceptions():
     )
 
 
-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_chats_w_func():
     llm_config = {
-        "config_list": config_list,
+        "config_list": config_list_tool,
         "timeout": 120,
     }
 
@@ -528,9 +540,9 @@ def test_chats_w_func():
     print(res.summary, res.cost, res.chat_history)
 
 
-@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_udf_message_in_chats():
-    llm_config = {"config_list": config_list}
+    llm_config_35 = {"config_list": config_list_35}
 
     research_task = """
     ## NVDA (NVIDIA Corporation)
@@ -560,11 +572,11 @@ def test_udf_message_in_chats():
 
     researcher = autogen.AssistantAgent(
         name="Financial_researcher",
-        llm_config=llm_config,
+        llm_config=llm_config_35,
     )
     writer = autogen.AssistantAgent(
         name="Writer",
-        llm_config=llm_config,
+        llm_config=llm_config_35,
         system_message="""
         You are a professional writer, known for
         your insightful and engaging articles.
@@ -609,8 +621,8 @@ def test_udf_message_in_chats():
 
 
 if __name__ == "__main__":
-    # test_chats()
-    test_chats_general()
+    test_chats()
+    # test_chats_general()
     # test_chats_exceptions()
     # test_chats_group()
     # test_chats_w_func()
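filter_config, newly imported in this file, applies the same matching as filter_dict does at load time, but to an already-loaded list: an entry survives when, for every key in the filter, its value overlaps the allowed values. A small self-contained example with invented data:

from autogen import filter_config

configs = [
    {"model": "gpt-3.5-turbo", "tags": ["gpt-3.5-turbo", "tool"]},
    {"model": "gpt-3.5-turbo-16k", "tags": ["gpt-3.5-turbo-16k"]},
]
# Only the first entry carries the "tool" tag.
assert filter_config(configs, {"tags": ["tool"]}) == [configs[0]]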
@@ -21,10 +21,9 @@ from autogen.agentchat.conversable_agent import register_function
 from autogen.exception_utils import InvalidCarryOverType, SenderRequired
 
 sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from conftest import MOCK_OPEN_AI_API_KEY, skip_openai  # noqa: E402
+from conftest import MOCK_OPEN_AI_API_KEY, reason, skip_openai  # noqa: E402
 
 here = os.path.abspath(os.path.dirname(__file__))
-REASON = "requested to skip openai tests"
 
 
 @pytest.fixture
@@ -918,13 +917,13 @@ def test_register_functions():
 
 @pytest.mark.skipif(
     skip_openai,
-    reason=REASON,
+    reason=reason,
 )
 def test_function_registration_e2e_sync() -> None:
     config_list = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
         filter_dict={
-            "model": ["gpt-4", "gpt-4-0314", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "tags": ["tool"],
         },
         file_location=KEY_LOC,
     )
@@ -995,7 +994,7 @@ def test_function_registration_e2e_sync() -> None:
 
 @pytest.mark.skipif(
     skip_openai,
-    reason=REASON,
+    reason=reason,
 )
 @pytest.mark.asyncio()
 async def test_function_registration_e2e_async() -> None:
@@ -1071,15 +1070,15 @@ async def test_function_registration_e2e_async() -> None:
     stopwatch_mock.assert_called_once_with(num_seconds="2")
 
 
-@pytest.mark.skipif(skip_openai, reason=REASON)
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_max_turn():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"model": ["gpt-3.5-turbo"]})
 
     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
         name="assistant",
         max_consecutive_auto_reply=10,
-        llm_config={"timeout": 600, "cache_seed": 41, "config_list": config_list},
+        llm_config={"config_list": config_list},
     )
 
     user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="ALWAYS", code_execution_config=False)
@@ -1093,7 +1092,7 @@ def test_max_turn():
     assert len(res.chat_history) <= 6
 
 
-@pytest.mark.skipif(skip_openai, reason=REASON)
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_message_func():
     import random
 
@@ -1149,7 +1148,7 @@ def test_message_func():
     print(chat_res_play.summary)
 
 
-@pytest.mark.skipif(skip_openai, reason=REASON)
+@pytest.mark.skipif(skip_openai, reason=reason)
 def test_summary():
     import random
 
@@ -1161,8 +1160,7 @@ def test_summary():
         return random.randint(0, 100)
 
     config_list = autogen.config_list_from_json(
-        OAI_CONFIG_LIST,
-        file_location=KEY_LOC,
+        OAI_CONFIG_LIST, file_location=KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]}
     )
 
     def my_message_play(sender, recipient, context):
@@ -1322,5 +1320,6 @@ if __name__ == "__main__":
     # test_no_llm_config()
     # test_max_turn()
     # test_process_before_send()
-    test_message_func()
+    # test_message_func()
+    test_summary()
     # test_function_registration_e2e_sync()
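Alongside the cheaper model, test_max_turn keeps its hard stop: initiate_chat's max_turns bounds the dialogue no matter how the agents behave, which is why the existing assertion on chat_history still holds. A sketch of the pattern (message and cap chosen arbitrarily):

res = user_proxy.initiate_chat(assistant, message="2+2=?", max_turns=3)
assert len(res.chat_history) <= 6  # at most one message per agent per turn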
@@ -45,21 +45,18 @@ async def test_function_call_groupchat(key, value, sync):
             self.call_count += 1
             return random.randint(0, 100)
 
-    config_list_gpt4 = autogen.config_list_from_json(
+    # llm_config without functions
+    config_list_35 = autogen.config_list_from_json(
         OAI_CONFIG_LIST,
-        filter_dict={
-            "model": ["gpt-4", "gpt-4-0314", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
-        },
         file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"]},
     )
+    llm_config_no_function = {"config_list": config_list_35}
+    config_list_tool = autogen.filter_config(config_list_35, {"tags": ["tool"]})
     llm_config = {
-        "config_list": config_list_gpt4,
-        "cache_seed": 42,
+        "config_list": config_list_tool,
         key: value,
     }
-    # llm_config without functions
-    llm_config_no_function = llm_config.copy()
-    del llm_config_no_function[key]
 
     func = Function()
     user_proxy = autogen.UserProxyAgent(
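The rewritten setup derives the function-calling configs from the gpt-3.5 list instead of a GPT-4 list, then injects the schema under the parametrized key. Roughly what the resulting llm_config carries (the schema is written out by hand here as an assumption; the parametrized key is "functions" or "tools" depending on the test case, and the exact schema shape differs between the two):

get_random_number_schema = {
    "name": "get_random_number",
    "description": "Get a random number between 0 and 100.",
    "parameters": {"type": "object", "properties": {}},
}
llm_config = {
    "config_list": config_list_tool,  # entries tagged "tool"
    "functions": [get_random_number_schema],
}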
@@ -15,7 +15,7 @@ from conftest import reason, skip_openai  # noqa: E402
 
 @pytest.mark.skipif(skip_openai, reason=reason)
 def test_get_human_input():
-    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+    config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC, filter_dict={"tags": ["gpt-3.5-turbo"]})
 
     # create an AssistantAgent instance named "assistant"
     assistant = autogen.AssistantAgent(
@@ -38,14 +38,13 @@ def test_math_user_proxy_agent():
         OAI_CONFIG_LIST,
         file_location=KEY_LOC,
         filter_dict={
-            "model": ["gpt-4", "gpt4", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-v0314"],
+            "tags": ["gpt-3.5-turbo"],
         },
     )
     assistant = AssistantAgent(
         "assistant",
         system_message="You are a helpful assistant.",
         llm_config={
-            "timeout": 600,
             "cache_seed": 42,
             "config_list": config_list,
         },
@@ -127,5 +126,5 @@ def test_generate_prompt():
 if __name__ == "__main__":
     # test_add_remove_print()
     # test_execute_one_python_code()
-    test_generate_prompt()
+    # test_generate_prompt()
     test_math_user_proxy_agent()
@@ -16,6 +16,11 @@ from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST  # noqa: E402
 @pytest.mark.skipif(skip_openai, reason=reason)
 def test_nested():
     config_list = autogen.config_list_from_json(env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC)
+    config_list_35 = autogen.config_list_from_json(
+        OAI_CONFIG_LIST,
+        file_location=KEY_LOC,
+        filter_dict={"tags": ["gpt-3.5-turbo"]},
+    )
     llm_config = {"config_list": config_list}
 
     tasks = [
@@ -60,13 +65,13 @@ def test_nested():
 
     assistant = autogen.AssistantAgent(
         name="Assistant",
-        llm_config={"config_list": config_list},
+        llm_config=False,
         # is_termination_msg=lambda x: x.get("content", "") == "",
     )
 
     assistant_2 = autogen.AssistantAgent(
         name="Assistant",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         # is_termination_msg=lambda x: x.get("content", "") == "",
     )
 
@@ -94,7 +99,7 @@ def test_nested():
 
     writer = autogen.AssistantAgent(
         name="Writer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="""
         You are a professional writer, known for
         your insightful and engaging articles.
@@ -105,7 +110,7 @@ def test_nested():
 
     autogen.AssistantAgent(
         name="Reviewer",
-        llm_config={"config_list": config_list},
+        llm_config={"config_list": config_list_35},
         system_message="""
         You are a compliance reviewer, known for your thoroughness and commitment to standards.
         Your task is to scrutinize content for any harmful elements or regulatory violations, ensuring
@@ -130,7 +135,10 @@ def test_nested():
         trigger=user,
     )
     user.initiate_chats(
-        [{"recipient": assistant, "message": tasks[0], "max_turns": 1}, {"recipient": assistant_2, "message": tasks[1]}]
+        [
+            {"recipient": assistant, "message": tasks[0], "max_turns": 1},
+            {"recipient": assistant_2, "message": tasks[1], "max_turns": 1},
+        ]
     )
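In test_nested the first assistant now runs with llm_config=False, which disables model calls for that agent entirely while leaving its other reply mechanisms intact, and both nested chats are capped at max_turns=1. The same trick is useful for any smoke test whose agent must never hit the API (sketch):

silent_assistant = autogen.AssistantAgent(
    name="Assistant",
    llm_config=False,  # never call a model; rely on default/registered replies
)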