support async in agents (#1178)

* Make auto reply method pluggable

* support async

* async

* allow richer trigger types

* test list

* rename key
Chi Wang 2023-08-07 18:34:47 -07:00 committed by GitHub
parent a603e6dddc
commit c44d2f4a01
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 1111 additions and 39 deletions

View File

@ -27,9 +27,15 @@ class Agent:
def send(self, message: Union[Dict, str], recipient: "Agent", request_reply: Optional[bool] = None):
"""(Aabstract method) Send a message to another agent."""
async def a_send(self, message: Union[Dict, str], recipient: "Agent", request_reply: Optional[bool] = None):
"""(Aabstract async method) Send a message to another agent."""
def receive(self, message: Union[Dict, str], sender: "Agent", request_reply: Optional[bool] = None):
"""(Abstract method) Receive a message from another agent."""
async def a_receive(self, message: Union[Dict, str], sender: "Agent", request_reply: Optional[bool] = None):
"""(Abstract async method) Receive a message from another agent."""
def reset(self):
"""(Abstract method) Reset the agent."""
@ -37,6 +43,7 @@ class Agent:
self,
messages: Optional[List[Dict]] = None,
sender: Optional["Agent"] = None,
**kwargs,
) -> Union[str, Dict, None]:
"""(Abstract method) Generate a reply based on the received messages.
@ -46,3 +53,18 @@ class Agent:
Returns:
str or dict or None: the generated reply. If None, no reply is generated.
"""
async def a_generate_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional["Agent"] = None,
**kwargs,
) -> Union[str, Dict, None]:
"""(Abstract async method) Generate a reply based on the received messages.
Args:
messages (list[dict]): a list of messages received.
sender: sender of an Agent instance.
Returns:
str or dict or None: the generated reply. If None, no reply is generated.
"""

View File

@ -1,3 +1,4 @@
import asyncio
from collections import defaultdict
import copy
import json
@ -309,6 +310,48 @@ class ResponsiveAgent(Agent):
"Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
async def a_send(self, message: Union[Dict, str], recipient: Agent, request_reply: Optional[bool] = None) -> bool:
"""(async) Send a message to another agent.
Args:
message (dict or str): message to be sent.
The message could contain the following fields (either content or function_call must be provided):
- content (str): the content of the message.
- function_call (dict): a dictionary containing the function name and arguments.
- name (str): the name of the function, only needed when the role is "function".
- role (str): the role of the message, any role that is not "function"
will be modified to "assistant".
- context (dict): the context of the message, which will be passed to
[autogen.Completion.create](../oai/Completion#create).
For example, one agent can send a message A as:
```python
{
"content": lambda context: context["use_tool_msg"],
"context": {
"use_tool_msg": "Use tool X if they are relevant."
}
}
```
Next time, one agent can send a message B with a different "use_tool_msg".
Then the content of message A will be refreshed to the new "use_tool_msg".
So effectively, this provides a way for an agent to send a "link" and modify
the content of the "link" later.
recipient (Agent): the recipient of the message.
request_reply (bool or None): whether to request a reply from the recipient.
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
# When the agent composes and sends the message, the role of the message is "assistant"
# unless it's "function".
valid = self._append_oai_message(message, "assistant", recipient)
if valid:
await recipient.a_receive(message, self, request_reply)
else:
raise ValueError(
"Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
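# A hedged illustration of the context-refresh ("link") mechanism described in
# the docstring above. Agent names and settings are illustrative; with llm_config
# disabled no LLM call happens, and the callable content would be resolved against
# the latest context by autogen.Completion.create when an LLM reply is generated.
import asyncio

from flaml import autogen

agent_a = autogen.UserProxyAgent("agent_a", human_input_mode="NEVER", code_execution_config=False)
agent_b = autogen.UserProxyAgent("agent_b", human_input_mode="NEVER", code_execution_config=False)


async def _demo():
    # message A: its content is a callable evaluated against the message context
    await agent_a.a_send(
        {
            "content": lambda context: context["use_tool_msg"],
            "context": {"use_tool_msg": "Use tool X if they are relevant."},
        },
        agent_b,
        request_reply=False,
    )
    # a later message with a new "use_tool_msg" refreshes what message A resolves to
    await agent_a.a_send(
        {"content": "Continue.", "context": {"use_tool_msg": "Use tool Y instead."}},
        agent_b,
        request_reply=False,
    )

asyncio.run(_demo())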
def _print_received_message(self, message: Union[Dict, str], sender: Agent):
# print the message received
print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True)
@ -339,6 +382,16 @@ class ResponsiveAgent(Agent):
print(colored("*" * len(func_print), "green"), flush=True)
print("\n", "-" * 80, flush=True, sep="")
def _process_received_message(self, message, sender):
message = self._message_to_dict(message)
# When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
valid = self._append_oai_message(message, "user", sender)
if not valid:
raise ValueError(
"Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
self._print_received_message(message, sender)
def receive(self, message: Union[Dict, str], sender: Agent, request_reply: Optional[bool] = None):
"""Receive a message from another agent.
@ -361,20 +414,50 @@ class ResponsiveAgent(Agent):
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
message = self._message_to_dict(message)
# When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
valid = self._append_oai_message(message, "user", sender)
if not valid:
raise ValueError(
"Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
)
self._print_received_message(message, sender)
self._process_received_message(message, sender)
if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
return
reply = self.generate_reply(sender=sender)
if reply is not None:
self.send(reply, sender)
async def a_receive(self, message: Union[Dict, str], sender: Agent, request_reply: Optional[bool] = None):
"""(async) Receive a message from another agent.
Once a message is received, this function sends a reply to the sender or stops.
The reply can be generated automatically or entered manually by a human.
Args:
message (dict or str): message from the sender. If the type is dict, it may contain the following reserved fields (either content or function_call needs to be provided).
1. "content": content of the message, can be None.
2. "function_call": a dictionary containing the function name and arguments.
3. "role": role of the message, can be "assistant", "user", "function".
This field is only needed to distinguish between the "function" role and the "assistant"/"user" roles.
4. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
5. "context" (dict): the context of the message, which will be passed to
[autogen.Completion.create](../oai/Completion#create).
sender: sender of an Agent instance.
request_reply (bool or None): whether a reply is requested from the sender.
If None, the value is determined by `self.reply_at_receive[sender]`.
Raises:
ValueError: if the message can't be converted into a valid ChatCompletion message.
"""
self._process_received_message(message, sender)
if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
return
reply = await self.a_generate_reply(sender=sender)
if reply is not None:
await self.a_send(reply, sender)
def _prepare_chat(self, recipient, clear_history):
self.reset_consecutive_auto_reply_counter(recipient)
recipient.reset_consecutive_auto_reply_counter(self)
self.reply_at_receive[recipient] = recipient.reply_at_receive[self] = True
if clear_history:
self.clear_history(recipient)
recipient.clear_history(self)
def initiate_chat(self, recipient: "ResponsiveAgent", clear_history: Optional[bool] = True, **context):
"""Initiate a chat with the recipient agent.
@ -388,14 +471,25 @@ class ResponsiveAgent(Agent):
**context: any context information.
"message" needs to be provided if the `generate_init_message` method is not overridden.
"""
self.reset_consecutive_auto_reply_counter(recipient)
recipient.reset_consecutive_auto_reply_counter(self)
self.reply_at_receive[recipient] = recipient.reply_at_receive[self] = True
if clear_history:
self.clear_history(recipient)
recipient.clear_history(self)
self._prepare_chat(recipient, clear_history)
self.send(self.generate_init_message(**context), recipient)
async def a_initiate_chat(self, recipient: "ResponsiveAgent", clear_history: Optional[bool] = True, **context):
"""(async) Initiate a chat with the recipient agent.
Reset the consecutive auto reply counter.
If `clear_history` is True, the chat history with the recipient agent will be cleared.
`generate_init_message` is called to generate the initial message for the agent.
Args:
recipient: the recipient agent.
clear_history (bool): whether to clear the chat history with the agent.
**context: any context information.
"message" needs to be provided if the `generate_init_message` method is not overridden.
"""
self._prepare_chat(recipient, clear_history)
await self.a_send(self.generate_init_message(**context), recipient)
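# A minimal async entry point, sketched after the notebook added in this PR; the
# agent names and task message are illustrative. Because chats are now coroutines,
# several independent ones could also be composed with asyncio.gather.
import asyncio

from flaml import autogen


async def main():
    config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
    assistant = autogen.AssistantAgent("assistant", llm_config={"config_list": config_list})
    user_proxy = autogen.UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)
    await user_proxy.a_initiate_chat(assistant, message="Summarize asyncio in one sentence.")

asyncio.run(main())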
def reset(self):
"""Reset the agent."""
self.clear_history()
@ -573,12 +667,12 @@ class ResponsiveAgent(Agent):
"""Reply based on the conversation history and the sender.
Either messages or sender must be provided.
Use registered class-specific reply functions to generate replies.
Use registered auto reply functions to generate replies.
By default, the following functions are checked in order:
1. _check_termination_and_human_reply
2. _generate_function_call_reply
3. _generate_code_execution_reply
4. _generate_oai_reply
1. check_termination_and_human_reply
2. generate_function_call_reply
3. generate_code_execution_reply
4. generate_oai_reply
Every function returns a tuple (final, reply).
When a function returns final=False, the next function will be checked.
So by default, termination and human reply will be checked first.
@ -597,16 +691,68 @@ class ResponsiveAgent(Agent):
assert messages is not None or sender is not None, "Either messages or sender must be provided."
if sender is not None:
for reply_func_tuple in self._reply_func_list:
if exclude and reply_func_tuple["reply_func"] in exclude:
reply_func = reply_func_tuple["reply_func"]
if exclude and reply_func in exclude:
continue
if asyncio.coroutines.iscoroutinefunction(reply_func):
continue
if self._match_trigger(reply_func_tuple["trigger"], sender):
final, reply = reply_func_tuple["reply_func"](
final, reply = reply_func(
self, messages=messages, sender=sender, context=reply_func_tuple["context"]
)
if final:
return reply
return self._default_auto_reply
async def a_generate_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
exclude: Optional[List[Callable]] = None,
) -> Union[str, Dict, None]:
"""(async) Reply based on the conversation history and the sender.
Either messages or sender must be provided.
Use registered auto reply functions to generate replies.
By default, the following functions are checked in order:
1. check_termination_and_human_reply
2. generate_function_call_reply
3. generate_code_execution_reply
4. generate_oai_reply
Every function returns a tuple (final, reply).
When a function returns final=False, the next function will be checked.
So by default, termination and human reply will be checked first.
If not terminating and human reply is skipped, execute function or code and return the result.
AI replies are generated only when no code execution is performed.
Args:
messages: a list of messages in the conversation history.
sender: sender of an Agent instance.
exclude: a list of functions to exclude.
Returns:
str or dict or None: reply. None if no reply is generated.
"""
assert messages is not None or sender is not None, "Either messages or sender must be provided."
if sender is not None:
for reply_func_tuple in self._reply_func_list:
reply_func = reply_func_tuple["reply_func"]
if exclude and reply_func in exclude:
continue
if self._match_trigger(reply_func_tuple["trigger"], sender):
if asyncio.coroutines.iscoroutinefunction(reply_func):
final, reply = await reply_func(
self, messages=messages, sender=sender, context=reply_func_tuple["context"]
)
else:
final, reply = reply_func(
self, messages=messages, sender=sender, context=reply_func_tuple["context"]
)
if final:
return reply
return self._default_auto_reply
def _match_trigger(self, trigger, sender):
"""Check if the sender matches the trigger."""
if isinstance(trigger, str):
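# With this dispatch, sync and async reply functions can be registered side by
# side: generate_reply skips coroutine functions, while a_generate_reply awaits
# them. A hedged sketch following the register_auto_reply(trigger, reply_func,
# position, context=...) usage from the stream test added in this PR; matching a
# string trigger against the sender's name is an assumption inferred from the
# isinstance(trigger, str) check above.
import asyncio

from flaml import autogen


async def polling_reply(recipient, messages, sender, context):
    await asyncio.sleep(0.1)  # e.g., poll an external source without blocking
    return False, None  # (final, reply); final=False falls through to the next function


user_proxy = autogen.UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)
# trigger by class, as in the test added by this PR
user_proxy.register_auto_reply(autogen.AssistantAgent, polling_reply, 1, context=None)
# trigger by sender name (assumed semantics of string triggers)
user_proxy.register_auto_reply("assistant", polling_reply, 2, context=None)
# `await user_proxy.a_generate_reply(sender=...)` awaits polling_reply, whereas
# the synchronous generate_reply skips it via the coroutine check.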

View File

@ -9,7 +9,6 @@ class UserProxyAgent(ResponsiveAgent):
and `llm_config` to False. By default, the agent will prompt for human input every time a message is received.
Code execution is enabled by default. LLM-based auto reply is disabled by default.
To modify auto reply, register a method with [`register_auto_reply`](responsive_agent#register_auto_reply).
The method should have a similar signature with `_generate_oai_reply` method.
To modify the way to get human input, override `get_human_input` method.
To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
`run_code`, and `execute_function` methods respectively.

View File

@ -1 +1 @@
__version__ = "2.0.0rc4"
__version__ = "2.0.0rc5"

View File

@ -19,8 +19,8 @@
"source": [
"# Auto Generated Agent Chat: Task Solving with Code Generation, Execution & Debugging\n",
"\n",
"FLAML offers an experimental feature of interactive LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents-experimental).\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to write code and execute the code. Here `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for the human user to execute the code written by `AssistantAgent`, or automatically execute the code. Depending on the setting of `human_input_mode` and `max_consecutive_auto_reply`, the `UserProxyAgent` either solicits feedback from the human user or returns auto-feedback based on the result of code execution (success or failure and corresponding outputs) to `AssistantAgent`. `AssistantAgent` will debug the code and suggest new code if the result contains error. The two agents keep communicating to each other until the task is done.\n",
"\n",

View File

@ -32,7 +32,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install flaml[autogen]~=2.0.0rc4\n",
"# %pip install flaml[autogen]~=2.0.0rc5\n",
"%pip install chess -U"
]
},

View File

@ -17,7 +17,7 @@
"source": [
"# Auto Generated Agent Chat: Task Solving with Provided Tools as Functions\n",
"\n",
"FLAML offers an experimental feature of interactive LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents-experimental).\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs need to be passed to `AssistantAgent` to initialize the agent. The corresponding functions need to be passed to `UserProxyAgent`, which will be responsible for executing any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to make sure the instructions align with the function call descriptions.\n",
"\n",

View File

@ -32,7 +32,7 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"# %pip install flaml[autogen]~=2.0.0rc4"
"# %pip install flaml[autogen]~=2.0.0rc5"
]
},
{

View File

@ -19,8 +19,8 @@
"source": [
"# Auto Generated Agent Chat: Task Solving with Code Generation, Execution, Debugging & Human Feedback\n",
"\n",
"FLAML offers an experimental feature of interactive LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents-experimental).\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to solve a challenging math problem with human feedback. Here `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. By setting `human_input_mode` properly, the `UserProxyAgent` can also prompt the user for feedback to `AssistantAgent`. For example, when `human_input_mode` is set to \"ALWAYS\", the `UserProxyAgent` will always prompt the user for feedback. When user feedback is provided, the `UserProxyAgent` will directly pass the feedback to `AssistantAgent`. When no user feedback is provided, the `UserProxyAgent` will execute the code written by `AssistantAgent` and return the execution results (success or failure and corresponding outputs) to `AssistantAgent`.\n",
"\n",

View File

@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Collaborative Task Solving with Coding and Planning Agent\n",
"\n",
"FLAML offers an experimental feature of interactive LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents-experimental).\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
"\n",
"In this notebook, we demonstrate how to use multiple agents to work together and accomplish a task which requires finding info from the web and coding. `AssistantAgent` is an LLM-based agent that can write and debug Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We further create a planning agent for the assistant agent to consult. The planning agent is a variation of the LLM-based `AssistantAgent` with a different system message.\n",
"\n",

View File

@ -0,0 +1,781 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"<a href=\"https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/autogen_agentchat_stream.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {
"slideshow": {
"slide_type": "slide"
}
},
"source": [
"# Interactive LLM Agent Dealing with Data Stream\n",
"\n",
"`flaml.autogen` offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
"\n",
"In this notebook, we demonstrate how to use customized agents to continuously acquires news from the web and ask for investment suggestions.\n",
"\n",
"## Requirements\n",
"\n",
"FLAML requires `Python>=3.8`. To run this notebook example, please install flaml with the [autogen] option:\n",
"```bash\n",
"pip install flaml[autogen]\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"execution": {
"iopub.execute_input": "2023-02-13T23:40:52.317406Z",
"iopub.status.busy": "2023-02-13T23:40:52.316561Z",
"iopub.status.idle": "2023-02-13T23:40:52.321193Z",
"shell.execute_reply": "2023-02-13T23:40:52.320628Z"
}
},
"outputs": [],
"source": [
"# %pip install flaml[autogen]~=2.0.0rc5"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set your API Endpoint\n",
"\n",
"The [`config_list_from_json`](https://microsoft.github.io/FLAML/docs/reference/autogen/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from flaml import autogen\n",
"\n",
"config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well). Only the models with matching names are kept in the list based on the filter condition.\n",
"\n",
"The config list looks like the following:\n",
"```python\n",
"config_list = [\n",
" {\n",
" 'model': 'gpt-4',\n",
" 'api_key': '<your OpenAI API key here>',\n",
" }, # OpenAI API endpoint for gpt-4\n",
" {\n",
" 'model': 'gpt-4',\n",
" 'api_key': '<your first Azure OpenAI API key here>',\n",
" 'api_base': '<your first Azure OpenAI API base here>',\n",
" 'api_type': 'azure',\n",
" 'api_version': '2023-06-01-preview',\n",
" }, # Azure OpenAI API endpoint for gpt-4\n",
" {\n",
" 'model': 'gpt-4',\n",
" 'api_key': '<your second Azure OpenAI API key here>',\n",
" 'api_base': '<your second Azure OpenAI API base here>',\n",
" 'api_type': 'azure',\n",
" 'api_version': '2023-06-01-preview',\n",
" }, # another Azure OpenAI API endpoint for gpt-4\n",
" {\n",
" 'model': 'gpt-3.5-turbo',\n",
" 'api_key': '<your OpenAI API key here>',\n",
" }, # OpenAI API endpoint for gpt-3.5-turbo\n",
" {\n",
" 'model': 'gpt-3.5-turbo',\n",
" 'api_key': '<your first Azure OpenAI API key here>',\n",
" 'api_base': '<your first Azure OpenAI API base here>',\n",
" 'api_type': 'azure',\n",
" 'api_version': '2023-06-01-preview',\n",
" }, # Azure OpenAI API endpoint for gpt-3.5-turbo\n",
" {\n",
" 'model': 'gpt-3.5-turbo',\n",
" 'api_key': '<your second Azure OpenAI API key here>',\n",
" 'api_base': '<your second Azure OpenAI API base here>',\n",
" 'api_type': 'azure',\n",
" 'api_version': '2023-06-01-preview',\n",
" }, # another Azure OpenAI API endpoint for gpt-3.5-turbo\n",
"]\n",
"```\n",
"\n",
"If you open this notebook in colab, you can upload your files by clicking the file icon on the left panel and then choose \"upload file\" icon.\n",
"\n",
"You can set the value of config_list in other ways you prefer, e.g., loading from a YAML file."
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Example Task: Investment suggestion with realtime data\n",
"\n",
"We consider a scenario where news data are streamed from a source, and we use an assistant agent to continually provide investment suggestions based on the data.\n",
"\n",
"First, we use the following code to simulate the data stream process."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import asyncio\n",
"\n",
"def get_market_news(ind, ind_upper):\n",
" import requests\n",
" import json\n",
" # replace the \"demo\" apikey below with your own key from https://www.alphavantage.co/support/#api-key\n",
" # url = 'https://www.alphavantage.co/query?function=NEWS_SENTIMENT&tickers=AAPL&sort=LATEST&limit=5&apikey=demo'\n",
" # r = requests.get(url)\n",
" # data = r.json()\n",
" # with open('market_news_local.json', 'r') as file:\n",
" # # Load JSON data from file\n",
" # data = json.load(file)\n",
" data = {\n",
" \"feed\": [\n",
" {\n",
" \"title\": \"Palantir CEO Says Our Generation's Atomic Bomb Could Be AI Weapon - And Arrive Sooner Than You Think - Palantir Technologies ( NYSE:PLTR ) \",\n",
" \"summary\": \"Christopher Nolan's blockbuster movie \\\"Oppenheimer\\\" has reignited the public discourse surrounding the United States' use of an atomic bomb on Japan at the end of World War II.\",\n",
" \"overall_sentiment_score\": 0.009687,\n",
" },\n",
" {\n",
" \"title\": '3 \"Hedge Fund Hotels\" Pulling into Support',\n",
" \"summary\": \"Institutional quality stocks have several benefits including high-liquidity, low beta, and a long runway. Strategist Andrew Rocco breaks down what investors should look for and pitches 3 ideas.\",\n",
" \"banner_image\": \"https://staticx-tuner.zacks.com/images/articles/main/92/87.jpg\",\n",
" \"overall_sentiment_score\": 0.219747,\n",
" },\n",
" {\n",
" \"title\": \"PDFgear, Bringing a Completely-Free PDF Text Editing Feature\",\n",
" \"summary\": \"LOS ANGELES, July 26, 2023 /PRNewswire/ -- PDFgear, a leading provider of PDF solutions, announced a piece of exciting news for everyone who works extensively with PDF documents.\",\n",
" \"overall_sentiment_score\": 0.360071,\n",
" },\n",
" {\n",
" \"title\": \"Researchers Pitch 'Immunizing' Images Against Deepfake Manipulation\",\n",
" \"summary\": \"A team at MIT says injecting tiny disruptive bits of code can cause distorted deepfake images.\",\n",
" \"overall_sentiment_score\": -0.026894,\n",
" },\n",
" {\n",
" \"title\": \"Nvidia wins again - plus two more takeaways from this week's mega-cap earnings\",\n",
" \"summary\": \"We made some key conclusions combing through quarterly results for Microsoft and Alphabet and listening to their conference calls with investors.\",\n",
" \"overall_sentiment_score\": 0.235177,\n",
" },\n",
" ]\n",
" }\n",
" feeds = data[\"feed\"][ind:ind_upper]\n",
" feeds_summary = \"\\n\".join(\n",
" [\n",
" f\"News summary: {f['title']}. {f['summary']} overall_sentiment_score: {f['overall_sentiment_score']}\"\n",
" for f in feeds\n",
" ]\n",
" )\n",
" return feeds_summary\n",
"\n",
"data = asyncio.Future()\n",
"\n",
"async def add_stock_price_data():\n",
" # simulating the data stream\n",
" for i in range(0, 5, 1):\n",
" latest_news = get_market_news(i, i + 1)\n",
" if data.done():\n",
" data.result().append(latest_news)\n",
" else:\n",
" data.set_result([latest_news])\n",
" # print(data.result())\n",
" await asyncio.sleep(5)\n",
"\n",
"data_task = asyncio.create_task(add_stock_price_data())\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Then, we construct agents. An assistant agent is created to answer the question using LLM. A UserProxyAgent is created to ask questions, and add the new data in the conversation when they are available."
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from flaml import autogen\n",
"\n",
"# create an AssistantAgent instance named \"assistant\"\n",
"assistant = autogen.AssistantAgent(\n",
" name=\"assistant\",\n",
" llm_config={\n",
" \"request_timeout\": 600,\n",
" \"seed\": 41,\n",
" \"config_list\": config_list,\n",
" \"temperature\": 0,\n",
" },\n",
" system_message=\"You are a financial expert.\",\n",
")\n",
"# create a UserProxyAgent instance named \"user\"\n",
"user_proxy = autogen.UserProxyAgent(\n",
" name=\"user\",\n",
" human_input_mode=\"NEVER\",\n",
" max_consecutive_auto_reply=5,\n",
" code_execution_config=False,\n",
" default_auto_reply=None,\n",
")\n",
"\n",
"async def add_data_reply(recipient, messages, sender, context):\n",
" await asyncio.sleep(0.1)\n",
" data = context[\"news_stream\"]\n",
" if data.done():\n",
" result = data.result()\n",
" if result:\n",
" news_str = \"\\n\".join(result)\n",
" result.clear()\n",
" return (\n",
" True,\n",
" f\"Just got some latest market news. Merge your new suggestion with previous ones.\\n{news_str}\",\n",
" )\n",
" return False, None\n",
"\n",
"user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, context={\"news_stream\": data})"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We invoke the `a_initiate_chat()` method of the user proxy agent to start the conversation."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33muser\u001b[0m (to assistant):\n",
"\n",
"Give me investment suggestion in 3 bullet points.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33massistant\u001b[0m (to user):\n",
"\n",
"1. Diversify Your Portfolio: Don't put all your eggs in one basket. Spread your investments across a variety of asset classes such as stocks, bonds, real estate, and commodities. This can help to mitigate risk and potentially increase returns.\n",
"\n",
"2. Invest for the Long Term: Investing is not about making a quick buck, but about growing your wealth over time. Stick to a long-term investment strategy and avoid the temptation to engage in frequent buying and selling.\n",
"\n",
"3. Regularly Review Your Investments: The market is dynamic and constantly changing. Regularly review your investment portfolio to ensure it aligns with your financial goals and risk tolerance. Adjust your investments as necessary based on changes in your personal circumstances and market conditions.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33muser\u001b[0m (to assistant):\n",
"\n",
"Just got some latest market news. Merge your new suggestion with previous ones.\n",
"News summary: Palantir CEO Says Our Generation's Atomic Bomb Could Be AI Weapon - And Arrive Sooner Than You Think - Palantir Technologies ( NYSE:PLTR ) . Christopher Nolan's blockbuster movie \"Oppenheimer\" has reignited the public discourse surrounding the United States' use of an atomic bomb on Japan at the end of World War II. overall_sentiment_score: 0.009687\n",
"News summary: 3 \"Hedge Fund Hotels\" Pulling into Support. Institutional quality stocks have several benefits including high-liquidity, low beta, and a long runway. Strategist Andrew Rocco breaks down what investors should look for and pitches 3 ideas. overall_sentiment_score: 0.219747\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33massistant\u001b[0m (to user):\n",
"\n",
"1. Diversify Your Portfolio: Given the recent news about AI technology and its potential impact, consider investing in tech companies like Palantir Technologies that are at the forefront of AI development. However, remember to maintain a diversified portfolio across various sectors and asset classes to mitigate risk.\n",
"\n",
"2. Long-Term Investment Strategy: Despite the potential for rapid advancements in AI, it's important to maintain a long-term investment perspective. While these developments may bring short-term volatility, they could also present long-term growth opportunities.\n",
"\n",
"3. Regularly Review and Adjust Your Investments: With the news about \"Hedge Fund Hotels\" and their potential benefits, consider reviewing your portfolio to see if these high-liquidity, low beta stocks fit into your investment strategy. However, always adjust your investments based on your personal circumstances, risk tolerance, and market conditions. \n",
"\n",
"4. Invest in AI and Tech Stocks: Given the potential of AI as highlighted by Palantir's CEO, consider investing in companies that are leading in AI and other technological advancements. This could provide significant growth opportunities.\n",
"\n",
"5. Consider \"Hedge Fund Hotels\": The news suggests that these stocks have several benefits including high-liquidity, low beta, and a long runway. If these align with your investment goals, they could be a good addition to your portfolio.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33muser\u001b[0m (to assistant):\n",
"\n",
"Just got some latest market news. Merge your new suggestion with previous ones.\n",
"News summary: PDFgear, Bringing a Completely-Free PDF Text Editing Feature. LOS ANGELES, July 26, 2023 /PRNewswire/ -- PDFgear, a leading provider of PDF solutions, announced a piece of exciting news for everyone who works extensively with PDF documents. overall_sentiment_score: 0.360071\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33massistant\u001b[0m (to user):\n",
"\n",
"1. Diversify Your Portfolio: With the latest news about PDFgear, consider investing in companies that provide digital solutions, as they are becoming increasingly important in our digital age. However, continue to maintain a diversified portfolio across various sectors and asset classes to mitigate risk.\n",
"\n",
"2. Long-Term Investment Strategy: The announcement from PDFgear could potentially lead to short-term gains, but remember to maintain a long-term investment perspective. The digital solutions sector is expected to grow in the long run, providing potential growth opportunities.\n",
"\n",
"3. Regularly Review and Adjust Your Investments: Given the news about PDFgear and its potential impact on the digital solutions sector, it's important to review your portfolio to see if it aligns with these new market trends. Adjust your investments based on your personal circumstances, risk tolerance, and market conditions.\n",
"\n",
"4. Invest in Digital Solutions: The news about PDFgear highlights the potential growth in the digital solutions sector. Consider investing in companies that are leading in this field, as they could provide significant growth opportunities.\n",
"\n",
"5. Consider Tech Stocks: With the continuous advancements in technology, tech stocks, including those in AI, digital solutions, and other tech sub-sectors, could be a good addition to your portfolio, given their potential for high growth.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33muser\u001b[0m (to assistant):\n",
"\n",
"Just got some latest market news. Merge your new suggestion with previous ones.\n",
"News summary: Researchers Pitch 'Immunizing' Images Against Deepfake Manipulation. A team at MIT says injecting tiny disruptive bits of code can cause distorted deepfake images. overall_sentiment_score: -0.026894\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33massistant\u001b[0m (to user):\n",
"\n",
"1. Diversify Your Portfolio: The latest news about deepfake manipulation suggests potential growth in cybersecurity and AI sectors. Consider investing in companies that are working on these technologies, but continue to maintain a diversified portfolio across various sectors and asset classes to mitigate risk.\n",
"\n",
"2. Long-Term Investment Strategy: While the news about deepfake manipulation might cause short-term volatility in the tech sector, it's important to maintain a long-term investment perspective. The cybersecurity and AI sectors are expected to grow in the long run, providing potential growth opportunities.\n",
"\n",
"3. Regularly Review and Adjust Your Investments: Given the news about deepfake manipulation and its potential impact on the tech sector, it's important to review your portfolio to see if it aligns with these new market trends. Adjust your investments based on your personal circumstances, risk tolerance, and market conditions.\n",
"\n",
"4. Invest in Cybersecurity and AI: The news about deepfake manipulation highlights the potential growth in the cybersecurity and AI sectors. Consider investing in companies that are leading in these fields, as they could provide significant growth opportunities.\n",
"\n",
"5. Consider Tech Stocks: With the continuous advancements in technology, tech stocks, including those in AI, digital solutions, cybersecurity, and other tech sub-sectors, could be a good addition to your portfolio, given their potential for high growth.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33muser\u001b[0m (to assistant):\n",
"\n",
"Just got some latest market news. Merge your new suggestion with previous ones.\n",
"News summary: Nvidia wins again - plus two more takeaways from this week's mega-cap earnings. We made some key conclusions combing through quarterly results for Microsoft and Alphabet and listening to their conference calls with investors. overall_sentiment_score: 0.235177\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33massistant\u001b[0m (to user):\n",
"\n",
"1. Diversify Your Portfolio: The latest news about Nvidia, Microsoft, and Alphabet suggests potential growth in the tech sector. Consider investing in these companies, but continue to maintain a diversified portfolio across various sectors and asset classes to mitigate risk.\n",
"\n",
"2. Long-Term Investment Strategy: While the news about these tech giants might cause short-term volatility, it's important to maintain a long-term investment perspective. The tech sector, particularly companies like Nvidia, Microsoft, and Alphabet, are expected to grow in the long run, providing potential growth opportunities.\n",
"\n",
"3. Regularly Review and Adjust Your Investments: Given the news about Nvidia, Microsoft, and Alphabet and their potential impact on the tech sector, it's important to review your portfolio to see if it aligns with these new market trends. Adjust your investments based on your personal circumstances, risk tolerance, and market conditions.\n",
"\n",
"4. Invest in Tech Giants: The news about Nvidia, Microsoft, and Alphabet highlights the potential growth in the tech sector. Consider investing in these tech giants, as they could provide significant growth opportunities.\n",
"\n",
"5. Consider Tech Stocks: With the continuous advancements in technology, tech stocks, including those in AI, digital solutions, cybersecurity, and other tech sub-sectors, could be a good addition to your portfolio, given their potential for high growth.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
}
],
"source": [
"await user_proxy.a_initiate_chat(\n",
" assistant,\n",
" message=\"\"\"Give me investment suggestion in 3 bullet points.\"\"\",\n",
")\n",
"while not data_task.done() and not data_task.cancelled():\n",
" reply = await user_proxy.a_generate_reply(sender=assistant)\n",
" if reply is not None:\n",
" await user_proxy.a_send(reply, assistant)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.17"
},
"vscode": {
"interpreter": {
"hash": "949777d72b0d2535278d3dc13498b2535136f6dfe0678499012e853ee9abcab1"
}
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"state": {
"2d910cfd2d2a4fc49fc30fbbdc5576a7": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "2.0.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "2.0.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "2.0.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border_bottom": null,
"border_left": null,
"border_right": null,
"border_top": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"454146d0f7224f038689031002906e6f": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "2.0.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "2.0.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_e4ae2b6f5a974fd4bafb6abb9d12ff26",
"IPY_MODEL_577e1e3cc4db4942b0883577b3b52755",
"IPY_MODEL_b40bdfb1ac1d4cffb7cefcb870c64d45"
],
"layout": "IPY_MODEL_dc83c7bff2f241309537a8119dfc7555",
"tabbable": null,
"tooltip": null
}
},
"577e1e3cc4db4942b0883577b3b52755": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "2.0.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "2.0.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_allow_html": false,
"layout": "IPY_MODEL_2d910cfd2d2a4fc49fc30fbbdc5576a7",
"max": 1,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_74a6ba0c3cbc4051be0a83e152fe1e62",
"tabbable": null,
"tooltip": null,
"value": 1
}
},
"6086462a12d54bafa59d3c4566f06cb2": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "2.0.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "2.0.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "2.0.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border_bottom": null,
"border_left": null,
"border_right": null,
"border_top": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"74a6ba0c3cbc4051be0a83e152fe1e62": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "2.0.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "2.0.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"7d3f3d9e15894d05a4d188ff4f466554": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "HTMLStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "2.0.0",
"_model_name": "HTMLStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "2.0.0",
"_view_name": "StyleView",
"background": null,
"description_width": "",
"font_size": null,
"text_color": null
}
},
"b40bdfb1ac1d4cffb7cefcb870c64d45": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "2.0.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "2.0.0",
"_view_name": "HTMLView",
"description": "",
"description_allow_html": false,
"layout": "IPY_MODEL_f1355871cc6f4dd4b50d9df5af20e5c8",
"placeholder": "",
"style": "IPY_MODEL_ca245376fd9f4354af6b2befe4af4466",
"tabbable": null,
"tooltip": null,
"value": " 1/1 [00:00&lt;00:00, 44.69it/s]"
}
},
"ca245376fd9f4354af6b2befe4af4466": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "HTMLStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "2.0.0",
"_model_name": "HTMLStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "2.0.0",
"_view_name": "StyleView",
"background": null,
"description_width": "",
"font_size": null,
"text_color": null
}
},
"dc83c7bff2f241309537a8119dfc7555": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "2.0.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "2.0.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "2.0.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border_bottom": null,
"border_left": null,
"border_right": null,
"border_top": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e4ae2b6f5a974fd4bafb6abb9d12ff26": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "2.0.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "2.0.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "2.0.0",
"_view_name": "HTMLView",
"description": "",
"description_allow_html": false,
"layout": "IPY_MODEL_6086462a12d54bafa59d3c4566f06cb2",
"placeholder": "",
"style": "IPY_MODEL_7d3f3d9e15894d05a4d188ff4f466554",
"tabbable": null,
"tooltip": null,
"value": "100%"
}
},
"f1355871cc6f4dd4b50d9df5af20e5c8": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "2.0.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "2.0.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "2.0.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border_bottom": null,
"border_left": null,
"border_right": null,
"border_top": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
}
},
"version_major": 2,
"version_minor": 0
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@ -19,7 +19,7 @@
"source": [
"# Auto Generated Agent Chat: Collaborative Task Solving with Multiple Agents and Human Users\n",
"\n",
"FLAML offers an experimental feature of interactive LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents-experimental).\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code. Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
"\n",
"In this notebook, we demonstrate an application involving multiple agents and human users to work together and accomplish a task. `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We create multiple `UserProxyAgent` instances which can represent different human users.\n",
"\n",

View File

@ -19,8 +19,8 @@
"source": [
"# Auto Generated Agent Chat: Solving Tasks Requiring Web Info\n",
"\n",
"FLAML offers an experimental feature of interactive LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents-experimental).\n",
"FLAML offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/FLAML/docs/Use-Cases/Auto-Generation#agents).\n",
"\n",
"In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to perform tasks which require acquiring info from the web:\n",
"* discuss a paper based on its URL.\n",

View File

@ -0,0 +1,114 @@
import asyncio
from flaml import autogen
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
def get_market_news(ind, ind_upper):
data = {
"feed": [
{
"title": "Palantir CEO Says Our Generation's Atomic Bomb Could Be AI Weapon - And Arrive Sooner Than You Think - Palantir Technologies ( NYSE:PLTR ) ",
"summary": "Christopher Nolan's blockbuster movie \"Oppenheimer\" has reignited the public discourse surrounding the United States' use of an atomic bomb on Japan at the end of World War II.",
"overall_sentiment_score": 0.009687,
},
{
"title": '3 "Hedge Fund Hotels" Pulling into Support',
"summary": "Institutional quality stocks have several benefits including high-liquidity, low beta, and a long runway. Strategist Andrew Rocco breaks down what investors should look for and pitches 3 ideas.",
"banner_image": "https://staticx-tuner.zacks.com/images/articles/main/92/87.jpg",
"overall_sentiment_score": 0.219747,
},
{
"title": "PDFgear, Bringing a Completely-Free PDF Text Editing Feature",
"summary": "LOS ANGELES, July 26, 2023 /PRNewswire/ -- PDFgear, a leading provider of PDF solutions, announced a piece of exciting news for everyone who works extensively with PDF documents.",
"overall_sentiment_score": 0.360071,
},
{
"title": "Researchers Pitch 'Immunizing' Images Against Deepfake Manipulation",
"summary": "A team at MIT says injecting tiny disruptive bits of code can cause distorted deepfake images.",
"overall_sentiment_score": -0.026894,
},
{
"title": "Nvidia wins again - plus two more takeaways from this week's mega-cap earnings",
"summary": "We made some key conclusions combing through quarterly results for Microsoft and Alphabet and listening to their conference calls with investors.",
"overall_sentiment_score": 0.235177,
},
]
}
feeds = data["feed"][ind:ind_upper]
feeds_summary = "\n".join(
[
f"News summary: {f['title']}. {f['summary']} overall_sentiment_score: {f['overall_sentiment_score']}"
for f in feeds
]
)
return feeds_summary
async def test_stream():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
data = asyncio.Future()
async def add_stock_price_data():
# simulating the data stream
for i in range(0, 2, 1):
latest_news = get_market_news(i, i + 1)
if data.done():
data.result().append(latest_news)
else:
data.set_result([latest_news])
# print(data.result())
await asyncio.sleep(5)
data_task = asyncio.create_task(add_stock_price_data())
# create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
name="assistant",
llm_config={
"request_timeout": 600,
"seed": 41,
"config_list": config_list,
"temperature": 0,
},
system_message="You are a financial expert.",
)
# create a UserProxyAgent instance named "user"
user_proxy = autogen.UserProxyAgent(
name="user",
human_input_mode="NEVER",
max_consecutive_auto_reply=5,
code_execution_config=False,
default_auto_reply=None,
)
async def add_data_reply(recipient, messages, sender, context):
await asyncio.sleep(0.1)
data = context["news_stream"]
if data.done():
result = data.result()
if result:
news_str = "\n".join(result)
result.clear()
return (
True,
f"Just got some latest market news. Merge your new suggestion with previous ones.\n{news_str}",
)
return False, None
user_proxy.register_auto_reply(autogen.AssistantAgent, add_data_reply, 1, context={"news_stream": data})
await user_proxy.a_initiate_chat(
assistant,
message="""Give me investment suggestion in 3 bullet points.""",
)
while not data_task.done() and not data_task.cancelled():
reply = await user_proxy.a_generate_reply(sender=assistant)
if reply is not None:
await user_proxy.a_send(reply, assistant)
if __name__ == "__main__":
asyncio.run(test_stream())

View File

@ -11,11 +11,13 @@
The package is under active development with more features upcoming.
## Agents (Experimental)
## Agents
[`flaml.autogen.agentchat`](/docs/reference/autogen/agentchat/agent) contains an experimental implementation of interactive agents which can adapt to human or simulated feedback. This subpackage is under active development.
[`flaml.autogen.agentchat`](/docs/reference/autogen/agentchat/agent) offers conversable agents which can adapt to human or simulated feedback. This subpackage is under active development.
We have designed a generic `ResponsiveAgent` class for Agents that are capable of communicating with each other through the exchange of messages to collaboratively finish a task. An agent can communicate with other agents and perform actions. Different agents can differ in what actions they perform after receiving messages. Two representative subclasses are `AssistantAgent` and `UserProxyAgent`.
### Basic Concept
We have designed a generic `ResponsiveAgent` class for Agents that are capable of conversing with each other through the exchange of messages to collaboratively finish a task. An agent can communicate with other agents and perform actions. Different agents can differ in what actions they perform after receiving messages. Two representative subclasses are `AssistantAgent` and `UserProxyAgent`.
- `AssistantAgent`. Designed to act as an assistant by responding to user requests. It could write Python code (in a Python coding block) for a user to execute when a message (typically a description of a task that needs to be solved) is received. Under the hood, the Python code is written by an LLM (e.g., GPT-4). It can also receive the execution results and suggest code with bug fixes. Its behavior can be altered by passing a new system message. The LLM [inference](#enhanced-inference) configuration can be set via `llm_config`.
- `UserProxyAgent`. Serves as a proxy for the human user. Upon receiving a message, the UserProxyAgent will either solicit the human user's input or prepare an automatically generated reply. The chosen action depends on the settings of the `human_input_mode` and `max_consecutive_auto_reply` when the `UserProxyAgent` instance is constructed, and whether a human user input is available.
@ -25,6 +27,8 @@ When `llm_config` is set to a dict, `UserProxyAgent` can generate replies using
The auto-reply capability of `ResponsiveAgent` allows for more autonomous multi-agent communication while retaining the possibility of human intervention.
One can also easily extend it by registering auto_reply functions with the `register_auto_reply()` method.
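For instance, a hedged sketch of such a registration, with the argument order mirroring the stream test added in this PR (a reply function returns a `(final, reply)` tuple; `final=False` falls through to the next registered function):

```python
from flaml import autogen

def custom_reply(recipient, messages, sender, context):
    last = str(messages[-1].get("content", "")) if messages else ""
    if "ping" in last:
        return True, "pong"  # final: stop checking further reply functions
    return False, None  # fall through to the next registered function

user_proxy = autogen.UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)
user_proxy.register_auto_reply(autogen.AssistantAgent, custom_reply, 1, context=None)
```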
### Basic Example
Example usage of the agents to solve a task with code:
```python
from flaml.autogen import AssistantAgent, UserProxyAgent
@ -53,13 +57,14 @@ In the example above, we create an AssistantAgent named "assistant" to serve as
Please find a visual illustration of how UserProxyAgent and AssistantAgent collaboratively solve the above task below:
![Agent Chat Example](images/agent_example.png)
#### Human Input Mode
### Human Input Mode
The `human_input_mode` parameter of `UserProxyAgent` controls the behavior of the agent when it receives a message. It can be set to `"NEVER"`, `"ALWAYS"`, or `"TERMINATE"`.
- Under the mode `human_input_mode="NEVER"`, the multi-turn conversation between the assistant and the user_proxy stops when the number of auto replies reaches the upper limit specified by `max_consecutive_auto_reply` or the received message is a termination message according to `is_termination_msg`.
- When `human_input_mode` is set to `"ALWAYS"`, the user proxy agent solicits human input every time a message is received; and the conversation stops when the human input is "exit", or when the received message is a termination message and no human input is provided.
- When `human_input_mode` is set to `"TERMINATE"`, the user proxy agent solicits human input only when a termination message is received or the number of auto replies reaches `max_consecutive_auto_reply`.
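For example, a user proxy that consults the human only at termination points might be constructed as in the following sketch (the termination predicate is an illustrative assumption):

```python
from flaml import autogen

user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="TERMINATE",   # solicit human input only at termination points
    max_consecutive_auto_reply=10,  # upper limit on consecutive auto replies
    is_termination_msg=lambda msg: str(msg.get("content", "")).rstrip().endswith("TERMINATE"),
)
```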
#### Function Calling
### Function Calling
To leverage [function calling capability of OpenAI's Chat Completions API](https://openai.com/blog/function-calling-and-other-api-updates?ref=upstract.com), one can pass in a list of callable functions or class methods to `UserProxyAgent`, which corresponds to the description of functions passed to OpenAI's API.
Example usage of the agents to solve a task with function calling feature:
@ -141,6 +146,8 @@ user_proxy.initiate_chat(
)
```
### Notebook Examples
*Interested in trying it yourself? Please check the following notebook examples:*
* [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_auto_feedback_from_code_execution.ipynb)
@ -158,6 +165,8 @@ user_proxy.initiate_chat(
* [Automated Task Solving by Group Chat](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_groupchat.ipynb)
* [Automated Continual Learning from New Data](https://github.com/microsoft/FLAML/blob/main/notebook/autogen_agentchat_stream.ipynb)
## Enhanced Inference
One can use [`flaml.autogen.Completion.create`](/docs/reference/autogen/oai/completion#create) to perform inference.
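A minimal sketch of such a call (the prompt is illustrative; `config_list` is loaded as in the agent examples above):

```python
from flaml import autogen

config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
response = autogen.Completion.create(
    config_list=config_list,
    prompt="What is the capital of France?",
)
print(autogen.Completion.extract_text(response)[0])
```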
@ -546,6 +555,7 @@ The compact history is more efficient and the individual API call history contai
[`flaml.autogen.math_utils`](/docs/reference/autogen/math_utils) offers utilities for math problems, such as:
- an [eval_math_responses](/docs/reference/autogen/math_utils#eval_math_responses) function to select a response using voting and to check whether the final answer is correct when the canonical solution is provided.
## For Further Reading
*Interested in the research that leads to this package? Please check the following papers.*
* [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. ArXiv preprint arXiv:2303.04673 (2023).