Supporting callable message (#1852)

* add message field

* send

* message func doc str

* test dict message

* retiring soon

* generate_init_message docstr

* remove todo

* update notebook

* CompressibleAgent

* update notebook

* add test

* retrieve agent

* update test

* summary_method args

* summary

* carryover

* dict message

* update nested doc

* generate_init_message

* fix typo

* update docs for mathchat

* Fix missing message

* Add docstrings

* model

* notebook

* default naming

---------

Co-authored-by: Chi Wang <wang.chi@microsoft.com>
Co-authored-by: kevin666aa <yrwu000627@gmail.com>
Co-authored-by: Li Jiang <bnujli@gmail.com>
Co-authored-by: Li Jiang <lijiang1@microsoft.com>
This commit is contained in:
Qingyun Wu 2024-03-09 15:27:46 -05:00 committed by GitHub
parent 83e1789a50
commit c75655a340
22 changed files with 3082 additions and 2565 deletions

View File

@ -111,13 +111,19 @@ def __post_carryover_processing(chat_info: Dict[str, Any]):
if isinstance(chat_info["carryover"], list)
else chat_info["carryover"]
)
message = chat_info.get("message")
if isinstance(message, str):
print_message = message
elif callable(message):
print_message = "Callable: " + message.__name__
elif isinstance(message, dict):
print_message = "Dict: " + str(message)
elif message is None:
print_message = "None"
print(colored("\n" + "*" * 80, "blue"), flush=True, sep="")
print(
colored(
"Start a new chat with the following message: \n"
+ chat_info.get("message")
+ "\n\nWith the following carryover: \n"
+ print_carryover,
"Starting a new chat....\n\nMessage:\n" + print_message + "\n\nCarryover: \n" + print_carryover,
"blue",
),
flush=True,
@ -132,35 +138,19 @@ def initiate_chats(chat_queue: List[Dict[str, Any]]) -> List[ChatResult]:
chat_queue (List[Dict]): a list of dictionaries containing the information of the chats.
Each dictionary should contain the input arguments for `ConversableAgent.initiate_chat`.
More specifically, each dictionary could include the following fields:
- recipient: the recipient agent.
- "sender": the sender agent.
- "recipient": the recipient agent.
- clear_history (bool): whether to clear the chat history with the agent. Default is True.
- silent (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
- cache (Cache or None): the cache client to be used for this conversation. Default is None.
- max_turns (int or None): the maximum number of turns for the chat. If None, the chat will continue until a termination condition is met. Default is None.
- "message" needs to be provided if the `generate_init_message` method is not overridden.
Otherwise, input() will be called to get the initial message.
- "summary_method": a string or callable specifying the method to get a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".
- Supported strings are "last_msg" and "reflection_with_llm":
when set to "last_msg", it returns the last message of the dialog as the summary.
when set to "reflection_with_llm", it returns a summary extracted using an LLM client.
"reflection_with_llm" requires the llm_config to be set in either the sender or the recipient.
- A callable summary_method should take the recipient and sender agent in a chat as input and return a string of summary. E.g.,
```python
def my_summary_method(
sender: ConversableAgent,
recipient: ConversableAgent,
):
return recipient.last_message(sender)["content"]
```
- "summary_prompt": This filed can be used to specify the prompt used to extract a summary when summary_method is "reflection_with_llm".
Default is None and the following default prompt will be used when "summary_method" is set to "reflection_with_llm":
"Identify and extract the final solution to the originally asked question based on the conversation."
- "carryover": It can be used to specify the carryover information to be passed to this chat.
If provided, we will combine this carryover with the "message" content when generating the initial chat
message in `generate_init_message`.
- "clear_history" (bool): whether to clear the chat history with the agent. Default is True.
- "silent" (bool or None): (Experimental) whether to print the messages for this conversation. Default is False.
- "cache" (Cache or None): the cache client to be used for this conversation. Default is None.
- "max_turns" (int or None): the maximum number of turns for the chat. If None, the chat will continue until a termination condition is met. Default is None.
- "summary_method" (str or callable): a string or callable specifying the method to get a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".
- "summary_args" (dict): a dictionary of arguments to be passed to the summary_method. Default is {}.
- "message" (str, callable or None): if None, input() will be called to get the initial message.
- **context: additional context information to be passed to the chat.
- "carryover": It can be used to specify the carryover information to be passed to this chat.
If provided, we will combine this carryover with the "message" content when generating the initial chat
message in `generate_init_message`.
Returns:
(list): a list of ChatResult objects corresponding to the finished chats in the chat_queue.
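For illustration, a minimal sketch of the updated `initiate_chats` interface with a callable second message (the agents, `llm_config`, and task strings below are assumptions, not part of this commit):

```python
import autogen

# Hypothetical LLM configuration; replace with your own config list.
llm_config = {"config_list": autogen.config_list_from_json("OAI_CONFIG_LIST")}

user = autogen.UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config=False)
assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)


def follow_up_message(sender, recipient, context):
    # A callable message receives (sender, recipient, context) and returns a str or dict.
    carryover = context.get("carryover", "")
    if isinstance(carryover, list):
        carryover = carryover[-1]
    return "Summarize the findings as bullet points.\nContext:\n" + carryover


chat_results = autogen.initiate_chats(
    [
        {
            "sender": user,
            "recipient": assistant,
            "message": "Collect three facts about FLAML.",
            "summary_method": "last_msg",
        },
        {
            "sender": user,
            "recipient": assistant,
            "message": follow_up_message,  # printed as "Callable: follow_up_message"
            "summary_method": "reflection_with_llm",
        },
    ]
)
```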

View File

@ -19,7 +19,7 @@ logger = logging.getLogger(__name__)
class CompressibleAgent(ConversableAgent):
"""(Experimental) CompressibleAgent agent. While this agent retains all the default functionalities of the `AssistantAgent`,
"""(CompressibleAgent will be deprecated. Refer to https://github.com/microsoft/autogen/blob/main/notebook/agentchat_capability_long_context_handling.ipynb for long context handling capability.) CompressibleAgent agent. While this agent retains all the default functionalities of the `AssistantAgent`,
it also provides the added feature of compression when activated through the `compress_config` setting.
`compress_config` is set to False by default, making this agent equivalent to the `AssistantAgent`.

View File

@ -177,28 +177,35 @@ class MathUserProxyAgent(UserProxyAgent):
self._previous_code = ""
self.last_reply = None
def generate_init_message(self, problem, prompt_type="default", customized_prompt=None):
@staticmethod
def message_generator(sender, recipient, context):
"""Generate a prompt for the assistant agent with the given problem and prompt.
Args:
problem (str): the problem to be solved.
prompt_type (str): the type of the prompt. Possible values are "default", "python", "two_tools".
(1) "default": the prompt that allows the agent to choose between 3 ways to solve a problem:
1. write a python program to solve it directly.
2. solve it directly without python.
3. solve it step by step with python.
(2) "python":
a simplified prompt from the third way of the "default" prompt, that asks the assistant
to solve the problem step by step with python.
(3) "two_tools":
a simplified prompt similar to the "python" prompt, but allows the model to choose between
Python and Wolfram Alpha to solve the problem.
customized_prompt (str): a customized prompt to be used. If it is not None, the prompt_type will be ignored.
sender (Agent): the sender of the message.
recipient (Agent): the recipient of the message.
context (dict): a dictionary with the following fields:
problem (str): the problem to be solved.
prompt_type (str, Optional): the type of the prompt. Possible values are "default", "python", "two_tools".
(1) "default": the prompt that allows the agent to choose between 3 ways to solve a problem:
1. write a python program to solve it directly.
2. solve it directly without python.
3. solve it step by step with python.
(2) "python":
a simplified prompt from the third way of the "default" prompt, that asks the assistant
to solve the problem step by step with python.
(3) "two_tools":
a simplified prompt similar to the "python" prompt, but allows the model to choose between
Python and Wolfram Alpha to solve the problem.
customized_prompt (str, Optional): a customized prompt to be used. If it is not None, the prompt_type will be ignored.
Returns:
str: the generated prompt ready to be sent to the assistant agent.
"""
self._reset()
sender._reset()
problem = context.get("problem")
prompt_type = context.get("prompt_type", "default")
customized_prompt = context.get("customized_prompt", None)
if customized_prompt is not None:
return customized_prompt + problem
return PROMPTS[prompt_type] + problem
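As a usage sketch for the new static `message_generator` (the agent setup and import path are assumptions based on the surrounding code, not shown in this diff):

```python
from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent

# Hypothetical setup; `assistant` is an AssistantAgent configured elsewhere.
mathproxyagent = MathUserProxyAgent(name="mathproxyagent", human_input_mode="NEVER")

# Extra keyword arguments are forwarded to message_generator via **context.
mathproxyagent.initiate_chat(
    assistant,
    message=MathUserProxyAgent.message_generator,
    problem="Solve x**2 - 5*x + 6 = 0.",
    prompt_type="python",
)
```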

View File

@ -408,23 +408,31 @@ class RetrieveUserProxyAgent(UserProxyAgent):
self._results = results
print("doc_ids: ", results["ids"])
def generate_init_message(self, problem: str, n_results: int = 20, search_string: str = ""):
"""Generate an initial message with the given problem and prompt.
Args:
problem (str): the problem to be solved.
n_results (int): the number of results to be retrieved.
search_string (str): only docs containing this string will be retrieved.
Returns:
str: the generated prompt ready to be sent to the assistant agent.
@staticmethod
def message_generator(sender, recipient, context):
"""
self._reset()
self.retrieve_docs(problem, n_results, search_string)
self.problem = problem
self.n_results = n_results
doc_contents = self._get_context(self._results)
message = self._generate_message(doc_contents, self._task)
Generate an initial message with the given context for the RetrieveUserProxyAgent.
Args:
sender (Agent): the sender agent. It should be the instance of RetrieveUserProxyAgent.
recipient (Agent): the recipient agent. Usually it's the assistant agent.
context (dict): the context for the message generation. It should contain the following keys:
- problem (str): the problem to be solved.
- n_results (int): the number of results to be retrieved. Default is 20.
- search_string (str): only docs that contain an exact match of this string will be retrieved. Default is "".
Returns:
str: the generated message ready to be sent to the recipient agent.
"""
sender._reset()
problem = context.get("problem", "")
n_results = context.get("n_results", 20)
search_string = context.get("search_string", "")
sender.retrieve_docs(problem, n_results, search_string)
sender.problem = problem
sender.n_results = n_results
doc_contents = sender._get_context(sender._results)
message = sender._generate_message(doc_contents, sender._task)
return message
def run_code(self, code, **kwargs):
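The notebook and test updates below exercise this path; as a standalone sketch (agent setup assumed, not part of this diff):

```python
# ragproxyagent is a RetrieveUserProxyAgent and assistant its chat partner,
# both assumed to be configured elsewhere.
ragproxyagent.initiate_chat(
    assistant,
    message=ragproxyagent.message_generator,  # static method; the sender is passed in
    problem="How does FLAML support parallel tuning with Spark?",
    n_results=10,
    search_string="spark",
)
```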

View File

@ -63,14 +63,13 @@ class ConversableAgent(LLMAgent):
To modify the way to get human input, override `get_human_input` method.
To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
`run_code`, and `execute_function` methods respectively.
To customize the initial message when a conversation starts, override `generate_init_message` method.
"""
DEFAULT_CONFIG = {} # An empty configuration
MAX_CONSECUTIVE_AUTO_REPLY = 100 # maximum number of consecutive auto replies (subject to future change)
DEFAULT_summary_prompt = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
DEFAULT_summary_method = "last_msg"
DEFAULT_SUMMARY_PROMPT = "Summarize the takeaway from the conversation. Do not add any introductory phrases."
DEFAULT_SUMMARY_METHOD = "last_msg"
llm_config: Union[Dict, Literal[False]]
def __init__(
@ -857,13 +856,16 @@ class ConversableAgent(LLMAgent):
silent: Optional[bool] = False,
cache: Optional[Cache] = None,
max_turns: Optional[int] = None,
summary_method: Optional[Union[str, Callable]] = DEFAULT_SUMMARY_METHOD,
summary_args: Optional[dict] = {},
message: Optional[Union[Dict, str, Callable]] = None,
**context,
) -> ChatResult:
"""Initiate a chat with the recipient agent.
Reset the consecutive auto reply counter.
If `clear_history` is True, the chat history with the recipient agent will be cleared.
`generate_init_message` is called to generate the initial message for the agent.
Args:
recipient: the recipient agent.
@ -873,9 +875,7 @@ class ConversableAgent(LLMAgent):
max_turns (int or None): the maximum number of turns for the chat between the two agents. One turn means one conversation round trip. Note that this is different from
[max_consecutive_auto_reply](#max_consecutive_auto_reply) which is the maximum number of consecutive auto replies; and it is also different from [max_rounds in GroupChat](./groupchat#groupchat-objects) which is the maximum number of rounds in a group chat session.
If max_turns is set to None, the chat will continue until a termination condition is met. Default is None.
**context: any context information. It has the following reserved fields:
"message": a str of message. Needs to be provided. Otherwise, input() will be called to get the initial message.
"summary_method": a string or callable specifying the method to get a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".
summary_method (str or callable): a method to get a summary from the chat. Default is DEFAULT_SUMMARY_METHOD, i.e., "last_msg".
- Supported strings are "last_msg" and "reflection_with_llm":
when set to "last_msg", it returns the last message of the dialog as the summary.
when set to "reflection_with_llm", it returns a summary extracted using an LLM client.
@ -886,14 +886,53 @@ class ConversableAgent(LLMAgent):
def my_summary_method(
sender: ConversableAgent,
recipient: ConversableAgent,
summary_args: dict,
):
return recipient.last_message(sender)["content"]
```
"summary_prompt": a string of text used to prompt a LLM-based agent (the sender or receiver agent) to reflext
summary_args (dict): a dictionary of arguments to be passed to the summary_method.
E.g., a string of text used to prompt a LLM-based agent (the sender or receiver agent) to reflext
on the conversation and extract a summary when summary_method is "reflection_with_llm".
Default is DEFAULT_summary_prompt, i.e., "Summarize takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out."
"carryover": a string or a list of string to specify the carryover information to be passed to this chat. It can be a string or a list of string.
If provided, we will combine this carryover with the "message" content when generating the initial chat
Default is DEFAULT_SUMMARY_PROMPT, i.e., "Summarize takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out."
message (str, dict or Callable): the initial message to be sent to the recipient. If None, input() will be called to get the initial message.
- If a string or a dict is provided, it will be used as the initial message. `generate_init_message` is called to generate the initial message for the agent based on this string and the context.
If dict, it may contain the following reserved fields (either content or function_call need to be provided).
1. "content": content of the message, can be None.
2. "function_call": a dictionary containing the function name and arguments. (deprecated in favor of "tool_calls")
3. "tool_calls": a list of dictionaries containing the function name and arguments.
4. "role": role of the message, can be "assistant", "user", "function".
This field is only needed to distinguish between "function" or "assistant"/"user".
5. "name": In most cases, this field is not needed. When the role is "function", this field is needed to indicate the function name.
6. "context" (dict): the context of the message, which will be passed to
[OpenAIWrapper.create](../oai/client#create).
- If a callable is provided, it will be called to get the initial message in the form of a string or a dict.
If the returned type is dict, it may contain the reserved fields mentioned above.
Example of a callable message (returning a string):
```python
def my_message(sender: ConversableAgent, recipient: ConversableAgent, context: dict) -> Union[str, Dict]:
carryover = context.get("carryover", "")
if isinstance(carryover, list):
carryover = carryover[-1]
final_msg = "Write a blogpost." + "\nContext: \n" + carryover
return final_msg
```
Example of a callable message (returning a dict):
```python
def my_message(sender: ConversableAgent, recipient: ConversableAgent, context: dict) -> Union[str, Dict]:
final_msg = {}
carryover = context.get("carryover", "")
if isinstance(carryover, list):
carryover = carryover[-1]
final_msg["content"] = "Write a blogpost." + "\nContext: \n" + carryover
final_msg["context"] = {"prefix": "Today I feel"}
return final_msg
```
**context: any context information. It has the following reserved fields:
"carryover": a string or a list of string to specify the carryover information to be passed to this chat.
If provided, we will combine this carryover (by attaching a "context: "string and the carryover content after the message content) with the "message" content when generating the initial chat
message in `generate_init_message`.
Raises:
@ -904,6 +943,7 @@ class ConversableAgent(LLMAgent):
"""
_chat_info = context.copy()
_chat_info["recipient"] = recipient
_chat_info["sender"] = self
consolidate_chat_info(_chat_info, uniform_sender=self)
for agent in [self, recipient]:
agent._raise_exception_on_async_reply_functions()
@ -913,7 +953,10 @@ class ConversableAgent(LLMAgent):
self._prepare_chat(recipient, clear_history, reply_at_receive=False)
for _ in range(max_turns):
if _ == 0:
msg2send = self.generate_init_message(**context)
if isinstance(message, Callable):
msg2send = message(_chat_info["sender"], _chat_info["recipient"], context)
else:
msg2send = self.generate_init_message(message, **context)
else:
msg2send = self.generate_reply(messages=self.chat_messages[recipient], sender=recipient)
if msg2send is None:
@ -921,11 +964,15 @@ class ConversableAgent(LLMAgent):
self.send(msg2send, recipient, request_reply=True, silent=silent)
else:
self._prepare_chat(recipient, clear_history)
self.send(self.generate_init_message(**context), recipient, silent=silent)
if isinstance(message, Callable):
msg2send = message(_chat_info["sender"], _chat_info["recipient"], context)
else:
msg2send = self.generate_init_message(message, **context)
self.send(msg2send, recipient, silent=silent)
summary = self._summarize_chat(
context.get("summary_method", ConversableAgent.DEFAULT_summary_method),
summary_method,
summary_args,
recipient,
prompt=context.get("summary_prompt"),
cache=cache,
)
for agent in [self, recipient]:
@ -946,6 +993,9 @@ class ConversableAgent(LLMAgent):
silent: Optional[bool] = False,
cache: Optional[Cache] = None,
max_turns: Optional[int] = None,
summary_method: Optional[Union[str, Callable]] = DEFAULT_SUMMARY_METHOD,
summary_args: Optional[dict] = {},
message: Optional[Union[str, Callable]] = None,
**context,
) -> ChatResult:
"""(async) Initiate a chat with the recipient agent.
@ -961,6 +1011,7 @@ class ConversableAgent(LLMAgent):
"""
_chat_info = context.copy()
_chat_info["recipient"] = recipient
_chat_info["sender"] = self
consolidate_chat_info(_chat_info, uniform_sender=self)
for agent in [self, recipient]:
agent.previous_cache = agent.client_cache
@ -969,7 +1020,10 @@ class ConversableAgent(LLMAgent):
self._prepare_chat(recipient, clear_history, reply_at_receive=False)
for _ in range(max_turns):
if _ == 0:
msg2send = await self.a_generate_init_message(**context)
if isinstance(message, Callable):
msg2send = message(_chat_info["sender"], _chat_info["recipient"], context)
else:
msg2send = await self.a_generate_init_message(message, **context)
else:
msg2send = await self.a_generate_reply(messages=self.chat_messages[recipient], sender=recipient)
if msg2send is None:
@ -977,11 +1031,15 @@ class ConversableAgent(LLMAgent):
await self.a_send(msg2send, recipient, request_reply=True, silent=silent)
else:
self._prepare_chat(recipient, clear_history)
await self.a_send(await self.a_generate_init_message(**context), recipient, silent=silent)
if isinstance(message, Callable):
msg2send = message(_chat_info["sender"], _chat_info["recipient"], context)
else:
msg2send = await self.a_generate_init_message(message, **context)
await self.a_send(msg2send, recipient, silent=silent)
summary = self._summarize_chat(
context.get("summary_method", ConversableAgent.DEFAULT_summary_method),
summary_method,
summary_args,
recipient,
prompt=context.get("summary_prompt"),
cache=cache,
)
for agent in [self, recipient]:
@ -998,8 +1056,8 @@ class ConversableAgent(LLMAgent):
def _summarize_chat(
self,
summary_method,
summary_args,
recipient: Optional[Agent] = None,
prompt: Optional[str] = None,
cache: Optional[Cache] = None,
) -> str:
"""Get a chat summary from an agent participating in a chat.
@ -1011,35 +1069,60 @@ class ConversableAgent(LLMAgent):
def my_summary_method(
sender: ConversableAgent,
recipient: ConversableAgent,
summary_args: dict,
):
return recipient.last_message(sender)["content"]
```
summary_args (dict): a dictionary of arguments to be passed to the summary_method.
recipient: the recipient agent in a chat.
prompt (str): the prompt used to get a summary when summary_method is "reflection_with_llm".
Returns:
str: a chat summary from the agent.
"""
agent = self if recipient is None else recipient
summary = ""
if summary_method is None:
return summary
if "cache" not in summary_args:
summary_args["cache"] = cache
if summary_method == "reflection_with_llm":
prompt = ConversableAgent.DEFAULT_summary_prompt if prompt is None else prompt
if not isinstance(prompt, str):
raise ValueError("The summary_prompt must be a string.")
msg_list = agent.chat_messages_for_summary(self)
try:
summary = self._reflection_with_llm(prompt, msg_list, llm_agent=agent, cache=cache)
except BadRequestError as e:
warnings.warn(f"Cannot extract summary using reflection_with_llm: {e}", UserWarning)
elif summary_method == "last_msg" or summary_method is None:
try:
summary = agent.last_message(self)["content"].replace("TERMINATE", "")
except (IndexError, AttributeError) as e:
warnings.warn(f"Cannot extract summary using last_msg: {e}", UserWarning)
elif isinstance(summary_method, Callable):
summary = summary_method(recipient, self)
summary_method = self._reflection_with_llm_as_summary
elif summary_method == "last_msg":
summary_method = self._last_msg_as_summary
if isinstance(summary_method, Callable):
summary = summary_method(self, recipient, summary_args)
else:
raise ValueError(
"If not None, the summary_method must be a string from [`reflection_with_llm`, `last_msg`] or a callable."
)
return summary
@staticmethod
def _last_msg_as_summary(sender, recipient, summary_args) -> str:
"""Get a chat summary from the last message of the recipient."""
try:
summary = recipient.last_message(sender)["content"].replace("TERMINATE", "")
except (IndexError, AttributeError) as e:
warnings.warn(f"Cannot extract summary using last_msg: {e}. Using an empty str as summary.", UserWarning)
summary = ""
return summary
@staticmethod
def _reflection_with_llm_as_summary(sender, recipient, summary_args):
prompt = summary_args.get("summary_prompt")
prompt = ConversableAgent.DEFAULT_SUMMARY_PROMPT if prompt is None else prompt
if not isinstance(prompt, str):
raise ValueError("The summary_prompt must be a string.")
msg_list = recipient.chat_messages_for_summary(sender)
agent = sender if recipient is None else recipient
try:
summary = sender._reflection_with_llm(prompt, msg_list, llm_agent=agent, cache=summary_args.get("cache"))
except BadRequestError as e:
warnings.warn(
f"Cannot extract summary using reflection_with_llm: {e}. Using an empty str as summary.", UserWarning
)
summary = ""
return summary
def _reflection_with_llm(
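Given the new `(sender, recipient, summary_args)` contract, a custom summary method might look like this sketch (the `"prefix"` key is a made-up illustration of `summary_args`):

```python
def my_summary_method(sender, recipient, summary_args):
    # summary_args carries user-supplied options; "prefix" is hypothetical.
    prefix = summary_args.get("prefix", "")
    return prefix + recipient.last_message(sender)["content"]


# e.g.: user.initiate_chat(assistant, message="...", summary_method=my_summary_method,
#                          summary_args={"prefix": "Takeaway: "})
```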
@ -1072,7 +1155,6 @@ class ConversableAgent(LLMAgent):
def initiate_chats(self, chat_queue: List[Dict[str, Any]]) -> List[ChatResult]:
"""(Experimental) Initiate chats with multiple agents.
TODO: add async version of this method.
Args:
chat_queue (List[Dict]): a list of dictionaries containing the information of the chats.
@ -2105,70 +2187,63 @@ class ConversableAgent(LLMAgent):
"content": str(content),
}
def generate_init_message(self, **context) -> Union[str, Dict]:
def generate_init_message(self, message: Union[Dict, str, None], **context) -> Union[str, Dict]:
"""Generate the initial message for the agent.
TODO: offer a way to customize initial message without overriding this function.
Override this function to customize the initial message based on user's request.
If not overridden, "message" needs to be provided in the context, or input() will be called to get the initial message.
If message is None, input() will be called to get the initial message.
Args:
message (str or None): the message to be processed.
**context: any context information. It has the following reserved fields:
"message": a str of message.
"summary_method": a string or callable specifying the method to get a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".
- Supported strings are "last_msg" and "reflection_with_llm":
when set to "last_msg", it returns the last message of the dialog as the summary.
when set to "reflection_with_llm", it returns a summary extracted using an LLM client.
"reflection_with_llm" requires the llm_config to be set in either the sender or the recipient.
- A callable summary_method should take the recipient and sender agent in a chat as input and return a string of summary. E.g.,
```python
def my_summary_method(
sender: ConversableAgent,
recipient: ConversableAgent,
):
return recipient.last_message(sender)["content"]
```
When both the sender and the recipient have an llm client, the recipient's llm client will be used.
"summary_prompt": a string of text used to prompt a LLM-based agent (the sender or receiver agent) to reflext
on the conversation and extract a summary when summary_method is "reflection_with_llm".
Default is DEFAULT_summary_prompt, i.e., "Summarize takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out."
"carryover": a string or a list of string to specify the carryover information to be passed to this chat. It can be a string or a list of string.
If provided, we will combine this carryover with the "message" content when generating the initial chat
message.
Returns:
str or dict: the processed message.
"""
if "message" not in context:
context["message"] = self.get_human_input(">")
self._process_carryover(context)
return context["message"]
if message is None:
message = self.get_human_input(">")
if isinstance(message, str):
return self._process_carryover(message, context)
elif isinstance(message, dict):
message = message.copy()
# TODO: Do we need to do the following?
# if message.get("content") is None:
# message["content"] = self.get_human_input(">")
message["content"] = self._process_carryover(message.get("content", ""), context)
return message
def _process_carryover(self, context):
carryover = context.get("carryover", "")
def _process_carryover(self, message: str, context: dict) -> str:
carryover = context.get("carryover")
if carryover:
# if carryover is string
if isinstance(carryover, str):
context["message"] = context["message"] + "\nContext: \n" + carryover
message += "\nContext: \n" + carryover
elif isinstance(carryover, list):
context["message"] = context["message"] + "\nContext: \n" + ("\n").join([t for t in carryover])
message += "\nContext: \n" + ("\n").join([t for t in carryover])
else:
raise InvalidCarryOverType(
"Carryover should be a string or a list of strings. Not adding carryover to the message."
)
return message
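To illustrate the merge rule implemented above (a sketch; `agent` is any ConversableAgent):

```python
# A string carryover is appended once; a list is joined with newlines.
msg = agent.generate_init_message("Write a poem.", carryover=["fact 1", "fact 2"])
assert msg == "Write a poem.\nContext: \nfact 1\nfact 2"
```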
async def a_generate_init_message(self, **context) -> Union[str, Dict]:
async def a_generate_init_message(self, message: Union[Dict, str, None], **context) -> Union[str, Dict]:
"""Generate the initial message for the agent.
TODO: offer a way to customize initial message without overriding this function.
Override this function to customize the initial message based on user's request.
If not overridden, "message" needs to be provided in the context, or input() will be called to get the initial message.
If message is None, input() will be called to get the initial message.
Args:
Please refer to `generate_init_message` for the description of the arguments.
Returns:
str or dict: the processed message.
"""
if "message" not in context:
context["message"] = await self.a_get_human_input(">")
self._process_carryover(context)
return context["message"]
if message is None:
message = await self.a_get_human_input(">")
if isinstance(message, str):
return self._process_carryover(message, context)
elif isinstance(message, dict):
message = message.copy()
message["content"] = self._process_carryover(message["content"], context)
return message
def register_function(self, function_map: Dict[str, Union[Callable, None]]):
"""Register functions to the agent.

View File

@ -14,7 +14,6 @@ class UserProxyAgent(ConversableAgent):
To modify the way to get human input, override `get_human_input` method.
To modify the way to execute code blocks, single code block, or function call, override `execute_code_blocks`,
`run_code`, and `execute_function` methods respectively.
To customize the initial message when a conversation starts, override `generate_init_message` method.
"""
# Default UserProxyAgent.description values, based on human_input_mode

File diff suppressed because one or more lines are too long

View File

@ -465,7 +465,7 @@
"# With human-in-loop, the conversation will continue until the user says \"exit\".\n",
"code_problem = \"How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached.\"\n",
"ragproxyagent.initiate_chat(\n",
" assistant, problem=code_problem, search_string=\"spark\"\n",
" assistant, message=ragproxyagent.message_generator, problem=code_problem, search_string=\"spark\"\n",
") # search_string is used as an extra filter for the embeddings search, in this case, we only want to search documents that contain \"spark\"."
]
},
@ -1020,7 +1020,7 @@
"assistant.reset()\n",
"\n",
"qa_problem = \"Who is the author of FLAML?\"\n",
"ragproxyagent.initiate_chat(assistant, problem=qa_problem)"
"ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=qa_problem)"
]
},
{
@ -1433,7 +1433,7 @@
"# set `human_input_mode` to be `ALWAYS`, so the agent will ask for human input at every step.\n",
"ragproxyagent.human_input_mode = \"ALWAYS\"\n",
"code_problem = \"how to build a time series forecasting model for stock price using FLAML?\"\n",
"ragproxyagent.initiate_chat(assistant, problem=code_problem)"
"ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=code_problem)"
]
},
{
@ -1991,7 +1991,9 @@
"# set `human_input_mode` to be `ALWAYS`, so the agent will ask for human input at every step.\n",
"ragproxyagent.human_input_mode = \"ALWAYS\"\n",
"qa_problem = \"Is there a function named `tune_automl` in FLAML?\"\n",
"ragproxyagent.initiate_chat(assistant, problem=qa_problem) # type \"exit\" to exit the conversation"
"ragproxyagent.initiate_chat(\n",
" assistant, message=ragproxyagent.message_generator, problem=qa_problem\n",
") # type \"exit\" to exit the conversation"
]
},
{
@ -2582,7 +2584,7 @@
" assistant.reset()\n",
"\n",
" qa_problem = questions[i]\n",
" ragproxyagent.initiate_chat(assistant, problem=qa_problem, n_results=30)"
" ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=qa_problem, n_results=30)"
]
},
{
@ -3009,7 +3011,7 @@
" assistant.reset()\n",
"\n",
" qa_problem = questions[i]\n",
" ragproxyagent.initiate_chat(assistant, problem=qa_problem, n_results=10)"
" ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=qa_problem, n_results=10)"
]
}
],

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -35,14 +35,14 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LLM models: ['gpt-35-turbo', 'gpt-35-turbo-0613']\n"
"LLM models: ['gpt-4-1106-preview', 'gpt-4-turbo-preview', 'gpt-4-0613', 'gpt-35-turbo-0613', 'gpt-35-turbo-1106']\n"
]
}
],
@ -75,7 +75,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@ -162,6 +162,7 @@
" # Start chatting with boss_aid as this is the user proxy agent.\n",
" boss_aid.initiate_chat(\n",
" manager,\n",
" message=boss_aid.message_generator,\n",
" problem=PROBLEM,\n",
" n_results=3,\n",
" )\n",
@ -206,7 +207,8 @@
" boss_aid.problem = message if not hasattr(boss_aid, \"problem\") else boss_aid.problem\n",
" _, ret_msg = boss_aid._generate_retrieve_user_reply(message)\n",
" else:\n",
" ret_msg = boss_aid.generate_init_message(message, n_results=n_results)\n",
" _context = {\"problem\": message, \"n_results\": n_results}\n",
" ret_msg = boss_aid.message_generator(boss_aid, None, _context)\n",
" return ret_msg if ret_msg else message\n",
"\n",
" boss_aid.human_input_mode = \"NEVER\" # Disable human input for boss_aid since it only retrieves content.\n",
@ -249,7 +251,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@ -269,110 +271,49 @@
"text": [
"\u001b[33mSenior_Python_Engineer\u001b[0m (to chat_manager):\n",
"\n",
"To use Spark for parallel training in FLAML, you need to set up a Spark cluster and configure FLAML to use Spark as the backend. Here's a sample code to demonstrate how to use Spark for parallel training in FLAML:\n",
"To use Apache Spark for parallel training in FLAML, you need to use the `flaml.tune.run` function. Here is a sample code:\n",
"\n",
"```python\n",
"from flaml import AutoML\n",
"from pyspark.sql import SparkSession\n",
"from flaml import tune\n",
"\n",
"# Create a Spark session\n",
"spark = SparkSession.builder \\\n",
" .appName(\"FLAML with Spark\") \\\n",
" .getOrCreate()\n",
"# Define your training function\n",
"def training_function(config):\n",
" # your training code here\n",
" pass\n",
"\n",
"# Load your data into a Spark DataFrame\n",
"data = spark.read.format(\"csv\").option(\"header\", \"true\").load(\"your_data.csv\")\n",
"\n",
"# Initialize FLAML with Spark backend\n",
"automl = AutoML()\n",
"automl.initialize(spark=spark)\n",
"\n",
"# Specify the search space and other settings\n",
"settings = {\n",
" \"time_budget\": 60, # total time in seconds\n",
" \"metric\": 'accuracy',\n",
" \"task\": 'classification',\n",
" \"log_file_name\": 'flaml.log',\n",
"# Define your search space\n",
"search_space = {\n",
" \"lr\": tune.loguniform(1e-4, 1e-1),\n",
" \"momentum\": tune.uniform(0.1, 0.9),\n",
"}\n",
"\n",
"# Train and tune the model using FLAML\n",
"automl.fit(data=data, **settings)\n",
"# Use SparkTrials for parallelization\n",
"from ray.tune import SparkTrials\n",
"\n",
"# Get the best model and its hyperparameters\n",
"best_model = automl.best_model\n",
"best_config = automl.best_config\n",
"spark_trials = SparkTrials(parallelism=2)\n",
"\n",
"# Print the best model and its hyperparameters\n",
"print(\"Best model:\", best_model)\n",
"print(\"Best hyperparameters:\", best_config)\n",
"analysis = tune.run(\n",
" training_function,\n",
" config=search_space,\n",
" num_samples=10,\n",
" scheduler=tune.schedulers.FIFOScheduler(),\n",
" progress_reporter=tune.JupyterNotebookReporter(overwrite=True),\n",
" trial_executor=spark_trials,\n",
")\n",
"\n",
"# Terminate the Spark session\n",
"spark.stop()\n",
"print(\"Best config: \", analysis.get_best_config(metric=\"accuracy\", mode=\"max\"))\n",
"\n",
"# Get a dataframe for analyzing trial results.\n",
"df = analysis.results_df\n",
"```\n",
"\n",
"Make sure to replace `\"your_data.csv\"` with the path to your actual data file. Adjust the `settings` dictionary according to your requirements.\n",
"In this code, `training_function` is your training function, which should take a `config` argument. This `config` argument is a dictionary that includes hyperparameters for your model. The `search_space` is a dictionary that defines the search space for your hyperparameters.\n",
"\n",
"This code initializes a Spark session, loads the data into a Spark DataFrame, and then uses FLAML's `AutoML` class to train and tune a model in parallel using Spark. Finally, it prints the best model and its hyperparameters.\n",
"The `tune.run` function is used to start the hyperparameter tuning. The `config` argument is your search space, `num_samples` is the number of times to sample from the search space, and `scheduler` is the scheduler for the trials. The `trial_executor` argument is set to `spark_trials` to use Spark for parallelization.\n",
"\n",
"Remember to install FLAML and PySpark before running this code.\n",
"The `analysis.get_best_config` function is used to get the best hyperparameters found during the tuning. The `analysis.results_df` gives a dataframe that contains the results of all trials.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"To use Spark for parallel training in FLAML, you need to set up a Spark cluster and configure FLAML to use Spark as the backend. Here's a sample code to demonstrate how to use Spark for parallel training in FLAML:\n",
"\n",
"```python\n",
"from flaml import AutoML\n",
"from pyspark.sql import SparkSession\n",
"\n",
"# Create a Spark session\n",
"spark = SparkSession.builder \\\n",
" .appName(\"FLAML with Spark\") \\\n",
" .getOrCreate()\n",
"\n",
"# Load your data into a Spark DataFrame\n",
"data = spark.read.format(\"csv\").option(\"header\", \"true\").load(\"your_data.csv\")\n",
"\n",
"# Initialize FLAML with Spark backend\n",
"automl = AutoML()\n",
"automl.initialize(spark=spark)\n",
"\n",
"# Specify the search space and other settings\n",
"settings = {\n",
" \"time_budget\": 60, # total time in seconds\n",
" \"metric\": 'accuracy',\n",
" \"task\": 'classification',\n",
" \"log_file_name\": 'flaml.log',\n",
"}\n",
"\n",
"# Train and tune the model using FLAML\n",
"automl.fit(data=data, **settings)\n",
"\n",
"# Get the best model and its hyperparameters\n",
"best_model = automl.best_model\n",
"best_config = automl.best_config\n",
"\n",
"# Print the best model and its hyperparameters\n",
"print(\"Best model:\", best_model)\n",
"print(\"Best hyperparameters:\", best_config)\n",
"\n",
"# Terminate the Spark session\n",
"spark.stop()\n",
"```\n",
"\n",
"Make sure to replace `\"your_data.csv\"` with the path to your actual data file. Adjust the `settings` dictionary according to your requirements.\n",
"\n",
"This code initializes a Spark session, loads the data into a Spark DataFrame, and then uses FLAML's `AutoML` class to train and tune a model in parallel using Spark. Finally, it prints the best model and its hyperparameters.\n",
"\n",
"Remember to install FLAML and PySpark before running this code.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mCode_Reviewer\u001b[0m (to chat_manager):\n",
"\n",
"Looks good to me! The code demonstrates how to use Spark for parallel training in FLAML. It initializes a Spark session, loads the data into a Spark DataFrame, and then uses FLAML's `AutoML` class to train and tune a model in parallel using Spark. Finally, it prints the best model and its hyperparameters. Just make sure to replace `\"your_data.csv\"` with the actual path to the data file and adjust the `settings` dictionary as needed. \n",
"\n",
"If there are no further questions, I will terminate.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mProduct_Manager\u001b[0m (to chat_manager):\n",
"Please note that you need to have Apache Spark and Ray installed and properly configured in your environment to run this code.\n",
"\n",
"TERMINATE\n",
"\n",
@ -1108,9 +1049,13 @@
}
],
"metadata": {
"front_matter": {
"tags": ["group chat", "orchestration", "RAG"],
"description": "Implement and manage a multi-agent chat system using AutoGen, where AI assistants retrieve information, generate code, and interact collaboratively to solve complex tasks, especially in areas not covered by their training data."
"front_matter": {
"description": "Implement and manage a multi-agent chat system using AutoGen, where AI assistants retrieve information, generate code, and interact collaboratively to solve complex tasks, especially in areas not covered by their training data.",
"tags": [
"group chat",
"orchestration",
"RAG"
]
},
"kernelspec": {
"display_name": "flaml",
@ -1127,7 +1072,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
"version": "3.10.13"
}
},
"nbformat": 4,

View File

@ -11,7 +11,7 @@
"source": [
"# Solving Complex Tasks with Nested Chats\n",
"\n",
"This notebook shows how you can leverage \"nested chats\" to solve complex task with AutoGen. Nested chats allow AutoGen agents to use other agents as their inner monologue to accomplish tasks. This abstraction is powerful as it allows you to compose agents in rich ways. This notebook shows how you can nest a pretty complex sequence of chats among _inner_ agents inside an _outer_ agent.\n",
"This notebook shows how you can leverage **nested chats** to solve complex task with AutoGen. Nested chats is a sequence of chats created by a receiver agent after receiving a message from a sender agent and finished before the receiver agent replies to this message. Nested chats allow AutoGen agents to use other agents as their inner monologue to accomplish tasks. This abstraction is powerful as it allows you to compose agents in rich ways. This notebook shows how you can nest a pretty complex sequence of chats among _inner_ agents inside an _outer_ agent.\n",
"\n",
"\\:\\:\\:info Requirements\n",
"\n",
@ -809,8 +809,10 @@
],
"metadata": {
"front_matter": {
"tags": ["nested chat"],
"description": "Solve complex tasks with one or more sequence chats nested as inner monologue."
"description": "Solve complex tasks with one or more sequence chats nested as inner monologue.",
"tags": [
"nested chat"
]
},
"kernelspec": {
"display_name": "Python 3",

File diff suppressed because one or more lines are too long

View File

@ -9,7 +9,7 @@
}
},
"source": [
"# Solving Multiple Tasks in a Sequence of Chats\n",
"# Solving Multiple Tasks in a Sequence of Chats with Different Conversable Agent Pairs\n",
"\n",
"This notebook showcases how to use the new chat interface `autogen.initiate_chats` to solve a set of tasks with a sequence of chats. \n",
"\n",
@ -75,17 +75,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Solve tasks with a series of chats\n",
"### Example 1: Solve tasks with a series of chats\n",
"\n",
"The `autogen.initiate_chats` interface can take a list of dictionaries as inputs. Each dictionary preserves the following fields: \n",
"\n",
"- `sender`: a conversable agent as the sender;\n",
"- `recipient`: a conversable agent as the recipient;\n",
"- `message`: is a string of text (typically a message containing the task);\n",
"- `summary_method`: A string specifying the method to get a summary from the chat. Currently supported choices include `last_msg`, which takes the last message from the chat history as the summary, and `reflection_with_llm`, which uses an LLM call to reflect on the chat history and summarize a takeaway;\n",
"- `summary_prompt`: A string specifying how to instruct an LLM-backed agent (either the recipient or the sender in the chat) to reflect on the chat history and derive a summary. If not otherwise specified, a default prompt will be used when `summary_method` is `reflection_with_llm`.\n",
"\"Summarize the takeaway from the conversation. Do not add any introductory phrases. If the intended request is NOT properly addressed, please point it out.\"\n",
"- `carryover`: A string or a list of string to specify additional context to be used in the chat. With `initiate_chats`, summary from previous chats will be added as carryover. They will be appended after the carryover provided by the user."
"- `message`: is a string of text (typically a message containing the task) or a callable;\n",
"- `summary_method`: A string or a callable to get a summary from the chat. Currently supported choices include `last_msg`, which takes the last message from the chat history as the summary, and `reflection_with_llm`, which uses an LLM call to reflect on the chat history and summarize a takeaway;"
]
},
{
@ -819,6 +816,312 @@
" ), f\"The chat history should contain at most 4 messages because max_turns is set to 2 in the {i}-th chat.\"\n",
" print(\"\\n\\n\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Example 2: Solve a Sequence of Tasks involving User Defined Message\n",
"\n",
"In this example, say I have two tasks. One resarch task and a one writing task. The writing task needs data from research task. In this example, we direct read data from a file as part of the message."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"research_task = \"\"\"What are daily stock prices of NVDA and TESLA in the past month. Save the results in a .md file named 'stock_prices.md'.\"\"\"\n",
"\n",
"\n",
"def my_writing_task(sender, recipient, context):\n",
" carryover = context.get(\"carryover\", \"\")\n",
" if isinstance(carryover, list):\n",
" carryover = carryover[-1]\n",
"\n",
" try:\n",
" filename = context.get(\"work_dir\", \"\") + \"/stock_prices.md\"\n",
" with open(filename, \"r\") as file:\n",
" data = file.read()\n",
" except Exception as e:\n",
" data = f\"An error occurred while reading the file: {e}\"\n",
"\n",
" return (\n",
" \"\"\"Develop an engaging blog post using any information provided. \"\"\"\n",
" + \"\\nContext:\\n\"\n",
" + carryover\n",
" + \"\\nData:\"\n",
" + data\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[34m\n",
"********************************************************************************\u001b[0m\n",
"\u001b[34mStarting a new chat....\n",
"\n",
"Message:\n",
"What are daily stock prices of NVDA and TESLA in the past month. Save the results in a .md file named 'stock_prices.md'.\n",
"\n",
"Carryover: \n",
"\u001b[0m\n",
"\u001b[34m\n",
"********************************************************************************\u001b[0m\n",
"\u001b[33mUser_Proxy_Auto\u001b[0m (to Financial_researcher):\n",
"\n",
"What are daily stock prices of NVDA and TESLA in the past month. Save the results in a .md file named 'stock_prices.md'.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mFinancial_researcher\u001b[0m (to User_Proxy_Auto):\n",
"\n",
"To accomplish this task, I will write a Python script that utilizes an API to fetch the daily stock prices of NVIDIA (NVDA) and Tesla (TSLA) over the past month. I will then format the data into Markdown and save it to a file named 'stock_prices.md'.\n",
"\n",
"Step 1: Fetch the stock price data.\n",
"Step 2: Format the data into Markdown.\n",
"Step 3: Save the data to 'stock_prices.md'.\n",
"\n",
"We'll use the `yfinance` library to fetch the stock data. It's a popular choice for financial data extraction. If you don't have `yfinance` installed, you would need to install it via pip (`!pip install yfinance` in Jupyter notebooks or `pip install yfinance` in the command line). \n",
"\n",
"Let's start with step 1 by writing a script to collect the stock price data of NVDA and TSLA.\n",
"\n",
"```python\n",
"# filename: fetch_stock_data.py\n",
"\n",
"import yfinance as yf\n",
"from datetime import datetime, timedelta\n",
"\n",
"# Function to fetch the stock data\n",
"def fetch_stock_data(ticker):\n",
" end_date = datetime.now()\n",
" start_date = end_date - timedelta(days=30)\n",
" \n",
" # Fetching the historical data\n",
" stock_data = yf.download(ticker, start=start_date, end=end_date)\n",
" \n",
" # Formatting the date to a more readable form\n",
" stock_data.index = stock_data.index.strftime('%Y-%m-%d')\n",
" \n",
" return stock_data\n",
"\n",
"# Fetching data for NVDA and TSLA\n",
"nvda_data = fetch_stock_data('NVDA')\n",
"tesla_data = fetch_stock_data('TSLA')\n",
"\n",
"# Formatting the data into Markdown\n",
"with open('stock_prices.md', 'w') as md_file:\n",
" md_file.write('# Stock Prices for NVDA and TSLA in the Past Month\\n\\n')\n",
" md_file.write('## NVDA\\n\\n')\n",
" md_file.write(nvda_data['Close'].to_markdown() + '\\n\\n')\n",
" md_file.write('## TSLA\\n\\n')\n",
" md_file.write(tesla_data['Close'].to_markdown() + '\\n\\n')\n",
"\n",
"print(\"Stock prices fetched and saved to 'stock_prices.md'.\")\n",
"```\n",
"\n",
"Execute this script after ensuring that `yfinance` is installed. It will fetch the data and save it to 'stock_prices.md'.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[31m\n",
">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
"\u001b[33mUser_Proxy_Auto\u001b[0m (to Financial_researcher):\n",
"\n",
"exitcode: 0 (execution succeeded)\n",
"Code output: \n",
"Stock prices fetched and saved to 'stock_prices.md'.\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mFinancial_researcher\u001b[0m (to User_Proxy_Auto):\n",
"\n",
"The script execution was successful, and it has fetched the stock prices for NVDA and TSLA for the past month and saved the data in a file named 'stock_prices.md'.\n",
"\n",
"You should now have a Markdown file with the stock prices. If you need further assistance or additional tasks, feel free to ask.\n",
"\n",
"TERMINATE\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[34m\n",
"********************************************************************************\u001b[0m\n",
"\u001b[34mStarting a new chat....\n",
"\n",
"Message:\n",
"Callable: my_writing_task\n",
"\n",
"Carryover: \n",
"The script execution was successful, and it has fetched the stock prices for NVDA and TSLA for the past month and saved the data in a file named 'stock_prices.md'.\n",
"\n",
"You should now have a Markdown file with the stock prices. If you need further assistance or additional tasks, feel free to ask.\n",
"\n",
"\u001b[0m\n",
"\u001b[34m\n",
"********************************************************************************\u001b[0m\n",
"\u001b[33mUser_Proxy_Auto\u001b[0m (to Writer):\n",
"\n",
"Develop an engaging blog post using any information provided. \n",
"Context:\n",
"The script execution was successful, and it has fetched the stock prices for NVDA and TSLA for the past month and saved the data in a file named 'stock_prices.md'.\n",
"\n",
"You should now have a Markdown file with the stock prices. If you need further assistance or additional tasks, feel free to ask.\n",
"\n",
"\n",
"Data:# Stock Prices for NVDA and TSLA in the Past Month\n",
"\n",
"## NVDA\n",
"\n",
"| Date | Close |\n",
"|:-----------|--------:|\n",
"| 2024-02-02 | 661.6 |\n",
"| 2024-02-05 | 693.32 |\n",
"| 2024-02-06 | 682.23 |\n",
"| 2024-02-07 | 700.99 |\n",
"| 2024-02-08 | 696.41 |\n",
"| 2024-02-09 | 721.33 |\n",
"| 2024-02-12 | 722.48 |\n",
"| 2024-02-13 | 721.28 |\n",
"| 2024-02-14 | 739 |\n",
"| 2024-02-15 | 726.58 |\n",
"| 2024-02-16 | 726.13 |\n",
"| 2024-02-20 | 694.52 |\n",
"| 2024-02-21 | 674.72 |\n",
"| 2024-02-22 | 785.38 |\n",
"| 2024-02-23 | 788.17 |\n",
"| 2024-02-26 | 790.92 |\n",
"| 2024-02-27 | 787.01 |\n",
"| 2024-02-28 | 776.63 |\n",
"| 2024-02-29 | 791.12 |\n",
"| 2024-03-01 | 822.79 |\n",
"\n",
"## TSLA\n",
"\n",
"| Date | Close |\n",
"|:-----------|--------:|\n",
"| 2024-02-02 | 187.91 |\n",
"| 2024-02-05 | 181.06 |\n",
"| 2024-02-06 | 185.1 |\n",
"| 2024-02-07 | 187.58 |\n",
"| 2024-02-08 | 189.56 |\n",
"| 2024-02-09 | 193.57 |\n",
"| 2024-02-12 | 188.13 |\n",
"| 2024-02-13 | 184.02 |\n",
"| 2024-02-14 | 188.71 |\n",
"| 2024-02-15 | 200.45 |\n",
"| 2024-02-16 | 199.95 |\n",
"| 2024-02-20 | 193.76 |\n",
"| 2024-02-21 | 194.77 |\n",
"| 2024-02-22 | 197.41 |\n",
"| 2024-02-23 | 191.97 |\n",
"| 2024-02-26 | 199.4 |\n",
"| 2024-02-27 | 199.73 |\n",
"| 2024-02-28 | 202.04 |\n",
"| 2024-02-29 | 201.88 |\n",
"| 2024-03-01 | 202.64 |\n",
"\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mWriter\u001b[0m (to User_Proxy_Auto):\n",
"\n",
"# A Rollercoaster Month: The Tale of NVDA and TSLA Stock Prices\n",
"\n",
"Welcome to our latest analysis where we bring the ups and downs of the stock market to life. If you've been keeping an eye on the tech industry, you've probably noticed the electrifying performance of two market giants: NVIDIA (NVDA) and Tesla (TSLA). Over the past month, these stocks have taken investors on quite the ride, and we're here to break down the twists and turns.\n",
"\n",
"## NVIDIA: A Journey Through the Digital Realm\n",
"\n",
"NVIDIA, the powerhouse behind some of the most advanced graphics processing technologies, began February modestly at $661.60. However, it didn't take long for this tech titan to start climbing. By February 5, we saw NVDA accelerating to a cool $693.32, hinting at the potential for a thrilling month ahead. The momentum wasn't constant, though, with minor dips, like the one on February 6th to $682.23, reminding shareholders that the journey wouldn't be without its bumps.\n",
"\n",
"Midway through the month, there came a slight calm before the storm, as the stock hovered around $720, suggesting that something big was on the horizon. And true to form, on February 22nd, NVIDIA took everyone by surprise as it surged to an impressive $785.38.\n",
"\n",
"The climax of this exhilarating ride came as February gave way to March, with NVIDIA stock skyrocketing to an apex of $822.79. Investors buckled in tight surely enjoyed the view from this peak.\n",
"\n",
"## Tesla: Charging Ahead with Volatility\n",
"\n",
"Tesla, well-loved and -debated for its visionary approach to automotive technology, also delivered its fair share of market excitement. Starting on a lower key at $187.91, TSLA had a more modest opening than NVDA but held promise for an interesting chapter.\n",
"\n",
"The stock dropped to a monthly low of $181.06 on February 5, causing a bit of heart-in-mouth for supporters. Yet, in classic Tesla fashion, the company steered back onto the track, recovering to $185.10 the next day. Over the next few days, Tesla continued to inch higher, reaching $189.56 by February 8th.\n",
"\n",
"Despite a temporary retreat mid-month, where the stock dipped to $188.13, the electric automaker charged back with vigor, breaking the $200 barrier and hitting a high of $200.45 on February 15th - giving investors a much-needed adrenaline boost.\n",
"\n",
"As February ended and March began, Tesla maintained a price over $200, somewhat steadying the wheel for a close at $202.64, a reassuring end to a month of market fluctuations.\n",
"\n",
"## What's Behind the Movements?\n",
"\n",
"While the numbers tell one story, they don't explain the 'why' behind these market rides. For NVIDIA, a series of positive earnings reports, strategic partnerships, and strong demand for gaming and server GPUs contributed to its stock gains. For Tesla, market sentiment often hinged on production milestones, regulatory news, and the ever-present Twitter activity of its CEO, Elon Musk.\n",
"\n",
"It's important to remember that the stock market is influenced by a complex web of factors, including investor sentiment, industry trends, geopolitical events, and economic indicators. As such, companies like NVIDIA and Tesla don't just operate in a vacuum—every shift in the market can be an echo of larger global narratives.\n",
"\n",
"## An Investor's Perspective\n",
"\n",
"The past month's performance of NVDA and TSLA provides valuable lessons for investors. It showcases the importance of staying informed and understanding that volatility is part and parcel of the investing experience, especially in sectors like technology where innovation is rapid and competition fierce.\n",
"\n",
"While these price swings may cause some to balk, seasoned stock market enthusiasts know that these fluctuations can present opportunities. Whether leveraging these movements for short-term gains or buckling in for the long haul, understanding the story behind the numbers is crucial.\n",
"\n",
"In conclusion, the tale of NVDA and TSLA over the past month has been nothing short of a nail-biter. As we keep our eyes glued to the screens for the next series of movements, always remember: investing is not just about the figures; it's about seeing the whole picture and appreciating every twist and turn of the story that unfolds.\n",
"\n",
"Stay tuned, fasten your seatbelts, and until next time, may your portfolio's story be equally thrilling and rewarding.\n",
"\n",
"TERMINATE\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
}
],
"source": [
"researcher = autogen.AssistantAgent(\n",
" name=\"Financial_researcher\",\n",
" llm_config=llm_config,\n",
")\n",
"writer = autogen.AssistantAgent(\n",
" name=\"Writer\",\n",
" llm_config=llm_config,\n",
" system_message=\"\"\"\n",
" You are a professional writer, known for\n",
" your insightful and engaging articles.\n",
" You transform complex concepts into compelling narratives.\n",
" Reply \"TERMINATE\" in the end when everything is done.\n",
" \"\"\",\n",
")\n",
"\n",
"user_proxy_auto = autogen.UserProxyAgent(\n",
" name=\"User_Proxy_Auto\",\n",
" human_input_mode=\"NEVER\",\n",
" is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n",
" code_execution_config={\n",
" \"last_n_messages\": 1,\n",
" \"work_dir\": \"tasks\",\n",
" \"use_docker\": False,\n",
" }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n",
")\n",
"\n",
"chat_results = autogen.initiate_chats(\n",
" [\n",
" {\n",
" \"sender\": user_proxy_auto,\n",
" \"recipient\": researcher,\n",
" \"message\": research_task,\n",
" \"clear_history\": True,\n",
" \"silent\": False,\n",
" \"summary_method\": \"last_msg\",\n",
" },\n",
" {\n",
" \"sender\": user_proxy_auto,\n",
" \"recipient\": writer,\n",
" \"message\": my_writing_task,\n",
" \"max_turns\": 2, # max number of turns for the conversation (added for demo purposes, generally not necessarily needed)\n",
" \"summary_method\": \"reflection_with_llm\",\n",
" \"work_dir\": \"tasks\",\n",
" },\n",
" ]\n",
")"
]
}
],
"metadata": {

View File

@ -73,7 +73,7 @@ def test_retrievechat():
assistant.reset()
code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached."
ragproxyagent.initiate_chat(assistant, problem=code_problem, silent=True)
ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=code_problem, silent=True)
print(conversations)
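
The change in this hunk swaps the retrieve agent's implicit `generate_init_message` for an explicit callable passed as `message`. A minimal sketch of the callable-message contract, assuming the same keyword-to-context plumbing the TSP test below relies on (the `my_message` name and prompt text are illustrative, not from the codebase):

```python
def my_message(sender, recipient, context):
    # `context` collects the extra keyword arguments handed to initiate_chat,
    # e.g. problem=... (and search_string=... in the spark variant), plus any
    # carryover. The return value becomes the opening message (str or dict).
    problem = context.get("problem", "")
    return "Answer the following question.\n\nQuestion: " + problem

# Hypothetical usage, with the agents elided:
# ragproxyagent.initiate_chat(assistant, message=my_message, problem="2x = 4")
```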

View File

@ -68,7 +68,9 @@ def test_retrievechat():
assistant.reset()
code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached."
ragproxyagent.initiate_chat(assistant, problem=code_problem, search_string="spark", silent=True)
ragproxyagent.initiate_chat(
assistant, message=ragproxyagent.message_generator, problem=code_problem, search_string="spark", silent=True
)
print(conversations)

View File

@ -173,32 +173,36 @@ def test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10):
"Can we add a new point to the graph? It's distance should be randomly between 0 - 5 to each of the existing points.",
]
class TSPUserProxyAgent(UserProxyAgent):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with open(f"{here}/tsp_prompt.txt", "r") as f:
self._prompt = f.read()
def generate_init_message(self, question) -> str:
return self._prompt.format(question=question)
def tsp_message(sender, recipient, context):
filename = context.get("prompt_filename", "")
with open(filename, "r") as f:
prompt = f.read()
question = context.get("question", "")
return prompt.format(question=question)
# autogen.ChatCompletion.start_logging()
assistant = AssistantAgent("assistant", llm_config={"temperature": 0, "config_list": config_list})
user = TSPUserProxyAgent(
user = UserProxyAgent(
"user",
code_execution_config={"work_dir": here},
code_execution_config={
"work_dir": here,
},
human_input_mode=human_input_mode,
max_consecutive_auto_reply=max_consecutive_auto_reply,
)
user.initiate_chat(assistant, question=hard_questions[2])
chat_res = user.initiate_chat(
assistant, message=tsp_message, question=hard_questions[2], prompt_filename=f"{here}/tsp_prompt.txt"
)
# print(autogen.ChatCompletion.logged_history)
# autogen.ChatCompletion.stop_logging()
# print(chat_res.summary)
print(chat_res.cost)
if __name__ == "__main__":
test_gpt35()
# test_gpt35()
# test_create_execute_script(human_input_mode="TERMINATE")
# when GPT-4, i.e., the DEFAULT_MODEL, is used, the conversation in the following test
# should terminate in 2-3 rounds of interactions (because is_termination_msg should be true after 2-3 rounds)
# although the max_consecutive_auto_reply is set to 10.
# test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10)
test_tsp(human_input_mode="NEVER", max_consecutive_auto_reply=10)

View File

@ -59,7 +59,7 @@ async def test_async_chats():
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
def my_summary_method(recipient, sender):
def my_summary_method(recipient, sender, summary_args):
return recipient.chat_messages[sender][0].get("content", "")
chat_res = await user.a_initiate_chats(
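
Custom summary methods gain a third parameter in this commit: `summary_args`, a dict populated from the `summary_args=` keyword of `initiate_chat`. A minimal sketch; the `prefix` key is an illustrative assumption, not a built-in:

```python
def my_summary_method(recipient, sender, summary_args):
    # summary_args is whatever dict the caller passed via summary_args=;
    # "prefix" is only an example key.
    prefix = summary_args.get("prefix", "")
    return prefix + recipient.chat_messages[sender][-1].get("content", "")

# Hypothetical usage:
# user.initiate_chat(assistant, message="hi",
#                    summary_method=my_summary_method,
#                    summary_args={"prefix": "Summary: "})
```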

View File

@ -16,14 +16,16 @@ from conftest import skip_openai # noqa: E402
def test_chat_messages_for_summary():
assistant = UserProxyAgent(name="assistant", human_input_mode="NEVER")
user = UserProxyAgent(name="user", human_input_mode="NEVER")
assistant = UserProxyAgent(name="assistant", human_input_mode="NEVER", code_execution_config={"use_docker": False})
user = UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config={"use_docker": False})
user.send("What is the capital of France?", assistant)
messages = assistant.chat_messages_for_summary(user)
assert len(messages) == 1
groupchat = GroupChat(agents=[user, assistant], messages=[], max_round=2)
manager = GroupChatManager(groupchat=groupchat, name="manager", llm_config=False)
manager = GroupChatManager(
groupchat=groupchat, name="manager", llm_config=False, code_execution_config={"use_docker": False}
)
user.initiate_chat(manager, message="What is the capital of France?")
messages = manager.chat_messages_for_summary(user)
assert len(messages) == 2
@ -42,10 +44,10 @@ def test_chats_group():
)
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Pros and cons of the companies I'm interested in. Keep it short.""",
"""Give lucky numbers for them.""",
]
writing_tasks = ["""Develop a short but engaging blog post using any information provided."""]
writing_tasks = ["""Make a joke."""]
user_proxy = UserProxyAgent(
name="User_proxy",
@ -126,13 +128,15 @@ def test_chats_group():
"recipient": financial_assistant,
"message": financial_tasks[0],
"summary_method": "last_msg",
"max_turns": 1,
},
{
"recipient": manager_1,
"message": financial_tasks[1],
"summary_method": "reflection_with_llm",
"max_turns": 1,
},
{"recipient": manager_2, "message": writing_tasks[0]},
{"recipient": manager_2, "message": writing_tasks[0], "max_turns": 1},
]
)
@ -148,26 +152,44 @@ def test_chats_group():
@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
def test_chats():
import random
class Function:
call_count = 0
def get_random_number(self):
self.call_count += 1
return random.randint(0, 100)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
def luck_number_message(sender, recipient, context):
final_msg = {}
final_msg["content"] = "Give lucky numbers for them."
final_msg["function_call"] = {"name": "get_random_number", "arguments": "{}"}
return final_msg
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Get their stock price.""",
"""Analyze pros and cons. Keep it short.""",
luck_number_message,
luck_number_message,
]
writing_tasks = ["""Develop a short but engaging blog post using any information provided."""]
writing_tasks = ["""Make a joke."""]
func = Function()
financial_assistant_1 = AssistantAgent(
name="Financial_assistant_1",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
financial_assistant_2 = AssistantAgent(
name="Financial_assistant_2",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
writer = AssistantAgent(
name="Writer",
@ -192,9 +214,18 @@ def test_chats():
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
def my_summary_method(recipient, sender):
def my_summary_method(recipient, sender, summary_args):
return recipient.chat_messages[sender][0].get("content", "")
# chat_res_play = user.initiate_chat(
# player,
# message= {"content": "Let's play a game.", "function_call": {"name": "get_random_number", "arguments": "{}"}},
# max_turns=1,
# summary_method=my_summary,
# summary_args={"prefix": "This is the last message:"},
# )
# print(chat_res_play.summary)
chat_res = user.initiate_chats(
[
{
@ -215,12 +246,24 @@ def test_chats():
"message": financial_tasks[2],
"summary_method": "last_msg",
"clear_history": False,
"max_turns": 1,
},
{
"recipient": financial_assistant_1,
"message": {
"content": "Let's play a game.",
"function_call": {"name": "get_random_number", "arguments": "{}"},
},
"carryover": "I like even number.",
"summary_method": "last_msg",
"max_turns": 1,
},
{
"recipient": writer,
"message": writing_tasks[0],
"carryover": "I want to include a figure or a table of data in the blogpost.",
"carryover": "Make the numbers relevant.",
"summary_method": "last_msg",
"max_turns": 1,
},
]
)
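
Two of the queued chats above set a per-chat `carryover`. When the `message` is a callable, the carryover arrives through `context["carryover"]`: a plain string, or a list once summaries from earlier chats in the queue have accumulated, which is what `my_writing_task` in `test_udf_message_in_chats` below handles. A minimal carryover-aware sketch (the newline join is illustrative; the test takes the last element instead):

```python
def writing_message(sender, recipient, context):
    # carryover may be a str (as set explicitly above) or a list of strings
    # accumulated from the summaries of earlier chats in the queue.
    carryover = context.get("carryover", "")
    if isinstance(carryover, list):
        carryover = "\n".join(carryover)
    return "Make a joke.\nContext:\n" + carryover
```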
@ -248,8 +291,8 @@ def test_chats_general():
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Get their stock price.""",
"""Analyze pros and cons. Keep it short.""",
"""Give lucky numbers for them.""",
"""Give lucky words for them.""",
]
writing_tasks = ["""Develop a short but engaging blog post using any information provided."""]
@ -297,7 +340,7 @@ def test_chats_general():
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
def my_summary_method(recipient, sender):
def my_summary_method(recipient, sender, summary_args):
return recipient.chat_messages[sender][0].get("content", "")
chat_res = initiate_chats(
@ -354,8 +397,8 @@ def test_chats_exceptions():
financial_tasks = [
"""What are the full names of NVDA and TESLA.""",
"""Get their stock price.""",
"""Analyze pros and cons. Keep it short.""",
"""Give lucky numbers for them.""",
"""Give lucky words for them.""",
]
financial_assistant_1 = AssistantAgent(
@ -491,10 +534,93 @@ def test_chats_w_func():
print(res.summary, res.cost, res.chat_history)
@pytest.mark.skipif(skip_openai, reason="requested to skip openai tests")
def test_udf_message_in_chats():
import autogen
config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST")
llm_config = {"config_list": config_list}
research_task = """
## NVDA (NVIDIA Corporation)
- Current Stock Price: $822.79
- Performance over the past month: 24.36%
## TSLA (Tesla, Inc.)
- Current Stock Price: $202.64
- Performance over the past month: 7.84%
Save them to a file named stock_prices.md.
"""
def my_writing_task(sender, recipient, context):
carryover = context.get("carryover", "")
if isinstance(carryover, list):
carryover = carryover[-1]
try:
filename = context.get("work_dir", "") + "/stock_prices.md"
with open(filename, "r") as file:
data = file.read()
except Exception as e:
data = f"An error occurred while reading the file: {e}"
return """Make a joke. """ + "\nContext:\n" + carryover + "\nData:" + data
researcher = autogen.AssistantAgent(
name="Financial_researcher",
llm_config=llm_config,
)
writer = autogen.AssistantAgent(
name="Writer",
llm_config=llm_config,
system_message="""
You are a professional writer, known for
your insightful and engaging articles.
You transform complex concepts into compelling narratives.
Reply "TERMINATE" in the end when everything is done.
""",
)
user_proxy_auto = autogen.UserProxyAgent(
name="User_Proxy_Auto",
human_input_mode="NEVER",
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
code_execution_config={
"last_n_messages": 1,
"work_dir": "tasks",
"use_docker": False,
}, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
chat_results = autogen.initiate_chats(
[
{
"sender": user_proxy_auto,
"recipient": researcher,
"message": research_task,
"clear_history": True,
"silent": False,
},
{
"sender": user_proxy_auto,
"recipient": writer,
"message": my_writing_task,
"max_turns": 2, # max number of turns for the conversation (added for demo purposes, generally not necessarily needed)
"summary_method": "reflection_with_llm",
"work_dir": "tasks",
},
]
)
print(chat_results[0].summary, chat_results[0].cost)
print(chat_results[1].summary, chat_results[1].cost)
if __name__ == "__main__":
test_chats()
test_chats_general()
# test_chats_general()
# test_chats_exceptions()
# test_chats_group()
# test_chats_w_func()
# test_chat_messages_for_summary()
# test_udf_message_in_chats()

View File

@ -14,7 +14,7 @@ from unittest.mock import patch
from pydantic import BaseModel, Field
from typing_extensions import Annotated
import autogen
import os
from autogen.agentchat import ConversableAgent, UserProxyAgent
from autogen.agentchat.conversable_agent import register_function
from autogen.exception_utils import InvalidCarryOverType, SenderRequired
@ -28,6 +28,8 @@ except ImportError:
else:
skip = False or skip_openai
here = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture
def conversable_agent():
@ -1091,6 +1093,137 @@ def test_max_turn():
assert len(res.chat_history) <= 6
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_message_func():
import random
class Function:
call_count = 0
def get_random_number(self):
self.call_count += 1
return random.randint(0, 100)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
def my_message_play(sender, recipient, context):
final_msg = {}
final_msg["content"] = "Let's play a game."
final_msg["function_call"] = {"name": "get_random_number", "arguments": "{}"}
return final_msg
func = Function()
# autogen.ChatCompletion.start_logging()
user = UserProxyAgent(
"user",
code_execution_config={
"work_dir": here,
"use_docker": False,
},
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
)
player = autogen.AssistantAgent(
name="Player",
system_message="You will use function `get_random_number` to get a random number. Stop only when you get at least 1 even number and 1 odd number. Reply TERMINATE to stop.",
description="A player that makes function_calls.",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
chat_res_play = user.initiate_chat(
player,
message={"content": "Let's play a game.", "function_call": {"name": "get_random_number", "arguments": "{}"}},
max_turns=1,
)
print(chat_res_play.summary)
chat_res_play = user.initiate_chat(
player,
message=my_message_play,
max_turns=1,
)
print(chat_res_play.summary)
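
Both `initiate_chat` calls above open with the same message, one as a dict and one as a callable returning that dict. As a sketch, the dict form only needs OpenAI-style fields; `arguments` is a JSON-encoded string, and the `function_call` name must match an entry registered in the recipient's `function_map`:

```python
# Minimal dict-form opening message (mirrors the test above).
opening_message = {
    "content": "Let's play a game.",
    "function_call": {"name": "get_random_number", "arguments": "{}"},
}
```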
@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
def test_summary():
import random
class Function:
call_count = 0
def get_random_number(self):
self.call_count += 1
return random.randint(0, 100)
config_list = autogen.config_list_from_json(
OAI_CONFIG_LIST,
file_location=KEY_LOC,
)
def my_message_play(sender, recipient, context):
final_msg = {}
final_msg["content"] = "Let's play a game."
final_msg["function_call"] = {"name": "get_random_number", "arguments": "{}"}
return final_msg
def my_summary(sender, recipient, summary_args):
prefix = summary_args.get("prefix", "Summary:")
return prefix + recipient.chat_messages[sender][-1].get("content", "")
func = Function()
# autogen.ChatCompletion.start_logging()
user = UserProxyAgent(
"user",
code_execution_config={
"work_dir": here,
"use_docker": False,
},
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
)
player = autogen.AssistantAgent(
name="Player",
system_message="You will use function `get_random_number` to get a random number. Stop only when you get at least 1 even number and 1 odd number. Reply TERMINATE to stop.",
description="A player that makes function_calls.",
llm_config={"config_list": config_list},
function_map={"get_random_number": func.get_random_number},
)
chat_res_play = user.initiate_chat(
player,
message=my_message_play,
# message="Make a joke about AI",
max_turns=1,
summary_method="reflection_with_llm",
summary_args={"summary_prompt": "Summarize the conversation into less than five words."},
)
print(chat_res_play.summary)
chat_res_play = user.initiate_chat(
player,
# message=my_message_play,
message="Make a joke about AI",
max_turns=1,
summary_method=my_summary,
summary_args={"prefix": "This is the last message:"},
)
print(chat_res_play.summary)
chat_res_play = user.initiate_chat(
player,
message={"content": "Let's play a game.", "function_call": {"name": "get_random_number", "arguments": "{}"}},
max_turns=1,
summary_method=my_summary,
summary_args={"prefix": "This is the last message:"},
)
print(chat_res_play.summary)
def test_process_before_send():
print_mock = unittest.mock.MagicMock()
@ -1140,4 +1273,6 @@ if __name__ == "__main__":
# test_conversable_agent()
# test_no_llm_config()
# test_max_turn()
test_process_before_send()
# test_process_before_send()
test_message_func()
test_summary()

View File

@ -53,11 +53,7 @@ def test_math_user_proxy_agent():
assistant.reset()
math_problem = "$x^3=125$. What is x?"
# assistant.receive(
# message=mathproxyagent.generate_init_message(math_problem),
# sender=mathproxyagent,
# )
res = mathproxyagent.initiate_chat(assistant, problem=math_problem)
res = mathproxyagent.initiate_chat(assistant, message=mathproxyagent.message_generator, problem=math_problem)
print(conversations)
print("Chat summary:", res.summary)
print("Chat history:", res.chat_history)
@ -121,13 +117,13 @@ def test_execute_one_wolfram_query():
def test_generate_prompt():
mathproxyagent = MathUserProxyAgent(name="MathChatAgent", human_input_mode="NEVER")
assert "customized" in mathproxyagent.generate_init_message(
problem="2x=4", prompt_type="python", customized_prompt="customized"
assert "customized" in mathproxyagent.message_generator(
mathproxyagent, None, {"problem": "2x=4", "prompt_type": "python", "customized_prompt": "customized"}
)
if __name__ == "__main__":
# test_add_remove_print()
# test_execute_one_python_code()
# test_generate_prompt()
test_generate_prompt()
test_math_user_proxy_agent()

View File

@ -353,7 +353,8 @@ def retrieve_content(message, n_results=3):
boss_aid.problem = message if not hasattr(boss_aid, "problem") else boss_aid.problem
_, ret_msg = boss_aid._generate_retrieve_user_reply(message)
else:
ret_msg = boss_aid.generate_init_message(message, n_results=n_results)
_context = {"problem": message, "n_results": n_results}
ret_msg = boss_aid.message_generator(boss_aid, None, _context)
return ret_msg if ret_msg else message
for agent in [boss, coder, pm, reviewer]:
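
The hunk above also shows that `message_generator` can be invoked directly, outside `initiate_chat`, to build the retrieval-augmented prompt without starting a chat: the agent is passed as its own sender, the recipient is `None`, and the context dict is assembled by hand. A small wrapper sketch of that pattern (`retrieve_prompt` is hypothetical; `agent` is assumed to be a `RetrieveUserProxyAgent`):

```python
def retrieve_prompt(agent, problem, n_results=3):
    # Build the context by hand and call the generator directly; recipient
    # is None because no conversation is being opened.
    context = {"problem": problem, "n_results": n_results}
    return agent.message_generator(agent, None, context)
```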