mirror of https://github.com/microsoft/autogen.git
Use script-friendly example in README and quickstart (#3728)
* Use script-friendly example in README and quickstart
* Remove accidentally committed file
* Update instruction
parent d7ae970c87
commit 498854aed3
README.md | 13 changes
@@ -106,12 +106,21 @@ The following code uses code execution, you need to have [Docker installed](http
 and running on your machine.
 
 ```python
+import asyncio
+import logging
+from autogen_agentchat import EVENT_LOGGER_NAME
 from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
+from autogen_agentchat.logging import ConsoleLogHandler
 from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
 from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
 from autogen_core.components.models import OpenAIChatCompletionClient
 
-async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
+logger = logging.getLogger(EVENT_LOGGER_NAME)
+logger.addHandler(ConsoleLogHandler())
+logger.setLevel(logging.INFO)
+
+async def main() -> None:
+    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
         code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
         coding_assistant_agent = CodingAssistantAgent(
             "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
@@ -121,6 +130,8 @@ async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
             task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
             termination_condition=StopMessageTermination(),
         )
+
+asyncio.run(main())
 ```
 
 ### C#
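Assembled from the two hunks above (plus the unchanged lines between them, which also appear in the snippet hunk at the end of this diff), the updated README example reads roughly as follows. This is a reconstruction, not part of the commit text itself: indentation is inferred, since the rendered diff strips leading whitespace.

```python
import asyncio
import logging
from autogen_agentchat import EVENT_LOGGER_NAME
from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
from autogen_agentchat.logging import ConsoleLogHandler
from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
from autogen_core.components.models import OpenAIChatCompletionClient

# Console logging is now opt-in (see the log-handler hunk below), so the
# example configures the event logger explicitly.
logger = logging.getLogger(EVENT_LOGGER_NAME)
logger.addHandler(ConsoleLogHandler())
logger.setLevel(logging.INFO)


async def main() -> None:
    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
        code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
        coding_assistant_agent = CodingAssistantAgent(
            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
        )
        group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
        result = await group_chat.run(
            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
            termination_condition=StopMessageTermination(),
        )


# Driving main() with asyncio.run() is what makes the snippet runnable as a
# plain script, which is the point of this commit.
asyncio.run(main())
```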
@@ -34,7 +34,7 @@ When using code, you must indicate the script type in the code block. The user c
 If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
 If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
 When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
-Reply "TERMINATE" in the end when everything is done.""",
+Reply "TERMINATE" in the end when code has been executed and task is complete.""",
     ):
         super().__init__(name=name, description=description)
         self._model_client = model_client
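The edited sentence is the tail of the coding assistant's default instruction block, which the hunk shows as a defaulted constructor argument. Purely as an illustration of overriding it, and assuming the constructor exposes the text as a `system_message` keyword (the parameter name is not visible in this hunk), an override might look like this:

```python
from autogen_agentchat.agents import CodingAssistantAgent
from autogen_core.components.models import OpenAIChatCompletionClient

# Hypothetical override of the default instructions edited above. The keyword
# `system_message` is an assumption; the hunk only shows the default string value.
coding_assistant_agent = CodingAssistantAgent(
    "coding_assistant",
    model_client=OpenAIChatCompletionClient(model="gpt-4o"),
    system_message=(
        "You are a helpful assistant that writes Python code for the user to execute. "
        'Reply "TERMINATE" in the end when code has been executed and task is complete.'
    ),
)
```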
@@ -3,7 +3,6 @@ import logging
 import sys
 from datetime import datetime
 
-from .. import EVENT_LOGGER_NAME
 from ..agents import ChatMessage, StopMessage, TextMessage
 from ..teams._events import (
     ContentPublishEvent,
@@ -68,8 +67,3 @@ class ConsoleLogHandler(logging.Handler):
             sys.stdout.flush()
         else:
             raise ValueError(f"Unexpected log record: {record.msg}")
-
-
-logger = logging.getLogger(EVENT_LOGGER_NAME)
-logger.setLevel(logging.INFO)
-logger.addHandler(ConsoleLogHandler())
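The second hunk removes the logger configuration that used to run at import time, which is why the README example above (and the snippet below) now set up console logging explicitly. A minimal sketch of that opt-in setup, mirroring what the updated snippets do:

```python
import logging

from autogen_agentchat import EVENT_LOGGER_NAME
from autogen_agentchat.logging import ConsoleLogHandler

# With the import-time setup removed, nothing is printed unless the application
# attaches a handler to the agentchat event logger itself.
logger = logging.getLogger(EVENT_LOGGER_NAME)
logger.addHandler(ConsoleLogHandler())
logger.setLevel(logging.INFO)
```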
@@ -29,6 +29,9 @@ The following example illustrates creating a simple agent team with two agents t
 1. `CodingAssistantAgent` that generates responses using an LLM model.
 2. `CodeExecutorAgent` that executes code snippets and returns the output.
 
+Because the `CodeExecutorAgent` uses a Docker command-line code executor to execute code snippets,
+you need to have [Docker installed](https://docs.docker.com/engine/install/) and running on your machine.
+
 The task is to "Create a plot of NVIDIA and TESLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'."
 
 ```{include} stocksnippet.md
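Since the added prose makes a running Docker daemon a hard prerequisite, a small pre-flight check can spare readers a confusing traceback. This is not part of the commit, only an illustrative sketch that shells out to the standard `docker info` command:

```python
import shutil
import subprocess


def docker_is_available() -> bool:
    """Return True if the Docker CLI exists and the daemon responds."""
    if shutil.which("docker") is None:
        return False
    # `docker info` exits with a non-zero status when the daemon is not running.
    result = subprocess.run(
        ["docker", "info"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result.returncode == 0


if not docker_is_available():
    raise RuntimeError("Docker must be installed and running to execute this example.")
```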
@@ -2,21 +2,32 @@
 
 `````{tab-item} AgentChat (v0.4x)
 ```python
+import asyncio
+import logging
+from autogen_agentchat import EVENT_LOGGER_NAME
 from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
+from autogen_agentchat.logging import ConsoleLogHandler
 from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
 from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
 from autogen_core.components.models import OpenAIChatCompletionClient
 
-async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
+logger = logging.getLogger(EVENT_LOGGER_NAME)
+logger.addHandler(ConsoleLogHandler())
+logger.setLevel(logging.INFO)
+
+async def main() -> None:
+    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
         code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
         coding_assistant_agent = CodingAssistantAgent(
-            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4")
+            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
         )
         group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
         result = await group_chat.run(
-            task="Create a plot of NVIDIA and TESLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
+            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
             termination_condition=StopMessageTermination(),
         )
+
+asyncio.run(main())
 ```
 `````
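The same pattern in isolation, stripped of the AutoGen specifics: wrapping the awaits in `main()` and handing it to `asyncio.run()` is what lets the snippet run as a plain `python script.py`, whereas the previous top-level `async with` is a syntax error in a script and only works in environments that already drive an event loop, such as a Jupyter notebook. A minimal sketch of the pattern:

```python
import asyncio


async def main() -> None:
    # The async setup and awaits from the snippet above go here, e.g. the
    # DockerCommandLineCodeExecutor context manager and group_chat.run(...).
    ...


if __name__ == "__main__":
    # asyncio.run() creates the event loop, runs main() to completion, and
    # closes the loop, which is exactly what a copy-pasted script needs.
    asyncio.run(main())
```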