mirror of https://github.com/microsoft/autogen.git
dev6 (#4129)
parent 4786f189bc
commit e27c740961
@@ -38,6 +38,7 @@ jobs:
 { ref: "v0.4.0.dev3", dest-dir: "0.4.0.dev3" },
 { ref: "v0.4.0.dev4", dest-dir: "0.4.0.dev4" },
 { ref: "v0.4.0.dev5", dest-dir: "0.4.0.dev5" },
+{ ref: "v0.4.0.dev6", dest-dir: "0.4.0.dev6" },
 ]
 steps:
 - name: Checkout
@@ -101,7 +101,7 @@ We look forward to your contributions!
 First install the packages:
 
 ```bash
-pip install 'autogen-agentchat==0.4.0.dev5' 'autogen-ext[openai]==0.4.0.dev5'
+pip install 'autogen-agentchat==0.4.0.dev6' 'autogen-ext[openai]==0.4.0.dev6'
 ```
 
 The following code uses OpenAI's GPT-4o model and you need to provide your
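The README context above cuts off at the model-client setup. A minimal sketch of that setup, assuming the same `OpenAIChatCompletionClient` shown later in this commit; the exact README code is not part of this hunk:

```python
# Hedged sketch: the API key can be passed explicitly or, as the models notebook
# later in this commit notes, picked up from the OPENAI_API_KEY environment variable.
from autogen_ext.models import OpenAIChatCompletionClient

model_client = OpenAIChatCompletionClient(
    model="gpt-4o-2024-08-06",
    # api_key="sk-...",  # optional if OPENAI_API_KEY is set in the environment
)
```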
@@ -36,7 +36,12 @@
 {
 "name": "0.4.0.dev5",
 "version": "0.4.0.dev5",
-"url": "/autogen/0.4.0.dev5/",
+"url": "/autogen/0.4.0.dev5/"
+},
+{
+"name": "0.4.0.dev6",
+"version": "0.4.0.dev6",
+"url": "/autogen/0.4.0.dev6/",
 "preferred": true
 }
 ]
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "autogen-agentchat"
-version = "0.4.0.dev5"
+version = "0.4.0.dev6"
 license = {file = "LICENSE-CODE"}
 description = "AutoGen agents and teams library"
 readme = "README.md"
@@ -15,7 +15,7 @@ classifiers = [
 "Operating System :: OS Independent",
 ]
 dependencies = [
-"autogen-core==0.4.0.dev5",
+"autogen-core==0.4.0.dev6",
 ]
 
 [tool.uv]
@@ -61,7 +61,7 @@ AgentChat </div>
 High-level API that includes preset agents and teams for building multi-agent systems.
 
 ```sh
-pip install 'autogen-agentchat==0.4.0.dev5'
+pip install 'autogen-agentchat==0.4.0.dev6'
 ```
 
 💡 *Start here if you are looking for an API similar to AutoGen 0.2*
@@ -82,7 +82,7 @@ Get Started
 Provides building blocks for creating asynchronous, event driven multi-agent systems.
 
 ```sh
-pip install 'autogen-core==0.4.0.dev5'
+pip install 'autogen-core==0.4.0.dev6'
 ```
 
 +++
@@ -29,10 +29,10 @@ myst:
 Library that is at a similar level of abstraction as AutoGen 0.2, including default agents and group chat.
 
 ```sh
-pip install 'autogen-agentchat==0.4.0.dev5'
+pip install 'autogen-agentchat==0.4.0.dev6'
 ```
 
-[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev5/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat)
+[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat)
 :::
 
 (pkg-info-autogen-core)=
@@ -44,10 +44,10 @@ pip install 'autogen-agentchat==0.4.0.dev5'
 Implements the core functionality of the AutoGen framework, providing basic building blocks for creating multi-agent systems.
 
 ```sh
-pip install 'autogen-core==0.4.0.dev5'
+pip install 'autogen-core==0.4.0.dev6'
 ```
 
-[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev5/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core)
+[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core)
 :::
 
 (pkg-info-autogen-ext)=
@@ -59,7 +59,7 @@ pip install 'autogen-core==0.4.0.dev5'
 Implementations of core components that interface with external services, or use extra dependencies. For example, Docker based code execution.
 
 ```sh
-pip install 'autogen-ext==0.4.0.dev5'
+pip install 'autogen-ext==0.4.0.dev6'
 ```
 
 Extras:
@@ -69,7 +69,7 @@ Extras:
 - `docker` needed for {py:class}`~autogen_ext.code_executors.DockerCommandLineCodeExecutor`
 - `openai` needed for {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`
 
-[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext/autogen_ext.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev5/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext)
+[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext/autogen_ext.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext)
 :::
 
 (pkg-info-autogen-magentic-one)=
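For context on the extras listed in the hunk above, a minimal sketch of the imports each extra enables, using only the class paths named in those bullets:

```python
# Hedged sketch: these imports correspond to the `docker` and `openai` extras of
# autogen-ext, per the bullets in the hunk above.
from autogen_ext.code_executors import DockerCommandLineCodeExecutor  # needs the `docker` extra
from autogen_ext.models import OpenAIChatCompletionClient  # needs the `openai` extra
```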
@@ -61,7 +61,7 @@ Install the `autogen-agentchat` package using pip:
 
 ```bash
 
-pip install 'autogen-agentchat==0.4.0.dev5'
+pip install 'autogen-agentchat==0.4.0.dev6'
 ```
 
 ## Install OpenAI for Model Client
@@ -70,7 +70,7 @@ To use the OpenAI and Azure OpenAI models, you need to install the following
 extensions:
 
 ```bash
-pip install 'autogen-ext[openai]==0.4.0.dev5'
+pip install 'autogen-ext[openai]==0.4.0.dev6'
 ```
 
 ## Install Docker for Code Execution
@@ -1,160 +1,160 @@
 {
 "cells": [
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "# Quickstart"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "```{include} warning.md\n",
 "\n",
 "```\n",
 "\n",
 ":::{note}\n",
 "For installation instructions, please refer to the [installation guide](./installation).\n",
 ":::\n",
 "\n",
 "In AutoGen AgentChat, you can build applications quickly using preset agents.\n",
 "To illustrate this, we will begin with creating a team of a single agent\n",
 "that can use tools and respond to messages.\n",
 "\n",
 "The following code uses the OpenAI model. If you haven't already, you need to\n",
 "install the following package and extension:"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "vscode": {
 "languageId": "shellscript"
 }
 },
 "outputs": [],
 "source": [
-"pip install 'autogen-agentchat==0.4.0.dev5' 'autogen-ext[openai]==0.4.0.dev5'"
+"pip install 'autogen-agentchat==0.4.0.dev6' 'autogen-ext[openai]==0.4.0.dev6'"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "To use Azure OpenAI models and AAD authentication,\n",
 "you can follow the instructions [here](./tutorial/models.ipynb#azure-openai)."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "---------- user ----------\n",
 "What is the weather in New York?\n",
 "---------- weather_agent ----------\n",
 "[FunctionCall(id='call_AhTZ2q3TNL8x0qs00e3wIZ7y', arguments='{\"city\":\"New York\"}', name='get_weather')]\n",
 "[Prompt tokens: 79, Completion tokens: 15]\n",
 "---------- weather_agent ----------\n",
 "[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_AhTZ2q3TNL8x0qs00e3wIZ7y')]\n",
 "---------- weather_agent ----------\n",
 "The weather in New York is currently 73 degrees and sunny.\n",
 "[Prompt tokens: 90, Completion tokens: 14]\n",
 "---------- weather_agent ----------\n",
 "TERMINATE\n",
 "[Prompt tokens: 137, Completion tokens: 4]\n",
 "---------- Summary ----------\n",
 "Number of messages: 5\n",
 "Finish reason: Text 'TERMINATE' mentioned\n",
 "Total prompt tokens: 306\n",
 "Total completion tokens: 33\n",
 "Duration: 1.43 seconds\n"
 ]
 }
 ],
 "source": [
 "from autogen_agentchat.agents import AssistantAgent\n",
 "from autogen_agentchat.task import Console, TextMentionTermination\n",
 "from autogen_agentchat.teams import RoundRobinGroupChat\n",
 "from autogen_ext.models import OpenAIChatCompletionClient\n",
 "\n",
 "\n",
 "# Define a tool\n",
 "async def get_weather(city: str) -> str:\n",
 " return f\"The weather in {city} is 73 degrees and Sunny.\"\n",
 "\n",
 "\n",
 "async def main() -> None:\n",
 " # Define an agent\n",
 " weather_agent = AssistantAgent(\n",
 " name=\"weather_agent\",\n",
 " model_client=OpenAIChatCompletionClient(\n",
 " model=\"gpt-4o-2024-08-06\",\n",
 " # api_key=\"YOUR_API_KEY\",\n",
 " ),\n",
 " tools=[get_weather],\n",
 " )\n",
 "\n",
 " # Define termination condition\n",
 " termination = TextMentionTermination(\"TERMINATE\")\n",
 "\n",
 " # Define a team\n",
 " agent_team = RoundRobinGroupChat([weather_agent], termination_condition=termination)\n",
 "\n",
 " # Run the team and stream messages to the console\n",
 " stream = agent_team.run_stream(task=\"What is the weather in New York?\")\n",
 " await Console(stream)\n",
 "\n",
 "\n",
 "# NOTE: if running this inside a Python script you'll need to use asyncio.run(main()).\n",
 "await main()"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "The code snippet above introduces two high level concepts in AgentChat: *Agent* and *Team*. An Agent helps us define what actions are taken when a message is received. Specifically, we use the {py:class}`~autogen_agentchat.agents.AssistantAgent` preset - an agent that can be given access to a model (e.g., LLM) and tools (functions) that it can then use to address tasks. A Team helps us define the rules for how agents interact with each other. In the {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` team, agents respond in a sequential round-robin fashion.\n",
 "In this case, we have a single agent, so the same agent is used for each round."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## What's Next?\n",
 "\n",
 "Now that you have a basic understanding of how to define an agent and a team, consider following the [tutorial](./tutorial/index) for a walkthrough on other features of AgentChat.\n",
 "\n"
 ]
 }
 ],
 "metadata": {
 "kernelspec": {
 "display_name": ".venv",
 "language": "python",
 "name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
 "version": 3
 },
 "file_extension": ".py",
 "mimetype": "text/x-python",
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.11.5"
 }
 },
 "nbformat": 4,
 "nbformat_minor": 2
 }
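The quickstart cell above ends with `await main()`, which only works inside a notebook; its NOTE points to `asyncio.run(main())` for scripts. A minimal sketch of the same example as a standalone script, using only the packages and calls shown in the notebook, with the entry-point wrapper as the sole addition:

```python
# Sketch of the quickstart as a plain Python script: identical agent/team setup,
# but the coroutine is driven by asyncio.run() instead of a notebook's top-level await.
import asyncio

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.task import Console, TextMentionTermination
from autogen_agentchat.teams import RoundRobinGroupChat
from autogen_ext.models import OpenAIChatCompletionClient


async def get_weather(city: str) -> str:
    return f"The weather in {city} is 73 degrees and Sunny."


async def main() -> None:
    weather_agent = AssistantAgent(
        name="weather_agent",
        model_client=OpenAIChatCompletionClient(model="gpt-4o-2024-08-06"),
        tools=[get_weather],
    )
    termination = TextMentionTermination("TERMINATE")
    agent_team = RoundRobinGroupChat([weather_agent], termination_condition=termination)
    stream = agent_team.run_stream(task="What is the weather in New York?")
    await Console(stream)


if __name__ == "__main__":
    asyncio.run(main())  # script entry point, replacing the notebook's `await main()`
```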
@@ -1,181 +1,181 @@
 {
 "cells": [
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "# Models\n",
 "\n",
 "In many cases, agents need access to model services such as OpenAI, Azure OpenAI, and local models.\n",
 "AgentChat utilizes model clients provided by the\n",
 "[`autogen-ext`](../../core-user-guide/framework/model-clients.ipynb) package."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## OpenAI\n",
 "\n",
 "To access OpenAI models, you need to install the `openai` extension to use the {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "vscode": {
 "languageId": "shellscript"
 }
 },
 "outputs": [],
 "source": [
-"pip install 'autogen-ext[openai]==0.4.0.dev5'"
+"pip install 'autogen-ext[openai]==0.4.0.dev6'"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "You will also need to obtain an [API key](https://platform.openai.com/account/api-keys) from OpenAI."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 10,
 "metadata": {},
 "outputs": [],
 "source": [
 "from autogen_ext.models import OpenAIChatCompletionClient\n",
 "\n",
 "opneai_model_client = OpenAIChatCompletionClient(\n",
 " model=\"gpt-4o-2024-08-06\",\n",
 " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY environment variable set.\n",
 ")"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "To test the model client, you can use the following code:"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 11,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "CreateResult(finish_reason='stop', content='The capital of France is Paris.', usage=RequestUsage(prompt_tokens=15, completion_tokens=7), cached=False, logprobs=None)\n"
 ]
 }
 ],
 "source": [
 "from autogen_core.components.models import UserMessage\n",
 "\n",
 "result = await opneai_model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
 "print(result)"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Azure OpenAI\n",
 "\n",
 "Install the `azure` and `openai` extensions to use the {py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {
 "vscode": {
 "languageId": "shellscript"
 }
 },
 "outputs": [],
 "source": [
-"pip install 'autogen-ext[openai,azure]==0.4.0.dev5'"
+"pip install 'autogen-ext[openai,azure]==0.4.0.dev6'"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint, api version, and model capabilities.\n",
 "For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.\n",
 "\n",
 "The following code snippet shows how to use AAD authentication.\n",
 "The identity used must be assigned the [Cognitive Services OpenAI User](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "from autogen_ext.models import AzureOpenAIChatCompletionClient\n",
 "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
 "\n",
 "# Create the token provider\n",
 "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n",
 "\n",
 "az_model_client = AzureOpenAIChatCompletionClient(\n",
 " model=\"{your-azure-deployment}\",\n",
 " api_version=\"2024-06-01\",\n",
 " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n",
 " azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n",
 " # api_key=\"sk-...\", # For key-based authentication.\n",
 " model_capabilities={\n",
 " \"vision\": True,\n",
 " \"function_calling\": True,\n",
 " \"json_output\": True,\n",
 " },\n",
 ")"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more info."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Local Models\n",
 "\n",
 "We are working on it. Stay tuned!"
 ]
 }
 ],
 "metadata": {
 "kernelspec": {
 "display_name": ".venv",
 "language": "python",
 "name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
 "version": 3
 },
 "file_extension": ".py",
 "mimetype": "text/x-python",
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
 "version": "3.11.5"
 }
 },
 "nbformat": 4,
 "nbformat_minor": 2
 }
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "autogen-core"
-version = "0.4.0.dev5"
+version = "0.4.0.dev6"
 license = {file = "LICENSE-CODE"}
 description = "Foundational interfaces and agent runtime implementation for AutoGen"
 readme = "README.md"
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "autogen-ext"
-version = "0.4.0.dev5"
+version = "0.4.0.dev6"
 license = {file = "LICENSE-CODE"}
 description = "AutoGen extensions library"
 readme = "README.md"
@@ -15,7 +15,7 @@ classifiers = [
 "Operating System :: OS Independent",
 ]
 dependencies = [
-"autogen-core==0.4.0.dev5",
+"autogen-core==0.4.0.dev6",
 ]
 
 
@@ -330,7 +330,7 @@ wheels = [
 
 [[package]]
 name = "autogen-agentchat"
-version = "0.4.0.dev5"
+version = "0.4.0.dev6"
 source = { editable = "packages/autogen-agentchat" }
 dependencies = [
 { name = "autogen-core" },
@@ -341,7 +341,7 @@ requires-dist = [{ name = "autogen-core", editable = "packages/autogen-core" }]
 
 [[package]]
 name = "autogen-core"
-version = "0.4.0.dev5"
+version = "0.4.0.dev6"
 source = { editable = "packages/autogen-core" }
 dependencies = [
 { name = "aiohttp" },
@@ -454,7 +454,7 @@ dev = [
 
 [[package]]
 name = "autogen-ext"
-version = "0.4.0.dev5"
+version = "0.4.0.dev6"
 source = { editable = "packages/autogen-ext" }
 dependencies = [
 { name = "autogen-core" },