{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "9a71fa36",
   "metadata": {
    "editable": true,
    "slideshow": {
     "slide_type": ""
    },
    "tags": []
   },
   "source": [
"# Groupchat with Llamaindex agents\n",
|
|
"\n",
|
|
"[Llamaindex agents](https://docs.llamaindex.ai/en/stable/optimizing/agentic_strategies/agentic_strategies/) have the ability to use planning strategies to answer user questions. They can be integrated in Autogen in easy ways\n",
|
|
"\n",
|
|
"## Requirements"
|
|
]
|
|
},
|
|
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c528cd6d",
   "metadata": {},
   "outputs": [],
   "source": [
    "! pip install pyautogen\n",
    "! pip install llama-index\n",
    "! pip install llama-index-tools-wikipedia\n",
    "! pip install llama-index-readers-wikipedia\n",
    "! pip install wikipedia"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "5ebd2397",
   "metadata": {
    "editable": true,
    "slideshow": {
     "slide_type": ""
    },
    "tags": []
   },
   "source": [
    "## Set your API Endpoint"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "dca301a4",
   "metadata": {
    "editable": true,
    "slideshow": {
     "slide_type": ""
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import autogen\n",
    "\n",
    "config_list = [{\"model\": \"gpt-3.5-turbo-0125\", \"api_key\": os.getenv(\"OPENAI_API_KEY\")}]"
   ]
  },
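  {
   "cell_type": "markdown",
   "id": "7f2a9c10",
   "metadata": {},
   "source": [
    "Alternatively, if you keep your model configurations in an `OAI_CONFIG_LIST` file or environment variable, you can load and filter them with `autogen.config_list_from_json`. The next cell is an optional sketch; it assumes such a config list exists in your environment, so it is left commented out."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3b8d4e21",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional alternative: load configs from an OAI_CONFIG_LIST file or env var.\n",
    "# Uncomment if you maintain such a config list; otherwise keep the config_list above.\n",
    "# config_list = autogen.config_list_from_json(\n",
    "#     \"OAI_CONFIG_LIST\",\n",
    "#     filter_dict={\"model\": [\"gpt-3.5-turbo-0125\"]},\n",
    "# )"
   ]
  },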
  {
   "cell_type": "markdown",
   "id": "76c11ea8",
   "metadata": {},
   "source": [
    "## Set up LlamaIndex"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "2d3d298e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from llama_index.core import Settings\n",
    "from llama_index.core.agent import ReActAgent\n",
    "from llama_index.embeddings.openai import OpenAIEmbedding\n",
    "from llama_index.llms.openai import OpenAI\n",
    "from llama_index.tools.wikipedia import WikipediaToolSpec\n",
    "\n",
    "llm = OpenAI(\n",
    "    model=\"gpt-3.5-turbo-0125\",\n",
    "    temperature=0.0,\n",
    "    api_key=os.environ.get(\"OPENAI_API_KEY\", \"\"),\n",
    ")\n",
    "\n",
    "embed_model = OpenAIEmbedding(\n",
    "    model=\"text-embedding-ada-002\",\n",
    "    api_key=os.environ.get(\"OPENAI_API_KEY\", \"\"),\n",
    ")\n",
    "\n",
    "Settings.llm = llm\n",
    "Settings.embed_model = embed_model\n",
    "\n",
    "# Create a ReAct agent that can use the Wikipedia tool\n",
    "wiki_spec = WikipediaToolSpec()\n",
    "# Get the Wikipedia search tool (the second tool in the spec's tool list)\n",
    "wikipedia_tool = wiki_spec.to_tool_list()[1]\n",
    "\n",
    "location_specialist = ReActAgent.from_tools(tools=[wikipedia_tool], llm=llm, max_iterations=10, verbose=True)"
   ]
  },
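  {
   "cell_type": "markdown",
   "id": "f3c1d5a2",
   "metadata": {},
   "source": [
    "Optionally, you can sanity-check the LlamaIndex agent on its own before wiring it into AutoGen. The cell below is a minimal sketch that queries the ReAct agent directly through its `chat` method; the question is just an illustrative placeholder."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d2e6b47",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check: ask the LlamaIndex ReAct agent a question directly\n",
    "# (illustrative query; any short factual question works)\n",
    "sanity_response = location_specialist.chat(\"Where is the Ghibli Museum located?\")\n",
    "print(sanity_response.response)"
   ]
  },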
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "2b9526e7",
   "metadata": {},
   "source": [
    "## Create agents\n",
    "\n",
    "In this example, we will create a LlamaIndex agent that answers questions by fetching data from Wikipedia, and a user proxy agent."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a10c9fe-1fbc-40c6-b655-5d2256864ce8",
   "metadata": {},
   "outputs": [],
   "source": [
    "from autogen.agentchat.contrib.llamaindex_conversable_agent import LLamaIndexConversableAgent\n",
    "\n",
    "llm_config = {\n",
    "    \"temperature\": 0,\n",
    "    \"config_list\": config_list,\n",
    "}\n",
    "\n",
    "trip_assistant = LLamaIndexConversableAgent(\n",
    "    \"trip_specialist\",\n",
    "    llama_index_agent=location_specialist,\n",
    "    system_message=\"You help customers find out more about the places they would like to visit. You can use external resources to provide more details as you engage with the customer.\",\n",
    "    description=\"This agent helps customers discover locations to visit, things to do, and other details about a location. It can use external resources to provide more details. This agent helps in finding attractions, history and all there is to know about a place.\",\n",
    ")\n",
    "\n",
    "user_proxy = autogen.UserProxyAgent(\n",
    "    name=\"Admin\",\n",
    "    human_input_mode=\"ALWAYS\",\n",
    "    code_execution_config=False,\n",
    ")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "id": "966c96a4-cc8a-4400-b8db-a21b7142e33c",
   "metadata": {},
   "source": [
    "Next, let's set up our group chat."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "354b4a8f-7a96-455b-9f17-cbc19d880462",
   "metadata": {},
   "outputs": [],
   "source": [
    "groupchat = autogen.GroupChat(\n",
    "    agents=[trip_assistant, user_proxy],\n",
    "    messages=[],\n",
    "    max_round=500,\n",
    "    speaker_selection_method=\"round_robin\",\n",
    "    enable_clear_history=True,\n",
    ")\n",
    "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5518947",
   "metadata": {},
   "outputs": [],
   "source": [
    "chat_result = user_proxy.initiate_chat(\n",
    "    manager,\n",
    "    message=\"\"\"\n",
    "What can I find in Tokyo related to Hayao Miyazaki and his movies, such as Spirited Away?\n",
    "\"\"\",\n",
    ")"
   ]
  },
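  {
   "cell_type": "markdown",
   "id": "b7e9c3f5",
   "metadata": {},
   "source": [
    "Once the conversation ends, the `ChatResult` returned by `initiate_chat` can be inspected programmatically. The cell below is an optional sketch that prints the number of exchanged messages, the summary, and the accumulated cost, using the standard `ChatResult` fields in `pyautogen`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4a8f1d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: inspect the ChatResult returned by initiate_chat\n",
    "print(f\"Messages exchanged: {len(chat_result.chat_history)}\")\n",
    "print(f\"Summary: {chat_result.summary}\")\n",
    "print(f\"Cost: {chat_result.cost}\")"
   ]
  }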
 ],
 "metadata": {
  "front_matter": {
   "description": "Integrate LlamaIndex agents with AutoGen.",
   "tags": [
    "react",
    "llama index",
    "software engineering"
   ]
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}