Merge branch 'main' into users/david/ollama-tools

commit cc5d24ee30
Author: Xiaoyun Zhang
Date: 2024-09-06 11:33:37 -07:00 (committed by GitHub)
34 changed files with 2342 additions and 1075 deletions


@@ -103,7 +103,7 @@ Answer only YES or NO.
"""
AGENT_NAME_PROMPT = """# Your task
-Suggest no more then {max_agents} experts with their name according to the following user requirement.
+Suggest no more than {max_agents} experts with their name according to the following user requirement.
## User requirement
{task}
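
For reference, a minimal sketch of how this template's placeholders are filled with Python's standard `str.format` (the template text comes from this hunk; the `max_agents` and `task` values are hypothetical):

```python
AGENT_NAME_PROMPT = """# Your task
Suggest no more than {max_agents} experts with their name according to the following user requirement.
## User requirement
{task}
"""

# Hypothetical values for illustration only.
print(AGENT_NAME_PROMPT.format(max_agents=3, task="Build a website for selling books."))
```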


@@ -0,0 +1,24 @@
from dataclasses import dataclass
from enum import Enum, auto
from typing import Optional
class DocumentType(Enum):
"""
Enum of supported document types.
"""
TEXT = auto()
HTML = auto()
PDF = auto()
@dataclass
class Document:
"""
A wrapper of an input document.
"""
doctype: DocumentType
data: Optional[object] = None
path_or_url: Optional[str] = ""
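
A minimal usage sketch of the new dataclass; the import path is an assumption based on the relative imports in this change, and the file paths are hypothetical:

```python
# Assumed module location; the diff only shows relative imports.
from autogen.agentchat.contrib.graph_rag.document import Document, DocumentType

docs = [
    Document(doctype=DocumentType.TEXT, path_or_url="input_files/the_matrix.txt"),
    Document(doctype=DocumentType.PDF, path_or_url="input_files/graph_rag_paper.pdf"),
]

for doc in docs:
    # `data` may alternatively carry an in-memory payload; `path_or_url` defaults to "".
    print(doc.doctype.name, doc.path_or_url)
```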


@@ -0,0 +1,51 @@
from dataclasses import dataclass, field
from typing import List, Optional, Protocol
from .document import Document
@dataclass
class GraphStoreQueryResult:
"""
A wrapper of graph store query results.
answer: human-readable answer to the question/query.
results: intermediate results of the question/query, e.g. node entities.
"""
answer: Optional[str] = None
results: list = field(default_factory=list)
class GraphQueryEngine(Protocol):
"""An abstract base class that represents a graph query engine on top of a underlying graph database.
This interface defines the basic methods for graph rag.
"""
def init_db(self, input_doc: List[Document] | None = None):
"""
This method initializes graph database with the input documents or records.
Usually, it takes the following steps,
1. connecting to a graph database.
2. extract graph nodes, edges based on input data, graph schema and etc.
3. build indexes etc.
Args:
input_doc: a list of input documents that are used to build the graph in database.
Returns: GraphStore
"""
pass
def add_records(self, new_records: List) -> bool:
"""
Add new records to the underlying database and add to the graph if required.
"""
pass
def query(self, question: str, n_results: int = 1, **kwargs) -> GraphStoreQueryResult:
"""
This method transform a string format question into database query and return the result.
"""
pass
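
Because `GraphQueryEngine` is a `Protocol`, any class with matching methods satisfies it structurally. Here is a hypothetical in-memory engine useful for tests (a sketch, not part of this change; the import path is assumed):

```python
from autogen.agentchat.contrib.graph_rag.graph_query_engine import GraphStoreQueryResult


class InMemoryQueryEngine:
    """A toy engine: keeps records in a list and answers by substring match."""

    def __init__(self):
        self.records = []

    def init_db(self, input_doc=None):
        self.records = list(input_doc or [])

    def add_records(self, new_records) -> bool:
        self.records.extend(new_records)
        return True

    def query(self, question, n_results=1, **kwargs):
        hits = [r for r in self.records if question.lower() in str(r).lower()]
        answer = str(hits[0]) if hits else "No match found."
        return GraphStoreQueryResult(answer=answer, results=hits[:n_results])
```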


@@ -0,0 +1,56 @@
from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability
from autogen.agentchat.conversable_agent import ConversableAgent
from .graph_query_engine import GraphQueryEngine
class GraphRagCapability(AgentCapability):
"""
A graph RAG capability uses a graph query engine to give a conversable agent the graph RAG ability.
An agent class with graph RAG capability could:
1. create a graph in the underlying database with input documents.
2. retrieve relevant information based on messages received by the agent.
3. generate answers from retrieved information and send messages back.
For example,
graph_query_engine = GraphQueryEngine(...)
graph_query_engine.init_db([Document(doc1), Document(doc2), ...])
graph_rag_agent = ConversableAgent(
name="graph_rag_agent",
max_consecutive_auto_reply=3,
...
)
graph_rag_capability = GraphRagCapability(graph_query_engine)
graph_rag_capability.add_to_agent(graph_rag_agent)
user_proxy = UserProxyAgent(
name="user_proxy",
code_execution_config=False,
is_termination_msg=lambda msg: "TERMINATE" in msg["content"],
human_input_mode="ALWAYS",
)
user_proxy.initiate_chat(graph_rag_agent, message="Name a few actors who've played in 'The Matrix'")
# ChatResult(
# chat_id=None,
# chat_history=[
# {'content': 'Name a few actors who've played in \'The Matrix\'', 'role': 'graph_rag_agent'},
# {'content': 'A few actors who have played in The Matrix are:
# - Keanu Reeves
# - Laurence Fishburne
# - Carrie-Anne Moss
# - Hugo Weaving',
# 'role': 'user_proxy'},
# ...)
"""
def __init__(self, query_engine: GraphQueryEngine):
"""
initialize graph rag capability with a graph query engine
"""
...
def add_to_agent(self, agent: ConversableAgent): ...
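
A hypothetical sketch of a concrete capability built on this interface; it assumes the standard `ConversableAgent.register_reply` hook, and the class and method names are illustrative, not part of this change:

```python
from autogen.agentchat.contrib.graph_rag.graph_query_engine import GraphQueryEngine
from autogen.agentchat.conversable_agent import ConversableAgent


class SimpleGraphRagCapability:
    """Illustrative GraphRagCapability implementation (a sketch only)."""

    def __init__(self, query_engine: GraphQueryEngine):
        self._query_engine = query_engine

    def add_to_agent(self, agent: ConversableAgent):
        # Answer from the graph store before the agent's default reply functions run.
        agent.register_reply([ConversableAgent, None], self._reply_with_graph_rag, position=0)

    def _reply_with_graph_rag(self, recipient, messages=None, sender=None, config=None):
        question = messages[-1].get("content", "") if messages else ""
        result = self._query_engine.query(question)
        # Returning (True, reply) stops lower-priority reply functions from running.
        return True, result.answer
```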


@@ -221,7 +221,12 @@ $functions"""
cmd = [py_executable, "-m", "pip", "install"] + required_packages
try:
result = subprocess.run(
-cmd, cwd=self._work_dir, capture_output=True, text=True, timeout=float(self._timeout)
+cmd,
+cwd=self._work_dir,
+capture_output=True,
+text=True,
+timeout=float(self._timeout),
+encoding="utf-8",
)
except subprocess.TimeoutExpired as e:
raise ValueError("Pip install timed out") from e
@@ -303,7 +308,13 @@ $functions"""
try:
result = subprocess.run(
-cmd, cwd=self._work_dir, capture_output=True, text=True, timeout=float(self._timeout), env=env
+cmd,
+cwd=self._work_dir,
+capture_output=True,
+text=True,
+timeout=float(self._timeout),
+env=env,
+encoding="utf-8",
)
except subprocess.TimeoutExpired:
logs_all += "\n" + TIMEOUT_MSG
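
The new `encoding="utf-8"` argument matters because `text=True` alone decodes the child process's output with the locale's preferred encoding (for example cp1252 on Windows), which can mangle or fail on UTF-8 output. A minimal sketch of the safer call, assuming the child emits UTF-8:

```python
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-c", "print('héllo ✓')"],
    capture_output=True,
    text=True,
    encoding="utf-8",  # decode explicitly instead of relying on the locale
)
print(result.stdout)
```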


@@ -314,7 +314,7 @@ def oai_messages_to_anthropic_messages(params: Dict[str, Any]) -> list[dict[str,
last_tool_result_index = -1
for message in params["messages"]:
if message["role"] == "system":
-params["system"] = message["content"]
+params["system"] = params.get("system", "") + (" " if "system" in params else "") + message["content"]
else:
# New messages will be added here, manage role alternations
expected_role = "user" if len(processed_messages) % 2 == 0 else "assistant"
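
A minimal sketch of the new behavior with hypothetical inputs: successive system messages are now concatenated with a space instead of the last one overwriting the others.

```python
params = {
    "messages": [
        {"role": "system", "content": "You are terse."},
        {"role": "system", "content": "Answer in French."},
        {"role": "user", "content": "Hi"},
    ]
}

for message in params["messages"]:
    if message["role"] == "system":
        params["system"] = params.get("system", "") + (" " if "system" in params else "") + message["content"]

print(params["system"])  # -> "You are terse. Answer in French."
```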


@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
-<VersionPrefix>0.1.0</VersionPrefix>
+<VersionPrefix>0.2.0</VersionPrefix>
<Authors>AutoGen</Authors>
<PackageProjectUrl>https://microsoft.github.io/autogen-for-net/</PackageProjectUrl>
<RepositoryUrl>https://github.com/microsoft/autogen</RepositoryUrl>


@@ -9,11 +9,10 @@ using FluentAssertions;
using Json.Schema;
using Json.Schema.Generation;
using OpenAI;
-using OpenAI.Chat;
namespace AutoGen.OpenAI.Sample;
-internal class Structural_Output
+public class Structural_Output
{
public static async Task RunAsync()
{
@@ -23,24 +22,25 @@ internal class Structural_Output
var schemaBuilder = new JsonSchemaBuilder().FromType<Person>();
var schema = schemaBuilder.Build();
-var personSchemaFormat = ChatResponseFormat.CreateJsonSchemaFormat(
-name: "Person",
-jsonSchema: BinaryData.FromObjectAsJson(schema),
-description: "Person schema");
var openAIClient = new OpenAIClient(apiKey);
var openAIClientAgent = new OpenAIChatAgent(
chatClient: openAIClient.GetChatClient(model),
name: "assistant",
-systemMessage: "You are a helpful assistant",
-responseFormat: personSchemaFormat) // structural output by passing schema to response format
+systemMessage: "You are a helpful assistant")
.RegisterMessageConnector()
.RegisterPrintMessage();
#endregion create_agent
#region chat_with_agent
-var reply = await openAIClientAgent.SendAsync("My name is John, I am 25 years old, and I live in Seattle. I like to play soccer and read books.");
+var prompt = new TextMessage(Role.User, """
+My name is John, I am 25 years old, and I live in Seattle. I like to play soccer and read books.
+""");
+var reply = await openAIClientAgent.GenerateReplyAsync(
+messages: [prompt],
+options: new GenerateReplyOptions
+{
+OutputSchema = schema,
+});
var person = JsonSerializer.Deserialize<Person>(reply.GetContent());
Console.WriteLine($"Name: {person.Name}");
@@ -60,11 +60,12 @@ internal class Structural_Output
person.City.Should().Be("Seattle");
person.Hobbies.Count.Should().Be(2);
}
-}
-#region person_class
-public class Person
-{
+#region person_class
+[Title("Person")]
+public class Person
+{
[JsonPropertyName("name")]
[Description("Name of the person")]
[Required]
@@ -86,5 +87,7 @@ public class Person
[JsonPropertyName("hobbies")]
[Description("Hobbies of the person")]
public List<string>? Hobbies { get; set; }
}
#endregion person_class
+}


@@ -4,13 +4,12 @@
using System.Text.Json;
using System.Text.Json.Serialization;
using AutoGen.Core;
-using AutoGen.OpenAI;
using AutoGen.OpenAI.Extension;
using FluentAssertions;
using OpenAI;
using OpenAI.Chat;
-namespace AutoGen.BasicSample;
+namespace AutoGen.OpenAI.Sample;
public class Use_Json_Mode
{
@@ -50,11 +49,11 @@ public class Use_Json_Mode
person.Age.Should().Be(25);
person.Address.Should().BeNullOrEmpty();
}
-}
-#region person_class
-public class Person
-{
+#region person_class
+public class Person
+{
[JsonPropertyName("name")]
public string Name { get; set; }
@@ -63,5 +62,7 @@ public class Person
[JsonPropertyName("address")]
public string Address { get; set; }
}
#endregion person_class
+}


@@ -5,6 +5,7 @@ using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
+using Json.Schema;
namespace AutoGen.Core;
@@ -42,6 +43,7 @@ public class GenerateReplyOptions
this.MaxToken = other.MaxToken;
this.StopSequence = other.StopSequence?.Select(s => s)?.ToArray();
this.Functions = other.Functions?.Select(f => f)?.ToArray();
+this.OutputSchema = other.OutputSchema;
}
public float? Temperature { get; set; }
@@ -51,4 +53,9 @@ public class GenerateReplyOptions
public string[]? StopSequence { get; set; }
public FunctionContract[]? Functions { get; set; }
+/// <summary>
+/// Structural schema for the output. This property only applies to certain LLMs.
+/// </summary>
+public JsonSchema? OutputSchema { get; set; }
}


@@ -335,7 +335,10 @@ public class OpenAIChatRequestMessageConnector : IMiddleware, IStreamingMiddlewa
var toolCall = message.ToolCalls.Select((tc, i) => new ChatCompletionsFunctionToolCall(tc.ToolCallId ?? $"{tc.FunctionName}_{i}", tc.FunctionName, tc.FunctionArguments));
var textContent = message.GetContent() ?? string.Empty;
-var chatRequestMessage = new ChatRequestAssistantMessage(textContent) { Name = message.From };
+// don't include the name field when it's a tool call message.
+// fix https://github.com/microsoft/autogen/issues/3437
+var chatRequestMessage = new ChatRequestAssistantMessage(textContent);
foreach (var tc in toolCall)
{
chatRequestMessage.ToolCalls.Add(tc);


@@ -10,6 +10,7 @@ using System.Threading.Tasks;
using AutoGen.OpenAI.Extension;
using global::OpenAI;
using global::OpenAI.Chat;
+using Json.Schema;
namespace AutoGen.OpenAI;
@@ -179,6 +180,14 @@ public class OpenAIChatAgent : IStreamingAgent
}
}
+if (options?.OutputSchema is not null)
+{
+option.ResponseFormat = ChatResponseFormat.CreateJsonSchemaFormat(
+name: options.OutputSchema.GetTitle() ?? throw new ArgumentException("Output schema must have a title"),
+jsonSchema: BinaryData.FromObjectAsJson(options.OutputSchema),
+description: options.OutputSchema.GetDescription());
+}
return option;
}


@@ -322,7 +322,10 @@ public class OpenAIChatRequestMessageConnector : IMiddleware, IStreamingMiddlewa
var toolCallParts = message.ToolCalls.Select((tc, i) => ChatToolCall.CreateFunctionToolCall(tc.ToolCallId ?? $"{tc.FunctionName}_{i}", tc.FunctionName, tc.FunctionArguments));
var textContent = message.GetContent() ?? null;
-var chatRequestMessage = new AssistantChatMessage(toolCallParts, textContent) { ParticipantName = message.From };
+// Don't set participant name for assistant when it is a tool call
+// fix https://github.com/microsoft/autogen/issues/3437
+var chatRequestMessage = new AssistantChatMessage(toolCallParts, textContent);
return [chatRequestMessage];
}


@@ -139,7 +139,7 @@
{
"Role": "assistant",
"Content": [],
-"Name": "assistant",
+"Name": null,
"TooCall": [
{
"Type": "Function",
@@ -184,7 +184,7 @@
{
"Role": "assistant",
"Content": [],
-"Name": "assistant",
+"Name": null,
"TooCall": [
{
"Type": "Function",
@@ -210,7 +210,7 @@
{
"Role": "assistant",
"Content": [],
-"Name": "assistant",
+"Name": null,
"TooCall": [
{
"Type": "Function",


@@ -12,6 +12,7 @@
</ItemGroup>
<ItemGroup>
+<ProjectReference Include="..\..\sample\AutoGen.OpenAI.Sample\AutoGen.OpenAI.Sample.csproj" />
<ProjectReference Include="..\..\src\AutoGen.OpenAI\AutoGen.OpenAI.csproj" />
<ProjectReference Include="..\..\src\AutoGen.SourceGenerator\AutoGen.SourceGenerator.csproj" OutputItemType="Analyzer" ReferenceOutputAssembly="false" />
<ProjectReference Include="..\AutoGen.Test.Share\AutoGen.Tests.Share.csproj" />


@@ -27,6 +27,12 @@ public partial class OpenAIChatAgentTest
return $"The weather in {location} is sunny.";
}
[Function]
public async Task<string> CalculateTaxAsync(string location, double income)
{
return $"[CalculateTax] The tax in {location} for income {income} is 1000.";
}
[ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")] [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
public async Task BasicConversationTestAsync() public async Task BasicConversationTestAsync()
{ {
@ -247,6 +253,64 @@ public partial class OpenAIChatAgentTest
} }
[ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
public async Task ItProduceValidContentAfterFunctionCall()
{
// https://github.com/microsoft/autogen/issues/3437
var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable.");
var openaiClient = CreateOpenAIClientFromAzureOpenAI();
var options = new ChatCompletionOptions()
{
Temperature = 0.7f,
MaxTokens = 1,
};
var agentName = "assistant";
var getWeatherToolCall = new ToolCall(this.GetWeatherAsyncFunctionContract.Name, "{\"location\":\"Seattle\"}");
var getWeatherToolCallResult = new ToolCall(this.GetWeatherAsyncFunctionContract.Name, "{\"location\":\"Seattle\"}", "The weather in Seattle is sunny.");
var getWeatherToolCallMessage = new ToolCallMessage([getWeatherToolCall], from: agentName);
var getWeatherToolCallResultMessage = new ToolCallResultMessage([getWeatherToolCallResult], from: agentName);
var getWeatherAggregateMessage = new ToolCallAggregateMessage(getWeatherToolCallMessage, getWeatherToolCallResultMessage, from: agentName);
var calculateTaxToolCall = new ToolCall(this.CalculateTaxAsyncFunctionContract.Name, "{\"location\":\"Seattle\",\"income\":1000}");
var calculateTaxToolCallResult = new ToolCall(this.CalculateTaxAsyncFunctionContract.Name, "{\"location\":\"Seattle\",\"income\":1000}", "The tax in Seattle for income 1000 is 1000.");
var calculateTaxToolCallMessage = new ToolCallMessage([calculateTaxToolCall], from: agentName);
var calculateTaxToolCallResultMessage = new ToolCallResultMessage([calculateTaxToolCallResult], from: agentName);
var calculateTaxAggregateMessage = new ToolCallAggregateMessage(calculateTaxToolCallMessage, calculateTaxToolCallResultMessage, from: agentName);
var chatHistory = new List<IMessage>()
{
new TextMessage(Role.User, "What's the weather in Seattle", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in Seattle is sunny, now check the tax in seattle", from: "admin"),
calculateTaxAggregateMessage,
new TextMessage(Role.User, "what's the weather in Paris", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in Paris is sunny, now check the tax in Paris", from: "admin"),
calculateTaxAggregateMessage,
new TextMessage(Role.User, "what's the weather in New York", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in New York is sunny, now check the tax in New York", from: "admin"),
calculateTaxAggregateMessage,
new TextMessage(Role.User, "what's the weather in London", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in London is sunny, now check the tax in London", from: "admin"),
};
var agent = new OpenAIChatAgent(
chatClient: openaiClient.GetChatClient(deployName),
name: "assistant",
options: options)
.RegisterMessageConnector();
var res = await agent.GenerateReplyAsync(chatHistory, new GenerateReplyOptions
{
MaxToken = 1024,
Functions = [this.GetWeatherAsyncFunctionContract, this.CalculateTaxAsyncFunctionContract],
});
}
private OpenAIClient CreateOpenAIClientFromAzureOpenAI()
{
var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new Exception("Please set AZURE_OPENAI_ENDPOINT environment variable.");


@@ -276,7 +276,10 @@ public class OpenAIMessageTests
var innerMessage = msgs.Last();
innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
var chatRequestMessage = (AssistantChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
-chatRequestMessage.ParticipantName.Should().Be("assistant");
+// when the message is a tool call message
+// the name field should not be set
+// please visit OpenAIChatRequestMessageConnector class for more information
+chatRequestMessage.ParticipantName.Should().BeNullOrEmpty();
chatRequestMessage.ToolCalls.Count().Should().Be(1);
chatRequestMessage.Content.First().Text.Should().Be("textContent");
chatRequestMessage.ToolCalls.First().Should().BeOfType<ChatToolCall>();
@@ -307,7 +310,10 @@ public class OpenAIMessageTests
innerMessage!.Should().BeOfType<MessageEnvelope<ChatMessage>>();
var chatRequestMessage = (AssistantChatMessage)((MessageEnvelope<ChatMessage>)innerMessage!).Content;
chatRequestMessage.Content.Should().BeNullOrEmpty();
-chatRequestMessage.ParticipantName.Should().Be("assistant");
+// when the message is a tool call message
+// the name field should not be set
+// please visit OpenAIChatRequestMessageConnector class for more information
+chatRequestMessage.ParticipantName.Should().BeNullOrEmpty();
chatRequestMessage.ToolCalls.Count().Should().Be(2);
for (int i = 0; i < chatRequestMessage.ToolCalls.Count(); i++)
{


@@ -0,0 +1,48 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// OpenAISampleTest.cs
using System;
using System.IO;
using System.Threading.Tasks;
using AutoGen.OpenAI.Sample;
using AutoGen.Tests;
using Xunit.Abstractions;
namespace AutoGen.OpenAI.Tests;
public class OpenAISampleTest
{
private readonly ITestOutputHelper _output;
public OpenAISampleTest(ITestOutputHelper output)
{
_output = output;
Console.SetOut(new ConsoleWriter(_output));
}
[ApiKeyFact("OPENAI_API_KEY")]
public async Task Structural_OutputAsync()
{
await Structural_Output.RunAsync();
}
[ApiKeyFact("OPENAI_API_KEY")]
public async Task Use_Json_ModeAsync()
{
await Use_Json_Mode.RunAsync();
}
public class ConsoleWriter : StringWriter
{
private ITestOutputHelper output;
public ConsoleWriter(ITestOutputHelper output)
{
this.output = output;
}
public override void WriteLine(string? m)
{
output.WriteLine(m);
}
}
}


@@ -81,7 +81,7 @@
{
"Role": "assistant",
"Content": "",
-"Name": "assistant",
+"Name": null,
"TooCall": [
{
"Type": "Function",
@@ -126,7 +126,7 @@
{
"Role": "assistant",
"Content": "",
-"Name": "assistant",
+"Name": null,
"TooCall": [
{
"Type": "Function",
@@ -152,7 +152,7 @@
{
"Role": "assistant",
"Content": "",
-"Name": "assistant",
+"Name": null,
"TooCall": [
{
"Type": "Function",


@@ -4,6 +4,7 @@
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
using AutoGen.OpenAI.V1.Extension;
@@ -45,7 +46,11 @@ namespace AutoGen.OpenAI.V1.Tests
_output.WriteLine($"agent name: {agent.Name}");
foreach (var message in messages)
{
-_output.WriteLine(message.FormatMessage());
+if (message is IMessage<object> envelope)
+{
+var json = JsonSerializer.Serialize(envelope.Content, new JsonSerializerOptions { WriteIndented = true });
+_output.WriteLine(json);
+}
}
throw;
@@ -149,9 +154,9 @@ You create math question and ask student to answer it.
Then you check if the answer is correct.
If the answer is wrong, you ask student to fix it",
modelName: model)
-.RegisterMessageConnector()
-.RegisterStreamingMiddleware(functionCallMiddleware)
-.RegisterMiddleware(Print);
+.RegisterMiddleware(Print)
+.RegisterMiddleware(new OpenAIChatRequestMessageConnector())
+.RegisterMiddleware(functionCallMiddleware);
return teacher;
}


@@ -22,7 +22,13 @@ public partial class OpenAIChatAgentTest
[Function]
public async Task<string> GetWeatherAsync(string location)
{
-return $"The weather in {location} is sunny.";
+return $"[GetWeather] The weather in {location} is sunny.";
+}
+
+[Function]
+public async Task<string> CalculateTaxAsync(string location, double income)
+{
+return $"[CalculateTax] The tax in {location} for income {income} is 1000.";
}
[ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")] [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
@ -270,6 +276,64 @@ public partial class OpenAIChatAgentTest
action.Should().ThrowExactly<ArgumentException>().WithMessage("Messages should not be provided in options"); action.Should().ThrowExactly<ArgumentException>().WithMessage("Messages should not be provided in options");
} }
[ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")]
public async Task ItProduceValidContentAfterFunctionCall()
{
// https://github.com/microsoft/autogen/issues/3437
var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable.");
var openaiClient = CreateOpenAIClientFromAzureOpenAI();
var options = new ChatCompletionsOptions(deployName, [])
{
Temperature = 0.7f,
MaxTokens = 1,
};
var agentName = "assistant";
var getWeatherToolCall = new ToolCall(this.GetWeatherAsyncFunctionContract.Name, "{\"location\":\"Seattle\"}");
var getWeatherToolCallResult = new ToolCall(this.GetWeatherAsyncFunctionContract.Name, "{\"location\":\"Seattle\"}", "The weather in Seattle is sunny.");
var getWeatherToolCallMessage = new ToolCallMessage([getWeatherToolCall], from: agentName);
var getWeatherToolCallResultMessage = new ToolCallResultMessage([getWeatherToolCallResult], from: agentName);
var getWeatherAggregateMessage = new ToolCallAggregateMessage(getWeatherToolCallMessage, getWeatherToolCallResultMessage, from: agentName);
var calculateTaxToolCall = new ToolCall(this.CalculateTaxAsyncFunctionContract.Name, "{\"location\":\"Seattle\",\"income\":1000}");
var calculateTaxToolCallResult = new ToolCall(this.CalculateTaxAsyncFunctionContract.Name, "{\"location\":\"Seattle\",\"income\":1000}", "The tax in Seattle for income 1000 is 1000.");
var calculateTaxToolCallMessage = new ToolCallMessage([calculateTaxToolCall], from: agentName);
var calculateTaxToolCallResultMessage = new ToolCallResultMessage([calculateTaxToolCallResult], from: agentName);
var calculateTaxAggregateMessage = new ToolCallAggregateMessage(calculateTaxToolCallMessage, calculateTaxToolCallResultMessage, from: agentName);
var chatHistory = new List<IMessage>()
{
new TextMessage(Role.User, "What's the weather in Seattle", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in Seattle is sunny, now check the tax in seattle", from: "admin"),
calculateTaxAggregateMessage,
new TextMessage(Role.User, "what's the weather in Paris", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in Paris is sunny, now check the tax in Paris", from: "admin"),
calculateTaxAggregateMessage,
new TextMessage(Role.User, "what's the weather in New York", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in New York is sunny, now check the tax in New York", from: "admin"),
calculateTaxAggregateMessage,
new TextMessage(Role.User, "what's the weather in London", from: "user"),
getWeatherAggregateMessage,
new TextMessage(Role.User, "The weather in London is sunny, now check the tax in London", from: "admin"),
};
var agent = new OpenAIChatAgent(
openAIClient: openaiClient,
name: "assistant",
options: options)
.RegisterMessageConnector();
var res = await agent.GenerateReplyAsync(chatHistory, new GenerateReplyOptions
{
MaxToken = 1024,
Functions = [this.GetWeatherAsyncFunctionContract, this.CalculateTaxAsyncFunctionContract],
});
}
private OpenAIClient CreateOpenAIClientFromAzureOpenAI()
{
var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new Exception("Please set AZURE_OPENAI_ENDPOINT environment variable.");


@@ -278,7 +278,10 @@ public class OpenAIMessageTests
var innerMessage = msgs.Last();
innerMessage!.Should().BeOfType<MessageEnvelope<ChatRequestMessage>>();
var chatRequestMessage = (ChatRequestAssistantMessage)((MessageEnvelope<ChatRequestMessage>)innerMessage!).Content;
-chatRequestMessage.Name.Should().Be("assistant");
+// when the message is a tool call message
+// the name field should not be set
+// please visit OpenAIChatRequestMessageConnector class for more information
+chatRequestMessage.Name.Should().BeNullOrEmpty();
chatRequestMessage.ToolCalls.Count().Should().Be(1);
chatRequestMessage.Content.Should().Be("textContent");
chatRequestMessage.ToolCalls.First().Should().BeOfType<ChatCompletionsFunctionToolCall>();
@@ -309,7 +312,11 @@ public class OpenAIMessageTests
innerMessage!.Should().BeOfType<MessageEnvelope<ChatRequestMessage>>();
var chatRequestMessage = (ChatRequestAssistantMessage)((MessageEnvelope<ChatRequestMessage>)innerMessage!).Content;
chatRequestMessage.Content.Should().BeNullOrEmpty();
-chatRequestMessage.Name.Should().Be("assistant");
+
+// when the message is a tool call message
+// the name field should not be set
+// please visit OpenAIChatRequestMessageConnector class for more information
+chatRequestMessage.Name.Should().BeNullOrEmpty();
chatRequestMessage.ToolCalls.Count().Should().Be(2);
for (int i = 0; i < chatRequestMessage.ToolCalls.Count(); i++)
{


@@ -0,0 +1,48 @@
# Release Notes for AutoGen.Net v0.2.0 🚀
## New Features 🌟
- **OpenAI Structural Format Output**: Added support for structural output format in the OpenAI integration. You can check out the example [here](https://github.com/microsoft/autogen/blob/main/dotnet/sample/AutoGen.OpenAI.Sample/Structural_Output.cs) ([#3482](https://github.com/microsoft/autogen/issues/3482)).
- **Structural Output Configuration**: Introduced a property for overriding the structural output schema when generating replies with `GenerateReplyOptions` ([#3436](https://github.com/microsoft/autogen/issues/3436)).
## Bug Fixes 🐛
- **Fixed Error Code 500**: Resolved an issue where an error occurred when the message history contained multiple different tool calls with the `name` field ([#3437](https://github.com/microsoft/autogen/issues/3437)).
## Improvements 🔧
- **Leverage OpenAI v2.0 in the AutoGen.OpenAI package**: The `AutoGen.OpenAI` package now uses OpenAI v2.0, providing improved functionality and performance. Meanwhile, the original package remains available as `AutoGen.OpenAI.V1` for users who prefer to keep using the `Azure.AI.OpenAI` v1 package in their projects ([#3193](https://github.com/microsoft/autogen/issues/3193)).
- **Deprecation of GPTAgent**: `GPTAgent` has been deprecated in favor of `OpenAIChatAgent` and `OpenAIMessageConnector` ([#3404](https://github.com/microsoft/autogen/issues/3404)).
## Documentation 📚
- **Tool Call Instructions**: Added detailed documentation on using tool calls with `ollama` and `OpenAIChatAgent` ([#3248](https://github.com/microsoft/autogen/issues/3248)).
### Migration Guides 🔄
#### For the Deprecation of `GPTAgent` ([#3404](https://github.com/microsoft/autogen/issues/3404)):
**Before:**
```csharp
var agent = new GPTAgent(...);
```
**After:**
```csharp
var agent = new OpenAIChatAgent(...)
.RegisterMessageConnector();
```
#### For Using Azure.AI.OpenAI v2.0 ([#3193](https://github.com/microsoft/autogen/issues/3193)):
**Previous way of creating `OpenAIChatAgent`:**
```csharp
var openAIClient = new OpenAIClient(apiKey);
var openAIClientAgent = new OpenAIChatAgent(
openAIClient: openAIClient,
model: "gpt-4o-mini",
// Other parameters...
);
```
**New way of creating `OpenAIChatAgent`:**
```csharp
var openAIClient = new OpenAIClient(apiKey);
var openAIClientAgent = new OpenAIChatAgent(
chatClient: openAIClient.GetChatClient("gpt-4o-mini"),
// Other parameters...
);
```


@@ -1,3 +1,6 @@
+- name: 0.2.0
+  href: 0.2.0.md
- name: 0.1.0
  href: 0.1.0.md


@@ -0,0 +1,353 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "A3guIYLG6PnX"
},
"source": [
"# Agent with memory using Mem0"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "di1opljX6sQT"
},
"source": [
"This notebook demonstrates an intelligent customer service chatbot system that combines:\n",
"\n",
"- PyAutoGen for conversational agents\n",
"- Mem0 for memory management\n",
"\n",
"[Mem0](https://www.mem0.ai/) provides a smart, self-improving memory layer for Large Language Models (LLMs), enabling developers to create personalized AI experiences that evolve with each user interaction. Refer [docs](https://docs.mem0.ai/overview) for more information.\n",
"\n",
"\n",
"Mem0 uses a hybrid database approach, combining vector, key-value, and graph databases to efficiently store and retrieve different types of information. It associates memories with unique identifiers, extracts relevant facts and preferences when storing, and uses a sophisticated retrieval process that considers relevance, importance, and recency.\n",
"\n",
"Key features of Mem0 include:\n",
"1. Comprehensive Memory Management: Easily manage long-term, short-term, semantic, and episodic memories for individual users, agents, and sessions through robust APIs.\n",
"2. Self-Improving Memory: An adaptive system that continuously learns from user interactions, refining its understanding over time.\n",
"3. Cross-Platform Consistency: Ensures a unified user experience across various AI platforms and applications.\n",
"4. Centralized Memory Control: Simplifies storing, updating, and deleting memories.\n",
"\n",
"This approach allows for maintaining context across sessions, adaptive personalization, and dynamic updates, making it more powerful than traditional Retrieval-Augmented Generation (RAG) approaches for creating context-aware AI applications.\n",
"\n",
"\n",
"**The implementation showcases how to initialize agents, manage conversation memory, and facilitate multi-agent conversations for enhanced problem-solving in customer support scenarios.**"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "EAKSBJcLAf0h"
},
"source": [
"## Requirements\n",
"\n",
"````{=mdx}\n",
":::info Requirements\n",
"Some extra dependencies are needed for this notebook, which can be installed via pip:\n",
"\n",
"```bash\n",
"pip install pyautogen mem0ai\n",
"```\n",
"\n",
"For more information, please refer to the [installation guide](/docs/installation/).\n",
":::\n",
"````"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "TA1-sGxD9-zZ"
},
"source": [
"## Get API Keys\n",
"\n",
"Please get `MEM0_API_KEY` from [Mem0 Platform](https://app.mem0.ai/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "4nQWyJ-n9qOB",
"outputId": "de897f4f-32fd-4359-f021-c3510467b69c"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"from mem0 import MemoryClient\n",
"\n",
"from autogen import ConversableAgent\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"your_api_key\"\n",
"os.environ[\"MEM0_API_KEY\"] = \"your_api_key\""
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6GAZAkGmAjsT"
},
"source": [
"## Initialize Agent and Memory\n",
"\n",
"The conversational agent is set up using the 'gpt-4o' model and a mem0 client. We'll utilize the client's methods for storing and accessing memories.\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"id": "xWSEHMKT9qLz"
},
"outputs": [],
"source": [
"agent = ConversableAgent(\n",
" \"chatbot\",\n",
" llm_config={\"config_list\": [{\"model\": \"gpt-4o\", \"api_key\": os.environ.get(\"OPENAI_API_KEY\")}]},\n",
" code_execution_config=False,\n",
" function_map=None,\n",
" human_input_mode=\"NEVER\",\n",
")\n",
"\n",
"memory = MemoryClient()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "c7vGQoMg_KhS"
},
"source": [
"Initialize a conversation history for a Best Buy customer service chatbot. It contains a list of message exchanges between the user and the assistant, structured as dictionaries with 'role' and 'content' keys. The entire conversation is then stored in memory using the `memory.add()` method, associated with the identifier \"customer_service_bot\"."
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "b6ghSobs-mYo",
"outputId": "18a6e657-49ba-41cd-8e51-2e76cf668967"
},
"outputs": [
{
"data": {
"text/plain": [
"{'message': 'ok'}"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation = [\n",
" {\n",
" \"role\": \"assistant\",\n",
" \"content\": \"Hi, I'm Best Buy's chatbot!\\n\\nThanks for being a My Best Buy TotalTM member.\\n\\nWhat can I help you with?\",\n",
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": 'Seeing horizontal lines on our tv. TV model: Sony - 77\" Class BRAVIA XR A80K OLED 4K UHD Smart Google TV',\n",
" },\n",
" {\n",
" \"role\": \"assistant\",\n",
" \"content\": \"Thanks for being a My Best Buy Total™ member. I can connect you to an expert immediately - just one perk of your membership!\\n\\nSelect the button below when you're ready to chat.\",\n",
" },\n",
" {\n",
" \"role\": \"assistant\",\n",
" \"content\": \"Good evening, thank you for choosing Best Buy, Fnu. My name is Lovely. I hope you are doing well. I'm sorry to hear that you're seeing horizontal lines on your TV.\\n\\nI'm absolutely committed to exploring all possible ways to assist you to fix this issue.\\n\\nTo ensure that we are on the right account, may I please have your email address registered with your Best Buy account?\",\n",
" },\n",
" {\"role\": \"user\", \"content\": \"dd@gmail.com\"},\n",
" {\n",
" \"role\": \"assistant\",\n",
" \"content\": \"Perfect! Thank you for providing all the details, surely you have made my job easier by doing this. I really appreciate it.\\n\\nI also want to take a moment to express our heartfelt appreciation for your trust and loyalty. Thank you for being an amazing customer of BestBuy Total.\\n\\nCould you please help me with the order number or product's details to check it quickly?\\n\\nSamsung - 49\\\" Odyssey OLED G9 (G95SC) DQHD 240Hz 0.03ms G-Sync Compatible Curved Smart Gaming Monitor - Silver - just to confirm this is the item, right?\",\n",
" },\n",
" {\"role\": \"user\", \"content\": \"Order number: 112217629\"},\n",
" {\n",
" \"role\": \"assistant\",\n",
" \"content\": \"Superb! Thank you for confirmation.\\n\\nThank you for your patience. After exploring all possible solutions, I can help you to arrange a home repair appointment for your device. Our Geek Squad experts will visit your home to inspect and fix your device.\\n\\nIt's great that you have a protection plan - rest assured, we've got your back! As a valued Total member, you can avail this service at a minimal service fee. This fee, applicable to all repairs, covers the cost of diagnosing the issue and any small parts needed for the repair. It's part of our 24-month free protection plan.\\n\\nPlease click here to review the service fee and plan coverage details -\\n\\nhttps://www.bestbuy.com/site/best-buy-membership/best-buy-protection/pcmcat1608643232014.c?id=pcmcat1608643232014#jl-servicefees\\n\\nFnu - just to confirm shall I proceed to schedule the appointment?\",\n",
" },\n",
" {\"role\": \"user\", \"content\": \"Yes please\"},\n",
" {\"role\": \"assistant\", \"content\": \"When should I schedule the appointment?\"},\n",
" {\"role\": \"user\", \"content\": \"Schedule it for tomorrow please\"},\n",
"]\n",
"\n",
"memory.add(messages=conversation, user_id=\"customer_service_bot\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "hlxxncxkAvck"
},
"source": [
"## Agent Inference\n",
"\n",
"We ask a question to the agent, utilizing mem0 to retrieve relevant memories. The agent then formulates a response based on both the question and the retrieved contextual information."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wYdDkuKc-Xc8",
"outputId": "b50f55d3-a280-4c0d-82e5-43fc0589109b"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sure, your order number is 112217629.\n"
]
}
],
"source": [
"data = \"I forgot the order numnber, can you quickly tell me?\"\n",
"\n",
"relevant_memories = memory.search(data, user_id=\"customer_service_bot\")\n",
"flatten_relevant_memories = \"\\n\".join([m[\"memory\"] for m in relevant_memories])\n",
"\n",
"prompt = f\"\"\"Answer the user question considering the memories. Keep answers clear and concise.\n",
"Memories:\n",
"{flatten_relevant_memories}\n",
"\\n\\n\n",
"Question: {data}\n",
"\"\"\"\n",
"\n",
"reply = agent.generate_reply(messages=[{\"content\": prompt, \"role\": \"user\"}])\n",
"print(reply)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "sryX0gfdBGGD"
},
"source": [
"## Multi Agent Conversation\n",
"\n",
"Initialize two AI agents: a \"manager\" for resolving customer issues and a \"customer_bot\" for gathering information on customer problems, both using GPT-4. It then retrieves relevant memories for a given question, combining them with the question into a prompt. This prompt can be used by either the manager or customer_bot to generate a contextually informed response."
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"id": "Vq5BFL2l-XZl"
},
"outputs": [],
"source": [
"manager = ConversableAgent(\n",
" \"manager\",\n",
" system_message=\"You are a manager who helps in resolving customer issues.\",\n",
" llm_config={\"config_list\": [{\"model\": \"gpt-4\", \"temperature\": 0, \"api_key\": os.environ.get(\"OPENAI_API_KEY\")}]},\n",
" human_input_mode=\"NEVER\",\n",
")\n",
"\n",
"customer_bot = ConversableAgent(\n",
" \"customer_bot\",\n",
" system_message=\"You are a customer service bot who gathers information on issues customers are facing. Keep answers clear and concise.\",\n",
" llm_config={\"config_list\": [{\"model\": \"gpt-4\", \"temperature\": 0, \"api_key\": os.environ.get(\"OPENAI_API_KEY\")}]},\n",
" human_input_mode=\"NEVER\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"id": "iX4ehmc6Fbib"
},
"outputs": [],
"source": [
"data = \"When is the appointment?\"\n",
"\n",
"relevant_memories = memory.search(data, user_id=\"customer_service_bot\")\n",
"flatten_relevant_memories = \"\\n\".join([m[\"memory\"] for m in relevant_memories])\n",
"\n",
"prompt = f\"\"\"\n",
"Context:\n",
"{flatten_relevant_memories}\n",
"\\n\\n\n",
"Question: {data}\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "D3paRZWvCIzt",
"outputId": "15eadb7c-5973-44f1-de43-5e6cdebe88de"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"manager (to customer_bot):\n",
"\n",
"\n",
"Context:\n",
"Scheduled an appointment for a home repair for tomorrow\n",
"Order number is 112217629\n",
"TV model is Sony - 77\" Class BRAVIA XR A80K OLED 4K UHD Smart Google TV\n",
"User's email address is dd@gmail.com\n",
"\n",
"\n",
"\n",
"Question: When is the appointment?\n",
"\n",
"\n",
"--------------------------------------------------------------------------------\n",
"customer_bot (to manager):\n",
"\n",
"The appointment is scheduled for tomorrow.\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
}
],
"source": [
"result = manager.send(prompt, customer_bot, request_reply=True)"
]
}
],
"metadata": {
"front_matter": {
"description": "Use Mem0 to create agents with memory.",
"tags": [
"memory"
]
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}


@@ -19,18 +19,16 @@
"[Microsoft Fabric](https://learn.microsoft.com/en-us/fabric/get-started/microsoft-fabric-overview) is an all-in-one analytics solution for enterprises that covers everything from data movement to data science, Real-Time Analytics, and business intelligence. It offers a comprehensive suite of services, including data lake, data engineering, and data integration, all in one place. Its pre-built AI models include GPT-x models such as `gpt-4o`, `gpt-4-turbo`, `gpt-4`, `gpt-4-8k`, `gpt-4-32k`, `gpt-35-turbo`, `gpt-35-turbo-16k` and `gpt-35-turbo-instruct`, etc. It's important to note that the Azure Open AI service is not supported on trial SKUs and only paid SKUs (F64 or higher, or P1 or higher) are supported.\n",
"\n",
"In this notebook, we demonstrate several examples:\n",
+"- 0. How to access pre-built LLM endpoints with AutoGen in Microsoft Fabric.\n",
"- 1. How to use `AssistantAgent` and `UserProxyAgent` to write code and execute the code.\n",
"- 2. How to use `AssistantAgent` and `RetrieveUserProxyAgent` to do Retrieval Augmented Generation (RAG) for QA and Code Generation.\n",
"- 3. How to use `MultimodalConversableAgent` to chat with images.\n",
"\n",
-"### Requirements\n",
+"#### Requirements\n",
"\n",
-"AutoGen requires `Python>=3.8`. To run this notebook example, please install:\n",
-"```bash\n",
-"pip install \"pyautogen[retrievechat,lmm]>=0.2.28\"\n",
-"```\n",
+"AutoGen requires `Python>=3.8`.\n",
"\n",
-"Also, this notebook depends on Microsoft Fabric pre-built LLM endpoints. Running it elsewhere may encounter errors."
+"Also, this notebook depends on Microsoft Fabric pre-built LLM endpoints and Fabric runtime 1.2+. Running it elsewhere may encounter errors."
]
},
{
@@ -44,22 +42,12 @@
}
},
"source": [
-"### Install AutoGen"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"id": "2",
-"metadata": {},
-"outputs": [],
-"source": [
-"%pip install \"pyautogen[retrievechat,lmm]>=0.2.28\" -q"
+"### Example 0"
]
},
{
"cell_type": "markdown",
-"id": "3",
+"id": "2",
"metadata": {
"nteract": {
"transient": {
@@ -68,7 +56,32 @@
}
},
"source": [
-"### Set up config_list and llm_config"
+"#### Work with openai<1\n",
+"\n",
+"AutoGen can work with openai<1 in Microsoft Fabric. To access pre-built LLM endpoints with AutoGen, you can follow the example below.\n",
+"\n",
+"This example can run in Fabric runtime 1.2+."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "3",
+"metadata": {
+"jupyter": {
+"outputs_hidden": false,
+"source_hidden": false
+},
+"nteract": {
+"transient": {
+"deleting": false
+}
+}
+},
+"outputs": [],
+"source": [
+"# pyautogen<=0.1.14 supports openai<1\n",
+"%pip install \"pyautogen==0.1.14\" \"openai==0.28.1\" -q"
]
},
{
@@ -86,26 +99,172 @@
}
}
},
"outputs": [],
"source": [
"config_list = [\n",
" {\n",
" \"model\": \"gpt-4o\",\n",
" },\n",
"]\n",
"\n",
"# Set temperature, timeout and other LLM configurations\n",
"llm_config = {\n",
" \"config_list\": config_list,\n",
" \"temperature\": 0,\n",
" \"timeout\": 600,\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5",
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"application/vnd.livy.statement-meta+json": { "application/vnd.livy.statement-meta+json": {
"execution_finish_time": "2024-06-07T15:24:20.5752101Z", "execution_finish_time": "2024-09-04T11:10:49.8449248Z",
"execution_start_time": "2024-06-07T15:24:03.7868628Z", "execution_start_time": "2024-09-04T11:09:05.6935167Z",
"livy_statement_state": "available", "livy_statement_state": "available",
"parent_msg_id": "bf8925aa-a2a2-4686-9388-3ec1eb12c5d7", "normalized_state": "finished",
"queued_time": "2024-06-07T15:23:08.5880731Z", "parent_msg_id": "5dacfdd5-af6c-4acd-9ece-60f9e0758174",
"session_id": "1d5e9aec-2019-408c-a19a-5db9fb175ae2", "queued_time": "2024-09-04T11:08:29.1314923Z",
"session_id": "d0eed0e3-107e-4be2-8da7-7d6026d85e62",
"session_start_time": null, "session_start_time": null,
"spark_pool": null, "spark_pool": null,
"state": "finished", "state": "finished",
"statement_id": 9, "statement_id": 11,
"statement_ids": [ "statement_ids": [
9 11
] ]
}, },
"text/plain": [ "text/plain": [
"StatementMeta(, 1d5e9aec-2019-408c-a19a-5db9fb175ae2, 9, Finished, Available)" "StatementMeta(, d0eed0e3-107e-4be2-8da7-7d6026d85e62, 11, Finished, Available, Finished)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-09-04 11:10:05.592463: I tensorflow/core/util/port.cc:113] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
"2024-09-04 11:10:11.425719: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
"To enable the following instructions: SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33muser\u001b[0m (to gpt-4o):\n",
"\n",
"Tell me a quick joke.\n",
"\n",
"--------------------------------------------------------------------------------\n",
"\u001b[33mgpt-4o\u001b[0m (to user):\n",
"\n",
"Why don't scientists trust atoms?\n",
"\n",
"Because they make up everything!\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
}
],
"source": [
"import autogen\n",
"\n",
"agent = autogen.agentchat.ConversableAgent(\n",
" name=config_list[0][\"model\"], llm_config=llm_config, max_consecutive_auto_reply=1, human_input_mode=\"NEVER\"\n",
")\n",
"userproxy = autogen.agentchat.ConversableAgent(\n",
" name=\"user\",\n",
" max_consecutive_auto_reply=0,\n",
" llm_config=False,\n",
" default_auto_reply=\"TERMINATE\",\n",
" human_input_mode=\"NEVER\",\n",
")\n",
"userproxy.initiate_chat(recipient=agent, message=\"Tell me a quick joke.\")"
]
},
{
"cell_type": "markdown",
"id": "6",
"metadata": {
"nteract": {
"transient": {
"deleting": false
}
}
},
"source": [
"#### Work with openai>=1\n",
"\n",
"AutoGen can work with openai>=1 in Microsoft Fabric. To access pre-built LLM endpoints with AutoGen, you can follow below example.\n",
"\n",
"This example and below examples can only run in Fabric runtime 1.3+."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7",
"metadata": {},
"outputs": [],
"source": [
"# pyautogen>0.1.14 supports openai>=1\n",
"%pip install \"pyautogen>0.2\" \"openai>1\" -q"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8",
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [
{
"data": {
"application/vnd.livy.statement-meta+json": {
"execution_finish_time": "2024-09-04T11:21:01.4134396Z",
"execution_start_time": "2024-09-04T11:21:01.0206532Z",
"livy_statement_state": "available",
"normalized_state": "finished",
"parent_msg_id": "b01282de-e79f-4a3e-9205-73fa8ab87599",
"queued_time": "2024-09-04T11:21:00.5084Z",
"session_id": "d0eed0e3-107e-4be2-8da7-7d6026d85e62",
"session_start_time": null,
"spark_pool": null,
"state": "finished",
"statement_id": 38,
"statement_ids": [
38
]
},
"text/plain": [
"StatementMeta(, d0eed0e3-107e-4be2-8da7-7d6026d85e62, 38, Finished, Available, Finished)"
] ]
}, },
"metadata": {}, "metadata": {},
@@ -113,42 +272,123 @@
}
],
"source": [
-"from synapse.ml.mlflow import get_mlflow_env_config\n",
+"import types\n",
"\n",
+"import httpx\n",
+"from synapse.ml.fabric.credentials import get_openai_httpx_sync_client\n",
"\n",
-"def get_config_list():\n",
-" mlflow_env_configs = get_mlflow_env_config()\n",
-" access_token = mlflow_env_configs.driver_aad_token\n",
-" prebuilt_AI_base_url = mlflow_env_configs.workload_endpoint + \"cognitive/openai/\"\n",
+"import autogen\n",
"\n",
-" config_list = [\n",
+"http_client = get_openai_httpx_sync_client() # http_client is needed for openai>1\n",
+"http_client.__deepcopy__ = types.MethodType(\n",
+" lambda self, memo: self, http_client\n",
+") # https://microsoft.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n",
+"\n",
+"config_list = [\n",
" {\n",
" \"model\": \"gpt-4o\",\n",
-" \"api_key\": access_token,\n",
-" \"base_url\": prebuilt_AI_base_url,\n",
-" \"api_type\": \"azure\",\n",
+" \"http_client\": http_client,\n",
" \"api_version\": \"2024-02-01\",\n",
+" \"api_type\": \"azure\",\n",
" },\n",
-" ]\n",
+"]\n",
"\n",
-" # Set temperature, timeout and other LLM configurations\n",
-" llm_config = {\n",
+"# Set temperature, timeout and other LLM configurations\n",
+"llm_config = {\n",
" \"config_list\": config_list,\n",
" \"temperature\": 0,\n",
-" \"timeout\": 600,\n",
-" }\n",
-" return config_list, llm_config\n",
+"}"
+]
+},
{
"cell_type": "code",
"execution_count": null,
"id": "9",
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [
{
"data": {
"application/vnd.livy.statement-meta+json": {
"execution_finish_time": "2024-09-04T11:21:05.2491512Z",
"execution_start_time": "2024-09-04T11:21:03.4015759Z",
"livy_statement_state": "available",
"normalized_state": "finished",
"parent_msg_id": "4f4e94fa-4626-445d-86d0-b8fa3e3ae0b5",
"queued_time": "2024-09-04T11:21:02.9666054Z",
"session_id": "d0eed0e3-107e-4be2-8da7-7d6026d85e62",
"session_start_time": null,
"spark_pool": null,
"state": "finished",
"statement_id": 39,
"statement_ids": [
39
]
},
"text/plain": [
"StatementMeta(, d0eed0e3-107e-4be2-8da7-7d6026d85e62, 39, Finished, Available, Finished)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33muser\u001b[0m (to gpt-4o):\n",
"\n", "\n",
"Tell me a joke about openai.\n",
"\n", "\n",
"config_list, llm_config = get_config_list()\n", "--------------------------------------------------------------------------------\n",
"\u001b[33mgpt-4o\u001b[0m (to user):\n",
"\n", "\n",
"assert len(config_list) > 0\n", "Why did the AI go to school?\n",
"print(\"models to use: \", [config_list[i][\"model\"] for i in range(len(config_list))])" "\n",
"Because it wanted to improve its \"neural\" network!\n",
"\n",
"--------------------------------------------------------------------------------\n"
]
},
{
"data": {
"text/plain": [
"ChatResult(chat_id=None, chat_history=[{'content': 'Tell me a joke about openai.', 'role': 'assistant', 'name': 'user'}, {'content': 'Why did the AI go to school?\\n\\nBecause it wanted to improve its \"neural\" network!', 'role': 'user', 'name': 'gpt-4o'}], summary='Why did the AI go to school?\\n\\nBecause it wanted to improve its \"neural\" network!', cost={'usage_including_cached_inference': {'total_cost': 0.000435, 'gpt-4o-2024-05-13': {'cost': 0.000435, 'prompt_tokens': 27, 'completion_tokens': 20, 'total_tokens': 47}}, 'usage_excluding_cached_inference': {'total_cost': 0.000435, 'gpt-4o-2024-05-13': {'cost': 0.000435, 'prompt_tokens': 27, 'completion_tokens': 20, 'total_tokens': 47}}}, human_input=[])"
]
},
"execution_count": null,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import autogen\n",
"\n",
"agent = autogen.agentchat.ConversableAgent(\n",
" name=config_list[0][\"model\"], llm_config=llm_config, max_consecutive_auto_reply=1, human_input_mode=\"NEVER\"\n",
")\n",
"userproxy = autogen.agentchat.ConversableAgent(\n",
" name=\"user\",\n",
" max_consecutive_auto_reply=0,\n",
" llm_config=False,\n",
" default_auto_reply=\"TERMINATE\",\n",
" human_input_mode=\"NEVER\",\n",
")\n",
"userproxy.initiate_chat(recipient=agent, message=\"Tell me a joke about openai.\")"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "5", "id": "10",
"metadata": { "metadata": {
"nteract": { "nteract": {
"transient": { "transient": {
@ -164,7 +404,71 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "6", "id": "11",
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [],
"source": [
"%pip install \"pyautogen[retrievechat,lmm]>=0.2.28\" -q"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "12",
"metadata": {
"jupyter": {
"outputs_hidden": false,
"source_hidden": false
},
"nteract": {
"transient": {
"deleting": false
}
}
},
"outputs": [],
"source": [
"import types\n",
"\n",
"from synapse.ml.fabric.credentials import get_openai_httpx_sync_client\n",
"\n",
"import autogen\n",
"\n",
"http_client = get_openai_httpx_sync_client() # http_client is needed for openai>1\n",
"http_client.__deepcopy__ = types.MethodType(\n",
" lambda self, memo: self, http_client\n",
") # https://microsoft.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n",
"\n",
"config_list = [\n",
" {\n",
" \"model\": \"gpt-4o\",\n",
" \"http_client\": http_client,\n",
" \"api_version\": \"2024-02-01\",\n",
" \"api_type\": \"azure\",\n",
" },\n",
"]\n",
"\n",
"# Set temperature, timeout and other LLM configurations\n",
"llm_config = {\n",
" \"config_list\": config_list,\n",
" \"temperature\": 0,\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "13",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -340,7 +644,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "7", "id": "14",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -392,7 +696,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "8", "id": "15",
"metadata": { "metadata": {
"nteract": { "nteract": {
"transient": { "transient": {
@ -410,7 +714,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "9", "id": "16",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -466,7 +770,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "10", "id": "17",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -511,7 +815,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "11", "id": "18",
"metadata": { "metadata": {
"nteract": { "nteract": {
"transient": { "transient": {
@ -564,7 +868,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "12", "id": "19",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -909,7 +1213,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "13", "id": "20",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -961,7 +1265,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "14", "id": "21",
"metadata": { "metadata": {
"nteract": { "nteract": {
"transient": { "transient": {
@ -1029,7 +1333,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "15", "id": "22",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -1760,7 +2064,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "16", "id": "23",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -1812,7 +2116,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "17", "id": "24",
"metadata": { "metadata": {
"nteract": { "nteract": {
"transient": { "transient": {
@ -1827,7 +2131,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "18", "id": "25",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -2913,7 +3217,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "19", "id": "26",
"metadata": { "metadata": {
"nteract": { "nteract": {
"transient": { "transient": {
@ -2930,7 +3234,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "20", "id": "27",
"metadata": { "metadata": {
"nteract": { "nteract": {
"transient": { "transient": {
@ -2945,7 +3249,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "21", "id": "28",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -3032,7 +3336,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "22", "id": "29",
"metadata": { "metadata": {
"jupyter": { "jupyter": {
"outputs_hidden": false, "outputs_hidden": false,
@ -3088,7 +3392,8 @@
"name": "synapse_pyspark" "name": "synapse_pyspark"
}, },
"kernelspec": { "kernelspec": {
"display_name": "synapse_pyspark", "display_name": "Synapse PySpark",
"language": "Python",
"name": "synapse_pyspark" "name": "synapse_pyspark"
}, },
"language_info": { "language_info": {

View File

@ -811,7 +811,9 @@
"front_matter": { "front_matter": {
"description": "Solve complex tasks with one or more sequence chats nested as inner monologue.", "description": "Solve complex tasks with one or more sequence chats nested as inner monologue.",
"tags": [ "tags": [
"nested chat", "sequential chats", "orchestration" "nested chat",
"sequential chats",
"orchestration"
] ]
}, },
"kernelspec": { "kernelspec": {

View File

@ -72,6 +72,7 @@
"llm_config = {\"temperature\": 0}\n", "llm_config = {\"temperature\": 0}\n",
"config_list = autogen.config_list_from_json(config_file_or_env, filter_dict={\"model\": [\"gpt-4-1106-preview\", \"gpt-4\"]})\n", "config_list = autogen.config_list_from_json(config_file_or_env, filter_dict={\"model\": [\"gpt-4-1106-preview\", \"gpt-4\"]})\n",
"\n", "\n",
"\n",
"def start_task(execution_task: str, agent_list: list):\n", "def start_task(execution_task: str, agent_list: list):\n",
" group_chat = autogen.GroupChat(agents=agent_list, messages=[], max_round=12)\n", " group_chat = autogen.GroupChat(agents=agent_list, messages=[], max_round=12)\n",
" manager = autogen.GroupChatManager(groupchat=group_chat, llm_config={\"config_list\": config_list, **llm_config})\n", " manager = autogen.GroupChatManager(groupchat=group_chat, llm_config={\"config_list\": config_list, **llm_config})\n",
@ -923,4 +924,4 @@
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 5 "nbformat_minor": 5
} }

View File

@ -132,7 +132,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"\n",
"from pydantic import BaseModel, Field\n", "from pydantic import BaseModel, Field\n",
"\n", "\n",
"\n", "\n",
@ -680,7 +679,6 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"\n",
"# Define the function to generate the initial response\n", "# Define the function to generate the initial response\n",
"\n", "\n",
"\n", "\n",

View File

@ -16,7 +16,19 @@ from sqlmodel import (
Enum as SqlEnum, Enum as SqlEnum,
) )
SQLModel.model_config["protected_namespaces"] = () # added for python3.11 and sqlmodel 0.0.22 incompatibility
# Work around pydantic's protected "model_" namespace handling across
# sqlmodel versions (Python 3.11 with sqlmodel 0.0.22 warns otherwise).
if hasattr(SQLModel, "model_config"):
    SQLModel.model_config["protected_namespaces"] = ()
elif hasattr(SQLModel, "Config"):

    class CustomSQLModel(SQLModel):
        class Config:
            protected_namespaces = ()

    SQLModel = CustomSQLModel
else:
    print("Warning: Unable to set protected_namespaces.")
# pylint: disable=protected-access # pylint: disable=protected-access

View File

@ -0,0 +1,17 @@
from unittest.mock import Mock

from autogen.agentchat.contrib.graph_rag.graph_query_engine import GraphQueryEngine
from autogen.agentchat.contrib.graph_rag.graph_rag_capability import GraphRagCapability
from autogen.agentchat.conversable_agent import ConversableAgent


def test_dry_run():
    """Dry run for basic graph rag objects."""
    mock_graph_query_engine = Mock(spec=GraphQueryEngine)
    graph_rag_agent = ConversableAgent(
        name="graph_rag_agent",
        max_consecutive_auto_reply=3,
    )
    graph_rag_capability = GraphRagCapability(mock_graph_query_engine)
    graph_rag_capability.add_to_agent(graph_rag_agent)

View File

@ -0,0 +1,165 @@
# Mem0: Empower your AI applications with long-term memory and personalization
<img src="https://github.com/mem0ai/mem0/blob/main/docs/images/mem0-bg.png?raw=true" alt="Mem0 logo" style="width: 40%;" />
[Mem0 Platform](https://www.mem0.ai/) provides a smart, self-improving memory layer for Large Language Models (LLMs), enabling developers to create personalized AI experiences that evolve with each user interaction.
At a high level, Mem0 Platform offers comprehensive memory management, self-improving memory capabilities, cross-platform consistency, and centralized memory control for AI applications. For more info, check out the [Mem0 Platform Documentation](https://docs.mem0.ai).
| | |
| ---------------------------------------- | ----------------------------------------------------------------- |
| 🧠 **Comprehensive Memory Management** | Manage long-term, short-term, semantic, and episodic memories |
| 🔄 **Self-Improving Memory** | Adaptive system that learns from user interactions |
| 🌐 **Cross-Platform Consistency** | Unified user experience across various AI platforms |
| 🎛️ **Centralized Memory Control** | Effortless storage, updating, and deletion of memories |
| 🚀 **Simplified Development** | API-first approach for streamlined integration |
<details open>
<summary><b><u>Activity Dashboard</u></b></summary>
<a href="https://app.mem0.ai/">
<img src="https://github.com/mem0ai/mem0/blob/main/docs/images/platform/activity.png?raw=true" style="width: 70%;" alt="Activity Dashboard"/>
</a>
</details>
## Installation
Mem0 Platform works seamlessly with various AI applications.
1. **Sign Up:**
   Create an account at [Mem0 Platform](https://app.mem0.ai/)

2. **Generate API Key:**
   Create an API key in your Mem0 dashboard

3. **Install Mem0 SDK:**

   ```bash
   pip install mem0ai
   ```

4. **Configure Your Environment:**
   Add your API key to your environment variables

   ```
   MEM0_API_KEY=<YOUR_MEM0_API_KEY>
   ```

5. **Initialize Mem0:**

   ```python
   import os

   from mem0 import MemoryClient

   memory = MemoryClient(api_key=os.getenv("MEM0_API_KEY"))
   ```
After initializing Mem0, you can start using its memory management features in your AI application.
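
For instance, a minimal round trip might look like the sketch below. It uses the same `add` and `search` calls as the full AutoGen example later on this page; the user id and message content here are made up for illustration.

```python
# Minimal sketch (illustrative user id and content): store a fact, then retrieve it.
memory.add(
    messages=[{"role": "user", "content": "I prefer vegetarian restaurants."}],
    user_id="alice",  # hypothetical user id
)

# Later, fetch memories relevant to a new query for the same user.
# search() returns a list of dicts; each has a "memory" field with the stored fact.
for hit in memory.search("Where should the user eat?", user_id="alice"):
    print(hit["memory"])
```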
## Features
- **Long-term Memory**: Store and retrieve information persistently across sessions
- **Short-term Memory**: Manage temporary information within a single interaction
- **Semantic Memory**: Organize and retrieve conceptual knowledge
- **Episodic Memory**: Store and recall specific events or experiences
- **Self-Improving System**: Continuously refine understanding based on user interactions
## Common Use Cases
- Personalized Learning Assistants
- Customer Support AI Agents
- Healthcare Assistants
- Virtual Companions
## Mem0 Platform Examples
### AutoGen with Mem0 Example
This example demonstrates how to use Mem0 with AutoGen to create a conversational AI system with memory capabilities.
```python
import os

from autogen import ConversableAgent
from mem0 import MemoryClient

# Set up environment variables
os.environ["OPENAI_API_KEY"] = "your_openai_api_key"
os.environ["MEM0_API_KEY"] = "your_mem0_api_key"

# Initialize Agent and Memory
agent = ConversableAgent(
    "chatbot",
    llm_config={"config_list": [{"model": "gpt-4", "api_key": os.environ.get("OPENAI_API_KEY")}]},
    code_execution_config=False,
    function_map=None,
    human_input_mode="NEVER",
)

memory = MemoryClient(api_key=os.environ.get("MEM0_API_KEY"))

# Insert a conversation into memory
conversation = [
    {
        "role": "assistant",
        "content": "Hi, I'm Best Buy's chatbot!\n\nThanks for being a My Best Buy Total™ member.\n\nWhat can I help you with?"
    },
    {
        "role": "user",
        "content": "Seeing horizontal lines on our tv. TV model: Sony - 77\" Class BRAVIA XR A80K OLED 4K UHD Smart Google TV"
    },
    ........
]
memory.add(messages=conversation, user_id="customer_service_bot")

# Agent Inference
data = "Which TV am I using?"
relevant_memories = memory.search(data, user_id="customer_service_bot")
flatten_relevant_memories = "\n".join([m["memory"] for m in relevant_memories])

prompt = f"""Answer the user question considering the memories.
Memories:
{flatten_relevant_memories}
\n\n
Question: {data}
"""

reply = agent.generate_reply(messages=[{"content": prompt, "role": "user"}])
print("Reply :", reply)

# Multi Agent Conversation
manager = ConversableAgent(
    "manager",
    system_message="You are a manager who helps in resolving customer issues.",
    llm_config={"config_list": [{"model": "gpt-4", "temperature": 0, "api_key": os.environ.get("OPENAI_API_KEY")}]},
    human_input_mode="NEVER"
)

customer_bot = ConversableAgent(
    "customer_bot",
    system_message="You are a customer service bot who gathers information on issues customers are facing.",
    llm_config={"config_list": [{"model": "gpt-4", "temperature": 0, "api_key": os.environ.get("OPENAI_API_KEY")}]},
    human_input_mode="NEVER"
)

data = "What appointment is booked?"
relevant_memories = memory.search(data, user_id="customer_service_bot")
flatten_relevant_memories = "\n".join([m["memory"] for m in relevant_memories])

prompt = f"""
Context:
{flatten_relevant_memories}
\n\n
Question: {data}
"""

result = manager.send(prompt, customer_bot, request_reply=True)
```
Access the complete code from this notebook: [Mem0 with AutoGen](https://colab.research.google.com/drive/1NZEwC9w6V2S6hYmK7l2SQ9jhQrG1uKk8?usp=sharing)
This example showcases:
1. Setting up AutoGen agents and Mem0 memory
2. Adding a conversation to Mem0 memory
3. Using Mem0 to retrieve relevant memories for agent inference
4. Implementing a multi-agent conversation with memory-augmented context
For more Mem0 examples, visit our [documentation](https://docs.mem0.ai/examples).

View File

@ -14,7 +14,7 @@
"\n", "\n",
"Running this stack requires the installation of:\n", "Running this stack requires the installation of:\n",
"\n", "\n",
"1. AutoGen ([installation instructions](/docs/installation))\n", "1. AutoGen ([installation instructions](https://microsoft.github.io/autogen/docs/installation))\n",
"2. LiteLLM\n", "2. LiteLLM\n",
"3. Ollama\n", "3. Ollama\n",
"\n", "\n",