[.Net] add ollama-sample and add more tests (#2776)

* add ollama-sample and adds more tests

* Update AutoGen.Ollama.Sample.csproj
This commit is contained in:
Xiaoyun Zhang 2024-05-23 21:14:29 -07:00 committed by GitHub
parent 8d55334e4d
commit 702c010d77
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 164 additions and 31 deletions

View File

@ -43,6 +43,7 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.Ollama", "src\AutoG
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AutoGen.Ollama.Tests", "test\AutoGen.Ollama.Tests\AutoGen.Ollama.Tests.csproj", "{03E31CAA-3728-48D3-B936-9F11CF6C18FE}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AutoGen.Ollama.Sample", "sample\AutoGen.Ollama.Sample\AutoGen.Ollama.Sample.csproj", "{93AA4D0D-6EE4-44D5-AD77-7F73A3934544}"
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AutoGen.SemanticKernel.Sample", "sample\AutoGen.SemanticKernel.Sample\AutoGen.SemanticKernel.Sample.csproj", "{52958A60-3FF7-4243-9058-34A6E4F55C31}"
EndProject
Global
@ -119,6 +120,10 @@ Global
{03E31CAA-3728-48D3-B936-9F11CF6C18FE}.Debug|Any CPU.Build.0 = Debug|Any CPU
{03E31CAA-3728-48D3-B936-9F11CF6C18FE}.Release|Any CPU.ActiveCfg = Release|Any CPU
{03E31CAA-3728-48D3-B936-9F11CF6C18FE}.Release|Any CPU.Build.0 = Release|Any CPU
{93AA4D0D-6EE4-44D5-AD77-7F73A3934544}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{93AA4D0D-6EE4-44D5-AD77-7F73A3934544}.Debug|Any CPU.Build.0 = Debug|Any CPU
{93AA4D0D-6EE4-44D5-AD77-7F73A3934544}.Release|Any CPU.ActiveCfg = Release|Any CPU
{93AA4D0D-6EE4-44D5-AD77-7F73A3934544}.Release|Any CPU.Build.0 = Release|Any CPU
{52958A60-3FF7-4243-9058-34A6E4F55C31}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{52958A60-3FF7-4243-9058-34A6E4F55C31}.Debug|Any CPU.Build.0 = Debug|Any CPU
{52958A60-3FF7-4243-9058-34A6E4F55C31}.Release|Any CPU.ActiveCfg = Release|Any CPU
@ -145,6 +150,7 @@ Global
{B61388CA-DC73-4B7F-A7B2-7B9A86C9229E} = {F823671B-3ECA-4AE6-86DA-25E920D3FE64}
{9F9E6DED-3D92-4970-909A-70FC11F1A665} = {18BF8DD7-0585-48BF-8F97-AD333080CE06}
{03E31CAA-3728-48D3-B936-9F11CF6C18FE} = {F823671B-3ECA-4AE6-86DA-25E920D3FE64}
{93AA4D0D-6EE4-44D5-AD77-7F73A3934544} = {FBFEAD1F-29EB-4D99-A672-0CD8473E10B9}
{52958A60-3FF7-4243-9058-34A6E4F55C31} = {FBFEAD1F-29EB-4D99-A672-0CD8473E10B9}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution

View File

@ -0,0 +1,24 @@
<!-- Console sample project for AutoGen.Ollama: builds an Exe that exercises the
     Ollama agent samples (see Chat_With_LLaMA / Chat_With_LLaVA). -->
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<!-- Reuses the test target-framework property; presumably defined in a shared
     props file outside this view — TODO confirm it resolves for sample projects. -->
<TargetFramework>$(TestTargetFramework)</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<GenerateDocumentationFile>True</GenerateDocumentationFile>
<!-- Suppresses nullability (CS86xx), unused-variable (CS0219) and
     SemanticKernel experimental (SKEXP*) warnings for sample code. -->
<NoWarn>$(NoWarn);CS8981;CS8600;CS8602;CS8604;CS8618;CS0219;SKEXP0054;SKEXP0050;SKEXP0110</NoWarn>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\AutoGen.DotnetInteractive\AutoGen.DotnetInteractive.csproj" />
<ProjectReference Include="..\..\src\AutoGen.Ollama\AutoGen.Ollama.csproj" />
<!-- Source generator is consumed as an analyzer only; its assembly is not referenced. -->
<ProjectReference Include="..\..\src\AutoGen.SourceGenerator\AutoGen.SourceGenerator.csproj" OutputItemType="Analyzer" ReferenceOutputAssembly="false" />
<ProjectReference Include="..\..\src\AutoGen\AutoGen.csproj" />
<!-- NOTE(review): FluentAssertions is a test-only library referenced from a
     sample project — confirm this reference is intentional. -->
<PackageReference Include="FluentAssertions" Version="$(FluentAssertionVersion)" />
</ItemGroup>
<ItemGroup>
<!-- Copies sample images (e.g. images\background.png used by Chat_With_LLaVA)
     next to the built binary so relative paths resolve at run time. -->
<None Update="images\*.png">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>

View File

@ -0,0 +1,28 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Chat_With_LLaMA.cs
using AutoGen.Core;
using AutoGen.Ollama.Extension;
namespace AutoGen.Ollama.Sample;
/// <summary>
/// Sample: text-only chat with a llama3 model served by Ollama.
/// </summary>
public class Chat_With_LLaMA
{
    public static async Task RunAsync()
    {
        // The Ollama server is reached through a dev-tunnel endpoint;
        // HttpClient is disposed when RunAsync completes.
        using var httpClient = new HttpClient()
        {
            BaseAddress = new Uri("https://2xbvtxd1-11434.usw2.devtunnels.ms")
        };

        // Build the agent, then attach the message connector (translates between
        // AutoGen message types and Ollama's wire format) and a print middleware
        // that echoes each reply to the console.
        var agent = new OllamaAgent(
                httpClient: httpClient,
                name: "ollama",
                modelName: "llama3:latest",
                systemMessage: "You are a helpful AI assistant")
            .RegisterMessageConnector()
            .RegisterPrintMessage();

        var response = await agent.SendAsync("Can you write a piece of C# code to calculate 100th of fibonacci?");
    }
}

View File

@ -0,0 +1,40 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Chat_With_LLaVA.cs
using AutoGen.Core;
using AutoGen.Ollama.Extension;
namespace AutoGen.Ollama.Sample;
/// <summary>
/// Sample: multi-modal (text + image) chat with a llava model served by Ollama.
/// </summary>
public class Chat_With_LLaVA
{
    public static async Task RunAsync()
    {
        // The Ollama server is reached through a dev-tunnel endpoint;
        // HttpClient is disposed when RunAsync completes.
        using var httpClient = new HttpClient()
        {
            BaseAddress = new Uri("https://2xbvtxd1-11434.usw2.devtunnels.ms")
        };

        var agent = new OllamaAgent(
                httpClient: httpClient,
                name: "ollama",
                modelName: "llava:latest",
                systemMessage: "You are a helpful AI assistant")
            .RegisterMessageConnector()
            .RegisterPrintMessage();

        // Option 1: send the question and the image as two separate messages.
        var imagePath = Path.Combine("images", "background.png");
        var imageData = BinaryData.FromBytes(File.ReadAllBytes(imagePath), "image/png");
        var imageMessage = new ImageMessage(Role.User, imageData);
        var question = new TextMessage(Role.User, "what's in this image?");
        var reply = await agent.SendAsync(chatHistory: [question, imageMessage]);

        // Option 2: bundle text and image into a single MultiModalMessage.
        // All parts end up in one Ollama message: the text parts are
        // concatenated with '\n' and every image is attached to that message.
        var combined = new MultiModalMessage(Role.User, [question, imageMessage]);
        reply = await agent.SendAsync(chatHistory: [combined]);
    }
}

View File

@ -0,0 +1,6 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Program.cs

// Entry point for the Ollama sample console app: runs the multi-modal
// LLaVA example. Swap in Chat_With_LLaMA.RunAsync() to run the
// text-only sample instead.
using AutoGen.Ollama.Sample;
await Chat_With_LLaVA.RunAsync();

View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:300b7c9d6ba0c23a3e52fbd2e268141ddcca0434a9fb9dcf7e58e7e903d36dcf
size 2126185

View File

@ -64,13 +64,9 @@ public class OllamaMessageConnector : IStreamingMiddleware
// if the chunks are not empty, aggregate them into a single message
var messageContent = string.Join(string.Empty, chunks.Select(c => c.Message?.Value));
var message = new Message
{
Role = "assistant",
Value = messageContent,
};
var message = new TextMessage(Role.Assistant, messageContent, agent.Name);
yield return MessageEnvelope.Create(message, agent.Name);
yield return message;
}
private IEnumerable<IMessage> ProcessMessage(IEnumerable<IMessage> messages, IAgent agent)
@ -96,18 +92,25 @@ public class OllamaMessageConnector : IStreamingMiddleware
private IEnumerable<IMessage> ProcessMultiModalMessage(MultiModalMessage multiModalMessage, IAgent agent)
{
var messages = new List<IMessage>();
foreach (var message in multiModalMessage.Content)
{
messages.AddRange(message switch
{
TextMessage textMessage => ProcessTextMessage(textMessage, agent),
ImageMessage imageMessage => ProcessImageMessage(imageMessage, agent),
_ => throw new InvalidOperationException("Invalid message type"),
});
}
var textMessages = multiModalMessage.Content.Where(m => m is TextMessage textMessage && textMessage.GetContent() is not null);
var imageMessages = multiModalMessage.Content.Where(m => m is ImageMessage);
return messages;
// aggregate the text messages into one message
// by concatenating the content using newline
var textContent = string.Join("\n", textMessages.Select(m => ((TextMessage)m).Content));
// collect all the images
var images = imageMessages.SelectMany(m => ProcessImageMessage((ImageMessage)m, agent)
.SelectMany(m => (m as IMessage<Message>)?.Content.Images));
var message = new Message()
{
Role = "user",
Value = textContent,
Images = images.ToList(),
};
return [MessageEnvelope.Create(message, agent.Name)];
}
private IEnumerable<IMessage> ProcessImageMessage(ImageMessage imageMessage, IAgent agent)

View File

@ -2,13 +2,10 @@
// OllamaMessageTests.cs
using AutoGen.Core;
using AutoGen.Ollama;
using AutoGen.Tests;
using FluentAssertions;
using Xunit;
using Message = AutoGen.Ollama.Message;
namespace Autogen.Ollama.Tests;
namespace AutoGen.Ollama.Tests;
public class OllamaMessageTests
{
@ -42,6 +39,36 @@ public class OllamaMessageTests
await agent.SendAsync(userMessage);
}
[Fact]
public async Task ItProcessStreamingTextMessageAsync()
{
var messageConnector = new OllamaMessageConnector();
var agent = new EchoAgent("assistant")
.RegisterStreamingMiddleware(messageConnector);
var messageChunks = Enumerable.Range(0, 10)
.Select(i => new ChatResponseUpdate()
{
Message = new Message()
{
Value = i.ToString(),
Role = "assistant",
}
})
.Select(m => MessageEnvelope.Create(m));
IStreamingMessage? finalReply = null;
await foreach (var reply in agent.GenerateStreamingReplyAsync(messageChunks))
{
reply.Should().BeAssignableTo<IStreamingMessage>();
finalReply = reply;
}
finalReply.Should().BeOfType<TextMessage>();
var textMessage = (TextMessage)finalReply!;
textMessage.GetContent().Should().Be("0123456789");
}
[Fact]
public async Task ItProcessAssistantTextMessageAsync()
{
@ -126,17 +153,13 @@ public class OllamaMessageTests
var agent = new EchoAgent("assistant")
.RegisterMiddleware(async (msgs, _, innerAgent, ct) =>
{
msgs.Count().Should().Be(2);
var textMessage = msgs.First();
textMessage.Should().BeOfType<MessageEnvelope<Message>>();
var message = (IMessage<Message>)textMessage;
message.Content.Role.Should().Be("user");
msgs.Count().Should().Be(1);
var message = msgs.First();
message.Should().BeOfType<MessageEnvelope<Message>>();
var imageMessage = msgs.Last();
imageMessage.Should().BeOfType<MessageEnvelope<Message>>();
message = (IMessage<Message>)imageMessage;
message.Content.Role.Should().Be("user");
message.Content.Images!.Count.Should().Be(1);
var multiModalMessage = (IMessage<Message>)message;
multiModalMessage.Content.Images!.Count.Should().Be(1);
multiModalMessage.Content.Value.Should().Be("Hello");
return await innerAgent.GenerateReplyAsync(msgs);
})