From fbced4e70e18d9f42d0079a810dd7e762452a64a Mon Sep 17 00:00:00 2001
From: LangChain4j
Date: Fri, 22 Mar 2024 11:46:28 +0100
Subject: [PATCH] Ollama: test that OpenAI API (OpenAiChatModel) works

---
 langchain4j-ollama/pom.xml                    |  6 ++
 .../model/ollama/OllamaOpenAiChatModelIT.java | 55 +++++++++++++++++++
 2 files changed, 61 insertions(+)
 create mode 100644 langchain4j-ollama/src/test/java/dev/langchain4j/model/ollama/OllamaOpenAiChatModelIT.java

diff --git a/langchain4j-ollama/pom.xml b/langchain4j-ollama/pom.xml
index 8be1b74c0..371fffce9 100644
--- a/langchain4j-ollama/pom.xml
+++ b/langchain4j-ollama/pom.xml
@@ -76,6 +76,12 @@
             <scope>test</scope>
         </dependency>
 
+        <dependency>
+            <groupId>dev.langchain4j</groupId>
+            <artifactId>langchain4j-open-ai</artifactId>
+            <scope>test</scope>
+        </dependency>
+
         <dependency>
             <groupId>org.tinylog</groupId>
             <artifactId>tinylog-impl</artifactId>
diff --git a/langchain4j-ollama/src/test/java/dev/langchain4j/model/ollama/OllamaOpenAiChatModelIT.java b/langchain4j-ollama/src/test/java/dev/langchain4j/model/ollama/OllamaOpenAiChatModelIT.java
new file mode 100644
index 000000000..e8e6415be
--- /dev/null
+++ b/langchain4j-ollama/src/test/java/dev/langchain4j/model/ollama/OllamaOpenAiChatModelIT.java
@@ -0,0 +1,55 @@
+package dev.langchain4j.model.ollama;
+
+import dev.langchain4j.data.message.AiMessage;
+import dev.langchain4j.data.message.UserMessage;
+import dev.langchain4j.model.chat.ChatLanguageModel;
+import dev.langchain4j.model.openai.OpenAiChatModel;
+import dev.langchain4j.model.output.Response;
+import dev.langchain4j.model.output.TokenUsage;
+import org.junit.jupiter.api.Test;
+
+import static dev.langchain4j.model.ollama.OllamaImage.TINY_DOLPHIN_MODEL;
+import static dev.langchain4j.model.output.FinishReason.STOP;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Tests if Ollama can be used via OpenAI API (langchain4j-open-ai module)
+ * See https://github.com/ollama/ollama/blob/main/docs/openai.md
+ */
+class OllamaOpenAiChatModelIT extends AbstractOllamaLanguageModelInfrastructure {
+
+    ChatLanguageModel model = OpenAiChatModel.builder()
+            .apiKey("does not matter") // TODO make apiKey optional when using custom baseUrl?
+            .baseUrl(ollama.getEndpoint() + "/v1") // TODO add "/v1" by default?
+            .modelName(TINY_DOLPHIN_MODEL)
+            .temperature(0.0)
+            .logRequests(true)
+            .logResponses(true)
+            .build();
+
+    @Test
+    void should_generate_response() {
+
+        // given
+        UserMessage userMessage = UserMessage.from("What is the capital of Germany?");
+
+        // when
+        Response<AiMessage> response = model.generate(userMessage);
+        System.out.println(response);
+
+        // then
+        AiMessage aiMessage = response.content();
+        assertThat(aiMessage.text()).contains("Berlin");
+        assertThat(aiMessage.toolExecutionRequests()).isNull();
+
+        TokenUsage tokenUsage = response.tokenUsage();
+        assertThat(tokenUsage.inputTokenCount()).isEqualTo(35);
+        assertThat(tokenUsage.outputTokenCount()).isGreaterThan(0);
+        assertThat(tokenUsage.totalTokenCount())
+                .isEqualTo(tokenUsage.inputTokenCount() + tokenUsage.outputTokenCount());
+
+        assertThat(response.finishReason()).isEqualTo(STOP);
+    }
+
+    // TODO add more tests
+}
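
Usage note: the test above talks to a Testcontainers-managed Ollama instance (the "ollama" container comes from AbstractOllamaLanguageModelInfrastructure). For reference, here is a minimal standalone sketch of the same OpenAI-compatible wiring against a locally installed Ollama server. It assumes Ollama is running on its default port 11434 and that a model has already been pulled with "ollama pull tinydolphin"; the class name and model name are illustrative, while the builder calls mirror the ones in the patch.

import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.output.Response;

public class OllamaViaOpenAiExample {

    public static void main(String[] args) {

        // Point the OpenAI client at Ollama's OpenAI-compatible endpoint ("/v1").
        // Ollama ignores the API key, but the builder currently requires a value
        // (hence the "does not matter" placeholder, same as in the test above).
        ChatLanguageModel model = OpenAiChatModel.builder()
                .apiKey("does not matter")
                .baseUrl("http://localhost:11434/v1") // default local Ollama port + "/v1"
                .modelName("tinydolphin")             // illustrative; any locally pulled model
                .temperature(0.0)
                .build();

        Response<AiMessage> response = model.generate(UserMessage.from("What is the capital of Germany?"));
        System.out.println(response.content().text());
    }
}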