diff --git a/langchain4j-google-ai-gemini-spring-boot-starter/pom.xml b/langchain4j-google-ai-gemini-spring-boot-starter/pom.xml
new file mode 100644
index 00000000..e657ad61
--- /dev/null
+++ b/langchain4j-google-ai-gemini-spring-boot-starter/pom.xml
@@ -0,0 +1,67 @@
diff --git a/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/AutoConfig.java b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/AutoConfig.java
new file mode 100644
--- /dev/null
+++ b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/AutoConfig.java
+package dev.langchain4j.googleaigemini.spring;
+
+import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
+import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel;
+import dev.langchain4j.model.googleai.GoogleAiGeminiTokenizer;
+import org.springframework.boot.autoconfigure.AutoConfiguration;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.annotation.Bean;
+
+import static dev.langchain4j.googleaigemini.spring.Properties.PREFIX;
+
+@AutoConfiguration
+@EnableConfigurationProperties(Properties.class)
+public class AutoConfig {
+
+    /**
+     * Creates a bean for the {@link GoogleAiGeminiChatModel}.
+     * <p>
+     * This method configures and initializes a chat model using the provided properties.
+     * The bean is only created if the property {@code langchain4j.google-ai-gemini.chat-model.api-key} is defined.
+     * </p>
+     *
+     * @param properties the configuration properties containing the chat model settings
+     * @return a configured instance of {@link GoogleAiGeminiChatModel}
+     */
+    @Bean
+    @ConditionalOnProperty(name = PREFIX + ".chat-model.api-key")
+    GoogleAiGeminiChatModel googleAiGeminiChatModel(Properties properties) {
+        ChatModelProperties chatModelProperties = properties.getChatModel();
+        return GoogleAiGeminiChatModel.builder()
+                .apiKey(chatModelProperties.apiKey())
+                .modelName(chatModelProperties.modelName())
+                .temperature(chatModelProperties.temperature())
+                .maxOutputTokens(chatModelProperties.maxOutputTokens())
+                .topK(chatModelProperties.topK())
+                .topP(chatModelProperties.topP())
+                .maxRetries(chatModelProperties.maxRetries())
+                .logRequestsAndResponses(chatModelProperties.logRequestsAndResponses())
+                .allowCodeExecution(chatModelProperties.allowCodeExecution())
+                .includeCodeExecutionOutput(chatModelProperties.includeCodeExecutionOutput())
+                .timeout(chatModelProperties.timeout())
+                .build();
+    }
+
+    /**
+     * Creates a bean for the {@link GoogleAiGeminiStreamingChatModel}.
+     * <p>
+     * This method configures and initializes a streaming chat model using the provided properties.
+     * The bean is only created if the property {@code langchain4j.google-ai-gemini.streaming-chat-model.api-key} is defined.
+     * </p>
+     *
+     * @param properties the configuration properties containing the streaming chat model settings
+     * @return a configured instance of {@link GoogleAiGeminiStreamingChatModel}
+     */
+    @Bean
+    @ConditionalOnProperty(name = PREFIX + ".streaming-chat-model.api-key")
+    GoogleAiGeminiStreamingChatModel googleAiGeminiStreamingChatModel(Properties properties) {
+        ChatModelProperties streamingChatModelProperties = properties.getStreamingChatModel();
+        return GoogleAiGeminiStreamingChatModel.builder()
+                .apiKey(streamingChatModelProperties.apiKey())
+                .modelName(streamingChatModelProperties.modelName())
+                .temperature(streamingChatModelProperties.temperature())
+                .maxOutputTokens(streamingChatModelProperties.maxOutputTokens())
+                .topK(streamingChatModelProperties.topK())
+                .topP(streamingChatModelProperties.topP())
+                .maxRetries(streamingChatModelProperties.maxRetries())
+                .logRequestsAndResponses(streamingChatModelProperties.logRequestsAndResponses())
+                .allowCodeExecution(streamingChatModelProperties.allowCodeExecution())
+                .includeCodeExecutionOutput(streamingChatModelProperties.includeCodeExecutionOutput())
+                .timeout(streamingChatModelProperties.timeout())
+                .build();
+    }
+
+    /**
+     * Creates a bean for the {@link GoogleAiGeminiTokenizer}.
+     * <p>
+     * This method configures and initializes a tokenizer using the provided properties.
+     * The bean is only created if the property {@code langchain4j.google-ai-gemini.tokenizer.api-key} is defined.
+     * </p>
+     *
+     * @param properties the configuration properties containing the tokenizer settings
+     * @return a configured instance of {@link GoogleAiGeminiTokenizer}
+     */
+    @Bean
+    @ConditionalOnProperty(name = PREFIX + ".tokenizer.api-key")
+    GoogleAiGeminiTokenizer googleAiGeminiTokenizer(Properties properties) {
+        TokenizerProperties tokenizerProperties = properties.getTokenizer();
+        return GoogleAiGeminiTokenizer.builder()
+                .apiKey(tokenizerProperties.apiKey())
+                .modelName(tokenizerProperties.modelName())
+                .maxRetries(tokenizerProperties.maxRetries())
+                .logRequestsAndResponses(tokenizerProperties.logRequestsAndResponses())
+                .timeout(tokenizerProperties.timeout())
+                .build();
+    }
+
+}
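For context, a minimal usage sketch (not part of this diff): with the starter on the classpath and `langchain4j.google-ai-gemini.chat-model.api-key` plus `.model-name` set in the application configuration, the auto-configured model can be injected as a plain `ChatLanguageModel`. The `AssistantService` class and its prompt handling below are hypothetical.

```java
// Hypothetical consumer of the auto-configured bean; only the two properties named
// above need to be set for AutoConfig to register GoogleAiGeminiChatModel.
import dev.langchain4j.model.chat.ChatLanguageModel;
import org.springframework.stereotype.Service;

@Service
class AssistantService {

    private final ChatLanguageModel chatModel;

    AssistantService(ChatLanguageModel chatModel) {
        // Spring injects the GoogleAiGeminiChatModel created by AutoConfig
        this.chatModel = chatModel;
    }

    String answer(String question) {
        // generate(String) sends a single user message and returns the model's text reply
        return chatModel.generate(question);
    }
}
```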
diff --git a/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/ChatModelProperties.java b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/ChatModelProperties.java
new file mode 100644
index 00000000..0e473830
--- /dev/null
+++ b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/ChatModelProperties.java
@@ -0,0 +1,37 @@
+package dev.langchain4j.googleaigemini.spring;
+
+import java.time.Duration;
+
+/**
+ * Configuration properties for the Google AI Gemini Chat Model.
+ * <p>
+ * This class defines the necessary properties for configuring
+ * and using the chat model.
+ * </p>
+ *
+ * @param apiKey The API key for authenticating requests to the Google AI Gemini service.
+ * @param modelName The name of the model to use.
+ * @param temperature The temperature setting to control response randomness.
+ * @param maxOutputTokens The maximum number of tokens to include in the model's output.
+ * @param topK The top-K sampling parameter to refine the response.
+ * @param topP The top-P (nucleus sampling) parameter for controlling diversity.
+ * @param maxRetries The maximum number of retries for failed requests.
+ * @param timeout The timeout duration for chat model requests.
+ * @param logRequestsAndResponses Flag to enable or disable logging of requests and responses.
+ * @param allowCodeExecution Flag to allow or disallow the execution of code.
+ * @param includeCodeExecutionOutput Flag to include or exclude code execution output in the response.
+ */
+record ChatModelProperties(
+        String apiKey,
+        String modelName,
+        Double temperature,
+        Integer maxOutputTokens,
+        Integer topK,
+        Double topP,
+        Integer maxRetries,
+        Duration timeout,
+        boolean logRequestsAndResponses,
+        boolean allowCodeExecution,
+        boolean includeCodeExecutionOutput
+) {
+}
diff --git a/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/Properties.java b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/Properties.java
new file mode 100644
index 00000000..1151782c
--- /dev/null
+++ b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/Properties.java
@@ -0,0 +1,24 @@
+package dev.langchain4j.googleaigemini.spring;
+
+import lombok.Getter;
+import lombok.Setter;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.context.properties.NestedConfigurationProperty;
+
+@Getter
+@Setter
+@ConfigurationProperties(prefix = Properties.PREFIX)
+public class Properties {
+
+    static final String PREFIX = "langchain4j.google-ai-gemini";
+
+    @NestedConfigurationProperty
+    ChatModelProperties chatModel;
+
+    @NestedConfigurationProperty
+    ChatModelProperties streamingChatModel;
+
+    @NestedConfigurationProperty
+    TokenizerProperties tokenizer;
+
+}
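For context, a sketch (not part of this diff) of how the kebab-case keys under the `langchain4j.google-ai-gemini` prefix bind onto these records through Spring Boot's relaxed binding. The direct `Binder` usage and the placeholder values are illustrative only; the class sits in the same package purely so the package-private record is visible.

```java
package dev.langchain4j.googleaigemini.spring;

import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.context.properties.source.MapConfigurationPropertySource;

import java.util.Map;

class BindingSketch {

    public static void main(String[] args) {
        // Placeholder values; in a real application these come from application.properties/yaml
        MapConfigurationPropertySource source = new MapConfigurationPropertySource(Map.of(
                "langchain4j.google-ai-gemini.chat-model.api-key", "dummy-key",
                "langchain4j.google-ai-gemini.chat-model.model-name", "gemini-1.5-flash",
                "langchain4j.google-ai-gemini.chat-model.max-output-tokens", "20"
        ));

        // "max-output-tokens" binds to the maxOutputTokens record component (relaxed binding);
        // unset components stay null (wrapper types) or false (booleans)
        ChatModelProperties chatModel = new Binder(source)
                .bind("langchain4j.google-ai-gemini.chat-model", ChatModelProperties.class)
                .get();

        System.out.println(chatModel.modelName() + " / " + chatModel.maxOutputTokens());
    }
}
```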
diff --git a/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/TokenizerProperties.java b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/TokenizerProperties.java
new file mode 100644
index 00000000..da0f6002
--- /dev/null
+++ b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/java/dev/langchain4j/googleaigemini/spring/TokenizerProperties.java
@@ -0,0 +1,25 @@
+package dev.langchain4j.googleaigemini.spring;
+
+import java.time.Duration;
+
+/**
+ * Configuration properties for the Google AI Gemini Tokenizer.
+ * <p>
+ * This class defines the necessary properties for configuring
+ * and using the tokenizer.
+ * </p>
+ *
+ * @param apiKey The API key for authenticating requests to the Google AI Gemini service.
+ * @param modelName The name of the model to use.
+ * @param maxRetries The maximum number of retries for failed requests.
+ * @param logRequestsAndResponses Flag to enable or disable logging of requests and responses.
+ * @param timeout The timeout duration for tokenizer requests.
+ */
+record TokenizerProperties(
+        String apiKey,
+        String modelName,
+        Integer maxRetries,
+        boolean logRequestsAndResponses,
+        Duration timeout
+) {
+}
diff --git a/langchain4j-google-ai-gemini-spring-boot-starter/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports
new file mode 100644
index 00000000..8a8a5b16
--- /dev/null
+++ b/langchain4j-google-ai-gemini-spring-boot-starter/src/main/resources/META-INF/spring/org.springframework.boot.autoconfigure.AutoConfiguration.imports
@@ -0,0 +1 @@
+dev.langchain4j.googleaigemini.spring.AutoConfig
\ No newline at end of file
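For context, a sketch (not part of this diff) of consuming the tokenizer bean that this auto-configuration registers once `langchain4j.google-ai-gemini.tokenizer.api-key` (and optionally `.model-name`) is set; the `TokenCounter` component is hypothetical.

```java
// Hypothetical component; GoogleAiGeminiTokenizer implements the Tokenizer interface
// and delegates token counting to the Gemini API.
import dev.langchain4j.model.Tokenizer;
import org.springframework.stereotype.Component;

@Component
class TokenCounter {

    private final Tokenizer tokenizer;

    TokenCounter(Tokenizer tokenizer) {
        this.tokenizer = tokenizer;
    }

    int count(String text) {
        return tokenizer.estimateTokenCountInText(text);
    }
}
```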
diff --git a/langchain4j-google-ai-gemini-spring-boot-starter/src/test/java/dev/langchain4j/googleaigemini/spring/AutoConfigIT.java b/langchain4j-google-ai-gemini-spring-boot-starter/src/test/java/dev/langchain4j/googleaigemini/spring/AutoConfigIT.java
new file mode 100644
index 00000000..434786b6
--- /dev/null
+++ b/langchain4j-google-ai-gemini-spring-boot-starter/src/test/java/dev/langchain4j/googleaigemini/spring/AutoConfigIT.java
@@ -0,0 +1,108 @@
+package dev.langchain4j.googleaigemini.spring;
+
+import dev.langchain4j.data.message.AiMessage;
+import dev.langchain4j.model.StreamingResponseHandler;
+import dev.langchain4j.model.Tokenizer;
+import dev.langchain4j.model.chat.ChatLanguageModel;
+import dev.langchain4j.model.chat.StreamingChatLanguageModel;
+import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
+import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel;
+import dev.langchain4j.model.googleai.GoogleAiGeminiTokenizer;
+import dev.langchain4j.model.output.Response;
+import org.junit.jupiter.api.Test;
+import org.springframework.boot.autoconfigure.AutoConfigurations;
+import org.springframework.boot.test.context.runner.ApplicationContextRunner;
+
+import java.util.concurrent.CompletableFuture;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.assertj.core.api.Assertions.assertThat;
+
+class AutoConfigIT {
+
+    private static final String API_KEY = System.getenv("GOOGLE_API_KEY");
+    private static final String MODEL = "gemini-1.5-flash";
+
+    private final ApplicationContextRunner contextRunner = new ApplicationContextRunner()
+            .withConfiguration(AutoConfigurations.of(AutoConfig.class));
+
+    @Test
+    void should_provide_chat_model() {
+        contextRunner
+                .withPropertyValues(
+                        "langchain4j.google-ai-gemini.chat-model.api-key=" + API_KEY,
+                        "langchain4j.google-ai-gemini.chat-model.model-name=" + MODEL,
+                        "langchain4j.google-ai-gemini.chat-model.max-output-tokens=20"
+                )
+                .run(context -> {
+                    ChatLanguageModel chatLanguageModel = context.getBean(ChatLanguageModel.class);
+                    assertThat(chatLanguageModel).isInstanceOf(GoogleAiGeminiChatModel.class);
+                    assertThat(chatLanguageModel.generate("What is the capital of Germany?")).contains("Berlin");
+                    assertThat(context.getBean(GoogleAiGeminiChatModel.class)).isSameAs(chatLanguageModel);
+                });
+    }
+
+    @Test
+    void should_provide_streaming_chat_model() {
+        contextRunner
+                .withPropertyValues(
+                        "langchain4j.google-ai-gemini.streaming-chat-model.api-key=" + API_KEY,
+                        "langchain4j.google-ai-gemini.streaming-chat-model.model-name=" + MODEL,
+                        "langchain4j.google-ai-gemini.streaming-chat-model.max-output-tokens=20"
+                )
+                .run(context -> {
+
+                    StreamingChatLanguageModel streamingChatLanguageModel = context.getBean(StreamingChatLanguageModel.class);
+                    assertThat(streamingChatLanguageModel).isInstanceOf(GoogleAiGeminiStreamingChatModel.class);
+                    CompletableFuture