diff --git a/Sources/GoogleAI/Chat.swift b/Sources/GoogleAI/Chat.swift
index a83947e..6549df4 100644
--- a/Sources/GoogleAI/Chat.swift
+++ b/Sources/GoogleAI/Chat.swift
@@ -30,7 +30,11 @@ public class Chat {
   /// model. This will be provided to the model for each message sent as context for the discussion.
   public var history: [ModelContent]
 
-  /// See ``sendMessage(_:)-3ify5``.
+  /// Sends a message using the existing history of this chat as context. If successful, the message
+  /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+  /// - Parameter parts: The new content to send as a single chat message.
+  /// - Returns: The model's response if no error occurred.
+  /// - Throws: A ``GenerateContentError`` if an error occurred.
   public func sendMessage(_ parts: any ThrowingPartsRepresentable...) async throws
     -> GenerateContentResponse {
     return try await sendMessage([ModelContent(parts: parts)])
@@ -76,7 +80,10 @@ public class Chat {
     return result
   }
 
-  /// See ``sendMessageStream(_:)-4abs3``.
+  /// Sends a message using the existing history of this chat as context. If successful, the message
+  /// and response will be added to the history. If unsuccessful, history will remain unchanged.
+  /// - Parameter parts: The new content to send as a single chat message.
+  /// - Returns: A stream containing the model's response or an error if an error occurred.
   @available(macOS 12.0, *)
   public func sendMessageStream(_ parts: any ThrowingPartsRepresentable...)
     -> AsyncThrowingStream<GenerateContentResponse, Error> {
diff --git a/Sources/GoogleAI/GenerativeAISwift.swift b/Sources/GoogleAI/GenerativeAISwift.swift
index 46c9102..65b7199 100644
--- a/Sources/GoogleAI/GenerativeAISwift.swift
+++ b/Sources/GoogleAI/GenerativeAISwift.swift
@@ -17,10 +17,11 @@ import Foundation
 #warning("Only iOS, macOS, and Catalyst targets are currently fully supported.")
 #endif
 
-/// Constants associated with the GenerativeAISwift SDK
+/// Constants associated with the GenerativeAISwift SDK.
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, *)
 public enum GenerativeAISwift {
   /// String value of the SDK version
   public static let version = "0.5.2"
+  /// The Google AI backend endpoint URL.
   static let baseURL = "https://generativelanguage.googleapis.com"
 }
diff --git a/Sources/GoogleAI/GenerativeModel.swift b/Sources/GoogleAI/GenerativeModel.swift
index 7ed2016..ed1aecd 100644
--- a/Sources/GoogleAI/GenerativeModel.swift
+++ b/Sources/GoogleAI/GenerativeModel.swift
@@ -48,14 +48,14 @@ public final class GenerativeModel {
   /// Initializes a new remote model with the given parameters.
   ///
   /// - Parameters:
-  ///   - name: The name of the model to use, e.g., `"gemini-1.5-pro-latest"`; see
+  ///   - name: The name of the model to use, for example `"gemini-1.5-pro-latest"`; see
   ///     [Gemini models](https://ai.google.dev/models/gemini) for a list of supported model names.
   ///   - apiKey: The API key for your project.
   ///   - generationConfig: The content generation parameters your model should use.
   ///   - safetySettings: A value describing what types of harmful content your model should allow.
   ///   - tools: A list of ``Tool`` objects that the model may use to generate the next response.
   ///   - systemInstruction: Instructions that direct the model to behave a certain way; currently
-  ///     only text content is supported, e.g.,
+  ///     only text content is supported, for example
   ///     `ModelContent(role: "system", parts: "You are a cat. Your name is Neko.")`.
   ///   - toolConfig: Tool configuration for any `Tool` specified in the request.
   ///   - requestOptions Configuration parameters for sending requests to the backend.
@@ -154,7 +154,7 @@ public final class GenerativeModel {
   /// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting)
   /// or "direct" prompts. For
   /// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting)
-  /// prompts, see ``generateContent(_:)-58rm0``.
+  /// prompts, see `generateContent(_ content: @autoclosure () throws -> [ModelContent])`.
   ///
   /// - Parameter content: The input(s) given to the model as a prompt (see
   ///   ``ThrowingPartsRepresentable``
@@ -213,7 +213,7 @@ public final class GenerativeModel {
   /// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting)
   /// or "direct" prompts. For
   /// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting)
-  /// prompts, see ``generateContent(_:)-58rm0``.
+  /// prompts, see `generateContent(_ content: @autoclosure () throws -> [ModelContent])`.
   ///
   /// - Parameter content: The input(s) given to the model as a prompt (see
   ///   ``ThrowingPartsRepresentable``
@@ -302,7 +302,7 @@ public final class GenerativeModel {
   /// [zero-shot](https://developers.google.com/machine-learning/glossary/generative#zero-shot-prompting)
   /// or "direct" prompts. For
   /// [few-shot](https://developers.google.com/machine-learning/glossary/generative#few-shot-prompting)
-  /// input, see ``countTokens(_:)-9spwl``.
+  /// input, see `countTokens(_ content: @autoclosure () throws -> [ModelContent])`.
   ///
   /// - Parameter content: The input(s) given to the model as a prompt (see
   ///   ``ThrowingPartsRepresentable``
@@ -360,7 +360,7 @@ public final class GenerativeModel {
   }
 }
 
-/// See ``GenerativeModel/countTokens(_:)-9spwl``.
+/// An error thrown in `GenerativeModel.countTokens(_:)`.
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, *)
 public enum CountTokensError: Error {
   case internalError(underlying: Error)
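For reference, the chat behavior described by the new doc comments can be exercised roughly as in the sketch below. This is a minimal usage example under assumptions, not part of the diff: it assumes the package's `GoogleGenerativeAI` module name, the SDK's `startChat()` helper on `GenerativeModel`, and a placeholder API key and prompts.

```swift
// Minimal usage sketch (not part of this change). Assumes the GoogleGenerativeAI
// module name, the startChat() helper, and a placeholder API key.
import GoogleGenerativeAI

let model = GenerativeModel(name: "gemini-1.5-pro-latest", apiKey: "YOUR_API_KEY")
let chat = model.startChat()

// sendMessage: on success, the message and the model's reply are appended to chat.history.
let response = try await chat.sendMessage("Write a haiku about the sea.")
print(response.text ?? "")

// sendMessageStream: partial responses are yielded as they arrive; if the stream throws,
// chat.history is left unchanged.
for try await chunk in chat.sendMessageStream("Now translate it to French.") {
  print(chunk.text ?? "", terminator: "")
}
```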