From 53e3b7c7222164ed9b4c5847416e47278cd89824 Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Mon, 14 Oct 2024 12:52:14 -0400
Subject: [PATCH] Add docs, test and changelog entry

---
 FirebaseVertexAI/CHANGELOG.md                     |  3 +++
 FirebaseVertexAI/Sources/Safety.swift             | 24 ++++++++++++++++++-
 .../Tests/Unit/GenerativeModelTests.swift         |  5 +++-
 3 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/FirebaseVertexAI/CHANGELOG.md b/FirebaseVertexAI/CHANGELOG.md
index f2facff2411..346ef9a70bf 100644
--- a/FirebaseVertexAI/CHANGELOG.md
+++ b/FirebaseVertexAI/CHANGELOG.md
@@ -58,6 +58,9 @@
   `totalBillableCharacters` counts, where applicable. (#13813)
 - [added] Added a new `HarmCategory` `.civicIntegrity` for filtering content
   that may be used to harm civic integrity. (#13728)
+- [added] Added `probabilityScore`, `severity` and `severityScore` in
+  `SafetyRating` to provide more fine-grained detail on blocked responses.
+  (#13875)
 - [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
   filter. (#13863)
 - [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index 611519a142b..2ff4fe85f1c 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -26,17 +26,34 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
 
   /// The model-generated probability that the content falls under the specified harm ``category``.
   ///
-  /// See ``HarmProbability`` for a list of possible values.
+  /// See ``HarmProbability`` for a list of possible values. This is a discretized representation
+  /// of the ``probabilityScore``.
   ///
   /// > Important: This does not indicate the severity of harm for a piece of content.
   public let probability: HarmProbability
 
+  /// The confidence score that the response is associated with the corresponding harm ``category``.
+  ///
+  /// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
+  /// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
+  /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
   public let probabilityScore: Float
 
+  /// The severity reflects the magnitude of how harmful a model response might be.
+  ///
+  /// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
+  /// the ``severityScore``.
   public let severity: HarmSeverity
 
+  /// The severity score is the magnitude of how harmful a model response might be.
+  ///
+  /// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
+  /// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
   public let severityScore: Float
 
+  /// If true, the response was blocked.
   public let blocked: Bool
 
   /// Initializes a new `SafetyRating` instance with the given category and probability.
@@ -92,6 +109,7 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
       VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
     }
 
+  /// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
   public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
     enum Kind: String {
       case negligible = "HARM_SEVERITY_NEGLIGIBLE"
@@ -100,12 +118,16 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
       case high = "HARM_SEVERITY_HIGH"
     }
 
+    /// Negligible level of harm severity.
    public static let negligible = HarmSeverity(kind: .negligible)
 
+    /// Low level of harm severity.
    public static let low = HarmSeverity(kind: .low)
 
+    /// Medium level of harm severity.
    public static let medium = HarmSeverity(kind: .medium)
 
+    /// High level of harm severity.
    public static let high = HarmSeverity(kind: .high)
 
    /// Returns the raw string representation of the `HarmSeverity` value.
diff --git a/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift b/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
index c333077eb9f..5ffa94daf64 100644
--- a/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
+++ b/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
@@ -885,8 +885,11 @@ final class GenerativeModelTests: XCTestCase {
      for try await _ in stream {
        XCTFail("Content shouldn't be shown, this shouldn't happen.")
      }
-    } catch let GenerateContentError.responseStoppedEarly(reason, _) {
+    } catch let GenerateContentError.responseStoppedEarly(reason, response) {
      XCTAssertEqual(reason, .safety)
+      let candidate = try XCTUnwrap(response.candidates.first)
+      XCTAssertEqual(candidate.finishReason, reason)
+      XCTAssertTrue(candidate.safetyRatings.contains { $0.blocked })
      return
    }
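Note (not part of the patch): a minimal sketch of how a caller might read the fields documented above from a response candidate. It assumes a configured Firebase app and relies only on properties the patch itself references (`candidates`, `finishReason`, `safetyRatings`); the helper name is hypothetical.

import FirebaseVertexAI

/// Logs the fine-grained safety details for the first candidate, if any.
/// Sketch only; field semantics follow the doc comments added in this patch.
func logSafetyRatings(from response: GenerateContentResponse) {
  guard let candidate = response.candidates.first else { return }
  print("finishReason: \(String(describing: candidate.finishReason))")
  for rating in candidate.safetyRatings {
    // `probabilityScore` and `severityScore` are the raw 0.0-1.0 values;
    // `probability` and `severity` are their discretized counterparts.
    print("\(rating.category): probability=\(rating.probability) (\(rating.probabilityScore)), " +
      "severity=\(rating.severity) (\(rating.severityScore)), blocked=\(rating.blocked)")
  }
}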