Add docs, test and changelog entry
andrewheard committed Oct 14, 2024
1 parent e1e9796 commit 53e3b7c
Showing 3 changed files with 30 additions and 2 deletions.
3 changes: 3 additions & 0 deletions FirebaseVertexAI/CHANGELOG.md
@@ -58,6 +58,9 @@
`totalBillableCharacters` counts, where applicable. (#13813)
- [added] Added a new `HarmCategory` `.civicIntegrity` for filtering content
that may be used to harm civic integrity. (#13728)
- [added] Added `probabilityScore`, `severity` and `severityScore` in
`SafetyRating` to provide more fine-grained detail on blocked responses.
(#13875)
- [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
filter. (#13863)
- [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
24 changes: 23 additions & 1 deletion FirebaseVertexAI/Sources/Safety.swift
@@ -26,17 +26,34 @@ public struct SafetyRating: Equatable, Hashable, Sendable {

/// The model-generated probability that the content falls under the specified harm ``category``.
///
/// See ``HarmProbability`` for a list of possible values.
/// See ``HarmProbability`` for a list of possible values. This is a discretized representation
/// of the ``probabilityScore``.
///
/// > Important: This does not indicate the severity of harm for a piece of content.
public let probability: HarmProbability

/// The confidence score that the response is associated with the corresponding harm ``category``.
///
/// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
/// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
/// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
/// in the Google Cloud documentation for more details.
public let probabilityScore: Float

/// The severity reflects the magnitude of how harmful a model response might be.
///
/// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
/// the ``severityScore``.
public let severity: HarmSeverity

/// The severity score is the magnitude of how harmful a model response might be.
///
/// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
/// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
/// in the Google Cloud documentation for more details.
public let severityScore: Float

/// If true, the response was blocked.
public let blocked: Bool

/// Initializes a new `SafetyRating` instance with the given category and probability.
@@ -92,6 +109,7 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
}

/// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
enum Kind: String {
case negligible = "HARM_SEVERITY_NEGLIGIBLE"
@@ -100,12 +118,16 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
case high = "HARM_SEVERITY_HIGH"
}

/// Negligible level of harm severity.
public static let negligible = HarmSeverity(kind: .negligible)

/// Low level of harm severity.
public static let low = HarmSeverity(kind: .low)

/// Medium level of harm severity.
public static let medium = HarmSeverity(kind: .medium)

/// High level of harm severity.
public static let high = HarmSeverity(kind: .high)

/// Returns the raw string representation of the `HarmSeverity` value.
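For context on how the new fields surface in practice, here is a minimal sketch of reading them off a response, assuming a configured `GenerativeModel` named `model` and a placeholder prompt; the property names come from the diff above, but the call site itself is illustrative:

```swift
import FirebaseVertexAI

func inspectSafetyRatings(model: GenerativeModel) async throws {
  // Placeholder prompt; any generateContent call attaches safety
  // ratings to each returned candidate.
  let response = try await model.generateContent("Tell me a story.")
  for rating in response.candidates.first?.safetyRatings ?? [] {
    // `probability` and `severity` are the discretized buckets; the
    // *Score properties carry the underlying 0.0-1.0 values.
    print("\(rating.category): probability \(rating.probability)",
          "(score: \(rating.probabilityScore)), severity \(rating.severity)",
          "(score: \(rating.severityScore)), blocked: \(rating.blocked)")
  }
}
```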
5 changes: 4 additions & 1 deletion FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
@@ -885,8 +885,11 @@ final class GenerativeModelTests: XCTestCase {
for try await _ in stream {
XCTFail("Content shouldn't be shown, this shouldn't happen.")
}
} catch let GenerateContentError.responseStoppedEarly(reason, _) {
} catch let GenerateContentError.responseStoppedEarly(reason, response) {
XCTAssertEqual(reason, .safety)
let candidate = try XCTUnwrap(response.candidates.first)
XCTAssertEqual(candidate.finishReason, reason)
XCTAssertTrue(candidate.safetyRatings.contains { $0.blocked })
return
}

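In application code, the same error shape the test exercises can be used to report which categories caused a block. A hedged sketch, reusing the hypothetical `model` from the earlier example; the error case and the `blocked` flag come from the diff, while the reporting logic is illustrative:

```swift
func generateWithSafetyReporting(model: GenerativeModel, prompt: String) async {
  do {
    let response = try await model.generateContent(prompt)
    print(response.text ?? "(no text)")
  } catch let GenerateContentError.responseStoppedEarly(reason, response) {
    guard reason == .safety else {
      print("Generation stopped early: \(reason)")
      return
    }
    // Mirror the test's check: only report ratings that were actually
    // blocked, including their fine-grained severity scores.
    let blockedRatings = response.candidates.first?.safetyRatings.filter(\.blocked) ?? []
    for rating in blockedRatings {
      print("Blocked by \(rating.category), severity score \(rating.severityScore)")
    }
  } catch {
    print("Generation failed: \(error)")
  }
}
```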
