From 5dc2415cc160947fea0df9456b8a232d828da4e7 Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Mon, 7 Oct 2024 20:50:21 -0400
Subject: [PATCH 1/5] Remove `unknown` static var and refactor implementation

---
 .../ChatSample/Views/ErrorDetailsView.swift   |  8 ++-
 FirebaseVertexAI/Sources/Safety.swift         | 60 +++++++++++++++----
 .../Tests/Unit/GenerativeModelTests.swift     |  4 +-
 3 files changed, 58 insertions(+), 14 deletions(-)

diff --git a/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift b/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift
index 4bc18345cfb..cbf52fdfd8b 100644
--- a/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift
+++ b/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift
@@ -23,7 +23,13 @@ extension HarmCategory: CustomStringConvertible {
     case .harassment: "Harassment"
     case .hateSpeech: "Hate speech"
     case .sexuallyExplicit: "Sexually explicit"
-    case .unknown: "Unknown"
+    case .civicIntegrity: "Civic integrity"
+    default:
+      if isUnknown() {
+        "Unknown HarmCategory: \(rawValue)"
+      } else {
+        "Unhandled HarmCategory: \(rawValue)"
+      }
     }
   }
 }
diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index a57900e7317..ca058a68413 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -97,21 +97,61 @@ public struct SafetySetting {
 }
 
 /// Categories describing the potential harm a piece of content may pose.
-public enum HarmCategory: String, Sendable {
-  /// Unknown. A new server value that isn't recognized by the SDK.
-  case unknown = "HARM_CATEGORY_UNKNOWN"
+public struct HarmCategory: Sendable, Equatable, Hashable {
+  enum Kind: String {
+    case harassment = "HARM_CATEGORY_HARASSMENT"
+    case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+    case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+    case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+    case civicIntegrity = "HARM_CATEGORY_CIVIC_INTEGRITY"
+  }
 
   /// Harassment content.
-  case harassment = "HARM_CATEGORY_HARASSMENT"
+  public static var harassment: HarmCategory {
+    return self.init(rawValue: Kind.harassment.rawValue)
+  }
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
-  case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+  public static var hateSpeech: HarmCategory {
+    return self.init(rawValue: Kind.hateSpeech.rawValue)
+  }
 
   /// Contains references to sexual acts or other lewd content.
-  case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+  public static var sexuallyExplicit: HarmCategory {
+    return self.init(rawValue: Kind.sexuallyExplicit.rawValue)
+  }
 
   /// Promotes or enables access to harmful goods, services, or activities.
-  case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+  public static var dangerousContent: HarmCategory {
+    return self.init(rawValue: Kind.dangerousContent.rawValue)
+  }
+
+  /// Content related to civic integrity.
+  public static var civicIntegrity: HarmCategory {
+    return self.init(rawValue: Kind.civicIntegrity.rawValue)
+  }
+
+  /// Returns true if the HarmCategory's `rawValue` is unknown to the SDK.
+  ///
+  /// > Important: If an unknown value is encountered, check for updates to the SDK as support for
+  /// > the new value may have been added; see
+  /// > [Release Notes](https://firebase.google.com/support/release-notes/ios). Alternatively,
+  /// > search for the `rawValue` in the Firebase Apple SDK
+  /// > [Issue Tracker](https://github.com/firebase/firebase-ios-sdk/issues) and file a
+  /// > [Bug Report](https://github.com/firebase/firebase-ios-sdk/issues/new/choose) if none found.
+  public func isUnknown() -> Bool {
+    return Kind(rawValue: rawValue) == nil
+  }
+
+  /// Returns the raw string representation of the `HarmCategory` value.
+  ///
+  /// > Note: This value directly corresponds to the values in the
+  /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
+  public let rawValue: String
+
+  init(rawValue: String) {
+    self.rawValue = rawValue
+  }
 }
 
 // MARK: - Codable Conformances
@@ -140,15 +180,13 @@ extension SafetyRating: Decodable {}
 extension HarmCategory: Codable {
   public init(from decoder: Decoder) throws {
     let value = try decoder.singleValueContainer().decode(String.self)
-    guard let decodedCategory = HarmCategory(rawValue: value) else {
+    let decodedCategory = HarmCategory(rawValue: value)
+    if decodedCategory.isUnknown() {
       VertexLog.error(
         code: .generateContentResponseUnrecognizedHarmCategory,
         "Unrecognized HarmCategory with value \"\(value)\"."
       )
-      self = .unknown
-      return
     }
-
     self = decodedCategory
   }
 }
diff --git a/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift b/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
index 86d3e7f9c11..4921aa2823c 100644
--- a/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
+++ b/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
@@ -163,7 +163,7 @@ final class GenerativeModelTests: XCTestCase {
     let expectedSafetyRatings = [
       SafetyRating(category: .harassment, probability: .medium),
       SafetyRating(category: .dangerousContent, probability: .unknown),
-      SafetyRating(category: .unknown, probability: .high),
+      SafetyRating(category: HarmCategory(rawValue: "FAKE_NEW_HARM_CATEGORY"), probability: .high),
     ]
     MockURLProtocol
       .requestHandler = try httpRequestHandler(
@@ -978,7 +978,7 @@ final class GenerativeModelTests: XCTestCase {
     for try await content in stream {
       XCTAssertNotNil(content.text)
       if let ratings = content.candidates.first?.safetyRatings,
-         ratings.contains(where: { $0.category == .unknown }) {
+         ratings.contains(where: { $0.category.isUnknown() }) {
         hadUnknown = true
      }
     }
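
Patch 1 replaces the lossy `unknown` case with a struct that carries the server's raw string. A minimal sketch of the resulting behavior (illustrative, not part of the patch; it assumes module-internal access, as in the unit tests, since `init(rawValue:)` is not public at this stage, and reuses the placeholder raw value from the updated test):

```swift
// Illustrative only: exercise the patch-1 API from inside the module.
let newCategory = HarmCategory(rawValue: "FAKE_NEW_HARM_CATEGORY")
print(newCategory.isUnknown()) // true: no `Kind` case has this raw value
print(HarmCategory.harassment.isUnknown()) // false: matches `Kind.harassment`

// The synthesized Equatable/Hashable conformances compare by `rawValue`,
// so the static values still behave like the old enum cases in collections.
let flagged: Set<HarmCategory> = [.harassment, .hateSpeech]
print(flagged.contains(.harassment)) // true
```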
From 4d35ba2d56da0c72e6c5cbf4478b1017f5e955e8 Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Mon, 7 Oct 2024 20:57:57 -0400
Subject: [PATCH 2/5] Add changelog entry

---
 FirebaseVertexAI/CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/FirebaseVertexAI/CHANGELOG.md b/FirebaseVertexAI/CHANGELOG.md
index 1332b53f7e1..231206d1e38 100644
--- a/FirebaseVertexAI/CHANGELOG.md
+++ b/FirebaseVertexAI/CHANGELOG.md
@@ -36,6 +36,9 @@
     as input. (#13767)
 - [changed] **Breaking Change**: All initializers for `ModelContent` now
   require the label `parts: `. (#13832)
+- [changed] **Breaking Change**: `HarmCategory` is now a struct instead of an
+  enum type and the `unknown` case has been removed; use the `isUnknown()`
+  function to check if a `HarmCategory` value is unknown. (#13728)
 - [changed] The default request timeout is now 180 seconds instead of the
   platform-default value of 60 seconds for a `URLRequest`; this timeout may
   still be customized in `RequestOptions`. (#13722)
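
For SDK users, the changelog entry amounts to a migration like the following sketch, where `rating` is a hypothetical `SafetyRating` from a response, not code from this patch:

```swift
// Hypothetical migration sketch; `rating` stands in for any `SafetyRating`.
// Before: an exhaustive switch could match `case .unknown`.
// After: the struct has no `unknown` value, so query the category directly.
if rating.category.isUnknown() {
  print("Unrecognized HarmCategory from the server: \(rating.category.rawValue)")
}
```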
From 584cfe3bcfd728dd63f0755628ba236f90ff84cc Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Mon, 7 Oct 2024 21:18:33 -0400
Subject: [PATCH 3/5] Updated civic integrity comment

---
 FirebaseVertexAI/Sources/Safety.swift | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index ca058a68413..3c301b54b4f 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -126,7 +126,7 @@ public struct HarmCategory: Sendable, Equatable, Hashable {
     return self.init(rawValue: Kind.dangerousContent.rawValue)
   }
 
-  /// Content related to civic integrity.
+  /// Content that may be used to harm civic integrity.
   public static var civicIntegrity: HarmCategory {
     return self.init(rawValue: Kind.civicIntegrity.rawValue)
   }
From fb9bc245c5c4919bdb4fb91e714b241362aaa5cb Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Tue, 8 Oct 2024 18:03:21 -0400
Subject: [PATCH 4/5] Remove `isUnknown()` and update log messages

---
 FirebaseVertexAI/CHANGELOG.md                 |  4 +-
 .../ChatSample/Views/ErrorDetailsView.swift   |  6 +--
 FirebaseVertexAI/Sources/Safety.swift         | 49 +++++++++----------
 3 files changed, 26 insertions(+), 33 deletions(-)

diff --git a/FirebaseVertexAI/CHANGELOG.md b/FirebaseVertexAI/CHANGELOG.md
index 231206d1e38..3a194a4a8b4 100644
--- a/FirebaseVertexAI/CHANGELOG.md
+++ b/FirebaseVertexAI/CHANGELOG.md
@@ -37,8 +37,8 @@
 - [changed] **Breaking Change**: All initializers for `ModelContent` now
   require the label `parts: `. (#13832)
 - [changed] **Breaking Change**: `HarmCategory` is now a struct instead of an
-  enum type and the `unknown` case has been removed; use the `isUnknown()`
-  function to check if a `HarmCategory` value is unknown. (#13728)
+  enum type and the `unknown` case has been removed; in a `switch` statement,
+  use the `default:` case to cover unknown or unhandled categories. (#13728)
 - [changed] The default request timeout is now 180 seconds instead of the
   platform-default value of 60 seconds for a `URLRequest`; this timeout may
   still be customized in `RequestOptions`. (#13722)
diff --git a/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift b/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift
index cbf52fdfd8b..ff6807a84b4 100644
--- a/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift
+++ b/FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift
@@ -25,11 +25,7 @@ extension HarmCategory: CustomStringConvertible {
     case .sexuallyExplicit: "Sexually explicit"
     case .civicIntegrity: "Civic integrity"
     default:
-      if isUnknown() {
-        "Unknown HarmCategory: \(rawValue)"
-      } else {
-        "Unhandled HarmCategory: \(rawValue)"
-      }
+      "Unknown HarmCategory: \(rawValue)"
     }
   }
 }
diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index 3c301b54b4f..3f6ce4658c1 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -108,39 +108,27 @@ public struct HarmCategory: Sendable, Equatable, Hashable {
 
   /// Harassment content.
   public static var harassment: HarmCategory {
-    return self.init(rawValue: Kind.harassment.rawValue)
+    return self.init(kind: .harassment)
   }
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
   public static var hateSpeech: HarmCategory {
-    return self.init(rawValue: Kind.hateSpeech.rawValue)
+    return self.init(kind: .hateSpeech)
   }
 
   /// Contains references to sexual acts or other lewd content.
   public static var sexuallyExplicit: HarmCategory {
-    return self.init(rawValue: Kind.sexuallyExplicit.rawValue)
+    return self.init(kind: .sexuallyExplicit)
   }
 
   /// Promotes or enables access to harmful goods, services, or activities.
   public static var dangerousContent: HarmCategory {
-    return self.init(rawValue: Kind.dangerousContent.rawValue)
+    return self.init(kind: .dangerousContent)
   }
 
   /// Content that may be used to harm civic integrity.
   public static var civicIntegrity: HarmCategory {
-    return self.init(rawValue: Kind.civicIntegrity.rawValue)
-  }
-
-  /// Returns true if the HarmCategory's `rawValue` is unknown to the SDK.
-  ///
-  /// > Important: If an unknown value is encountered, check for updates to the SDK as support for
-  /// > the new value may have been added; see
-  /// > [Release Notes](https://firebase.google.com/support/release-notes/ios). Alternatively,
-  /// > search for the `rawValue` in the Firebase Apple SDK
-  /// > [Issue Tracker](https://github.com/firebase/firebase-ios-sdk/issues) and file a
-  /// > [Bug Report](https://github.com/firebase/firebase-ios-sdk/issues/new/choose) if none found.
-  public func isUnknown() -> Bool {
-    return Kind(rawValue: rawValue) == nil
+    return self.init(kind: .civicIntegrity)
   }
 
   /// Returns the raw string representation of the `HarmCategory` value.
@@ -149,7 +137,23 @@ public struct HarmCategory: Sendable, Equatable, Hashable {
   /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
   public let rawValue: String
 
+  init(kind: Kind) {
+    rawValue = kind.rawValue
+  }
+
   init(rawValue: String) {
+    if Kind(rawValue: rawValue) == nil {
+      VertexLog.error(
+        code: .generateContentResponseUnrecognizedHarmCategory,
+        """
+        Unrecognized HarmCategory with value "\(rawValue)":
+        - Check for updates to the SDK as support for "\(rawValue)" may have been added; see \
+        release notes at https://firebase.google.com/support/release-notes/ios
+        - Search for "\(rawValue)" in the Firebase Apple SDK Issue Tracker at \
+        https://github.com/firebase/firebase-ios-sdk/issues and file a Bug Report if none found
+        """
+      )
+    }
     self.rawValue = rawValue
   }
 }
@@ -179,15 +183,8 @@ extension SafetyRating: Decodable {}
 
 @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 extension HarmCategory: Codable {
   public init(from decoder: Decoder) throws {
-    let value = try decoder.singleValueContainer().decode(String.self)
-    let decodedCategory = HarmCategory(rawValue: value)
-    if decodedCategory.isUnknown() {
-      VertexLog.error(
-        code: .generateContentResponseUnrecognizedHarmCategory,
-        "Unrecognized HarmCategory with value \"\(value)\"."
-      )
-    }
-    self = decodedCategory
+    let rawValue = try decoder.singleValueContainer().decode(String.self)
+    self = HarmCategory(rawValue: rawValue)
   }
 }
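
With `isUnknown()` removed in patch 4, call sites cover unrecognized values with a `default:` case when switching, as the sample app's `description` now does. A sketch assuming a hypothetical `category` value; matching `case .harassment` against a struct works because the expression pattern falls back to `==` for `Equatable` types:

```swift
// Sketch of the call-site pattern after this patch; `category` is a
// hypothetical `HarmCategory` taken from a response's safety ratings.
let label = switch category {
case .harassment: "Harassment"
case .dangerousContent: "Dangerous content"
default: "Unknown HarmCategory: \(category.rawValue)"
}
```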
From f29ac0166ca04b645aada4e7e54e63fc21d07a55 Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Tue, 8 Oct 2024 18:18:29 -0400
Subject: [PATCH 5/5] Fix `testGenerateContentStream_successUnknownSafetyEnum`

---
 .../Tests/Unit/GenerativeModelTests.swift | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift b/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
index 4921aa2823c..254f81e96fb 100644
--- a/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
+++ b/FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift
@@ -972,18 +972,22 @@ final class GenerativeModelTests: XCTestCase {
       forResource: "streaming-success-unknown-safety-enum",
       withExtension: "txt"
     )
+    let unknownSafetyRating = SafetyRating(
+      category: HarmCategory(rawValue: "HARM_CATEGORY_DANGEROUS_CONTENT_NEW_ENUM"),
+      probability: .unknown
+    )
 
-    var hadUnknown = false
+    var foundUnknownSafetyRating = false
     let stream = try model.generateContentStream("Hi")
     for try await content in stream {
       XCTAssertNotNil(content.text)
       if let ratings = content.candidates.first?.safetyRatings,
-         ratings.contains(where: { $0.category.isUnknown() }) {
-        hadUnknown = true
+         ratings.contains(where: { $0 == unknownSafetyRating }) {
+        foundUnknownSafetyRating = true
       }
     }
 
-    XCTAssertTrue(hadUnknown)
+    XCTAssertTrue(foundUnknownSafetyRating)
   }
 
   func testGenerateContentStream_successWithCitations() async throws {
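
End to end, the series makes decoding lossless: an unrecognized server value is preserved in `rawValue` (and logged once, from `init(rawValue:)`) instead of being collapsed into a catch-all case. A sketch of the observable behavior, using only the public `Codable` surface and the raw value from the streaming test above:

```swift
import Foundation

// Illustrative decode of a bare JSON string into a `HarmCategory`.
let payload = Data(#""HARM_CATEGORY_DANGEROUS_CONTENT_NEW_ENUM""#.utf8)
let category = try! JSONDecoder().decode(HarmCategory.self, from: payload)

assert(category.rawValue == "HARM_CATEGORY_DANGEROUS_CONTENT_NEW_ENUM")
assert(category != .dangerousContent) // preserved verbatim, not coerced
```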