Commit 5dc2415

Remove unknown static var and refactor implementation

1 parent: 8cfaf2e

File tree

3 files changed: 58 additions & 14 deletions


FirebaseVertexAI/Sample/ChatSample/Views/ErrorDetailsView.swift

Lines changed: 7 additions & 1 deletion
@@ -23,7 +23,13 @@ extension HarmCategory: CustomStringConvertible {
     case .harassment: "Harassment"
     case .hateSpeech: "Hate speech"
     case .sexuallyExplicit: "Sexually explicit"
-    case .unknown: "Unknown"
+    case .civicIntegrity: "Civic integrity"
+    default:
+      if isUnknown() {
+        "Unknown HarmCategory: \(rawValue)"
+      } else {
+        "Unhandled HarmCategory: \(rawValue)"
+      }
     }
   }
 }
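
For reference, a minimal sketch of what the reworked description now produces. The raw value HARM_CATEGORY_SOMETHING_NEW is invented for illustration, and the sketch assumes both the extension above and a way to construct a category from a raw string are in scope (the rawValue initializer is internal to the SDK, so outside the module such a value would only arrive via decoding).

// Sketch only: HARM_CATEGORY_SOMETHING_NEW is a made-up raw value.
let civic: HarmCategory = .civicIntegrity
print(civic.description)         // "Civic integrity"

let unrecognized = HarmCategory(rawValue: "HARM_CATEGORY_SOMETHING_NEW")
print(unrecognized.description)  // "Unknown HarmCategory: HARM_CATEGORY_SOMETHING_NEW"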

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 49 additions & 11 deletions
@@ -97,21 +97,61 @@ public struct SafetySetting {
   }
 
   /// Categories describing the potential harm a piece of content may pose.
-public enum HarmCategory: String, Sendable {
-  /// Unknown. A new server value that isn't recognized by the SDK.
-  case unknown = "HARM_CATEGORY_UNKNOWN"
+public struct HarmCategory: Sendable, Equatable, Hashable {
+  enum Kind: String {
+    case harassment = "HARM_CATEGORY_HARASSMENT"
+    case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+    case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+    case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+    case civicIntegrity = "HARM_CATEGORY_CIVIC_INTEGRITY"
+  }
 
   /// Harassment content.
-  case harassment = "HARM_CATEGORY_HARASSMENT"
+  public static var harassment: HarmCategory {
+    return self.init(rawValue: Kind.harassment.rawValue)
+  }
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
-  case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
+  public static var hateSpeech: HarmCategory {
+    return self.init(rawValue: Kind.hateSpeech.rawValue)
+  }
 
   /// Contains references to sexual acts or other lewd content.
-  case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
+  public static var sexuallyExplicit: HarmCategory {
+    return self.init(rawValue: Kind.sexuallyExplicit.rawValue)
+  }
 
   /// Promotes or enables access to harmful goods, services, or activities.
-  case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
+  public static var dangerousContent: HarmCategory {
+    return self.init(rawValue: Kind.dangerousContent.rawValue)
+  }
+
+  /// Content related to civic integrity.
+  public static var civicIntegrity: HarmCategory {
+    return self.init(rawValue: Kind.civicIntegrity.rawValue)
+  }
+
+  /// Returns true if the HarmCategory's `rawValue` is unknown to the SDK.
+  ///
+  /// > Important: If an unknown value is encountered, check for updates to the SDK as support for
+  /// > the new value may have been added; see
+  /// > [Release Notes](https://firebase.google.com/support/release-notes/ios). Alternatively,
+  /// > search for the `rawValue` in the Firebase Apple SDK
+  /// > [Issue Tracker](https://github.com/firebase/firebase-ios-sdk/issues) and file a
+  /// > [Bug Report](https://github.com/firebase/firebase-ios-sdk/issues/new/choose) if none found.
+  public func isUnknown() -> Bool {
+    return Kind(rawValue: rawValue) == nil
+  }
+
+  /// Returns the raw string representation of the `HarmCategory` value.
+  ///
+  /// > Note: This value directly corresponds to the values in the
+  /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
+  public let rawValue: String
+
+  init(rawValue: String) {
+    self.rawValue = rawValue
+  }
 }
 
 // MARK: - Codable Conformances
@@ -140,15 +180,13 @@ extension SafetyRating: Decodable {}
 extension HarmCategory: Codable {
   public init(from decoder: Decoder) throws {
     let value = try decoder.singleValueContainer().decode(String.self)
-    guard let decodedCategory = HarmCategory(rawValue: value) else {
+    let decodedCategory = HarmCategory(rawValue: value)
+    if decodedCategory.isUnknown() {
       VertexLog.error(
         code: .generateContentResponseUnrecognizedHarmCategory,
         "Unrecognized HarmCategory with value \"\(value)\"."
       )
-      self = .unknown
-      return
     }
-
     self = decodedCategory
   }
 }
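
To make the decoding change concrete, a rough sketch of the round trip follows, assuming SDK-internal or test-target access; HARM_CATEGORY_NOT_YET_IN_SDK is a made-up value standing in for a future server-side category.

import Foundation

// Rough sketch: decode one known and one made-up category value.
// HARM_CATEGORY_NOT_YET_IN_SDK is not a real API value.
let json = Data(#"["HARM_CATEGORY_HARASSMENT", "HARM_CATEGORY_NOT_YET_IN_SDK"]"#.utf8)
let categories = try! JSONDecoder().decode([HarmCategory].self, from: json)

// Known raw values compare equal to the matching static property.
assert(categories[0] == .harassment)
assert(!categories[0].isUnknown())

// Unrecognized raw values no longer collapse into a catch-all `.unknown` case:
// they decode successfully with the raw string preserved, an error is logged
// via VertexLog, and isUnknown() reports true.
assert(categories[1].isUnknown())
assert(categories[1].rawValue == "HARM_CATEGORY_NOT_YET_IN_SDK")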

FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift

Lines changed: 2 additions & 2 deletions
@@ -163,7 +163,7 @@ final class GenerativeModelTests: XCTestCase {
     let expectedSafetyRatings = [
       SafetyRating(category: .harassment, probability: .medium),
       SafetyRating(category: .dangerousContent, probability: .unknown),
-      SafetyRating(category: .unknown, probability: .high),
+      SafetyRating(category: HarmCategory(rawValue: "FAKE_NEW_HARM_CATEGORY"), probability: .high),
     ]
     MockURLProtocol
       .requestHandler = try httpRequestHandler(
@@ -978,7 +978,7 @@ final class GenerativeModelTests: XCTestCase {
     for try await content in stream {
       XCTAssertNotNil(content.text)
       if let ratings = content.candidates.first?.safetyRatings,
-         ratings.contains(where: { $0.category == .unknown }) {
+         ratings.contains(where: { $0.category.isUnknown() }) {
         hadUnknown = true
       }
     }
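
Since there is no longer a dedicated `.unknown` case, the tests model a never-before-seen category with a placeholder raw value. A condensed sketch of that pattern (test-target access assumed; FAKE_NEW_HARM_CATEGORY is purely a fixture value):

// Condensed sketch of the updated test pattern. FAKE_NEW_HARM_CATEGORY is a
// fixture value only; real categories arrive from the backend via decoding.
let ratings = [
  SafetyRating(category: .harassment, probability: .medium),
  SafetyRating(category: HarmCategory(rawValue: "FAKE_NEW_HARM_CATEGORY"), probability: .high),
]

// Detect unrecognized categories with isUnknown() instead of comparing
// against a removed `.unknown` case.
let hadUnknown = ratings.contains { $0.category.isUnknown() }
assert(hadUnknown)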
