@@ -4,22 +4,27 @@ import UIKit
 #endif
 
 final class RunLoopObserver {
+
+    static let SentryANRMechanismDataAppHangDuration = "app_hang_duration"
 
     private let dateProvider: SentryCurrentDateProvider
     private let threadInspector: ThreadInspector
     private let debugImageCache: DebugImageCache
     private let fileManager: SentryFileManager
+    private let crashWrapper: CrashWrapper
 
     init(
         dateProvider: SentryCurrentDateProvider,
         threadInspector: ThreadInspector,
         debugImageCache: DebugImageCache,
         fileManager: SentryFileManager,
+        crashWrapper: CrashWrapper,
         minHangTime: TimeInterval) {
         self.dateProvider = dateProvider
         self.threadInspector = threadInspector
         self.debugImageCache = debugImageCache
         self.fileManager = fileManager
+        self.crashWrapper = crashWrapper
         self.lastFrameTime = 0
         self.minHangTime = minHangTime
 #if canImport(UIKit) && !SENTRY_NO_UIKIT
@@ -35,7 +40,7 @@ final class RunLoopObserver {
 #endif
         expectedFrameDuration = 1.0 / maxFPS
         thresholdForFrameStacktrace = expectedFrameDuration * 0.5
-        // TODO: Check for stored app hang
+        captureStoredAppHang()
     }
 
     // This queue is used to detect main thread hangs, they need to be detected on a background thread
@@ -131,6 +136,43 @@ final class RunLoopObserver {
         return currentTime
     }
 
+    func captureStoredAppHang() {
+        DispatchQueue.global(qos: .background).async { [weak self] in
+            guard let self, let event = fileManager.readAppHangEvent() else { return }
+
+            fileManager.deleteAppHangEvent()
+            if crashWrapper.crashedLastLaunch {
+                // The app crashed during an ongoing app hang. Capture the stored app hang as it is.
+                // We already applied the scope. We use an empty scope to avoid overwriting existing
+                // fields on the event.
+                SentrySDK.capture(event: event, scope: Scope())
+            } else {
+                // Fatal App Hang
+                // We can't tell whether the watchdog or the user terminated the app, because when the
+                // main thread is blocked we don't receive the applicationWillTerminate notification.
+                // Further investigation is required to validate whether we can somehow differentiate
+                // between watchdog and user terminations; see https://github.com/getsentry/sentry-cocoa/issues/4845.
+                guard let exceptions = event.exceptions, let exception = exceptions.first, exceptions.count == 1 else {
+                    SentrySDKLog.warning("The stored app hang event is expected to have exactly one exception, so we don't capture it.")
+                    return
+                }
+
+                SentryLevelBridge.setBreadcrumbLevelOn(event, level: SentryLevel.fatal.rawValue)
+                event.exceptions?.first?.mechanism?.handled = false
+                let fatalExceptionType = SentryAppHangTypeMapper.getFatalExceptionType(nonFatalErrorType: exception.type)
+                event.exceptions?.first?.type = fatalExceptionType
+
+                var mechanismData = exception.mechanism?.data
+                let durationInfo = mechanismData?[Self.SentryANRMechanismDataAppHangDuration] as? String ?? "over \(minHangTime) seconds"
+                mechanismData?.removeValue(forKey: Self.SentryANRMechanismDataAppHangDuration)
+                event.exceptions?.first?.value = "The user or the OS watchdog terminated your app while it blocked the main thread for \(durationInfo)"
+                event.exceptions?.first?.mechanism?.data = mechanismData
+                SentryDependencyContainerSwiftHelper.captureFatalAppHang(event)
+            }
+        }
+    }
+
     // MARK: Background queue
 
     private var blockingDuration: TimeInterval?
@@ -151,20 +193,7 @@ final class RunLoopObserver {
             break
         }
     }
-
-    // TODO: Only write hang if it's long enough
-    // TODO: Need to clear hang details after the hang ends
-    // Problem: If we are detecting a multiple runloop hang, which then turns into a single long hang
-    // we might want to add the total time of that long hang to what is on disk from the multiple runloop hang
-    // Or we could not do that and just say we only overwrite what is on disk if the hang exceeds the time
-    // of the multiple runloop hang.
-    // Could have two paths, fullyBlocking only used when the semaphore times out, we keep tracking in memory until
-    // it exceeds the threshold then we write to disk.
-    // Non fully blocking only writes when the runloop finishes if it exceeds the threshold.
-    // Sampled stacktrace should be kept separate from time, because time for nonFullyBlocking is kep on main thread
-    // time for fullyBlocking is kept on background thread
-
-    // TODO: Not using should sample
+
     private func continueHang(started: TimeInterval, isStarting: Bool) {
         dispatchPrecondition(condition: .onQueue(queue))
 
@@ -186,11 +215,15 @@ final class RunLoopObserver {
 
     // Safe to call from any thread
     private func makeEvent(duration: TimeInterval, threads: [SentryThread], type: SentryANRType) -> Event {
-        var event = Event()
+        let event = Event()
         SentryLevelBridge.setBreadcrumbLevelOn(event, level: SentryLevel.error.rawValue)
         let exceptionType = SentryAppHangTypeMapper.getExceptionType(anrType: type)
         let exception = Exception(value: String(format: "App hanging for %.3f seconds.", duration), type: exceptionType)
         let mechanism = Mechanism(type: "AppHang")
+        // We only temporarily store the app hang duration info, so we can change the error message
+        // when sending either a normal or a fatal app hang event. Otherwise, we would have to rely on
+        // string parsing to retrieve the app hang duration info from the error message.
+        mechanism.data = [Self.SentryANRMechanismDataAppHangDuration: "\(duration) seconds"]
         exception.mechanism = mechanism
         exception.stacktrace = threads[0].stacktrace
         exception.stacktrace?.snapshot = true
@@ -212,12 +245,14 @@ final class RunLoopObserver {
         dateProvider: SentryCurrentDateProvider,
         threadInspector: ThreadInspector,
         debugImageCache: DebugImageCache,
-        fileManager: SentryFileManager) {
+        fileManager: SentryFileManager,
+        crashWrapper: CrashWrapper) {
         observer = RunLoopObserver(
             dateProvider: dateProvider,
             threadInspector: threadInspector,
             debugImageCache: debugImageCache,
             fileManager: fileManager,
+            crashWrapper: crashWrapper,
             minHangTime: 2)
     }
 
@@ -233,3 +268,7 @@ final class RunLoopObserver {
 @objc @_spi(Private) public protocol DebugImageCache {
     func getDebugImagesFromCacheFor(threads: [SentryThread]?) -> [DebugMeta]
 }
+
+@objc @_spi(Private) public protocol CrashWrapper {
+    var crashedLastLaunch: Bool { get }
+}
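
The duration handoff between `makeEvent` and `captureStoredAppHang` is easy to miss in the diff: the non-fatal event stores a human-readable duration under the `app_hang_duration` mechanism-data key, and the fatal path later reads and removes that key to build its message, falling back to "over \(minHangTime) seconds" when the key is missing. Below is a minimal standalone sketch of that store-then-consume pattern; the helper names `buildMechanismData` and `fatalMessage` are illustrative and not part of the SDK.

```swift
import Foundation

// Illustrative only: mirrors the store-then-consume pattern above with plain Swift types.
let appHangDurationKey = "app_hang_duration"

// When the non-fatal hang event is created, keep the duration in mechanism data.
func buildMechanismData(duration: TimeInterval) -> [String: Any] {
    [appHangDurationKey: "\(duration) seconds"]
}

// When the stored event is upgraded to a fatal hang, pop the key and build the message.
func fatalMessage(mechanismData: inout [String: Any], minHangTime: TimeInterval) -> String {
    let durationInfo = mechanismData[appHangDurationKey] as? String ?? "over \(minHangTime) seconds"
    mechanismData.removeValue(forKey: appHangDurationKey)
    return "The user or the OS watchdog terminated your app while it blocked the main thread for \(durationInfo)"
}

// Example: a 3.2-second hang recorded on one launch becomes the fatal message on the next.
var data = buildMechanismData(duration: 3.2)
print(fatalMessage(mechanismData: &data, minHangTime: 2))
```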
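Because `crashedLastLaunch` now sits behind the small `CrashWrapper` protocol instead of a concrete crash reporter, the crash-versus-fatal-hang branch in `captureStoredAppHang` can be driven from unit tests. A sketch of a test double follows; `TestCrashWrapper` is hypothetical and not part of this change, and in a test target it would presumably require `@_spi(Private) import Sentry` to see the protocol.

```swift
// Hypothetical test double for the CrashWrapper protocol introduced above.
final class TestCrashWrapper: NSObject, CrashWrapper {
    // Toggle to drive the two branches: true  -> the stored hang is captured as-is,
    //                                   false -> the stored event is upgraded to a fatal app hang.
    var crashedLastLaunch = false
}
```

Passing such a double through the new `crashWrapper` initializer parameter lets a test force either path without touching the real crash reporter.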