@@ -63,6 +63,8 @@ const volatile u64 kprobe_delays_max_ns = 2;
 #define MIN(x, y) ((x) < (y) ? (x) : (y))
 #define MAX(x, y) ((x) > (y) ? (x) : (y))
 
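+// Sentinel returned by delay_dsq_next_time() when a delay DSQ is empty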
+#define U64_MAX ((u64)~0ULL)
+
 enum chaos_timer_callbacks {
 	CHAOS_TIMER_CHECK_QUEUES,
 	CHAOS_MAX_TIMERS,
@@ -143,6 +145,33 @@ static __always_inline void chaos_stat_inc(enum chaos_stat_idx stat)
 	(*cnt_p)++;
 }
 
+/*
+ * Get the next time a delay DSQ needs processing.
+ *
+ * Safe for delay DSQs which use monotonic time (vtimes won't wrap to U64_MAX).
+ * Must be called with RCU read lock held.
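+ *
+ * Returns 0 when the caller must always iterate (no native peek support),
+ * U64_MAX when the DSQ is empty, or the dsq_vtime of the first queued task.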
+ */
+static __always_inline u64 delay_dsq_next_time(u64 dsq_id)
+{
+	struct task_struct *first_p;
+	u64 vtime;
+
+	// If we don't have native peek, fall back to always iterating
+	if (!bpf_ksym_exists(scx_bpf_dsq_peek)) {
+		chaos_stat_inc(CHAOS_STAT_PEEK_NEEDS_PROCESSING);
+		return 0;
+	}
+
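+	// Peek at the head of the DSQ without consuming it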
+	first_p = scx_bpf_dsq_peek(dsq_id);
+	if (!first_p) {
+		chaos_stat_inc(CHAOS_STAT_PEEK_EMPTY_DSQ);
+		return U64_MAX;
+	}
+
+	vtime = first_p->scx.dsq_vtime;
+	return vtime;
+}
+
 static __always_inline enum chaos_trait_kind
 choose_chaos(struct chaos_task_ctx *taskc)
 {
@@ -362,9 +391,25 @@ __weak u64 check_dsq_times(int cpu_idx)
 	u64 next_trigger_time = 0;
 	u64 now = bpf_ktime_get_ns();
 	bool has_kicked = false;
+	u64 dsq_id = get_cpu_delay_dsq(cpu_idx);
 
 	bpf_rcu_read_lock();
-	bpf_for_each(scx_dsq, p, get_cpu_delay_dsq(cpu_idx), 0) {
+
+	next_trigger_time = delay_dsq_next_time(dsq_id);
+	if (next_trigger_time > now + chaos_timer_check_queues_slack_ns) {
+		chaos_stat_inc(CHAOS_STAT_PEEK_NOT_READY);
+		// DSQ empty (U64_MAX) or first task beyond slack window
+		bpf_rcu_read_unlock();
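+		// An empty DSQ (U64_MAX) has no deadline to report, so return 0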
+		return next_trigger_time == U64_MAX ? 0 : next_trigger_time;
+	}
+
+	chaos_stat_inc(CHAOS_STAT_PEEK_NEEDS_PROCESSING);
+
+	// Need to iterate: either there's no peek support (0), or the first
+	// task is ready or within the slack window
+	next_trigger_time = 0;
+
+	bpf_for_each(scx_dsq, p, dsq_id, 0) {
 		p = bpf_task_from_pid(p->pid);
 		if (!p)
 			break;
@@ -387,8 +432,8 @@ __weak u64 check_dsq_times(int cpu_idx)
 		if (next_trigger_time > now + chaos_timer_check_queues_slack_ns)
 			break;
 	}
-	bpf_rcu_read_unlock();
 
+	bpf_rcu_read_unlock();
 	return next_trigger_time;
 }
 
@@ -531,9 +576,17 @@ void BPF_STRUCT_OPS(chaos_dispatch, s32 cpu, struct task_struct *prev)
 	struct enqueue_promise promise;
 	struct chaos_task_ctx *taskc;
 	struct task_struct *p;
-	u64 now = bpf_ktime_get_ns();
+	u64 now = bpf_ktime_get_ns();
+	u64 dsq_id = get_cpu_delay_dsq(-1);
+
+	// Check if we need to process the delay DSQ
+	if (delay_dsq_next_time(dsq_id) > now) {
+		chaos_stat_inc(CHAOS_STAT_PEEK_NOT_READY);
+		goto p2dq;
+	}
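+	// delay_dsq_next_time() returns 0 when peek is unsupported, so on
+	// older kernels we still fall through and iterate below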
+	chaos_stat_inc(CHAOS_STAT_PEEK_NEEDS_PROCESSING);
 
-	bpf_for_each(scx_dsq, p, get_cpu_delay_dsq(-1), 0) {
+	bpf_for_each(scx_dsq, p, dsq_id, 0) {
 		p = bpf_task_from_pid(p->pid);
 		if (!p)
 			continue;
@@ -557,6 +610,7 @@ void BPF_STRUCT_OPS(chaos_dispatch, s32 cpu, struct task_struct *prev)
 		bpf_task_release(p);
 	}
 
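+// Delay-DSQ work done or skipped; fall through to the underlying p2dq dispatch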
+p2dq:
 	return p2dq_dispatch_impl(cpu, prev);
 }
 
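A minimal caller-side sketch of the pattern this change introduces (the helper,
U64_MAX, and the stat counters are from the patch; `dsq_id`, `now`, and the
re-arm step are illustrative assumptions):

	u64 next = delay_dsq_next_time(dsq_id);	/* RCU read lock held */

	if (next == U64_MAX) {
		/* DSQ empty: nothing to dispatch, no deadline to re-arm */
	} else if (next > now) {
		/* head task not yet due: re-check at `next` */
	} else {
		/* 0 (no peek support) or head task due: iterate the DSQ */
	}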