@@ -68,6 +68,11 @@ const volatile u32 khugepaged_pid; /* khugepaged PID */
 u64 usersched_last_run_at; /* Timestamp of the last user-space scheduler execution */
 static u64 nr_cpu_ids; /* Maximum possible CPU number */
 
+/*
+ * Default task time slice.
+ */
+const volatile u64 slice_ns;
+
 /*
  * Number of tasks that are queued for scheduling.
  *
@@ -587,7 +592,7 @@ s32 BPF_STRUCT_OPS(rustland_select_cpu, struct task_struct *p, s32 prev_cpu,
 	if (cpu >= 0) {
 		if (can_direct_dispatch(cpu)) {
 			scx_bpf_dsq_insert_vtime(p, cpu_to_dsq(cpu),
-						 SCX_SLICE_DFL, p->scx.dsq_vtime, 0);
+						 slice_ns, p->scx.dsq_vtime, 0);
 			__sync_fetch_and_add(&nr_kernel_dispatches, 1);
 		}
 		return cpu;
@@ -695,7 +700,7 @@ void BPF_STRUCT_OPS(rustland_enqueue, struct task_struct *p, u64 enq_flags)
 	 * scheduling action to do.
 	 */
 	if (is_usersched_task(p)) {
-		scx_bpf_dsq_insert(p, SCHED_DSQ, SCX_SLICE_DFL, enq_flags);
+		scx_bpf_dsq_insert(p, SCHED_DSQ, slice_ns, enq_flags);
 		goto out_kick;
 	}
 
@@ -708,7 +713,7 @@ void BPF_STRUCT_OPS(rustland_enqueue, struct task_struct *p, u64 enq_flags)
 	 */
 	if ((is_kthread(p) && p->nr_cpus_allowed == 1) || is_kswapd(p) || is_khugepaged(p)) {
 		scx_bpf_dsq_insert_vtime(p, cpu_to_dsq(prev_cpu),
-					 SCX_SLICE_DFL, p->scx.dsq_vtime, enq_flags);
+					 slice_ns, p->scx.dsq_vtime, enq_flags);
 		__sync_fetch_and_add(&nr_kernel_dispatches, 1);
 		goto out_kick;
 	}
@@ -738,7 +743,7 @@ void BPF_STRUCT_OPS(rustland_enqueue, struct task_struct *p, u64 enq_flags)
 
 	if (can_direct_dispatch(cpu)) {
 		scx_bpf_dsq_insert_vtime(p, cpu_to_dsq(cpu),
-					 SCX_SLICE_DFL, p->scx.dsq_vtime, enq_flags);
+					 slice_ns, p->scx.dsq_vtime, enq_flags);
 		__sync_fetch_and_add(&nr_kernel_dispatches, 1);
 		goto out_kick;
 	}
@@ -761,7 +766,7 @@ void BPF_STRUCT_OPS(rustland_enqueue, struct task_struct *p, u64 enq_flags)
 	if (!task) {
 		sched_congested(p);
 		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ,
-					 SCX_SLICE_DFL, p->scx.dsq_vtime, enq_flags);
+					 slice_ns, p->scx.dsq_vtime, enq_flags);
 		__sync_fetch_and_add(&nr_kernel_dispatches, 1);
 		goto out_kick;
 	}
@@ -845,7 +850,7 @@ void BPF_STRUCT_OPS(rustland_dispatch, s32 cpu, struct task_struct *prev)
 	 */
 	if (prev && is_queued(prev) &&
 	    (!is_usersched_task(prev) || usersched_has_pending_tasks()))
-		prev->scx.slice = SCX_SLICE_DFL;
+		prev->scx.slice = slice_ns;
 }
 
 void BPF_STRUCT_OPS(rustland_runnable, struct task_struct *p, u64 enq_flags)
@@ -922,7 +927,7 @@ void BPF_STRUCT_OPS(rustland_stopping, struct task_struct *p, bool runnable)
 void BPF_STRUCT_OPS(rustland_enable, struct task_struct *p)
 {
 	p->scx.dsq_vtime = 0;
-	p->scx.slice = SCX_SLICE_DFL;
+	p->scx.slice = slice_ns;
 }
 
 /*