/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, true)
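
/*
 * A sketch of where the two flags above take effect: place_entity()
 * in fair.c (paraphrased; details vary between kernel versions)
 * debits a new task by one vslice and halves the sleeper credit:
 *
 *	static void place_entity(struct cfs_rq *cfs_rq,
 *				 struct sched_entity *se, int initial)
 *	{
 *		u64 vruntime = cfs_rq->min_vruntime;
 *
 *		if (initial && sched_feat(START_DEBIT))
 *			vruntime += sched_vslice(cfs_rq, se);
 *
 *		if (!initial) {
 *			unsigned long thresh = sysctl_sched_latency;
 *
 *			if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *				thresh >>= 1;
 *
 *			vruntime -= thresh;
 *		}
 *		...
 *	}
 */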

/*
 * Based on load and program behaviour, see if it makes sense to place
 * a newly woken task on the same cpu as the task that woke it --
 * improve cache locality. Typically used with SYNC wakeups as
 * generated by pipes and the like, see also SYNC_WAKEUPS.
 */
SCHED_FEAT(AFFINE_WAKEUPS, true)
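
/*
 * A sketch of the consumer, paraphrased from select_task_rq_fair()
 * in kernels of roughly this vintage (the exact wiring varies by
 * version): the flag gates whether an affine wakeup is considered at
 * all; wake_affine() then decides, based on load, whether the
 * waker's cpu beats prev_cpu:
 *
 *	if (sd_flag & SD_BALANCE_WAKE) {
 *		if (sched_feat(AFFINE_WAKEUPS) &&
 *		    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 *			want_affine = 1;
 *		new_cpu = prev_cpu;
 *	}
 */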

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it is likely to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; this
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
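
/*
 * A sketch of where NEXT_BUDDY and LAST_BUDDY are set, paraphrased
 * from check_preempt_wakeup() in fair.c; pick_next_entity() later
 * prefers a set buddy over the leftmost task when that is not too
 * unfair:
 *
 *	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
 *		set_next_buddy(pse);		(the freshly woken task)
 *	...
 *	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
 *		set_last_buddy(se);		(the preempted task)
 */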

/*
 * Consider buddies to be cache hot; this decreases the likelihood of
 * a cache buddy being migrated away and increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
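
/*
 * A sketch of the consumer (task_hot(), paraphrased): buddy
 * candidates are simply declared cache hot, so the load balancer is
 * reluctant to migrate them:
 *
 *	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 *	    (&p->se == cfs_rq_of(&p->se)->next ||
 *	     &p->se == cfs_rq_of(&p->se)->last))
 *		return 1;
 */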

/*
 * Use arch-dependent cpu power functions.
 */
SCHED_FEAT(ARCH_POWER, false)

/* Deliver precise slice expiry via high-resolution timers. */
SCHED_FEAT(HRTICK, false)
/* Run the periodic tick even while the hrtick timer is active. */
SCHED_FEAT(DOUBLE_TICK, false)
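
/*
 * A sketch of the interaction between the two flags above,
 * paraphrased from entity_tick() in fair.c: while the hrtick timer
 * is armed, the periodic tick stands down unless DOUBLE_TICK asks
 * for both:
 *
 *	if (!sched_feat(DOUBLE_TICK) &&
 *	    hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
 *		return;
 */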

/* Bias load-balance load estimates with historic cpu_load values. */
SCHED_FEAT(LB_BIAS, true)

/*
 * Spin-wait on mutex acquisition when the mutex owner is running on
 * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
 */
SCHED_FEAT(OWNER_SPIN, true)
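
/*
 * A sketch of the spin loop, paraphrased from mutex_spin_on_owner()
 * in kernels of this vintage: give up as soon as the owner stops
 * running or we ought to reschedule ourselves:
 *
 *	if (!sched_feat(OWNER_SPIN))
 *		return 0;
 *
 *	while (owner_running(lock, owner)) {
 *		if (need_resched())
 *			return 0;
 *		arch_mutex_cpu_relax();
 *	}
 */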

/*
 * Decrement CPU power based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_POWER, true)

#ifndef CONFIG_PREEMPT_RT_FULL
/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
#else
SCHED_FEAT(TTWU_QUEUE, false)
#endif
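
/*
 * A sketch of the consumer, paraphrased from ttwu_queue() in core.c
 * (helper names vary by version): wakeups aimed at a cpu that shares
 * no cache with the waker are queued and completed from the
 * scheduler IPI rather than by grabbing the remote rq->lock:
 *
 *	if (sched_feat(TTWU_QUEUE) &&
 *	    !cpus_share_cache(smp_processor_id(), cpu)) {
 *		ttwu_queue_remote(p, cpu);
 *		return;
 *	}
 */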

/* Force the SD_OVERLAP domain flag; exercises the overlapping-group paths. */
SCHED_FEAT(FORCE_SD_OVERLAP, false)

/* Let RT runqueues borrow unused rt_runtime from sibling CPUs. */
SCHED_FEAT(RT_RUNTIME_SHARE, true)
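
/*
 * A sketch of the consumer, paraphrased from balance_runtime() in
 * rt.c: without the feature an rt_rq never borrows budget from its
 * siblings and simply throttles when rt_runtime is exhausted:
 *
 *	if (!sched_feat(RT_RUNTIME_SHARE))
 *		return more;
 *
 *	if (rt_rq->rt_time > rt_rq->rt_runtime)
 *		more = do_balance_runtime(rt_rq);
 */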

/* Skip migrating very low weight tasks unless balancing has failed. */
SCHED_FEAT(LB_MIN, false)
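
/*
 * How these entries work, roughly: this header is included more than
 * once with different SCHED_FEAT() definitions. One pass builds an
 * enum of __SCHED_FEAT_* indices, another folds the default values
 * into the sysctl_sched_features bitmask, and sched_feat(x) tests
 * the corresponding bit (paraphrased from kernel/sched/sched.h):
 *
 *	enum {
 *	#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
 *	#include "features.h"
 *		__SCHED_FEAT_NR,
 *	};
 *	#undef SCHED_FEAT
 *
 *	#define sched_feat(x)	(sysctl_sched_features & \
 *				 (1UL << __SCHED_FEAT_##x))
 *
 * With CONFIG_SCHED_DEBUG each flag can be toggled at runtime, e.g.:
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */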