// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/cpupri.c
 *
 * CPU priority management
 *
 * Copyright (C) 2007-2008 Novell
 *
 * Author: Gregory Haskins <ghaskins@novell.com>
 *
 * This code tracks the priority of each CPU so that global migration
 * decisions are easy to calculate. Each CPU can be in a state as follows:
 *
 *              (INVALID), IDLE, NORMAL, RT1, ... RT99
 *
 * going from the lowest priority to the highest. CPUs in the INVALID state
 * are not eligible for routing. The system maintains this state with
 * a 2 dimensional bitmap (the first for priority class, the second for CPUs
 * in that class). Therefore a typical application without affinity
 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 * searches). For tasks with affinity restrictions, the algorithm has a
 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
 * yields the worst case search is fairly contrived.
 */
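
/*
 * Editor's sketch (illustration only, not part of the original file):
 * the "two bit searches" above boil down to scanning the priority
 * vectors from lowest to highest and testing each per-class CPU mask,
 * roughly:
 *
 *      for (idx = 0; idx < task_pri; idx++)
 *              if (cpumask_intersects(p->cpus_ptr, cp->pri_to_cpu[idx].mask))
 *                      break;
 *
 * i.e. stop at the lowest-priority class that still contains a usable
 * CPU. cpupri_find_fitness() below implements exactly this scan, plus
 * the count/mask ordering needed to make it safe for lockless readers.
 */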
#include "sched.h"

/* Convert between a 140-based task->prio and our 102-based cpupri */
static int convert_prio(int prio)
{
        int cpupri;

        if (prio == CPUPRI_INVALID)
                cpupri = CPUPRI_INVALID;
        else if (prio == MAX_PRIO)
                cpupri = CPUPRI_IDLE;
        else if (prio >= MAX_RT_PRIO)
                cpupri = CPUPRI_NORMAL;
        else
                cpupri = MAX_RT_PRIO - prio + 1;

        return cpupri;
}
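
/*
 * A worked example of the mapping above (editor's illustration,
 * assuming the usual MAX_RT_PRIO == 100 and MAX_PRIO == 140):
 *
 *      task->prio 140 (MAX_PRIO, idle)      -> CPUPRI_IDLE   (0)
 *      task->prio 100..139 (non-RT range)   -> CPUPRI_NORMAL (1)
 *      task->prio  99 (lowest RT priority)  -> 100 - 99 + 1 = 2
 *      task->prio   0 (highest RT priority) -> 100 -  0 + 1 = 101
 *
 * giving the 102 cpupri values (0..101), where a bigger value means a
 * higher-priority CPU - the reverse of task->prio, where lower numbers
 * are more important.
 */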

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
                                struct cpumask *lowest_mask, int idx)
{
        struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
        int skip = 0;

        if (!atomic_read(&vec->count))
                skip = 1;
        /*
         * When looking at the vector, we need to read the counter,
         * do a memory barrier, then read the mask.
         *
         * Note: This is still all racy, but we can deal with it.
         * Ideally, we only want to look at masks that are set.
         *
         * If a mask is not set, then the only thing wrong is that we
         * did a little more work than necessary.
         *
         * If we read a zero count but the mask is set, because of the
         * memory barriers, that can only happen when the highest prio
         * task for a run queue has left the run queue, in which case,
         * it will be followed by a pull. If the task we are processing
         * fails to find a proper place to go, that pull request will
         * pull this task if the run queue is running at a lower
         * priority.
         */
        smp_rmb();

        /* Need to do the rmb for every iteration */
        if (skip)
                return 0;

        if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
                return 0;

        if (lowest_mask) {
                cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);

                /*
                 * We have to ensure that we have at least one bit
                 * still set in the array, since the map could have
                 * been concurrently emptied between the first and
                 * second reads of vec->mask. If we hit this
                 * condition, simply act as though we never hit this
                 * priority level and continue on.
                 */
                if (cpumask_empty(lowest_mask))
                        return 0;
        }

        return 1;
}
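
/*
 * Editor's note - a minimal sketch of how the barriers above pair with
 * the writer in cpupri_set() (names as used in this file):
 *
 *      cpupri_set() (writer)           __cpupri_find() (reader)
 *      ---------------------           ------------------------
 *      cpumask_set_cpu(cpu, mask)      atomic_read(&vec->count)
 *      smp_mb__before_atomic()         smp_rmb()
 *      atomic_inc(&vec->count)         read vec->mask
 *
 * A reader that observes the incremented count is therefore guaranteed
 * to also observe the mask bit; a stale mask bit merely costs a wasted
 * scan, as the comment above explains.
 */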

int cpupri_find(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask)
{
        return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function to do custom checks whether the CPU
 *              fits a specific criterion so that we only return those CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation. By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times. While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
                        struct cpumask *lowest_mask,
                        bool (*fitness_fn)(struct task_struct *p, int cpu))
{
        int task_pri = convert_prio(p->prio);
        int idx, cpu;

        BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

        for (idx = 0; idx < task_pri; idx++) {

                if (!__cpupri_find(cp, p, lowest_mask, idx))
                        continue;

                if (!lowest_mask || !fitness_fn)
                        return 1;

                /* Ensure the capacity of the CPUs fits the task */
                for_each_cpu(cpu, lowest_mask) {
                        if (!fitness_fn(p, cpu))
                                cpumask_clear_cpu(cpu, lowest_mask);
                }

                /*
                 * If no CPU at the current priority can fit the task,
                 * continue looking.
                 */
                if (cpumask_empty(lowest_mask))
                        continue;

                return 1;
        }

        /*
         * If we failed to find a fitting lowest_mask, kick off a new search
         * but without taking into account any fitness criteria this time.
         *
         * This rule favours honouring priority over fitting the task in the
         * correct CPU (Capacity Awareness being the only user now).
         * The idea is that if a higher priority task can run, then it should
         * run even if this ends up being on an unfitting CPU.
         *
         * The cost of this trade-off is not entirely clear and will probably
         * be good for some workloads and bad for others.
         *
         * The main idea here is that if some CPUs were over-committed, we try
         * to spread the load, which is what the scheduler traditionally did.
         * Sys admins must do proper RT planning to avoid overloading the
         * system if they really care.
         */
        if (fitness_fn)
                return cpupri_find(cp, p, lowest_mask);

        return 0;
}
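
/*
 * Editor's sketch of a typical caller, loosely modelled on
 * find_lowest_rq() in rt.c (rt_task_fits_capacity() being the
 * capacity-awareness fitness function alluded to above):
 *
 *      struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 *
 *      if (!cpupri_find_fitness(&task_rq(task)->rd->cpupri, task,
 *                               lowest_mask, rt_task_fits_capacity))
 *              return -1;
 *
 * On success, lowest_mask holds the lowest-priority CPUs the task may
 * run on, and the caller picks one of them.
 */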

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID-RT99) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
        int *currpri = &cp->cpu_to_pri[cpu];
        int oldpri = *currpri;
        int do_mb = 0;

        newpri = convert_prio(newpri);

        BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

        if (newpri == oldpri)
                return;

        /*
         * If the CPU was currently mapped to a different value, we
         * need to map it to the new value then remove the old value.
         * Note, we must add the new value first, otherwise we risk the
         * cpu being missed by the priority loop in cpupri_find.
         */
        if (likely(newpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

                cpumask_set_cpu(cpu, vec->mask);
                /*
                 * When adding a new vector, we update the mask first,
                 * do a write memory barrier, and then update the count, to
                 * make sure the vector is visible when count is set.
                 */
                smp_mb__before_atomic();
                atomic_inc(&vec->count);
                do_mb = 1;
        }
        if (likely(oldpri != CPUPRI_INVALID)) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

                /*
                 * Because the order of modification of the vec->count
                 * is important, we must make sure that the update
                 * of the new prio is seen before we decrement the
                 * old prio. This makes sure that the loop sees
                 * one or the other when we raise the priority of
                 * the run queue. We don't care about when we lower the
                 * priority, as that will trigger an rt pull anyway.
                 *
                 * We only need to do a memory barrier if we updated
                 * the new priority vec.
                 */
                if (do_mb)
                        smp_mb__after_atomic();

                /*
                 * When removing from the vector, we decrement the counter
                 * first, do a memory barrier, and then clear the mask.
                 */
                atomic_dec(&vec->count);
                smp_mb__after_atomic();
                cpumask_clear_cpu(cpu, vec->mask);
        }

        *currpri = newpri;
}
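
/*
 * Editor's note: in-tree this is driven from rt.c, roughly as
 *
 *      cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 *
 * whenever the highest RT priority of a run queue changes, and with
 * CPUPRI_INVALID when the CPU leaves the root domain and should no
 * longer be considered for routing.
 */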

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
        int i;

        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
                struct cpupri_vec *vec = &cp->pri_to_cpu[i];

                atomic_set(&vec->count, 0);
                if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
                        goto cleanup;
        }

        cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
        if (!cp->cpu_to_pri)
                goto cleanup;

        for_each_possible_cpu(i)
                cp->cpu_to_pri[i] = CPUPRI_INVALID;

        return 0;

cleanup:
        for (i--; i >= 0; i--)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
        return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
        int i;

        kfree(cp->cpu_to_pri);
        for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
                free_cpumask_var(cp->pri_to_cpu[i].mask);
}
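
/*
 * Editor's note: cpupri_init() and cpupri_cleanup() track the root
 * domain's lifetime - init_rootdomain() and free_rootdomain() in
 * topology.c call them once per root domain.
 */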