/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct drm_i915_private;

#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			     __stringify(x), (long)(x))

void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...) \
	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)

int __i915_inject_load_error(struct drm_i915_private *i915, int err,
			     const char *func, int line);
#define i915_inject_load_error(_i915, _err) \
	__i915_inject_load_error((_i915), (_err), __func__, __LINE__)
bool i915_error_injected(void);

#else

#define i915_inject_load_error(_i915, _err) 0
#define i915_error_injected() false

#endif

#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV)

#define i915_probe_error(i915, fmt, ...) \
	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
	__builtin_add_overflow_p((A), (B), (T)0)
#else
#define add_overflows_t(T, A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	(T)(a + b) < a; \
})
#endif

#define add_overflows(A, B) \
	add_overflows_t(typeof((A) + (B)), (A), (B))
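
/*
 * Usage sketch (added commentary, names are hypothetical): reject an
 * addition whose result would wrap before trusting it, e.g.
 *
 *	if (add_overflows_t(u32, offset, len))
 *		return -EINVAL;
 *	offset += len;
 */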

#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
	range_overflows((type)(start), (type)(size), (type)(max))
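
/*
 * Usage sketch (added commentary, names are hypothetical): validate that
 * a user-supplied [start, start + size) range fits below a limit. The
 * (void)(&start__ == &size__) comparisons above exist only to make the
 * compiler warn when the three arguments have mismatched types.
 *
 *	if (range_overflows_t(u64, args->offset, args->size, ggtt_size))
 *		return -EINVAL;
 */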

/* Note we don't consider sign bits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
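
/*
 * Usage sketch (added commentary): overflows_type() reports whether @x
 * carries significant bits beyond what T can hold; because sign bits
 * are ignored, negative values are not flagged.
 *
 *	u64 big = BIT_ULL(40);
 *	overflows_type(big, u32);	(evaluates to true: bit 40 is lost)
 */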

static inline bool
__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
{
	size_t sz;

	if (check_mul_overflow(count, arr, &sz))
		return false;

	if (check_add_overflow(sz, base, &sz))
		return false;

	*size = sz;
	return true;
}

/**
 * check_struct_size() - Calculate size of structure with trailing array.
 * @p: Pointer to the structure.
 * @member: Name of the array member.
 * @n: Number of elements in the array.
 * @sz: Total size of structure and array.
 *
 * Calculates the size of memory needed for structure @p followed by an
 * array of @n @member elements, like struct_size(), but reports
 * whether the computation overflowed and returns the resultant size in @sz.
 *
 * Return: false if the calculation overflowed.
 */
#define check_struct_size(p, member, n, sz) \
	likely(__check_struct_size(sizeof(*(p)), \
				   sizeof(*(p)->member) + __must_be_array((p)->member), \
				   n, sz))
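
/*
 * Usage sketch (added commentary; the struct is hypothetical): size an
 * allocation for a struct ending in a flexible array without risking
 * integer overflow from a user-controlled count:
 *
 *	struct fence_array {
 *		unsigned int count;
 *		struct dma_fence *fences[];
 *	} *arr;
 *	size_t sz;
 *
 *	if (!check_struct_size(arr, fences, nfences, &sz))
 *		return -EINVAL;
 *	arr = kmalloc(sz, GFP_KERNEL);
 */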

#define ptr_mask_bits(ptr, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))

#define ptr_unpack_bits(ptr, bits, n) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	*(bits) = __v & (BIT(n) - 1); \
	(typeof(ptr))(__v & -BIT(n)); \
})

#define ptr_pack_bits(ptr, bits, n) ({ \
	unsigned long __bits = (bits); \
	GEM_BUG_ON(__bits & -BIT(n)); \
	((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})
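
/*
 * Round-trip sketch (added commentary, names are hypothetical): stash a
 * small tag in the low bits of a pointer and recover both later. The
 * pointer must be aligned to at least BIT(n) bytes, otherwise
 * ptr_pack_bits() corrupts it.
 *
 *	unsigned long tag;
 *	void *packed = ptr_pack_bits(engine, 2, 2);
 *	...
 *	engine = ptr_unpack_bits(packed, &tag, 2);
 */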

#define ptr_dec(ptr) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v - 1); \
})

#define ptr_inc(ptr) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v + 1); \
})

#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)

#define struct_member(T, member) (((T *)0)->member)

#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})
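
/*
 * Usage sketch (added commentary, the field is hypothetical): take
 * ownership of a pointer and clear the slot in one expression. This is
 * not atomic; the caller must hold whatever lock guards the field.
 *
 *	struct task_struct *tsk = fetch_and_zero(&w->task);
 *	if (tsk)
 *		wake_up_process(tsk);
 */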

/*
 * container_of_user: Extract the superclass from a pointer to a member.
 *
 * Exactly like container_of() with the exception that it plays nicely
 * with sparse for __user @ptr.
 */
#define container_of_user(ptr, type, member) ({ \
	void __user *__mptr = (void __user *)(ptr); \
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
			 !__same_type(*(ptr), void), \
			 "pointer type mismatch in container_of()"); \
	((type __user *)(__mptr - offsetof(type, member))); })

/*
 * check_user_mbz: Check that a user value exists and is zero
 *
 * Frequently in our uABI we reserve space for future extensions, and
 * to ensure that userspace is prepared, we enforce that the space must
 * be zero. (Then any future extension can safely assume a default value
 * of 0.)
 *
 * check_user_mbz() combines checking that the user pointer is accessible
 * and that the contained value is zero.
 *
 * Returns: -EFAULT if not accessible, -EINVAL if not zero, or 0 on success.
 */
#define check_user_mbz(U) ({ \
	typeof(*(U)) mbz__; \
	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
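
/*
 * Usage sketch (added commentary, the field is hypothetical): reject an
 * ioctl whose reserved must-be-zero field is set, keeping it free for
 * future extensions:
 *
 *	ret = check_user_mbz(&ext->rsvd);
 *	if (ret)
 *		return ret;
 */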

static inline u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t)ptr;
}

#define u64_to_ptr(T, x) ({ \
	typecheck(u64, x); \
	(T *)(uintptr_t)(x); \
})
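
/*
 * Round-trip sketch (added commentary, names are illustrative): pointers
 * travel through the uABI as u64 so that struct layouts are identical
 * for 32-bit and 64-bit userspace; convert at that boundary:
 *
 *	u64 cookie = ptr_to_u64(rq);
 *	...
 *	rq = u64_to_ptr(struct i915_request, cookie);
 */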

#define __mask_next_bit(mask) ({ \
	int __idx = ffs(mask) - 1; \
	mask &= ~BIT(__idx); \
	__idx; \
})
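
/*
 * Usage sketch (added commentary): peel the set bits off a mask one at a
 * time, lowest first; note the macro clears each bit from @mask as a
 * side effect.
 *
 *	unsigned int mask = 0x0a;
 *	while (mask)
 *		enable_engine(__mask_next_bit(mask));
 *
 * This calls enable_engine() (a hypothetical helper) with 1, then 3.
 */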

static inline bool is_power_of_2_u64(u64 n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}

static inline void __list_del_many(struct list_head *head,
				   struct list_head *first)
{
	first->prev = head;
	WRITE_ONCE(head->next, first);
}

/*
 * Wait until the work is finally complete, even if it tries to postpone
 * by requeueing itself. Note that if the worker never cancels itself,
 * we will spin forever.
 */
static inline void drain_delayed_work(struct delayed_work *dw)
{
	do {
		while (flush_delayed_work(dw))
			;
	} while (delayed_work_pending(dw));
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
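
/*
 * Usage sketch (added commentary, names are hypothetical): enforce a
 * panel power-cycle delay between switching a panel off and the next
 * power-on, without sleeping when enough wall time has already passed:
 *
 *	intel_dp->panel_off_jiffies = jiffies;	(recorded at power-off)
 *	...
 *	wait_remaining_ms_from_jiffies(intel_dp->panel_off_jiffies, 500);
 */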

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
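
/*
 * Usage sketch (added commentary; the register and flag are
 * hypothetical): poll a status register for up to 100 ms, sleeping with
 * exponential backoff between reads:
 *
 *	if (wait_for(I915_READ(STATUS_REG) & DONE_BIT, 100))
 *		DRM_ERROR("timed out waiting for DONE\n");
 */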

/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10, 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

#define wait_for_atomic_us(COND, US) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	BUILD_BUG_ON((US) > 50000); \
	_wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
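
/*
 * Usage sketch (added commentary, the helper is hypothetical): in atomic
 * context the sleeping wait_for() is not allowed, so busy-wait for a
 * short, bounded time instead:
 *
 *	err = wait_for_atomic_us(read_ack(uncore), 10);
 */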

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

#define KBps(x) (1000 * (x))
#define MBps(x) KBps(1000 * (x))
#define GBps(x) ((u64)1000 * MBps((x)))

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

static inline void add_taint_for_CI(unsigned int taint)
{
	/*
	 * The system is "ok", just about surviving for the user, but
	 * CI results are now unreliable as the HW is very suspect.
	 * CI checks the taint state after every test and will reboot
	 * the machine if the kernel is tainted.
	 */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#endif /* !__I915_UTILS_H */