/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};
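
/*
 * Illustrative sketch (not compiled): the access pattern implied by the
 * comment above. Interrupts are disabled so the per-CPU free list cannot
 * be modified concurrently from interrupt context on the same CPU; see
 * alloc_object() and __free_object() below for the real users. The helper
 * name is hypothetical.
 */
#if 0
static void percpu_pool_push_example(struct debug_obj *obj)
{
	struct debug_percpu_free *percpu_pool;
	unsigned long flags;

	local_irq_save(flags);
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	hlist_add_head(&obj->node, &percpu_pool->free_objs);
	percpu_pool->obj_free++;
	local_irq_restore(flags);
}
#endif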

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) &&
	       READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

/*
 * We ran out of memory. That probably means a large number of objects
 * are allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
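
/*
 * Illustrative sketch (not compiled): all addresses within one
 * ODEBUG_CHUNK_SIZE (page sized) chunk hash to the same bucket, which is
 * what lets __debug_check_no_obj_freed() below scan a freed memory range
 * one chunk, and therefore one bucket, at a time. The helper name is
 * hypothetical.
 */
#if 0
static void get_bucket_example(void)
{
	unsigned long addr = 0x12345678UL;

	/* Two addresses in the same chunk map to the same bucket. */
	WARN_ON(get_bucket(addr) != get_bucket(addr + 0x10));
	/* The next chunk may (and usually does) hash to another bucket. */
	(void)get_bucket(addr + ODEBUG_CHUNK_SIZE);
}
#endif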

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
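
/*
 * Illustrative sketch (not compiled): how a subsystem typically wires the
 * tracking calls into its object lifetime. The "foo" structure, descriptor
 * and helpers are hypothetical; real users (timers, work items, etc.)
 * follow the same pattern, usually with their own fixup callbacks filled
 * in.
 */
#if 0
struct foo {
	int data;
};

static struct debug_obj_descr foo_debug_descr = {
	.name = "foo",
};

static void foo_init(struct foo *f)
{
	debug_object_init(f, &foo_debug_descr);		/* tracked: initialized */
	f->data = 0;
}

static void foo_start(struct foo *f)
{
	debug_object_activate(f, &foo_debug_descr);	/* initialized -> active */
}

static void foo_stop(struct foo *f)
{
	debug_object_deactivate(f, &foo_debug_descr);	/* active -> inactive */
}

static void foo_release(struct foo *f)
{
	debug_object_free(f, &foo_debug_descr);		/* untracked again */
}
#endif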

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (likely(!IS_ERR_OR_NULL(obj))) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return 0;
	}

	/* Object is neither static nor tracked. It's not initialized */
	debug_print_object(&o, "activate");
	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
	return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
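
/*
 * Illustrative sketch (not compiled): a caller defined sub-state machine on
 * top of ODEBUG_STATE_ACTIVE, reusing the hypothetical "foo" descriptor from
 * the sketch after debug_object_init() above. astate starts at 0 after
 * activation, so the READY value must be 0; any transition other than
 * READY -> QUEUED -> READY triggers an "active_state" warning.
 */
#if 0
enum {
	FOO_ASTATE_READY	= 0,
	FOO_ASTATE_QUEUED	= 1,
};

static void foo_queue(struct foo *f)
{
	debug_object_active_state(f, &foo_debug_descr,
				  FOO_ASTATE_READY, FOO_ASTATE_QUEUED);
	/* ... hand the object over to the consumer ... */
}

static void foo_complete(struct foo *f)
{
	debug_object_active_state(f, &foo_debug_descr,
				  FOO_ASTATE_QUEUED, FOO_ASTATE_READY);
}
#endif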

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}
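
/*
 * Worked example for the chunk arithmetic above, assuming 4K pages (so
 * ODEBUG_CHUNK_SIZE == 0x1000): freeing 0x100 bytes at address 0x12fe0
 * gives saddr = 0x12fe0, eaddr = 0x130e0, paddr = 0x12000 and
 * chunks = (0x130e0 - 0x12000 + 0xfff) >> 12 = 2, so the buckets for the
 * chunks at 0x12000 and 0x13000 are both scanned.
 */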

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU is
	 * up and interrupts are disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
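
	/*
	 * Worked example for the scaling above: with 64 possible CPUs,
	 * extras = 64 * ODEBUG_BATCH_SIZE = 1024, so debug_objects_pool_size
	 * grows from 1024 to 2048 and debug_objects_pool_min_level from 256
	 * to 1280.
	 */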
1419}