/*
2 * Generic infrastructure for lifetime debugging of objects.
3 *
4 * Started by Thomas Gleixner
5 *
6 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7 *
8 * For licencing details see kernel-base/COPYING
9 */
10#include <linux/debugobjects.h>
11#include <linux/interrupt.h>
12#include <linux/sched.h>
13#include <linux/seq_file.h>
14#include <linux/debugfs.h>
15#include <linux/slab.h>
16#include <linux/hash.h>
17
/* Hash table for looking up tracker objects by address */
#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)

/* Static pool size and the refill threshold of the free pool */
#define ODEBUG_POOL_SIZE 512
#define ODEBUG_POOL_MIN_LEVEL 256

/* Objects are hashed by the page sized chunk containing their address */
#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: chain of tracked objects plus its protecting lock */
struct debug_bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

/* Objects used before the slab allocator is available */
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below */
static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;	/* low water mark */
static int obj_pool_free = ODEBUG_POOL_SIZE;		/* currently free objects */
static int obj_pool_used;				/* currently tracked objects */
static int obj_pool_max_used;				/* high water mark */
static struct kmem_cache *obj_cache;	/* NULL until debug_objects_mem_init() */

/* Statistics, exported via debugfs */
static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Descriptor used by the selftest; its warnings are suppressed */
static struct debug_obj_descr *descr_test  __read_mostly;

/* Deferred freeing of surplus pool objects back to the slab cache */
static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
57
/* Boot parameter "debug_objects": force-enable object debugging */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

/* Boot parameter "no_debug_objects": force-disable object debugging */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
72
/* Human readable names of the object states, indexed by state */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE] = "none",
	[ODEBUG_STATE_INIT] = "initialized",
	[ODEBUG_STATE_INACTIVE] = "inactive",
	[ODEBUG_STATE_ACTIVE] = "active",
	[ODEBUG_STATE_DESTROYED] = "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE] = "not available",
};
81
82static int fill_pool(void)
83{
84 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
85 struct debug_obj *new;
86 unsigned long flags;
87
88 if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
89 return obj_pool_free;
90
91 if (unlikely(!obj_cache))
92 return obj_pool_free;
93
94 while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
95
96 new = kmem_cache_zalloc(obj_cache, gfp);
97 if (!new)
98 return obj_pool_free;
99
100 raw_spin_lock_irqsave(&pool_lock, flags);
101 hlist_add_head(&new->node, &obj_pool);
102 obj_pool_free++;
103 raw_spin_unlock_irqrestore(&pool_lock, flags);
104 }
105 return obj_pool_free;
106}
107
108/*
109 * Lookup an object in the hash bucket.
110 */
111static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
112{
113 struct hlist_node *node;
114 struct debug_obj *obj;
115 int cnt = 0;
116
117 hlist_for_each_entry(obj, node, &b->list, node) {
118 cnt++;
119 if (obj->object == addr)
120 return obj;
121 }
122 if (cnt > debug_objects_maxchain)
123 debug_objects_maxchain = cnt;
124
125 return NULL;
126}
127
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 *
 * Takes the first free object off the pool, fills it in for @addr and
 * links it into bucket @b. Returns NULL when the pool is empty; the
 * caller is responsible for disabling the debugger in that case.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Update usage counters and the high/low water marks */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
161
/*
 * workqueue function to free objects.
 *
 * Shrinks the free pool back down to ODEBUG_POOL_SIZE by returning
 * surplus objects to the slab cache, one at a time.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
185
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		/* Only once keventd runs and no shrink work is pending */
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
209
210/*
211 * We run out of memory. That means we probably have tons of objects
212 * allocated.
213 */
214static void debug_objects_oom(void)
215{
216 struct debug_bucket *db = obj_hash;
217 struct hlist_node *node, *tmp;
218 HLIST_HEAD(freelist);
219 struct debug_obj *obj;
220 unsigned long flags;
221 int i;
222
223 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
224
225 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
226 raw_spin_lock_irqsave(&db->lock, flags);
227 hlist_move_list(&db->list, &freelist);
228 raw_spin_unlock_irqrestore(&db->lock, flags);
229
230 /* Now free them */
231 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
232 hlist_del(&obj->node);
233 free_object(obj);
234 }
235 }
236}
237
238/*
239 * We use the pfn of the address for the hash. That way we can check
240 * for freed objects simply by checking the affected bucket.
241 */
242static struct debug_bucket *get_bucket(unsigned long addr)
243{
244 unsigned long hash;
245
246 hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
247 return &obj_hash[hash];
248}
249
/*
 * Emit a rate limited warning (at most 5 per boot) about a state
 * machine violation of the object. Warnings for the selftest
 * descriptor are suppressed, but all warnings are counted.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		/* Optional type specific hint (e.g. a callback pointer) */
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}
266
267/*
268 * Try to repair the damage, so we have a better chance to get useful
269 * debug output.
270 */
271static int
272debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
273 void * addr, enum debug_obj_state state)
274{
275 int fixed = 0;
276
277 if (fixup)
278 fixed = fixup(addr, state);
279 debug_objects_fixups += fixed;
280 return fixed;
281}
282
283static void debug_object_is_on_stack(void *addr, int onstack)
284{
285 int is_on_stack;
286 static int limit;
287
288 if (limit > 4)
289 return;
290
291 is_on_stack = object_is_on_stack(addr);
292 if (is_on_stack == onstack)
293 return;
294
295 limit++;
296 if (is_on_stack)
297 printk(KERN_WARNING
298 "ODEBUG: object is on stack, but not annotated\n");
299 else
300 printk(KERN_WARNING
301 "ODEBUG: object is not on stack, but annotated\n");
302 WARN_ON(1);
303}
304
/*
 * Core of debug_object_init(): look up or allocate the tracker object
 * for @addr and transition it to ODEBUG_STATE_INIT. @onstack states
 * whether the caller claims the object lives on the stack.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/*
	 * NOTE(review): on RT the pool is only refilled from preemptible,
	 * irq-enabled context — presumably because the atomic allocation
	 * in fill_pool() is not valid in all RT contexts; confirm.
	 */
#ifdef CONFIG_PREEMPT_RT_FULL
	if (preempt_count() == 0 && !irqs_disabled())
#endif
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable tracking entirely */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		/* Check that the stack annotation matches reality */
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Init of an active object: warn and let the fixup decide */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
357
358/**
359 * debug_object_init - debug checks when an object is initialized
360 * @addr: address of the object
361 * @descr: pointer to an object specific debug description structure
362 */
363void debug_object_init(void *addr, struct debug_obj_descr *descr)
364{
365 if (!debug_objects_enabled)
366 return;
367
368 __debug_object_init(addr, descr, 0);
369}
370
371/**
372 * debug_object_init_on_stack - debug checks when an object on stack is
373 * initialized
374 * @addr: address of the object
375 * @descr: pointer to an object specific debug description structure
376 */
377void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
378{
379 if (!debug_objects_enabled)
380 return;
381
382 __debug_object_init(addr, descr, 1);
383}
384
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	/* Stand-in tracker for warnings when no object exists for @addr */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: warn, then run the fixup
			 * with the lock dropped */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE))
		/* Fixup could not resolve it: report via the stand-in */
		debug_print_object(&o, "activate");
}
442
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* Only legal when no extra usage state is pending */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: warn via a stand-in tracker */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
489
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: warn, run fixup unlocked */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
535
/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: warn, run fixup unlocked */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Detach the tracker and return it to the pool */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
575
/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		/* Stand-in tracker for the warning below */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			/* Fixup could not resolve it: report it */
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
613
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance the extra usage state if it matches */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		/* Untracked object: warn via a stand-in tracker */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
660
661#ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Walk all hash buckets covering the freed memory range and remove the
 * tracker objects inside it. Active objects are reported and the type
 * specific fixup_free callback gets a chance to deactivate them first.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	/* Number of hash chunks (i.e. buckets) spanned by the range */
	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* The bucket can hold objects outside the range */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				/*
				 * Drop the lock for the fixup call, then
				 * rescan the bucket from scratch.
				 */
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
718
719void debug_check_no_obj_freed(const void *address, unsigned long size)
720{
721 if (debug_objects_enabled)
722 __debug_check_no_obj_freed(address, size);
723}
724#endif
725
726#ifdef CONFIG_DEBUG_FS
727
/* seq_file show callback: dump the usage statistics counters */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
739
/* debugfs open callback: wire up the single-record seq_file helper */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

/* File operations for <debugfs>/debug_objects/stats */
static const struct file_operations debug_stats_fops = {
	.open = debug_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
751
752static int __init debug_objects_init_debugfs(void)
753{
754 struct dentry *dbgdir, *dbgstats;
755
756 if (!debug_objects_enabled)
757 return 0;
758
759 dbgdir = debugfs_create_dir("debug_objects", NULL);
760 if (!dbgdir)
761 return -ENOMEM;
762
763 dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
764 &debug_stats_fops);
765 if (!dbgstats)
766 goto err;
767
768 return 0;
769
770err:
771 debugfs_remove(dbgdir);
772
773 return -ENOMEM;
774}
775__initcall(debug_objects_init_debugfs);
776
777#else
778static inline void debug_objects_init_debugfs(void) { }
779#endif
780
781#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
782
/* Random data structure for the self test */
struct self_test {
	unsigned long dummy1[6];
	int static_init;	/* simulates a statically initialized object */
	unsigned long dummy2[3];
};

/* Forward declaration; defined after the fixup callbacks below */
static __initdata struct debug_obj_descr descr_type_test;
791
792/*
793 * fixup_init is called when:
794 * - an active object is initialized
795 */
796static int __init fixup_init(void *addr, enum debug_obj_state state)
797{
798 struct self_test *obj = addr;
799
800 switch (state) {
801 case ODEBUG_STATE_ACTIVE:
802 debug_object_deactivate(obj, &descr_type_test);
803 debug_object_init(obj, &descr_type_test);
804 return 1;
805 default:
806 return 0;
807 }
808}
809
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * static_init marks the simulated static object: init it
		 * on the fly, retry the activation and return 0 so no
		 * warning is issued for this legitimate case.
		 */
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		/* Double activation: deactivate and activate again */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
837
838/*
839 * fixup_destroy is called when:
840 * - an active object is destroyed
841 */
842static int __init fixup_destroy(void *addr, enum debug_obj_state state)
843{
844 struct self_test *obj = addr;
845
846 switch (state) {
847 case ODEBUG_STATE_ACTIVE:
848 debug_object_deactivate(obj, &descr_type_test);
849 debug_object_destroy(obj, &descr_type_test);
850 return 1;
851 default:
852 return 0;
853 }
854}
855
856/*
857 * fixup_free is called when:
858 * - an active object is freed
859 */
860static int __init fixup_free(void *addr, enum debug_obj_state state)
861{
862 struct self_test *obj = addr;
863
864 switch (state) {
865 case ODEBUG_STATE_ACTIVE:
866 debug_object_deactivate(obj, &descr_type_test);
867 debug_object_free(obj, &descr_type_test);
868 return 1;
869 default:
870 return 0;
871 }
872}
873
/*
 * Selftest helper: verify that the tracker object for @addr is in
 * @state and that the fixup and warning counters match the expected
 * values. Any mismatch disables object debugging and returns -EINVAL.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
913
/* Object type descriptor used by the selftest */
static __initdata struct debug_obj_descr descr_type_test = {
	.name = "selftest",
	.fixup_init = fixup_init,
	.fixup_activate = fixup_activate,
	.fixup_destroy = fixup_destroy,
	.fixup_free = fixup_free,
};

/* The object the selftest exercises */
static __initdata struct self_test obj = { .static_init = 0 };
923
/*
 * Exercise the object state machine: walk through legal and illegal
 * transitions and verify state, fixup count and warning count after
 * each step. check_results() disables object debugging on mismatch.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Legal transitions: init -> active */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must warn and be fixed up */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object must only warn */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of a simulated static object */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory which still contains an active object */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
994#else
995static inline void debug_objects_selftest(void) { }
996#endif
997
998/*
999 * Called during early boot to initialize the hash buckets and link
1000 * the static object pool objects into the poll list. After this call
1001 * the object tracker is fully operational.
1002 */
1003void __init debug_objects_early_init(void)
1004{
1005 int i;
1006
1007 for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1008 raw_spin_lock_init(&obj_hash[i].lock);
1009
1010 for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1011 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1012}
1013
/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Preallocate one dynamic object per static pool slot */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			/* Take a fresh object from the pool */
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	local_irq_enable();
	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	return 0;
free:
	/* Allocation failed: discard what we got, keep the static pool */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
1070
1071/*
1072 * Called after the kmem_caches are functional to setup a dedicated
1073 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1074 * prevents that the debug code is called on kmem_cache_free() for the
1075 * debug tracker objects to avoid recursive calls.
1076 */
1077void __init debug_objects_mem_init(void)
1078{
1079 if (!debug_objects_enabled)
1080 return;
1081
1082 obj_cache = kmem_cache_create("debug_objects_cache",
1083 sizeof (struct debug_obj), 0,
1084 SLAB_DEBUG_OBJECTS, NULL);
1085
1086 if (!obj_cache || debug_objects_replace_static_objects()) {
1087 debug_objects_enabled = 0;
1088 if (obj_cache)
1089 kmem_cache_destroy(obj_cache);
1090 printk(KERN_WARNING "ODEBUG: out of memory.\n");
1091 } else
1092 debug_objects_selftest();
1093}