/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

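/*
 * Illustrative sketch of the nesting order above, as it occurs during a
 * memory scan (simplified from scan_object()/scan_block() further down in
 * this file):
 *
 *	mutex_lock(&scan_mutex);
 *	spin_lock_irqsave(&object->lock, flags);	(scan_object)
 *	read_lock_irqsave(&kmemleak_lock, flags);	(scan_block)
 *	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 */
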
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

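/*
 * For example, expanding the mask above: gfp_kmemleak_mask(GFP_KERNEL |
 * __GFP_ZERO) yields GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC |
 * __GFP_NOWARN; only the GFP_KERNEL/GFP_ATOMIC bits of the caller's flags
 * are preserved for kmemleak's own allocations.
 */
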
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN,
	KMEMLEAK_SET_EXCESS_REF
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	int min_count;			/* minimum reference count */
	const void *ptr;		/* allocated/freed memory block */
	union {
		size_t size;		/* memory block size */
		unsigned long excess_ref; /* surplus reference passing */
	};
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warn(x);			\
	dump_stack();			\
	kmemleak_warning = 1;		\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects start with count == 0, i.e. white unless min_count
 * is 0 (gray) or -1 (black); their count is updated on the next memory scan.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

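/*
 * Worked examples of the color predicates above (a sketch using the
 * encodings defined in this file):
 *
 *	min_count == 1, count == 0	-> white (potential leak)
 *	min_count == 1, count == 2	-> gray (sufficiently referenced)
 *	min_count == 0, count == 0	-> gray (never reported)
 *	min_count == KMEMLEAK_BLACK	-> neither white nor gray; ignored
 */
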
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look up the metadata (kmemleak_object) of a memory block in the object
 * search tree based on a pointer value. If alias is 0, only values pointing
 * to the beginning of the memory block are allowed. The kmemleak_lock must
 * be held when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

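/*
 * Example of the alias semantics above (hypothetical addresses): for an
 * object covering [0x1000, 0x1100), lookup_object(0x1000, 0) and
 * lookup_object(0x1040, 1) both return the object, while
 * lookup_object(0x1040, 0) warns about a find by alias and returns NULL.
 */
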
/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been scheduled and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

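/*
 * Sketch of the reference counting pattern built on the two functions above
 * (this is what find_and_get_object() below implements):
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(under kmemleak_lock)
 *	if (object && !get_object(object))
 *		object = NULL;			(already being freed)
 *	rcu_read_unlock();
 *	...
 *	put_object(object);			(may schedule the RCU free)
 */
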
/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the recorded comm is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block, registering each CPU's copy and
 * populating the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

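/*
 * Example of a hypothetical caller pairing the hooks (the slab, percpu and
 * vmalloc allocators invoke these internally, so most code never calls them
 * directly):
 *
 *	ptr = my_alloc(size);			(hypothetical allocator)
 *	kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(ptr);
 *	my_free(ptr);
 *
 * With min_count == 1, the block is reported as a leak if fewer than one
 * reference to it is found during scanning.
 */
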
/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	} else if (kmemleak_early_log) {
		log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
		/* reusing early_log.size for storing area->addr */
		log_early(KMEMLEAK_SET_EXCESS_REF,
			  area, (unsigned long)area->addr, 0);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

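/*
 * An illustration of the excess_ref mechanism set up above: once the
 * vm_struct object at 'area' is gray, any further pointer to 'area' found
 * during scanning is counted as a reference to the vmalloc'ed block at
 * 'area->addr' instead (see the excess_ref handling in scan_block()).
 */
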
/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

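/*
 * Example (hypothetical structure): if only the 'next' field of a large
 * buffer can hold references to other allocations:
 *
 *	struct big_buf *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *	kmemleak_scan_area(&b->next, sizeof(b->next), GFP_KERNEL);
 *
 * Kmemleak then scans only the registered range instead of the whole object,
 * so random data elsewhere in the buffer cannot masquerade as a reference.
 */
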
/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

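/*
 * Example of how the checksum is used (see the second pass in
 * kmemleak_scan() below): a white, still allocated object whose CRC32
 * changed since the previous scan is assumed to have been modified, is
 * temporarily colored gray and re-scanned before being reported.
 */
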
/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			spin_unlock(&object->lock);
		}
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported, but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object returned, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

1771static int dump_str_object_info(const char *str)
1772{
1773 unsigned long flags;
1774 struct kmemleak_object *object;
1775 unsigned long addr;
1776
1777 if (kstrtoul(str, 0, &addr))
1778 return -EINVAL;
1779 object = find_and_get_object(addr, 0);
1780 if (!object) {
1781 pr_info("Unknown object at 0x%08lx\n", addr);
1782 return -EINVAL;
1783 }
1784
1785 spin_lock_irqsave(&object->lock, flags);
1786 dump_object_info(object);
1787 spin_unlock_irqrestore(&object->lock, flags);
1788
1789 put_object(object);
1790 return 0;
1791}
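
/*
 * Triggered by writing "dump=<addr>" to the debugfs file, e.g. with an
 * address taken from a previously reported leak (the value below is only a
 * placeholder):
 *
 *	# echo dump=0xffff88804c31e000 > /sys/kernel/debug/kmemleak
 *
 * The object details are printed to the kernel log by dump_object_info().
 */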

/*
 * We use grey instead of black to ensure that the objects are still scanned
 * during future scans. If they were painted black (never scanned), they
 * could hold references to objects allocated later and those objects would
 * then be wrongly reported as leaks (false positives).
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}
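
/*
 * Typical use from user space is to ignore everything reported so far and
 * then look only at leaks that appear afterwards:
 *
 *	# echo clear > /sys/kernel/debug/kmemleak
 *	  ... exercise the workload under test ...
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 */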

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file (see the
 * usage examples after this function):
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable task stack scanning
 *   stack=off	- disable task stack scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects
 *		  as grey so that they are no longer printed, or free all
 *		  kmemleak objects if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given
 *		  address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
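
/*
 * Typical run-time configuration from user space; each write carries
 * exactly one command:
 *
 *	# echo scan=60 > /sys/kernel/debug/kmemleak	(scan every 60 sec)
 *	# echo scan > /sys/kernel/debug/kmemleak	(scan once, now)
 *	# echo stack=off > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak		(read the report)
 */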

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects,
 * but only if no memory leaks have been found (otherwise the internal
 * objects still carry useful information and are kept until the user
 * explicitly writes "clear").
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once the scan thread is guaranteed to have stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/*
	 * Atomically check whether the function was already invoked:
	 * cmpxchg() returns the old value, so only the first caller sees 0
	 * and proceeds past this point.
	 */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
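
/*
 * Command line usage: booting with "kmemleak=off" disables the leak
 * detector entirely, while "kmemleak=on" re-enables it on kernels built
 * with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y (see kmemleak_init() below).
 */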

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		case KMEMLEAK_SET_EXCESS_REF:
			object_set_excess_ref((unsigned long)log->ptr,
					      log->excess_ref);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);