/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously,
 * one after another, in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */
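
/*
 * Back-of-the-envelope illustration of the saving (the numbers are
 * assumptions, not measurements): with SLUB_DEBUG-style storage, 1,000,000
 * tracked objects at 256 bytes of stack storage each cost ~256 MB. If those
 * objects share, say, 10,000 unique stacks, the depot stores each unique
 * stack once and every object keeps only a 4-byte depot_stack_handle_t:
 * ~4 MB of handles plus a couple of MB of depot storage, i.e. roughly two
 * orders of magnitude less.
 */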

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
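
/*
 * Worked example of the handle layout (assuming 4 KB pages, i.e.
 * PAGE_SHIFT == 12, and a 32-bit depot_stack_handle_t):
 *
 *	STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4 = 10
 *		(a 16 KB slab, addressed in 16-byte granules)
 *	STACK_ALLOC_INDEX_BITS  = 32 - 1 - 10 = 21
 *		(2^21 slabs expressible, capped to STACK_ALLOC_SLABS_CAP)
 *
 * With the 8192-slab cap this gives a total depot capacity of
 * 8192 * 16 KB = 128 MB.
 */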

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
#ifdef CONFIG_PAGE_OWNER
	u32 hit;			/* Times this stack was hit (page owner) */
#endif
	union handle_parts handle;
	unsigned long entries[1];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);
#ifdef CONFIG_PAGE_OWNER
static struct stack_record *max_found;
static DEFINE_SPINLOCK(max_found_lock);
#endif

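/*
 * Install the preallocated memory in *prealloc as the current or the next
 * stack slab, unless a slab is already in place. When the preallocation is
 * consumed, *prealloc is set to NULL; otherwise the caller remains
 * responsible for freeing it. Returns false only if no preallocation was
 * supplied.
 */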
static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in depot_save_stack().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	int required_size = offsetof(struct stack_record, entries) +
		sizeof(unsigned long) * size;
	struct stack_record *stack;

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in depot_save_stack() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
#ifdef CONFIG_PAGE_OWNER
	stack->hit = 0;
#endif
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, size * sizeof(unsigned long));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ... STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      size * sizeof(unsigned long) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack = slab + offset;

	trace->nr_entries = trace->max_entries = stack->size;
	trace->entries = stack->entries;
	trace->skip = 0;
}
EXPORT_SYMBOL_GPL(depot_fetch_stack);

#ifdef CONFIG_PAGE_OWNER
/* Account @cnt hits to the stack behind @handle and track the hottest one. */
void depot_hit_stack(depot_stack_handle_t handle, struct stack_trace *trace,
		     int cnt)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack = slab + offset;
	unsigned long flags;

	stack->hit += cnt;
	spin_lock_irqsave(&max_found_lock, flags);
	if (!max_found || stack->hit > max_found->hit)
		max_found = stack;
	spin_unlock_irqrestore(&max_found_lock, flags);
}

/* Print the most frequently hit stack recorded so far, if any. */
void show_max_hit_page(void)
{
	unsigned long entries[16];
	unsigned long flags;
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = 16,
		.skip = 0
	};

	spin_lock_irqsave(&max_found_lock, flags);
	if (!max_found) {
		spin_unlock_irqrestore(&max_found_lock, flags);
		return;
	}
	depot_fetch_stack(max_found->handle.handle, &trace);
	pr_info("max found hit=%u\n", max_found->hit);
	print_stack_trace(&trace, 2);
	spin_unlock_irqrestore(&max_found_lock, flags);
}
#endif

/**
 * depot_save_stack - save stack in a stack depot.
 * @trace: the stacktrace to save.
 * @alloc_flags: flags for allocating additional memory if required.
 *
 * Returns the handle of the stack struct stored in depot.
 */
depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
				      gfp_t alloc_flags)
{
	u32 hash;
	depot_stack_handle_t retval = 0;
	struct stack_record *found = NULL, **bucket;
	unsigned long flags;
	struct page *page = NULL;
	void *prealloc = NULL;

	if (unlikely(trace->nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(trace->entries, trace->nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), trace->entries,
			   trace->nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory now - we won't be able to
	 * do that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in
		 * atomic contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(trace->entries, trace->nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(depot_save_stack);
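
/*
 * A minimal usage sketch (illustrative only, the calling code below is
 * hypothetical): capture a trace with the struct stack_trace API of this
 * kernel generation, store it once, and keep just the 32-bit handle.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 2,
 *	};
 *	depot_stack_handle_t handle;
 *
 *	save_stack_trace(&trace);
 *	handle = depot_save_stack(&trace, GFP_NOWAIT);
 *
 * Later, depot_fetch_stack(handle, &trace) points trace->entries at the
 * record stored in the depot (no copy is made), and the trace can then be
 * printed with print_stack_trace(&trace, 0).
 */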