/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable
 * walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, then free the page. Since the
 * disabling of IRQs delays the completion of the TLB flush we can never
 * observe an already freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by
 * some other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt
 * disabling holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and
 * this allocation is deep inside the MM code and can thus easily fail on
 * memory pressure. To guarantee progress we fall back to single table
 * freeing; see the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
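
/*
 * Worked example (illustrative, assuming a 64-bit build with
 * PAGE_SIZE == 4096): struct mmu_table_batch is 24 bytes (a 16-byte
 * rcu_head plus nr, padded to pointer alignment), so
 * MAX_TABLE_BATCH = (4096 - 24) / 8 = 509 table pointers per batch page.
 */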

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
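
/*
 * A minimal sketch of how an architecture is expected to use this
 * (illustrative only; the __pte_free_tlb() mapping below is an assumed
 * example, not a definition from this header): instead of freeing a
 * page-table page directly, the arch hands it to tlb_remove_table(),
 * which batches it and frees it after an RCU-sched grace period, falling
 * back to tlb_remove_table_one() when no batch page can be allocated:
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_table((tlb), (ptep))
 *
 * A lockless walker such as gup_fast() then only has to keep IRQs
 * disabled over its walk to be safe against the table disappearing
 * underneath it.
 */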

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
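
/*
 * Worked example (illustrative, assuming a 64-bit build with
 * PAGE_SIZE == 4096): struct mmu_gather_batch is 16 bytes (one pointer
 * plus two unsigned ints), so MAX_GATHER_BATCH = (4096 - 16) / 8 = 510
 * pages per batch, and MAX_GATHER_BATCH_COUNT = 10000UL / 510 = 19
 * batches, i.e. roughly 9.7K pages freed per mmu_gather before a flush
 * is forced.
 */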

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;		/* Page tables pending RCU free */
#endif
	unsigned int		need_flush : 1,	/* Did free PTEs */
				fast_mode  : 1;	/* No batching */

	unsigned int		fullmm;		/* Tearing down the whole mm */

	struct mmu_gather_batch	*active;	/* Batch currently being filled */
	struct mmu_gather_batch	local;		/* On-stack starter batch */
	struct page		*__pages[MMU_GATHER_BUNDLE];	/* local's storage */
	unsigned int		batch_count;	/* Batches allocated, capped at
						 * MAX_GATHER_BATCH_COUNT */
};

#define HAVE_GENERIC_MMU_GATHER

static inline int tlb_fast_mode(struct mmu_gather *tlb)
{
#ifdef CONFIG_SMP
	return tlb->fast_mode;
#else
	/*
	 * For UP we don't need to worry about TLB flush
	 * and page free order so much..
	 */
	return 1;
#endif
}

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
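
/*
 * A minimal end-to-end sketch of the intended calling sequence
 * (illustrative; the pte walk and the ptep/page/addr/start/end variables
 * are stand-ins, not part of this header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, false);		// false: partial unmap
 *	// for each pte in [start, end):
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	// mark for invalidate
 *		tlb_remove_page(&tlb, page);		// batch; flushes when full
 *	tlb_finish_mmu(&tlb, start, end);	// final flush, frees batches
 */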

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 *
 * The arch hook __tlb_remove_pmd_tlb_entry() is a nop so far, because only
 * x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep, address)		\
	do {						\
		tlb->need_flush = 1;			\
		__pte_free_tlb(tlb, ptep, address);	\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)		\
	do {						\
		tlb->need_flush = 1;			\
		__pud_free_tlb(tlb, pudp, address);	\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)		\
	do {						\
		tlb->need_flush = 1;			\
		__pmd_free_tlb(tlb, pmdp, address);	\
	} while (0)
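
/*
 * Illustrative note (not from this header): page-table teardown is
 * expected to go through the *_free_tlb() helpers above rather than
 * freeing directly, so the table pages ride the same mmu_gather batching
 * and are only reused after the TLB invalidate, e.g. bottom up:
 *
 *	pte_free_tlb(tlb, ptep, addr);	// queue the pte page
 *	pmd_free_tlb(tlb, pmdp, addr);	// then the pmd page
 *	pud_free_tlb(tlb, pudp, addr);	// then the pud page
 */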

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */