/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
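
/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * creating a guarded, uncached mapping of device registers might pass
 * one of these masks to map_kernel_page(). The addresses are hypothetical.
 *
 *	map_kernel_page(virt, phys, PAGE_KERNEL_NCG);
 */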

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write-protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
#ifndef pte_read
static inline int pte_read(pte_t pte) { return 1; }
#endif
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte) { return false; }
static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
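
/*
 * A minimal sketch (hypothetical caller, not part of this file) of how the
 * accessors above compose when classifying an entry:
 *
 *	pte_t pte = *ptep;
 *
 *	if (pte_present(pte) && pte_write(pte) && !pte_ci(pte))
 *		handle_cacheable_writable(pte);    <- hypothetical helper
 */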

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif

/*
 * We only find page table entries at the last level, so there is no need
 * for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit.
	 * We have _PAGE_READ set for WRITE and EXECUTE.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
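
/*
 * Sketch of the intended caller pattern (as in the generic fast-GUP walk;
 * illustrative only, label and flags are the caller's):
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		goto pte_unmap;
 */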

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
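
/*
 * Illustrative only: the PFN lives in the bits at and above PTE_RPN_SHIFT
 * and the protection bits below it, so (assuming the protection bits fit
 * under PTE_RPN_SHIFT) a round trip preserves the PFN:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL_RO);
 *
 *	WARN_ON(pte_pfn(pte) != pfn);
 */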

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

#ifndef pte_mkclean
static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
#endif

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
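
/*
 * Illustrative only: the modifiers compose, and pte_modify() preserves the
 * bits in _PAGE_CHG_MASK while replacing the remaining protection bits:
 *
 *	pte = pte_mkclean(pte_mkold(pte));
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */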

/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* 32-bit with 64-bit PTEs: we can just store the two halves in the
	 * right order with a barrier in between. In the percpu case, we
	 * fall back to the simple update below.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			eieio\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}
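
/*
 * Typical use (sketch with hypothetical variables): callers go through the
 * out-of-line set_pte_at() rather than calling __set_pte_at() directly:
 *
 *	pte_t pte = pfn_pte(pfn, vma->vm_page_prot);
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 */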

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to adjust the cache-control bits in a page protection value,
 * e.g. to mark it as "uncacheable".
 */
#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_NO_CACHE))

#define pgprot_cached(prot)		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot)	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
						  _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
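
/*
 * Illustrative only: a driver's mmap() handler would typically apply one of
 * these helpers before remapping MMIO into userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */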

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & 0x4) != 0);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int pgd_huge(pgd_t pgd)
{
	return 0;
}
#define pgd_huge pgd_huge

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */