/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

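/*
 * Broadcast invalidation (tlbie) of a single effective address for the
 * given process ID; "ap" encodes the actual page size of the entry.
 */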
static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

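/*
 * POWER9 tlbie errata workaround: when CPU_FTR_P9_TLBIE_ERAT_BUG or
 * CPU_FTR_P9_TLBIE_STQ_BUG is set, follow the real invalidation with an
 * extra ptesync + tlbie so the flush is not lost.
 */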
static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
				  unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
	}
}

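/*
 * Local invalidation (tlbiel, this core only) of one TLB set for the
 * given process ID.
 */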
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

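/*
 * Broadcast invalidation of all process-scoped entries for a PID;
 * "ric" selects the TLB, the page walk cache, or both.
 */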
static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void fixup_tlbie_pid(unsigned long pid)
{
	/*
	 * We can use any address for the invalidation, pick one which is
	 * probably unused as an optimisation.
	 */
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

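/*
 * Global flush for a PID: ptesync before the tlbie(s), then
 * eieio; tlbsync; ptesync to order the invalidation against
 * subsequent accesses on all CPUs.
 */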
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Workaround the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		fixup_tlbie_pid(pid);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
		fixup_tlbie_pid(pid);
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

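/* Local (this core only) flush of a single address for a PID. */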
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

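/* Broadcast flush of a single address, including the errata fixup. */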
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	fixup_tlbie_va(va, pid, ap);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
static void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

static void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}

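/*
 * Flushing the page walk cache is deferred to radix__tlb_flush() by
 * marking this mmu_gather as needing a full flush.
 */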
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

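/* Kernel translations live under PID 0, so flush everything for PID 0. */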
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

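/* Map a page size in bytes to the MMU_PAGE_* index, or -1 if unsupported. */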
static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

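/*
 * Called at the end of an mmu_gather: pick between a range flush, a
 * full flush including the page walk cache, or a plain TLB flush of
 * the whole mm.
 */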
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * if page size is not something we understand, do a full mm flush
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else if (tlb->need_flush_all) {
		tlb->need_flush_all = 0;
		radix__flush_all_mm(mm);
	} else
		radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a bcast tlbie. Just a
 * number at this point copied from x86
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
err_out:
	preempt_enable();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
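/*
 * Flush stale translations after a PMD range has been collapsed into a
 * huge page: flush the page walk cache for the PID, then each small
 * page in the range. With a 4K base page size, just flush everything.
 */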
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;

	pid = mm ? mm->context.id : 0;
	preempt_disable();
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		preempt_enable();
		return;
	}

	/* Otherwise first do the PWC */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	else
		_tlbie_pid(pid, RIC_FLUSH_PWC);

	/* Then iterate the pages */
	end = addr + HPAGE_PMD_SIZE;
	for (; addr < end; addr += PAGE_SIZE) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
no_context:
	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

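/*
 * Partition-scoped invalidation of a single guest physical address
 * for the given LPID.
 */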
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb,rs,prs,r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

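/* Flush all partition-scoped entries (TLB and PWC) for an LPID. */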
void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb,rs,prs,r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

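/* Flush all translations, host and guest, using IS = 3 (match any entry). */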
void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track page size in pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */