/*
 *  linux/arch/arm/mm/tlb-v7.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *  Modified for ARMv7 by Catalin Marinas
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARM architecture version 7 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 *	v7wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
ENTRY(v7wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ get vma->vm_mm
	mmid	r3, r3				@ get vm_mm->context.id
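	@ ensure prior page table updates have completed before invalidating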
	dsb	ish
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	asid	r3, r3				@ mask ASID
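	/*
	 * Cortex-A9 erratum 720789: TLBIMVAIS can broadcast a faulty ASID,
	 * so clear the ASID field here and use the all-ASID invalidate
	 * (TLBIMVAAIS) in the loop below.
	 */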
#ifdef CONFIG_ARM_ERRATA_720789
	ALT_SMP(W(mov)	r3, #0	)
	ALT_UP(W(nop)		)
#endif
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
	mov	r1, r1, lsl #PAGE_SHIFT
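	/*
	 * r0 is now the invalidate-by-MVA operand: VA[31:12] with the
	 * ASID in bits [7:0]; r1 is the exclusive end of the range.
	 */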
1:
#ifdef CONFIG_ARM_ERRATA_720789
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 3)	@ TLB invalidate U MVA all ASID (shareable)
#else
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
#endif
	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA

	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
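	@ wait for the invalidates to complete before returning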
	dsb	ish
	ret	lr
ENDPROC(v7wbi_flush_user_tlb_range)

/*
 *	v7wbi_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
ENTRY(v7wbi_flush_kern_tlb_range)
	dsb	ish
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	mov	r0, r0, lsl #PAGE_SHIFT
	mov	r1, r1, lsl #PAGE_SHIFT
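	/*
	 * Kernel mappings are global, so no ASID is folded into the
	 * address; invalidate the range one page at a time.
	 */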
1:
#ifdef CONFIG_ARM_ERRATA_720789
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 3)	@ TLB invalidate U MVA all ASID (shareable)
#else
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
#endif
	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
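	/*
	 * Wait for the invalidates to complete, then synchronize the
	 * pipeline so subsequent fetches use the updated translations.
	 */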
	dsb	ish
	isb
	ret	lr
ENDPROC(v7wbi_flush_kern_tlb_range)

	__INIT

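	/*
	 * The macro below emits v7wbi_tlb_fns, pairing the two routines
	 * above with the UP/SMP hardware TLB flags.
	 */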
	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp