/*
 * Copyright (c) 2014, Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23
#include <asm.h>
#include <arch/ops.h>
#include <arch/defines.h>

/* CLIDR_EL1.LoC (Level of Coherence) lives in bits [26:24] */
#define LOC_SHIFT           24
#define CLIDR_FIELD_WIDTH   3
/* CSSELR_EL1 holds the cache level in bits [3:1], i.e. level << 1 */
#define LEVEL_SHIFT         1
/* index into dcsw_loop_table: invalidate vs clean+invalidate by set/way */
#define DCISW               0x0
#define DCCISW              0x1

.text

/* Apply "\cache \op" (e.g. "dc cvac") by MVA to every cache line that
 * intersects [x0, x0 + x1), then dsb sy. Trashes x2, x3.
 *
 * Fix: return early when the length (x1) is zero. The previous do-while
 * shape always touched at least one line, which for the invalidating
 * variants could throw away dirty data in a line the caller never named.
 */
.macro cache_range_op, cache op
    cbz     x1, .Lcache_range_op_done\@ // zero length: nothing to do
    add     x2, x0, x1                  // x2 = end address (exclusive)
    bic     x3, x0, #(CACHE_LINE-1)     // align the start with a cache line
.Lcache_range_op_loop\@:
    \cache  \op, x3
    add     x3, x3, #CACHE_LINE
    cmp     x3, x2
    blo     .Lcache_range_op_loop\@
.Lcache_range_op_done\@:
    dsb     sy                          // complete the maintenance ops
.endm
46
    /* void arch_clean_cache_range(addr_t start, size_t len);
     * Clean (write back, without invalidating) every dcache line touching
     * [start, start+len) to the Point of Coherency.
     * NOTE(review): comment previously named this arch_flush_cache_range;
     * corrected to match the actual symbol. */
FUNCTION(arch_clean_cache_range)
    cache_range_op dc cvac         // clean cache to PoC by MVA
    ret
51
    /* void arch_clean_invalidate_cache_range(addr_t start, size_t len);
     * Write back and then invalidate every dcache line touching
     * [start, start+len) to the Point of Coherency.
     * NOTE(review): comment previously named this
     * arch_flush_invalidate_cache_range; corrected to match the symbol. */
FUNCTION(arch_clean_invalidate_cache_range)
    cache_range_op dc civac        // clean & invalidate dcache to PoC by MVA
    ret
56
    /* void arch_invalidate_cache_range(addr_t start, size_t len);
     * Invalidate (discard, without writing back) every dcache line touching
     * [start, start+len) to the Point of Coherency. Destructive: any dirty
     * data in those lines is lost — callers must own the whole range. */
FUNCTION(arch_invalidate_cache_range)
    cache_range_op dc ivac         // invalidate dcache to PoC by MVA
    ret
61
62 /* void arch_sync_cache_range(addr_t start, size_t len); */
63FUNCTION(arch_sync_cache_range)
64 cache_range_op dc cvau // clean dcache to PoU by MVA
65 cache_range_op ic ivau // invalidate icache to PoU by MVA
66 ret
67
/* do_dcsw_op: perform a dcache maintenance operation by set/way over a
 * range of cache levels (structured like Arm Trusted Firmware's helper).
 *
 * In:  x0  = op selector, indexes dcsw_loop_table (DCISW=0, DCCISW=1)
 *      x3  = level bound in CSSELR form (level << 1); 0 = nothing to do
 *      x9  = CLIDR_EL1 value
 *      x10 = starting cache level in CSSELR form (level << 1)
 * will trash x0-x2, x4-x9, x11, x14, x16-x17 */
LOCAL_FUNCTION(do_dcsw_op)
    cbz x3, exit                   // no cache levels to operate on
    adr x14, dcsw_loop_table       // compute inner loop address
    add x14, x14, x0, lsl #5       // inner loop is 8x32-bit instructions
    mov x0, x9                     // x0 = clidr; frees x9 for way numbers
    mov w8, #1                     // constant used to build loop decrements
loop1:
    add x2, x10, x10, lsr #1       // work out 3x current cache level
    lsr x1, x0, x2                 // extract cache type bits from clidr
    and x1, x1, #7                 // mask the bits for current cache only
    cmp x1, #2                     // see what cache we have at this level
    b.lt level_done                // nothing to do if no cache or icache

    msr csselr_el1, x10            // select current cache level in csselr
    isb                            // isb to sync the new csselr & ccsidr
    mrs x1, ccsidr_el1             // read the new ccsidr
    and x2, x1, #7                 // extract the length of the cache lines
    add x2, x2, #4                 // add 4 (line length offset: log2 bytes)
    ubfx x4, x1, #3, #10           // maximum way number (CCSIDR[12:3])
    clz w5, w4                     // bit position of way size increment
    lsl w9, w4, w5                 // w9 = max way number aligned to bit 31
    lsl w16, w8, w5                // w16 = way number loop decrement
    orr w9, w10, w9                // w9 = combine way and cache number
    ubfx w6, w1, #13, #15          // w6 = max set number (CCSIDR[27:13])
    lsl w17, w8, w2                // w17 = set number loop decrement
    dsb sy                         // barrier before we start this level
    br x14                         // jump to DC operation specific loop

    /* Emit one specialized set/way loop for \_op. Each expansion must be
     * exactly 8 instructions (32 bytes) so the "lsl #5" table indexing
     * above stays valid. */
    .macro dcsw_loop _op
loop2_\_op:
    lsl w7, w6, w2                 // w7 = max set number in position

loop3_\_op:
    orr w11, w9, w7                // combine cache, way and set number
    dc \_op, x11
    subs w7, w7, w17               // decrement set number
    b.ge loop3_\_op

    subs x9, x9, x16               // decrement way number (64-bit sub:
                                   // w9 may have bit 31 set by design)
    b.ge loop2_\_op

    b level_done
.endm

level_done:
    add x10, x10, #2               // next cache level (CSSELR step is 2)
    cmp x3, x10
    b.gt loop1                     // more levels below the bound
    msr csselr_el1, xzr            // select cache level 0 in csselr
    dsb sy                         // barrier to complete final cache operation
    isb
exit:
    ret

/* One 8-instruction loop per op, indexed from do_dcsw_op by x0 << 5. */
dcsw_loop_table:
    dcsw_loop isw
    dcsw_loop cisw
    dcsw_loop csw
127
/* Invoke do_dcsw_op for all dcache levels below the CLIDR_EL1 field at
 * bit \shift (width \fw), converted to CSSELR form via \ls. Caller must
 * have placed the op selector (DCISW/DCCISW) in x0 beforehand.
 * will trash x3, x9, x10 (plus everything do_dcsw_op trashes, incl. x30) */
.macro dcsw_op shift, fw, ls
    mrs x9, clidr_el1              // x9 = cache hierarchy description
    ubfx x3, x9, \shift, \fw       // x3 = level bound field (e.g. LoC)
    lsl x3, x3, \ls                // convert to CSSELR form (level << 1)
    mov x10, xzr                   // start at cache level 0
    bl do_dcsw_op
.endm
136
/* void arch_enable_cache(uint flags);
 * For EL1 only (operates on SCTLR_EL1).
 *
 * flags: DCACHE and/or ICACHE bits.
 * DCACHE: if SCTLR_EL1.C (bit 2) is clear, invalidate the dcache by
 *         set/way, then set the C bit.
 * ICACHE: if SCTLR_EL1.I (bit 12) is clear, invalidate the whole icache
 *         to PoU, then set the I bit.
 * Already-enabled caches are left untouched.
 */
FUNCTION(arch_enable_cache)
    stp x29, x30, [sp, #-32]!      // frame: dcsw_op performs a bl
    stp x24, x25, [sp, #16]        // preserve callee-saved scratch regs

    mov x25, x0                    // x25 = flags (survives dcsw_op)
    /* check DCACHE flag */
    tst x25, #DCACHE
    b.eq .L__enable_icache
    mrs x24, sctlr_el1
    tst x24, #(1<<2)               // SCTLR_EL1.C already set?
    b.ne .L__enable_icache

    /* invalidate dcache by set/way before turning it on */
    mov x0, #DCISW
    dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT

    /* enable dcache enable bit (SCTLR_EL1.C) */
    orr x24, x24, #(1<<2)
    msr sctlr_el1, x24

.L__enable_icache:
    /* check ICACHE flag */
    tst x25, #ICACHE
    b.eq .L__done_enable
    mrs x24, sctlr_el1
    tst x24, #(1<<12)              // SCTLR_EL1.I already set?
    b.ne .L__done_enable

    /* invalidate entire icache to PoU */
    dsb sy
    ic iallu
    dsb sy
    isb

    /* enable icache enable bit (SCTLR_EL1.I) */
    mrs x24, sctlr_el1
    orr x24, x24, #(1<<12)
    msr sctlr_el1, x24

.L__done_enable:
    ldp x24, x25, [sp, #16]
    ldp x29, x30, [sp], #32
    ret
183
/* void arch_disable_cache(uint flags)
 * For EL1 only (operates on SCTLR_EL1).
 *
 * flags: DCACHE and/or ICACHE bits.
 * DCACHE: clear SCTLR_EL1.C (bit 2), then clean & invalidate the dcache
 *         by set/way; if the dcache was already off, invalidate only.
 * ICACHE: clear SCTLR_EL1.I (bit 12), then invalidate the icache to PoU.
 */
FUNCTION(arch_disable_cache)
    stp x29, x30, [sp, #-32]!      // frame: dcsw_op performs a bl
    str x25, [sp, #16]             // preserve callee-saved scratch reg

    mov x25, x0                    // x25 = flags (survives dcsw_op)
    /* check DCACHE flag */
    tst x25, #DCACHE
    b.eq .L__disable_icache
    mrs x1, sctlr_el1
    tst x1, #(1<<2)                // is SCTLR_EL1.C currently set?
    b.eq .L__dcache_already_disabled

    /* disable dcache enable bit (SCTLR_EL1.C) before maintenance */
    bic x1, x1, #(1<<2)
    msr sctlr_el1, x1

    /* clean & invalidate dcache by set/way */
    mov x0, #DCCISW
    b .L__flush_dcache

.L__dcache_already_disabled:
    /* dcache already off: invalidate only, no clean */
    mov x0, #DCISW
.L__flush_dcache:
    dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT

.L__disable_icache:
    /* check ICACHE flag */
    tst x25, #ICACHE
    b.eq .L__done_disable
    /* disable icache enable bit (SCTLR_EL1.I) */
    mrs x1, sctlr_el1
    bic x1, x1, #(1<<12)
    msr sctlr_el1, x1

    /* invalidate icache for PE to PoU */
    dsb sy
    ic iallu
    dsb sy
    isb

.L__done_disable:
    ldr x25, [sp, #16]
    ldp x29, x30, [sp], #32
    ret