--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -26,6 +26,38 @@
 extern void (*r4k_blast_dcache)(void);
 extern void (*r4k_blast_icache)(void);
 
+#if defined(CONFIG_BCM47XX) && !defined(CONFIG_CPU_MIPS32_R2)
+#include <asm/paccess.h>
+#include <linux/ssb/ssb.h>
+#define BCM4710_DUMMY_RREG() bcm4710_dummy_rreg()
+
+static inline unsigned long bcm4710_dummy_rreg(void)
+{
+	return *(volatile unsigned long *)(KSEG1ADDR(SSB_ENUM_BASE));
+}
+
+#define BCM4710_FILL_TLB(addr) bcm4710_fill_tlb((void *)(addr))
+
+static inline unsigned long bcm4710_fill_tlb(void *addr)
+{
+	return *(unsigned long *)addr;
+}
+
+#define BCM4710_PROTECTED_FILL_TLB(addr) bcm4710_protected_fill_tlb((void *)(addr))
+
+static inline void bcm4710_protected_fill_tlb(void *addr)
+{
+	unsigned long x;
+	get_dbe(x, (unsigned long *)addr);
+}
+
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations. Two issues here:
@@ -56,6 +88,7 @@ static inline void flush_icache_line_ind
 
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	cache_op(Index_Writeback_Inv_D, addr);
 }
 
@@ -79,11 +112,13 @@ static inline void flush_icache_line(uns
 
 static inline void flush_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Writeback_Inv_D, addr);
 }
 
 static inline void invalidate_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Invalidate_D, addr);
 }
 
@@ -156,6 +191,7 @@ static inline int protected_flush_icache
 #ifdef CONFIG_EVA
 	return protected_cachee_op(Hit_Invalidate_I, addr);
 #else
+	BCM4710_DUMMY_RREG();
 	return protected_cache_op(Hit_Invalidate_I, addr);
 #endif
 }
@@ -169,6 +205,7 @@ static inline int protected_flush_icache
  */
 static inline int protected_writeback_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 #ifdef CONFIG_EVA
 	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
 #else
@@ -526,8 +563,51 @@ static inline void invalidate_tcache_pag
 		: "r" (base),						\
 		  "i" (op));
 
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+	unsigned long end = (start + dcache_size);
+
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Index_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+
+	BCM4710_FILL_TLB(start);
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Hit_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+			       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+
+	for (ws = 0; ws < ws_end; ws += ws_inc) {
+		start = page + ws;
+		for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+			BCM4710_DUMMY_RREG();
+			cache_op(Index_Writeback_Inv_D, addr);
+		}
+	}
+}
+
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra, war) \
 static inline void extra##blast_##pfx##cache##lsize(void)		\
 {									\
 	unsigned long start = INDEX_BASE;				\
@@ -537,6 +617,7 @@ static inline void extra##blast_##pfx##c
 		current_cpu_data.desc.waybit;				\
 	unsigned long ws, addr;						\
 									\
+	war								\
 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
 		for (addr = start; addr < end; addr += lsize * 32)	\
 			cache##lsize##_unroll32(addr|ws, indexop);	\
@@ -547,6 +628,7 @@ static inline void extra##blast_##pfx##c
 	unsigned long start = page;					\
 	unsigned long end = page + PAGE_SIZE;				\
 									\
+	war								\
 	do {								\
 		cache##lsize##_unroll32(start, hitop);			\
 		start += lsize * 32;					\
@@ -563,31 +645,32 @@ static inline void extra##blast_##pfx##c
 		current_cpu_data.desc.waybit;				\
 	unsigned long ws, addr;						\
 									\
+	war								\
 	for (ws = 0; ws < ws_end; ws += ws_inc)				\
 		for (addr = start; addr < end; addr += lsize * 32)	\
 			cache##lsize##_unroll32(addr|ws, indexop);	\
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, , BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, , )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, , )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, , )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, , )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, , )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, , )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, , )
 
 #define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
 static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
@@ -612,58 +695,29 @@ __BUILD_BLAST_USER_CACHE(d, dcache, Inde
 __BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra, war, war2) \
 static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
 {									\
 	unsigned long lsize = cpu_##desc##_line_size();			\
-	unsigned long lsize_2 = lsize * 2;				\
-	unsigned long lsize_3 = lsize * 3;				\
-	unsigned long lsize_4 = lsize * 4;				\
-	unsigned long lsize_5 = lsize * 5;				\
-	unsigned long lsize_6 = lsize * 6;				\
-	unsigned long lsize_7 = lsize * 7;				\
-	unsigned long lsize_8 = lsize * 8;				\
 	unsigned long addr = start & ~(lsize - 1);			\
-	unsigned long aend = (end + lsize - 1) & ~(lsize - 1);		\
-	int lines = (aend - addr) / lsize;				\
-									\
-	while (lines >= 8) {						\
-		prot##cache_op(hitop, addr);				\
-		prot##cache_op(hitop, addr + lsize);			\
-		prot##cache_op(hitop, addr + lsize_2);			\
-		prot##cache_op(hitop, addr + lsize_3);			\
-		prot##cache_op(hitop, addr + lsize_4);			\
-		prot##cache_op(hitop, addr + lsize_5);			\
-		prot##cache_op(hitop, addr + lsize_6);			\
-		prot##cache_op(hitop, addr + lsize_7);			\
-		addr += lsize_8;					\
-		lines -= 8;						\
-	}								\
-									\
-	if (lines & 0x4) {						\
-		prot##cache_op(hitop, addr);				\
-		prot##cache_op(hitop, addr + lsize);			\
-		prot##cache_op(hitop, addr + lsize_2);			\
-		prot##cache_op(hitop, addr + lsize_3);			\
-		addr += lsize_4;					\
-	}								\
+	unsigned long aend = (end - 1) & ~(lsize - 1);			\
 									\
-	if (lines & 0x2) {						\
-		prot##cache_op(hitop, addr);				\
-		prot##cache_op(hitop, addr + lsize);			\
-		addr += lsize_2;					\
-	}								\
+	war								\
 									\
-	if (lines & 0x1) {						\
+	while (1) {							\
+		war2							\
 		prot##cache_op(hitop, addr);				\
+		if (addr == aend)					\
+			break;						\
+		addr += lsize;						\
 	}								\
 }
 
 #ifndef CONFIG_EVA
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, , BCM4710_PROTECTED_FILL_TLB(addr); BCM4710_PROTECTED_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, , , )
 
 #else
 
@@ -697,15 +751,15 @@ __BUILD_PROT_BLAST_CACHE_RANGE(d, dcache
 __BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
 
 #endif
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, , , )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
-	protected_, loongson2_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
+	protected_, loongson2_, , )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , , BCM4710_FILL_TLB(addr); BCM4710_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , , , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , , , )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , , , BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , , , )
 
 /* Currently, this is very specific to Loongson-3 */
 #define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
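
For illustration, here is a hand expansion of the protected dcache range
function generated by the new macro. This function never appears literally in
the patch: the body below is derived mechanically from
__BUILD_BLAST_CACHE_RANGE with the "war"/"war2" arguments used above, and the
comments are this annotation's reading of the workaround, not text from the
patch:

	static inline void protected_blast_dcache_range(unsigned long start,
							unsigned long end)
	{
		unsigned long lsize = cpu_dcache_line_size();
		unsigned long addr = start & ~(lsize - 1);
		unsigned long aend = (end - 1) & ~(lsize - 1);

		/* war: touch both ends of the range via get_dbe() so their
		 * TLB entries are present before any cache op is issued. */
		BCM4710_PROTECTED_FILL_TLB(addr);
		BCM4710_PROTECTED_FILL_TLB(aend);

		while (1) {
			/* war2: uncached dummy read before each hit-type op */
			BCM4710_DUMMY_RREG();
			protected_cache_op(Hit_Writeback_Inv_D, addr);
			if (addr == aend)
				break;
			addr += lsize;
		}
	}

Note that the rewrite also drops the old 8-way manual unrolling for every
instantiation, BCM4710 or not; when the workaround macros expand to nothing,
the loop above is a straightforward equivalent of the old unrolled code.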
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -429,6 +429,10 @@
 #else
 		.set	push
 		.set	arch=r4000
+#ifdef CONFIG_BCM47XX
+		nop
+		nop
+#endif
 		eret
 		.set	pop
 #endif
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -21,6 +21,19 @@
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_BCM47XX
+# ifdef eret
+#  undef eret
+# endif
+# define eret					\
+	.set push;				\
+	.set noreorder;				\
+	nop;					\
+	nop;					\
+	eret;					\
+	.set pop;
+#endif
+
 	__INIT
 
 /*
@@ -32,6 +45,9 @@
 NESTED(except_vec3_generic, 0, sp)
 	.set	push
 	.set	noat
+#ifdef CONFIG_BCM47XX
+	nop
+#endif
 	mfc0	k1, CP0_CAUSE
 	andi	k1, k1, 0x7c
 #ifdef CONFIG_64BIT
@@ -52,6 +68,9 @@ NESTED(except_vec3_r4000, 0, sp)
 	.set	push
 	.set	arch=r4000
 	.set	noat
+#ifdef CONFIG_BCM47XX
+	nop
+#endif
 	mfc0	k1, CP0_CAUSE
 	li	k0, 31<<2
 	andi	k1, k1, 0x7c
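
Since .S files are fed through the C preprocessor, the #define above quietly
rewrites every later use of eret in genex.S (the eret inside the macro body is
not re-expanded, because cpp never recursively expands a macro within its own
replacement list). Each one therefore assembles as:

	.set	push
	.set	noreorder
	nop
	nop
	eret
	.set	pop

which is the same two-nop padding that the stackframe.h hunk above open-codes
for its single eret site. The extra nop at the top of each exception vector
appears to serve the mirror-image purpose, padding the first instruction
executed after an exception; the patch does not document the exact BCM4710
erratum being dodged, so that reading is an inference from the code.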
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -39,6 +39,9 @@
 #include <asm/dma-coherence.h>
 #include <asm/mips-cps.h>
 
+/* For enabling BCM4710 cache workarounds */
+static int bcm4710 = 0;
+
 /*
  * Bits describing what cache ops an SMP callback function may perform.
  *
@@ -190,6 +193,9 @@ static void r4k_blast_dcache_user_page_s
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_user_page = blast_dcache_page;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_user_page = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -208,6 +214,9 @@ static void r4k_blast_dcache_page_indexe
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page_indexed = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -227,6 +236,9 @@ static void r4k_blast_dcache_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache = blast_dcache;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -1779,6 +1791,17 @@ static void coherency_setup(void)
 	 * silly idea of putting something else there ...
 	 */
 	switch (current_cpu_type()) {
+	case CPU_BMIPS3300:
+		{
+			u32 cm;
+			cm = read_c0_diag();
+			/* Enable icache */
+			cm |= (1 << 31);
+			/* Enable dcache */
+			cm |= (1 << 30);
+			write_c0_diag(cm);
+		}
+		break;
 	case CPU_R4000PC:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -1825,6 +1848,15 @@ void r4k_cache_init(void)
 	extern void build_copy_page(void);
 	struct cpuinfo_mips *c = &current_cpu_data;
 
+	/* Check if special workarounds are required */
+#if defined(CONFIG_BCM47XX) && !defined(CONFIG_CPU_MIPS32_R2)
+	if (current_cpu_data.cputype == CPU_BMIPS32 && (current_cpu_data.processor_id & 0xff) == 0) {
+		printk("Enabling BCM4710A0 cache workarounds.\n");
+		bcm4710 = 1;
+	} else
+#endif
+	bcm4710 = 0;
+
 	probe_pcache();
 	probe_vcache();
 	setup_scache();
@@ -1901,7 +1933,15 @@ void r4k_cache_init(void)
 	 */
 	local_r4k___flush_cache_all(NULL);
 
+#ifdef CONFIG_BCM47XX
+	{
+		static void (*_coherency_setup)(void);
+		_coherency_setup = (void (*)(void)) KSEG1ADDR(coherency_setup);
+		_coherency_setup();
+	}
+#else
 	coherency_setup();
+#endif
 	board_cache_error_setup = r4k_cache_error_setup;
 
 	/*
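
Two details of the c-r4k.c hunks are worth spelling out. First, the runtime
switch is flipped only for a BMIPS32 core whose PRID revision field is 0
(the BCM4710A0 stepping), and only when the kernel is not built for mips32r2.
Second, coherency_setup() is called through KSEG1ADDR() so that it executes
from the uncached window: on MIPS32, KSEG0 (cached) and KSEG1 (uncached) are
fixed, unmapped aliases of the same physical memory, so re-basing a kernel
text address into KSEG1 runs the very same instructions without touching the
I-cache while the routine reconfigures cache state. A minimal sketch of that
pattern (call_uncached() is a hypothetical helper invented for this note, not
code from the patch):

	/* Run fn() through its uncached KSEG1 alias. Assumes fn() lives in
	 * KSEG0 kernel text and is safe to execute uncached. */
	static void call_uncached(void (*fn)(void))
	{
		void (*uncached_fn)(void) = (void (*)(void))KSEG1ADDR(fn);

		uncached_fn();
	}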
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -980,6 +980,9 @@ void build_get_pgde32(u32 **p, unsigned
 		uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
 		uasm_i_addu(p, ptr, tmp, ptr);
 #else
+#ifdef CONFIG_BCM47XX
+		uasm_i_nop(p);
+#endif
 		UASM_i_LA_mostly(p, ptr, pgdc);
 #endif
 		uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
@@ -1341,6 +1344,9 @@ static void build_r4000_tlb_refill_handl
 #ifdef CONFIG_64BIT
 	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
+# ifdef CONFIG_BCM47XX
+	uasm_i_nop(&p);
+# endif
 	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
@@ -1352,6 +1358,9 @@ static void build_r4000_tlb_refill_handl
 	build_update_entries(&p, K0, K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
 	uasm_l_leave(&l, p);
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(&p);
+#endif
 	uasm_i_eret(&p); /* return from trap */
 }
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2052,6 +2061,9 @@ build_r4000_tlbchange_handler_head(u32 *
 #ifdef CONFIG_64BIT
 	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
 #else
+# ifdef CONFIG_BCM47XX
+	uasm_i_nop(p);
+# endif
 	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
 #endif
 
@@ -2098,6 +2110,9 @@ build_r4000_tlbchange_handler_tail(u32 *
 	build_tlb_write_entry(p, l, r, tlb_indexed);
 	uasm_l_leave(l, *p);
 	build_restore_work_registers(p);
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(p);
+#endif
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
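
The tlbex.c hunks extend the same padding into the uasm-generated TLB handlers
that genex.S and stackframe.h apply to the hand-written ones: a nop before
each generated eret, and a nop ahead of the first pgd-pointer load in the
32-bit path. As a sketch, the eret half of the pattern could be captured in a
wrapper like this (hypothetical, invented for this note; uasm_i_nop() and
uasm_i_eret() are the real uasm emitters used above):

	/* Emit eret, nop-padded on BCM47XX to match the hand-written
	 * exception return paths. */
	static void uasm_i_eret_padded(u32 **p)
	{
	#ifdef CONFIG_BCM47XX
		uasm_i_nop(p);
	#endif
		uasm_i_eret(p);
	}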