/*
 * linux/arch/arm/mach-pxa/ramdump.c
 *
 * Support for the Marvell PXA RAMDUMP error handling capability.
 *
 * Author: Anton Eidelman (anton.eidelman@marvell.com)
 * Created: May 20, 2010
 * Copyright: (C) Copyright 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/notifier.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kmemleak.h>
#ifndef CONFIG_ARM64
//TODO
//#include <asm/system.h>
#endif
#include <linux/ptrace.h>	/* pt_regs */
#include <linux/sched.h>	/* task_struct */
#include <asm/cacheflush.h>
#include <asm/system_misc.h>	/* arm_pm_restart */
#include <soc/asr/ramdump.h>
#include <soc/asr/ramdump_defs.h>	/* common definitions reused in OSL */
#include <soc/asr/ramdump_util.h>
#ifndef CONFIG_ARM64
#include <soc/asr/hardware.h>
#include <soc/asr/regs-apmu.h>
#include <soc/asr/regs-accu.h>
#endif
#ifdef CONFIG_PXA_MIPSRAM
#include <linux/mipsram.h>
#endif
#include <linux/hardirq.h>
#include <asm/sections.h>

//#include "ramdump_defs.h" /* common definitions reused in OSL */

extern char bug_str[];	/* filled by bug.c:report_bug() */
extern void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static const char *panic_str;	/* set this to panic text */

#ifdef CONFIG_KALLSYMS
/*
 * These will be re-linked against their real values
 * during the second link stage.
 */
extern const unsigned long kallsyms_addresses[] __attribute__((weak));
extern const u8 kallsyms_names[] __attribute__((weak));

/*
 * Tell the compiler that the count isn't in the small data section if the arch
 * has one (eg: FRV).
 */
extern const unsigned long kallsyms_num_syms
		__attribute__((weak, section(".rodata")));

extern const u8 kallsyms_token_table[] __attribute__((weak));
extern const u16 kallsyms_token_index[] __attribute__((weak));

extern const unsigned long kallsyms_markers[] __attribute__((weak));
static struct kallsyms_record records;
#endif

#ifndef CONFIG_ARM64
/* CPU mode registers */
struct mode_regs {
	unsigned spsr;
	unsigned sp;
	unsigned lr;
};
struct usr_regs {
	unsigned sp;
	unsigned lr;
};
struct fiq_regs {
	unsigned spsr;
	unsigned r8;
	unsigned r9;
	unsigned r10;
	unsigned r11;
	unsigned r12;
	unsigned sp;
	unsigned lr;
};

/* CP15 registers */
struct cp15_regs {
	unsigned id;		/* CPU ID */
	unsigned cr;		/* Control */
	unsigned aux_cr;	/* Auxiliary Control */
	unsigned ttb;		/* TTB; PJ4: ttb0 */
	unsigned da_ctrl;	/* Domain Access Control */
	unsigned cpar;		/* Co-processor access control */
	unsigned fsr;		/* PJ4: DFSR */
	unsigned far;		/* PJ4: DFAR */
	unsigned procid;	/* Process ID; PJ4: Context ID */
};

/* CP14 registers */
struct cp14_regs {
	unsigned ccnt;
	unsigned pmnc;
};

/* CP7 registers (L2C/BIU errors) */
struct cp7_regs {
	unsigned errlog;
	unsigned erradrl;
	unsigned erradru;
};

/* CP6 registers (Interrupt Controller) */
struct cp6_regs {
	unsigned icip;
	unsigned icmr;
	unsigned iclr;
	unsigned icfp;
	unsigned icpr;
	unsigned ichp;
	unsigned icip2;
	unsigned icmr2;
	unsigned iclr2;
	unsigned icfp2;
	unsigned icpr2;
	unsigned icip3;
	unsigned icmr3;
	unsigned iclr3;
	unsigned icfp3;
	unsigned icpr3;
};

/* ARMV7 specific cp15 regs. DON'T EXTEND, ADD NEW STRUCTURE TO ramdump_state */
struct cp15_regs_pj4 {
	unsigned seccfg;	/* Secure Configuration */
	unsigned secdbg;	/* Secure Debug Enable */
	unsigned nsecac;	/* Non-secure Access Control */
	unsigned ttb1;		/* TTB1; TTB0 is cp15_regs.ttb */
	unsigned ttbc;		/* TTB Control */
	unsigned ifsr;		/* Instruction FSR; Data FSR is cp15_regs.fsr */
	unsigned ifar;		/* Instruction FAR; Data FAR is cp15_regs.far */
	unsigned auxdfsr;	/* Auxiliary DFSR */
	unsigned auxifsr;	/* Auxiliary IFSR */
	unsigned pa;		/* PA: physical address after translation */
	unsigned prremap;	/* Primary Region Remap */
	unsigned nmremap;	/* Normal Memory Remap */
	unsigned istat;		/* Interrupt Status */
	unsigned fcsepid;	/* FCSE PID */
	unsigned urwpid;	/* User Read/Write Thread and Process ID */
	unsigned uropid;	/* User Read/Only Thread and Process ID */
	unsigned privpid;	/* Privileged Only Thread and Process ID */
	unsigned auxdmc0;	/* Auxiliary Debug Modes Control 0 */
	unsigned auxdmc1;	/* Auxiliary Debug Modes Control 1 */
	unsigned auxfmc;	/* Auxiliary Functional Modes Control */
	unsigned idext;		/* CPU ID code extension */
};

struct l2c_pj4_regs {	/* 3 words */
	unsigned mpidr;
	unsigned reserved1;
	unsigned reserved2;
};

/* ARMV7 performance monitor */
struct pfm_pj4_regs {
	unsigned ctrl;
	unsigned ceset;
	unsigned ceclr;
	unsigned ovf;
	unsigned softinc;
	unsigned csel;
	unsigned ccnt;
	unsigned evsel;
	unsigned pmcnt;
	unsigned uen;
	unsigned ieset;
	unsigned ieclr;
};

/* ACCU and APMU registers: 8 words only */
struct acc_regs {
#ifdef CONFIG_CPU_PXA1986
	unsigned apmu_pcr[5];
	unsigned apmu_psr;
	unsigned apmu_core_status;
	unsigned apmu_core_idle_status;
#else
	unsigned reserved[8];
#endif
};

/* Other SoC registers */
struct soc_regs {
	unsigned reserved1;
	unsigned ser_fuse_reg2;
	union {
		struct {
			unsigned reserved2;
			unsigned reserved3;
		};
	};
	unsigned oem_unique_id_l;
	unsigned oem_unique_id_h;
};
#else
/* EL1 banked SPRs */
struct spr_regs {
	u32 midr;
	u32 revidr;
	u32 current_el;
	u32 sctlr;
	u32 actlr;
	u32 cpacr;
	u32 isr;
	u64 tcr;
	u64 ttbr0;
	u64 ttbr1;
	u64 mair;
	u64 tpidr;
	u64 vbar;
	u32 esr;
	u32 reserved1;
	u64 far;
};
struct soc_regs {
	unsigned reserved1;
};
#endif

/* Main RAMDUMP data structure */
#define RAMDUMP_TXT_SIZE 100
struct ramdump_state {
	unsigned reserved;	/* was rdc_va - RDC header virtual address */
	unsigned rdc_pa;	/* RDC header physical address */
	char text[RAMDUMP_TXT_SIZE];
	unsigned err;
	struct pt_regs regs;	/* saved context */
	struct thread_info *thread;
#ifndef CONFIG_ARM64
	struct mode_regs svc;
	struct usr_regs usr;
	struct mode_regs abt;
	struct mode_regs und;
	struct mode_regs irq;
	struct fiq_regs fiq;
	struct cp15_regs cp15;
	/* Up to this point same structure for XSC and PJ4 */
	union {
		struct {	/* 21 total */
			struct cp14_regs cp14;	/* 2 */
			struct cp6_regs cp6;	/* 16 */
			struct cp7_regs cp7;	/* 3 */
		};	/* XSC */
		struct {	/* 21 total */
			struct cp15_regs_pj4 cp15pj4;
		};	/* PJ4 */
	};
	struct acc_regs acc;
	struct l2c_pj4_regs l2cpj4;
	struct pfm_pj4_regs pfmpj4;
	struct soc_regs soc;
#else
	unsigned spr_size;
	struct spr_regs spr;
	unsigned soc_size;
	struct soc_regs soc;
#endif
} ramdump_data;

static void *isram_va;		/* ioremapped va for ISRAM access */
static struct rdc_area *rdc_va;	/* ioremapped va for rdc_area access */
static unsigned ramdump_in_advance_active;

/* ramdump_level:
 * 0:  do not request ramdump at all
 * 1:  for panic only, ignore User-Space fatal signals and modems:
 *     RAMDUMP_LEVEL_PANIC_ONLY (used on "reboot" Graceful Shutdown)
 * 2:  always ramdump: RAMDUMP_LEVEL_FULL
 * >2: same as 2, just for better debug
 */
unsigned ramdump_level = RAMDUMP_LEVEL_FULL;

/************************************************************************/
/* Internal prototypes */
/************************************************************************/
static void ramdump_save_static_context(struct ramdump_state *d);
static void ramdump_save_current_context(struct ramdump_state *d);
static void ramdump_description_update(struct ramdump_state *d,
		char *txt, int append);
/*GLOBAL void ramdump_description_print(void);*/
static void ramdump_save_isram(void);
static void ramdump_flush_caches(void);
static void ramdump_fill_rdc(void);
static void save_peripheral_regs(struct ramdump_state *d);
static void ramdump_rdc_reset_only(void);

/************************************************************************
 * Ramdump (RD, RDP) enable/disable setup and global utility
 *
 * A customer may enable/disable ramdump on the kernel command line as a
 * configuration option, or at run time from other kernel code or a .ko
 * module.
 *
 * RDP ENABLE can take different "levels" defined by the customer and set
 * via RDEN=nn.
 * Tokens of the form XXXX=0 are not propagated into the kernel command
 * line, so RDEN=0 cannot be used to disable; use RDPD=1 for that.
 *
 ************************************************************************
 */
void ramdump_enable(unsigned level)
{
	ramdump_level = level;
	if (ramdump_level <= RAMDUMP_LEVEL_PANIC_ONLY)
		ramdump_ignore_fatal_signals(0);
}
EXPORT_SYMBOL(ramdump_enable);

static int __init ramdump_enable_setup(char *str)
{
	ramdump_enable(simple_strtoul(str, NULL, 0));
	return 1;
}
__setup("RDEN=", ramdump_enable_setup);

static int __init ramdump_disable_setup(char *str)
{
	ramdump_enable(!simple_strtoul(str, NULL, 0));
	return 1;
}
__setup("RDPD=", ramdump_disable_setup);

/************************************************************************/
/* RDC address setup */
/************************************************************************/
static unsigned long rdc_pa;

static int __init ramdump_rdc_setup(char *str)
{
	if (kstrtoul(str, 16, &rdc_pa))
		return 0;
	return 1;
}
__setup("RDCA=", ramdump_rdc_setup);
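
/*
 * Example (illustrative only, exact tokens are platform/bootloader specific):
 * appending "RDEN=2" to the kernel command line selects RAMDUMP_LEVEL_FULL,
 * "RDPD=1" disables ramdump, and "RDCA=<hex address>" overrides the RDC
 * header physical address parsed by ramdump_rdc_setup() above.
 */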

#if defined(CONFIG_SMP) && !defined(CONFIG_MRVL_PANIC_FLUSH)
static atomic_t waiting_for_cpus;
/* Based on CONFIG_KEXEC code, see arch/arm/kernel/machine_kexec.c */
void ramdump_other_cpu_handler(void *parm)
{
	(void)parm;
	pr_err("RAMDUMP: CPU %u stops\n", smp_processor_id());
	dump_stack();
	flush_cache_all();
	atomic_dec(&waiting_for_cpus);
	/*
	 * Should return, otherwise csd locks will prevent any further
	 * smp calls, which might be attempted by other error handlers.
	 */
}

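/*
 * Signal every other online CPU to dump its stack and flush its caches,
 * waiting up to 10 ms per CPU for acknowledgement via waiting_for_cpus.
 */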
void ramdump_signal_all_cpus(void)
{
	int cpu;
	unsigned long msecs;
	struct cpumask mask;

	pr_err("RAMDUMP: Signalling %d CPUs\n", num_online_cpus() - 1);

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	for_each_cpu(cpu, &mask) {
		atomic_set(&waiting_for_cpus, 1);
		smp_call_function_single(cpu, ramdump_other_cpu_handler,
				NULL, 0);
		msecs = 10;
		while ((atomic_read(&waiting_for_cpus) > 0) && msecs) {
			mdelay(1);
			msecs--;
		}
		if (!msecs)
			pr_err("Waiting for other CPUs timed out\n");
	}
}
#endif

/************************************************************************/
/* RAMDUMP panic notifier */
/************************************************************************/
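/*
 * Panic-time entry point: fill in the RDC header, capture the current and
 * static CPU context, save ISRAM and flush the caches, then reset the SoC.
 * Under CONFIG_MRVL_PANIC_FLUSH it returns to the caller instead of
 * resetting.
 */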
void ramdump_panic(void)
{
	if (!ramdump_level) {
		ramdump_rdc_reset_only();	/* clear if already set */
		__arm_pm_restart('h', NULL);
		return;
	}
	get_kernel_text_crc16_on_panic();
	pr_err("RAMDUMP STARTED\n");
	ramdump_fill_rdc();
	ramdump_save_current_context(&ramdump_data);
	ramdump_save_static_context(&ramdump_data);
	ramdump_save_isram();

	if (get_kernel_text_crc16_valid() == -1) {
		ramdump_description_update(&ramdump_data,
				"!Bad Kernel CRC!", 1);
	}

#ifndef CONFIG_MRVL_PANIC_FLUSH
#ifdef CONFIG_SMP
	ramdump_signal_all_cpus();
#endif
	ramdump_flush_caches();

#if 0 //TODO
	ramdump_description_print(&ramdump_data);
#endif
	pr_err("RAMDUMP DONE\n");
	/*
	 * Reset right away, do not return. Two issues with the reset done
	 * by the main panic() implementation:
	 * 1) The setup_mm_for_reboot() called from arm_machine_restart()
	 *    corrupts the current MM page tables, which is bad for
	 *    offline debug.
	 * 2) The current kernel stack is corrupted by other functions
	 *    invoked after this returns: the stack frame becomes invalid
	 *    in offline debug.
	 */
	__arm_pm_restart('h', NULL);
#else
	pr_err("RAMDUMP DONE\n");
#endif
}

#ifndef CONFIG_MRVL_PANIC_FLUSH
static int ramdump_panic_nfunc(struct notifier_block *nb,
		unsigned long action, void *data)
{
	ramdump_panic();
	return 0;
}
#endif

/* Prepare RAMDUMP "in advance":
 * if the system gets stuck and is GPIO-reset, the OBM will detect the
 * RAMDUMP signature and the dump will be taken.
 * The context is also saved, but it may be irrelevant to the problem.
 * Use this API carefully!
 * NOTE:
 * The EMMD signature is required for CONFIG_MRVL_PANIC_FLUSH.
 */
#ifdef CONFIG_MRVL_PANIC_FLUSH
extern void set_emmd_indicator_flush(void);
extern void clr_emmd_indicator(void);
#endif

void ramdump_prepare_in_advance(void)
{
	unsigned long flags;

	if (!ramdump_level)
		return;
	ramdump_fill_rdc();
	local_irq_save(flags);
	ramdump_save_current_context(&ramdump_data);
	ramdump_save_static_context(&ramdump_data);
#ifdef CONFIG_ARM64
	__flush_dcache_area(&ramdump_data, sizeof(ramdump_data));
#else
	__cpuc_flush_dcache_area(&ramdump_data, sizeof(ramdump_data));
#endif
#ifdef CONFIG_MRVL_PANIC_FLUSH
	set_emmd_indicator_flush();
#endif
	ramdump_in_advance_active = 1;
	local_irq_restore(flags);
}
EXPORT_SYMBOL(ramdump_prepare_in_advance);


static void ramdump_rdc_reset_only(void)
{
	/* Clean RDC_SIGNATURE only, keep the other fields.
	 * IO-remapped => non-cached.
	 * The RDC may also be used for other purposes (for example FOTA),
	 * so only reset it conditionally.
	 */
	if (rdc_va->header.signature != RDC_SIGNATURE)
		return;

	rdc_va->header.signature = 0;
#ifdef CONFIG_MRVL_PANIC_FLUSH
	clr_emmd_indicator();
#endif
}

/* ramdump_rdc_reset:
 * Called by "reboot" (Graceful Shutdown).
 * Cleans up ramdump_prepare_in_advance.
 * Disables ramdump on potential errors (ignore these).
 */
void ramdump_rdc_reset(void)
{
	if (ramdump_level)	/* if == 0 keep it */
		ramdump_level = RAMDUMP_LEVEL_PANIC_ONLY;
	ramdump_ignore_fatal_signals(1);
	if (ramdump_in_advance_active) {
		ramdump_rdc_reset_only();
		/*ramdump_in_advance_active = 0;*/
	}
}
EXPORT_SYMBOL(ramdump_rdc_reset);

#ifndef CONFIG_MRVL_PANIC_FLUSH
static int
ramdump_panic_notifier(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	ramdump_panic();
	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = ramdump_panic_nfunc,
};
#endif

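/*
 * Capture a small set of SoC power-management registers (APMU PCR/PSR and
 * core status) into the acc_regs block; only implemented for
 * CONFIG_CPU_PXA1986, a no-op on other platforms.
 */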
static void save_peripheral_regs(struct ramdump_state *d)
{
#ifdef CONFIG_CPU_PXA1986
	d->acc.apmu_pcr[0] = __raw_readl(APMU_REG(APMU_PCR_0));
	d->acc.apmu_pcr[1] = __raw_readl(APMU_REG(APMU_PCR_1));
	d->acc.apmu_pcr[2] = __raw_readl(APMU_REG(APMU_PCR_2));
	d->acc.apmu_pcr[3] = __raw_readl(APMU_REG(APMU_PCR_3));
	d->acc.apmu_pcr[4] = __raw_readl(APMU_REG(APMU_PCR_4));
	d->acc.apmu_psr = __raw_readl(APMU_REG(APMU_PSR));
	d->acc.apmu_core_status = __raw_readl(APMU_REG(APMU_CORE_STATUS));
	d->acc.apmu_core_idle_status = __raw_readl(APMU_REG(APMU_CORE_IDLE_ST));
#endif
}

#ifndef CONFIG_ARM64
/************************************************************************/
/* inline asm helpers */
/************************************************************************/
#define get_reg_asm(instruction) ({ \
	unsigned reg; \
	asm(instruction : "=r" (reg) : ); \
	reg; })

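/*
 * Read the banked SPSR/SP/LR (plus r8-r12 for FIQ) of another CPU mode by
 * temporarily switching CPSR to that mode with IRQ/FIQ masked, then
 * restoring the original mode.
 */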
static inline void get_banked_regs(unsigned *dest, unsigned mode)
{
	register unsigned *rdest asm("r0") = dest;
	register unsigned rmode asm("r1") = mode;
	register unsigned cpsr asm("r2");
	register unsigned scr asm("r3");
	asm volatile(
	"mrs	r2, cpsr\n"
	"bic	r3, r2, #0x1f	@ clear mode\n"
	"orr	r3, r3, r1	@ set target mode\n"
	"orr	r3, r3, #0xc0	@ lockout irq/fiq or may trap on bad mode\n"
	"msr	cpsr, r3\n"
	"mrs	r3, spsr\n"
	"cmp	r1, #0x11	@ is the requested mode FIQ?\n"
#ifndef CONFIG_THUMB2_KERNEL
	"stmne	r0, {r3,r13-r14}\n"
	"stmeq	r0, {r3,r8-r14}\n"
#else
	"strne	r3, [r0]\n"
	"strne	r13, [r0, #4]\n"
	"strne	r14, [r0, #8]\n"
	"stmeq	r0, {r3,r8-r12}\n"
	"streq	r13, [r0, #0x18]\n"
	"streq	r14, [r0, #0x1c]\n"
#endif
	"msr	cpsr, r2	@ restore original mode\n"
	: "=r" (cpsr), "=r" (scr)
	: "r" (rdest), "r" (rmode), "r" (cpsr), "r" (scr)
	: "memory", "cc");
}
static inline void get_usr_regs(unsigned *dest, unsigned mode)
{
#ifndef CONFIG_THUMB2_KERNEL
	/* TBD: how to get user mode registers in Thumb-2 */
	register unsigned *rdest asm("r0") = dest;
	register unsigned rmode asm("r1") = mode;
	register unsigned cpsr asm("r2");
	register unsigned scr asm("r3");
	asm volatile(
	"mrs	r2, spsr\n"
	"bic	r3, r2, #0x1f	@ clear mode\n"
	"orr	r3, r3, r1	@ set usr mode\n"
	"msr	spsr, r3\n"
	"stm	r0, {r13-r14}^\n"
	"msr	spsr, r2	@ restore original spsr\n"
	: "=r" (cpsr), "=r" (scr)
	: "r" (rdest), "r" (rmode), "r" (cpsr), "r" (scr)
	: "memory", "cc");
#endif
}

/************************************************************************/
/* RAMDUMP state save */
/************************************************************************/
/*
 * ramdump_save_static_context
 * Saves the general CPU register state into the ramdump.
 */
static void ramdump_save_static_context(struct ramdump_state *d)
{
	/* mode banked regs */
	get_banked_regs(&d->abt.spsr, ABT_MODE);
	get_banked_regs(&d->und.spsr, UND_MODE);
	get_banked_regs(&d->irq.spsr, IRQ_MODE);
	get_banked_regs(&d->fiq.spsr, FIQ_MODE);
	get_banked_regs(&d->svc.spsr, SVC_MODE);

	/* USR mode banked regs */
	get_usr_regs(&d->usr.sp, USR_MODE);

	/* cp15 */
	d->cp15.id = get_reg_asm("mrc p15, 0, %0, c0, c0, 0");
	d->cp15.cr = get_reg_asm("mrc p15, 0, %0, c1, c0, 0");
	d->cp15.aux_cr = get_reg_asm("mrc p15, 0, %0, c1, c0, 1");
	d->cp15.ttb = get_reg_asm("mrc p15, 0, %0, c2, c0, 0");
	d->cp15.da_ctrl = get_reg_asm("mrc p15, 0, %0, c3, c0, 0");
	d->cp15.cpar = get_reg_asm("mrc p15, 0, %0, c1, c0, 2");
	d->cp15.fsr = get_reg_asm("mrc p15, 0, %0, c5, c0, 0");
	d->cp15.far = get_reg_asm("mrc p15, 0, %0, c6, c0, 0");
	/* PJ4: context id */
	d->cp15.procid = get_reg_asm("mrc p15, 0, %0, c13, c0, 0");

	/*
	 * Removed SCR, NSACR, NSDBG: not accessible in NS,
	 * and not relevant otherwise.
	 */
	d->cp15pj4.ttb1 = get_reg_asm("mrc p15, 0, %0, c2, c0, 1");
	d->cp15pj4.ttbc = get_reg_asm("mrc p15, 0, %0, c2, c0, 2");
	d->cp15pj4.ifsr = get_reg_asm("mrc p15, 0, %0, c5, c0, 1");
	d->cp15pj4.ifar = get_reg_asm("mrc p15, 0, %0, c6, c0, 2");
	d->cp15pj4.auxdfsr = get_reg_asm("mrc p15, 0, %0, c5, c1, 0");
	d->cp15pj4.auxifsr = get_reg_asm("mrc p15, 0, %0, c5, c1, 1");
	d->cp15pj4.pa = get_reg_asm("mrc p15, 0, %0, c7, c4, 0");
	d->cp15pj4.prremap = get_reg_asm("mrc p15, 0, %0, c10, c2, 0");
	d->cp15pj4.nmremap = get_reg_asm("mrc p15, 0, %0, c10, c2, 1");
	d->cp15pj4.istat = get_reg_asm("mrc p15, 0, %0, c12, c1, 0");
	d->cp15pj4.fcsepid = get_reg_asm("mrc p15, 0, %0, c13, c0, 0");
	d->cp15pj4.urwpid = get_reg_asm("mrc p15, 0, %0, c13, c0, 2");
	d->cp15pj4.uropid = get_reg_asm("mrc p15, 0, %0, c13, c0, 3");
	d->cp15pj4.privpid = get_reg_asm("mrc p15, 0, %0, c13, c0, 4");

	d->pfmpj4.ctrl = get_reg_asm("mrc p15, 0, %0, c9, c12, 0");
	d->pfmpj4.ceset = get_reg_asm("mrc p15, 0, %0, c9, c12, 1");
	d->pfmpj4.ceclr = get_reg_asm("mrc p15, 0, %0, c9, c12, 2");
	d->pfmpj4.ovf = get_reg_asm("mrc p15, 0, %0, c9, c12, 3");
	d->pfmpj4.csel = get_reg_asm("mrc p15, 0, %0, c9, c12, 5");
	d->pfmpj4.ccnt = get_reg_asm("mrc p15, 0, %0, c9, c13, 0");
	d->pfmpj4.evsel = get_reg_asm("mrc p15, 0, %0, c9, c13, 1");
	d->pfmpj4.pmcnt = get_reg_asm("mrc p15, 0, %0, c9, c13, 2");
	d->pfmpj4.uen = get_reg_asm("mrc p15, 0, %0, c9, c14, 0");
	d->pfmpj4.ieset = get_reg_asm("mrc p15, 0, %0, c9, c14, 1");
	d->pfmpj4.ieclr = get_reg_asm("mrc p15, 0, %0, c9, c14, 2");

	save_peripheral_regs(d);
}

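/*
 * If panic() was entered without an exception frame (the saved CPSR/PC is
 * still zero), synthesize a context from the live registers; register 0 is
 * overwritten with the 0xDEADDEAD marker to flag the synthesized snapshot.
 */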
static void ramdump_save_current_cpu_context(struct ramdump_state *d)
{
	/* check if panic was called directly, then regs will be empty */
	if (d->regs.uregs[16] == 0) {
		/* let's fill up regs as current */
		d->regs.uregs[0] = 0xDEADDEAD;	/* use DEADDEAD as a marking */
		d->regs.uregs[1] = get_reg_asm("mov %0, r1");
		d->regs.uregs[2] = get_reg_asm("mov %0, r2");
		d->regs.uregs[3] = get_reg_asm("mov %0, r3");
		d->regs.uregs[4] = get_reg_asm("mov %0, r4");
		d->regs.uregs[5] = get_reg_asm("mov %0, r5");
		d->regs.uregs[6] = get_reg_asm("mov %0, r6");
		d->regs.uregs[7] = get_reg_asm("mov %0, r7");
		d->regs.uregs[8] = get_reg_asm("mov %0, r8");
		d->regs.uregs[9] = get_reg_asm("mov %0, r9");
		d->regs.uregs[10] = get_reg_asm("mov %0, r10");
		d->regs.uregs[11] = get_reg_asm("mov %0, r11");
		d->regs.uregs[12] = get_reg_asm("mov %0, r12");
		d->regs.uregs[13] = get_reg_asm("mov %0, r13");
		d->regs.uregs[14] = get_reg_asm("mov %0, r14");
		d->regs.uregs[15] = get_reg_asm("mov %0, r15");
		d->regs.uregs[16] = get_reg_asm("mrs %0, cpsr");
	}
}

#else /* CONFIG_ARM64 */

#define get_reg_asm(instruction) ({ \
	u64 reg; \
	asm(instruction : "=r" (reg) : ); \
	reg; })
/*
 * ramdump_save_static_context
 * Saves the general CPU register state into the ramdump.
 */
static void ramdump_save_static_context(struct ramdump_state *d)
{
	d->spr.midr = get_reg_asm("mrs %0, midr_el1");
	d->spr.revidr = get_reg_asm("mrs %0, revidr_el1");
	d->spr.sctlr = get_reg_asm("mrs %0, sctlr_el1");
	d->spr.actlr = get_reg_asm("mrs %0, actlr_el1");
	d->spr.cpacr = get_reg_asm("mrs %0, cpacr_el1");
	d->spr.isr = get_reg_asm("mrs %0, isr_el1");
	d->spr.tcr = get_reg_asm("mrs %0, tcr_el1");
	d->spr.ttbr0 = get_reg_asm("mrs %0, ttbr0_el1");
	d->spr.ttbr1 = get_reg_asm("mrs %0, ttbr1_el1");
	d->spr.mair = get_reg_asm("mrs %0, mair_el1");
	d->spr.tpidr = get_reg_asm("mrs %0, tpidr_el1");
	d->spr.vbar = get_reg_asm("mrs %0, vbar_el1");
	d->spr.esr = get_reg_asm("mrs %0, esr_el1");
	d->spr.far = get_reg_asm("mrs %0, far_el1");
	d->spr.current_el = get_reg_asm("mrs %0, currentEL");
	/* Fill in the SPR and SOC size: needed for parser compatibility */
	d->spr_size = sizeof(d->spr);
	d->soc_size = sizeof(d->soc);
	/* Save SoC register state */
	save_peripheral_regs(d);
}
static void ramdump_save_current_cpu_context(struct ramdump_state *d)
{
	int i = 0;
	/* check if panic was called directly, then regs will be empty */
	if (d->regs.pc == 0) {
		/* let's fill up regs as current */
		d->regs.regs[i++] = 0xDEADDEAD;	/* use DEADDEAD as a marking */
		d->regs.regs[i++] = get_reg_asm("mov %0, x1");
		d->regs.regs[i++] = get_reg_asm("mov %0, x2");
		d->regs.regs[i++] = get_reg_asm("mov %0, x3");
		d->regs.regs[i++] = get_reg_asm("mov %0, x4");
		d->regs.regs[i++] = get_reg_asm("mov %0, x5");
		d->regs.regs[i++] = get_reg_asm("mov %0, x6");
		d->regs.regs[i++] = get_reg_asm("mov %0, x7");
		d->regs.regs[i++] = get_reg_asm("mov %0, x8");
		d->regs.regs[i++] = get_reg_asm("mov %0, x9");
		d->regs.regs[i++] = get_reg_asm("mov %0, x10");
		d->regs.regs[i++] = get_reg_asm("mov %0, x11");
		d->regs.regs[i++] = get_reg_asm("mov %0, x12");
		d->regs.regs[i++] = get_reg_asm("mov %0, x13");
		d->regs.regs[i++] = get_reg_asm("mov %0, x14");
		d->regs.regs[i++] = get_reg_asm("mov %0, x15");
		d->regs.regs[i++] = get_reg_asm("mov %0, x16");
		d->regs.regs[i++] = get_reg_asm("mov %0, x17");
		d->regs.regs[i++] = get_reg_asm("mov %0, x18");
		d->regs.regs[i++] = get_reg_asm("mov %0, x19");
		d->regs.regs[i++] = get_reg_asm("mov %0, x20");
		d->regs.regs[i++] = get_reg_asm("mov %0, x21");
		d->regs.regs[i++] = get_reg_asm("mov %0, x22");
		d->regs.regs[i++] = get_reg_asm("mov %0, x23");
		d->regs.regs[i++] = get_reg_asm("mov %0, x24");
		d->regs.regs[i++] = get_reg_asm("mov %0, x25");
		d->regs.regs[i++] = get_reg_asm("mov %0, x26");
		d->regs.regs[i++] = get_reg_asm("mov %0, x27");
		d->regs.regs[i++] = get_reg_asm("mov %0, x28");
		d->regs.regs[i++] = get_reg_asm("mov %0, x29");
		d->regs.regs[i++] = get_reg_asm("mov %0, x30");
		d->regs.sp = get_reg_asm("mov %0, sp");
		d->regs.pstate = 0;	/* no direct access */
		d->regs.pc = (u64)&ramdump_save_current_cpu_context;
	}
}

#define ARM_pc pc /* alias for pt_regs field in the code below */
#endif /* CONFIG_ARM64 */

/*
 * Save the current register context if no dynamic context has been filled in.
 */
static void ramdump_save_current_context(struct ramdump_state *d)
{
	ramdump_save_current_cpu_context(d);
	if (d->thread == NULL)
		d->thread = current_thread_info();
	if (strlen(d->text) == 0) {
		strcpy(d->text, "[KR] Panic in ");
		if (d->thread && d->thread->task)
			/* 0 is always appended even after n chars copied */
			strncat(d->text, d->thread->task->comm,
					sizeof(d->text) - 1 - strlen(d->text));
	}
	if (panic_str) {
		strncat(d->text, ": ",
				sizeof(d->text) - 1 - strlen(d->text));
		strncat(d->text, panic_str,
				sizeof(d->text) - 1 - strlen(d->text));
	}
}

/* Record the panic() message so it can be appended to the description text */
void ramdump_save_panic_text(const char *s)
{
	panic_str = s;
}

/* Update the RAMDUMP.TXT description with the given text */
static void ramdump_description_update(struct ramdump_state *d,
		char *txt, int append)
{
	int len;

	if (append) {
		len = strlen(d->text);
		if (len && d->text[len - 1] == '\n')
			d->text[len - 1] = ' ';
		snprintf(d->text + len, RAMDUMP_TXT_SIZE - len - 1, "%s", txt);
	} else {
		snprintf(d->text, RAMDUMP_TXT_SIZE, "%s", txt);
	}
}

/* Print the RAMDUMP.TXT description */
/*GLOG*/ void ramdump_description_print(void)
{
	pr_err("\n---\n%s\n---\n\n", ramdump_data.text);
}

/*
 * ramdump_save_dynamic_context
 * Saves the register context (regs) into the ramdump.
 */
void ramdump_save_dynamic_context(const char *str, int err,
		struct thread_info *thread, struct pt_regs *regs)
{
	int len;
	const char *idtext = "??";	/* error type string */

	if (regs)
		ramdump_data.regs = *regs;
	if (err == RAMDUMP_ERR_EEH_CP)
		idtext = "CP";
	else if (err == RAMDUMP_ERR_EEH_AP)
		idtext = "AP";
	else if ((err & RAMDUMP_ERR_EEH_CP) == 0)
		idtext = "KR";

	len = sprintf(ramdump_data.text, "[%s] ", idtext);

	if (str) {
		/* bug_str[] is filled by bug.c:report_bug();
		 * it is much more helpful than the regular "str".
		 */
		if (bug_str[0])
			str = (const char *)bug_str;

		/* 0 is always appended even after n chars copied */
		strncat(ramdump_data.text, str,
				sizeof(ramdump_data.text) - 1 - len);
	}
	ramdump_data.err = (unsigned)err;
	ramdump_data.thread = thread;
	if ((err & RAMDUMP_ERR_EEH_CP) == 0) {
		/* For kernel oops/panic add more data to the text */
		char info[40];

		len = strlen(ramdump_data.text);

		/* Do not print the address for BUG */
		if (bug_str[0])
			sprintf(info, ", (irq=%d) in ", !!in_interrupt());
		else
			sprintf(info, " at 0x%lx in ", (unsigned long)regs->ARM_pc);

		if (thread && thread->task)
			/* 0 is always appended even after n chars copied */
			strncat(info, thread->task->comm,
					sizeof(info) - 1 - strlen(info));

		len = sizeof(ramdump_data.text) - len;
		if (len > 0)
			/* 0 is always appended even after n chars copied */
			strncat(ramdump_data.text, info, len - 1);
	}
#ifdef CONFIG_PXA_MIPSRAM
	/*
	 * MIPS_RAM_MARK_END_OF_LOG(MIPSRAM_get_descriptor(), 0); might be
	 * better, but it is not safe during a real panic.
	 * So add an END-MARK, which is absolutely safe.
	 */
	MIPS_RAM_ADD_TRACE(MIPSRAM_LOG_END_MARK_EVENT);
#endif
}
EXPORT_SYMBOL(ramdump_save_dynamic_context);

/*
 * ramdump_save_isram
 * Saves the ISRAM contents into the RDC.
 * Caches should be flushed prior to calling this (so SRAM is in sync).
 * Cacheable access to both SRAM and DDR is used for performance reasons.
 * Caches are also flushed by this function to sync the DDR.
 */
#define ISRAM_SIZE 0	/* Actually 128K, but we do not have room in RDC */
#define RDC_HEADROOM (ISRAM_SIZE)
#define RDC_START(header) ((void *)(((unsigned long)header) - RDC_HEADROOM))
#define RDC_HEAD(start) \
	((struct rdc_area *)(((unsigned long)start) + RDC_HEADROOM))
static void ramdump_save_isram(void)
{
	void *rdc_isram_va = RDC_ISRAM_START(rdc_va, ISRAM_SIZE);

	if (!isram_va)
		return;
	ramdump_flush_caches();
#ifdef RAMDUMP_ISRAM_CRC_CHECK
	rdc_va->header.isram_crc32 = crc32_le(0, (unsigned char *)isram_va,
			rdc_va->header.isram_size);
#else
	rdc_va->header.isram_crc32 = 0;
#endif
	memcpy(rdc_isram_va, isram_va, rdc_va->header.isram_size);
}

static void ramdump_flush_caches(void)
{
	flush_cache_all();
#ifdef CONFIG_OUTER_CACHE
	outer_flush_range(0, -1ul);
#endif
}

static void ramdump_fill_rdc(void)
{
	/* Some RDC fields are already set at this point: retain these */
	rdc_va->header.signature = RDC_SIGNATURE;
	rdc_va->header.error_id = ramdump_data.err;
	rdc_va->header.ramdump_data_addr =
		__virt_to_phys((unsigned long)&ramdump_data);
	rdc_va->header.isram_size = ISRAM_SIZE;
	rdc_va->header.isram_pa =
		(unsigned)(unsigned long)RDC_ISRAM_START(rdc_pa, ISRAM_SIZE);
	ramdump_data.rdc_pa = rdc_pa;
#ifdef CONFIG_PXA_MIPSRAM
	rdc_va->header.mipsram_pa = mipsram_desc.buffer_phys_ptr;
	rdc_va->header.mipsram_size = MIPS_RAM_BUFFER_SZ_BYTES;
#endif
	pr_err("RAMDUMP pa=0x%x, signature 0x%x placed on va=0x%x\n",
			(unsigned)rdc_pa, rdc_va->header.signature,
			(unsigned)&rdc_va->header.signature);
}

/************************************************************************/
/* RAMDUMP RAMFILE support */
/************************************************************************/
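/*
 * RAMFILE descriptors let other kernel code attach extra memory buffers to
 * the dump: each descriptor is linked into a singly linked list anchored at
 * rdc_va->header.ramfile_addr and addressed by physical address (or
 * pfn << PAGE_SHIFT for vmalloc memory) so the offline parser can walk it
 * after reset.
 */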
int ramdump_attach_ramfile(struct ramfile_desc *desc)
{
	/* Legacy: the ramfile linked list uses physical addresses, both the
	 * head (rdc_va->header.ramfile_addr) and all the next pointers.
	 * The kernel cannot always translate physical addresses, e.g. with
	 * vmalloc or highmem. Just keep the virtual address of the last
	 * item's next pointer; no need to walk the list then.
	 */
	static unsigned *link;

	if (!link)
		link = &rdc_va->header.ramfile_addr;
	/* Link in the new desc: by physical address */
	if (desc->flags & RAMFILE_PHYCONT)
		*link = __virt_to_phys((unsigned long)desc);
	else
		*link = vmalloc_to_pfn(desc) << PAGE_SHIFT;

	/* keep the virtual addr for discontiguous pages (vmalloc) */
	desc->vaddr = (unsigned)(unsigned long)desc;
#ifdef CONFIG_ARM64
	desc->vaddr_hi = (unsigned)(((unsigned long)desc) >> 32);
#endif
	/* Make sure the new desc properly terminates the list */
	desc->next = 0;
	/* Finally: advance the last element pointer */
	link = &desc->next;
	return 0;
}
EXPORT_SYMBOL(ramdump_attach_ramfile);

static atomic_t ramdump_item_offset;
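/*
 * Attach a small named data item (up to 16 machine words) to the RDC body
 * area. Space is reserved by advancing ramdump_item_offset atomically, so
 * concurrent callers cannot corrupt each other's records.
 */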
int ramdump_attach_item(enum rdi_type type,
		const char *name,
		unsigned attrbits,
		unsigned nwords, ...)
{
	struct rdc_dataitem *rdi;
	unsigned size;
	unsigned offset;
	va_list args;
	int i;

	/* name should be present, and at most MAX_RDI_NAME chars */
	if (!name || (strlen(name) > sizeof(rdi->name)) || (nwords > 16))
		return -EINVAL;
	size = offsetof(struct rdc_dataitem, body)
		+ nwords * sizeof(unsigned long);
	offset = atomic_read(&ramdump_item_offset);
	if ((offset + size) > sizeof(rdc_va->body))
		return -ENOMEM;
	offset = atomic_add_return(size, &ramdump_item_offset);
	if (offset > sizeof(rdc_va->body)) {
		atomic_sub_return(size, &ramdump_item_offset);
		return -ENOMEM;
	}

	/* Allocated space: set pointer */
	offset -= size;
	rdi = (struct rdc_dataitem *)&rdc_va->body.space[offset];
	rdi->size = size;
	rdi->type = type;
	rdi->attrbits = attrbits;
	strncpy(rdi->name, name, sizeof(rdi->name));
	va_start(args, nwords);
	/* Copy the data words */
	for (i = 0; nwords; nwords--) {
		unsigned long w = va_arg(args, unsigned long);
		rdi->body.w[i++] = (unsigned)w;
#ifdef CONFIG_ARM64
		rdi->body.w[i++] = (unsigned)(w >> 32);
#endif
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(ramdump_attach_item);

int ramdump_attach_cust_item(enum rdi_type type,
		const char *name,
		unsigned attrbits,
		unsigned nwords,
		unsigned buf,
		unsigned buf_size)
{
	struct rdc_dataitem *rdi;
	unsigned size;
	unsigned offset;

	/* name should be present, and at most MAX_RDI_NAME chars */
	if (!name || (strlen(name) > sizeof(rdi->name)) || (nwords > 16))
		return -EINVAL;
	size = offsetof(struct rdc_dataitem, body)
		+ nwords * sizeof(unsigned long);
	offset = atomic_read(&ramdump_item_offset);
	if ((offset + size) > sizeof(rdc_va->body))
		return -ENOMEM;
	offset = atomic_add_return(size, &ramdump_item_offset);
	if (offset > sizeof(rdc_va->body)) {
		atomic_sub_return(size, &ramdump_item_offset);
		return -ENOMEM;
	}

	/* Allocated space: set pointer */
	offset -= size;
	rdi = (struct rdc_dataitem *)&rdc_va->body.space[offset];
	rdi->size = size;
	rdi->type = type;
	rdi->attrbits = attrbits;
	strncpy(rdi->name, name, sizeof(rdi->name));
	rdi->body.w[0] = (unsigned)buf;
	rdi->body.w[1] = (unsigned)buf_size;
	return 0;
}

/************************************************************************/
/* RAMDUMP init */
/************************************************************************/

static int __init ramdump_init(void)
{
	void *p;

	if (!rdc_pa)
		/*
		 * The RDC address is not overridden on the CMDLINE: assume
		 * RAM at 0 with the legacy offset for backward compatibility.
		 */
		rdc_pa = RDC_OFFSET;
	if (ISRAM_SIZE) {
		isram_va = ioremap_wc(ISRAM_PA, ISRAM_SIZE);
		if (!isram_va)
			return -ENOMEM;
	}

	if (pfn_valid(rdc_pa >> PAGE_SHIFT))
		rdc_va = (struct rdc_area *)phys_to_virt(rdc_pa);
	else
		rdc_va = (struct rdc_area *)ioremap_nocache(
				(unsigned long)RDC_START(rdc_pa),
				sizeof(struct rdc_area) + RDC_HEADROOM);
	if (!rdc_va) {
		if (isram_va)
			iounmap(isram_va);
		return -ENOMEM;
	} else
		rdc_va = RDC_HEAD(rdc_va);

	/* Set up certain RDC fields that won't change later.
	 * These remain available even when the dump is taken after a
	 * forced reset.
	 */
	memset((void *)rdc_va, 0, sizeof(*rdc_va));	/* zero reserved fields */
	/* init_mm.pgd, so rdp can translate vmalloc addresses */
	rdc_va->header.pgd = __virt_to_phys((unsigned long)(pgd_offset_k(0)));
#ifdef CONFIG_KALLSYMS
	records.kallsyms_addresses = (unsigned long)kallsyms_addresses;
	records.kallsyms_names = (unsigned long)kallsyms_names;
	records.kallsyms_num_syms = (unsigned)kallsyms_num_syms;
	records.kallsyms_token_table = (unsigned long)kallsyms_token_table;
	records.kallsyms_token_index = (unsigned long)kallsyms_token_index;
	records.kallsyms_markers = (unsigned long)kallsyms_markers;
	rdc_va->header.kallsyms = (unsigned)(unsigned long)&records;
#ifdef CONFIG_ARM64
	rdc_va->header.kallsyms_hi = (unsigned)(((unsigned long)&records) >> 32);
#endif
#endif

	/* Set up a copy of the linux version string to identify the kernel */
	rdc_va->header.kernel_build_id = 0;
	rdc_va->header.kernel_build_id_hi = 0;
	p = kmalloc(strlen(linux_banner) + 1, GFP_KERNEL);
	if (p) {
		strcpy(p, linux_banner);
		/*
		 * RDC is a reserved area, so kmemleak does not scan it
		 * and reports this allocation as a leak.
		 */
		kmemleak_ignore(p);
		rdc_va->header.kernel_build_id = (unsigned)(unsigned long)p;
#ifdef CONFIG_ARM64
		rdc_va->header.kernel_build_id_hi =
			(unsigned)(((unsigned long)p) >> 32);
#endif
	}
#ifndef CONFIG_MRVL_PANIC_FLUSH
	/* Otherwise panic_flush() will call us directly */
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
#endif
	rdc_va->header.reserved1[0] = (u32)virt_to_phys((void *)__bss_start);
	return 0;
}
core_initcall(ramdump_init);	/* TBD: option early_initcall */

/*
 * RAMDUMP RD_RAM option: the rdrp token on the cmdline provides the location
 * of the compressed dump and the dump title text in DDR.
 */
static struct rdrp {
	unsigned addr;
	unsigned size;
} rdrp_text, rdrp_dump;
static void *rdrp_va;
static enum { rdrm_text, rdrm_dump, rdrm_reset } rdr_mode;

static inline int rdrp_get(char *str, char **retstr,
		struct rdrp *rdrp)
{
	rdrp->size = simple_strtoul(str, retstr, 0);
	if (**retstr != '@')
		return 1;
	(*retstr)++;
	rdrp->addr = simple_strtoul(*retstr, retstr, 0);
	return 0;
}

static int __init ramdump_rdrp_setup(char *str)
{
	int ret = 1;
	char *s = str;

	if (rdrp_get(str, &s, &rdrp_text) || (*s++ != ','))
		goto bail;
	if (!rdrp_get(s, &s, &rdrp_dump))
		ret = 0;
bail:
	if (ret)
		pr_err("Unsupported rdrp token format: %s\n", str);
	return 1;
}
__setup("rdrp=", ramdump_rdrp_setup);
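
/*
 * The format expected by rdrp_get() above is
 * "rdrp=<text_size>@<text_addr>,<dump_size>@<dump_addr>", where the first
 * pair describes the dump title text region and the second the compressed
 * dump region; sizes and addresses are parsed with simple_strtoul().
 */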

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>

/*
 * We should not leave the compressed data in memory after it has been
 * uploaded, otherwise it might be retained in memory until the next dump
 * and increase the data complexity, i.e. decrease the compression rate.
 * If this happens, the dump will eventually become too big, leaving no
 * space for the system to boot.
 */
static void rdrp_clear(void)
{
	unsigned size = max(rdrp_dump.addr + rdrp_dump.size,
			rdrp_text.addr + rdrp_text.size);

	memset(rdrp_va, 0, size);
#ifdef CONFIG_ARM64
	__flush_dcache_area(rdrp_va, size);
#else
	__cpuc_flush_dcache_area(rdrp_va, size);
#endif
}
static ssize_t rdr_read(struct file *filp, char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct rdrp *rdrp;

	if (rdr_mode == rdrm_reset)
		return 0;
	rdrp = rdr_mode == rdrm_dump ? &rdrp_dump : &rdrp_text;

	if ((*f_pos + count) > rdrp->size)
		count = *f_pos > rdrp->size ? 0 : rdrp->size - *f_pos;
	if (copy_to_user(buf, (void *)rdrp_va + rdrp->addr + *f_pos, count))
		return -EFAULT;
	*f_pos += count;
	return count;
}

static ssize_t rdr_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	char input[1];

	if (count > 0) {
		if (copy_from_user(&input, buf, 1))
			return -EFAULT;
		switch (input[0]) {
		case 'd':
		case 'D':
			rdr_mode = rdrm_dump;
			break;
		case 't':
		case 'T':
			rdr_mode = rdrm_text;
			break;
		case 'c':
		case 'C':
			rdr_mode = rdrm_reset;
			rdrp_clear();
			break;
		default:
			return -EFAULT;
		}
	}
	return count;
}

static const struct file_operations rdr_fops = {
	.owner = THIS_MODULE,
	.read = rdr_read,
	.write = rdr_write,
};

static struct miscdevice rdr_miscdev = {
	MISC_DYNAMIC_MINOR,
	"rdr",
	&rdr_fops
};
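
/*
 * The "rdr" misc character device exposes the regions above to user space:
 * writing 'd'/'t' selects the compressed dump or the title text for
 * subsequent reads, and writing 'c' clears both regions via rdrp_clear().
 */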
static int __init ramdump_rdr_init(void)
{
	unsigned addr, size;

	if (!(rdrp_text.size + rdrp_dump.size))
		return 0;

	if (rdrp_dump.addr < rdrp_text.addr) {
		addr = rdrp_dump.addr;
		size = rdrp_text.addr + rdrp_text.size - addr;
	} else {
		addr = rdrp_text.addr;
		if ((rdrp_text.addr + rdrp_text.size) >
				(rdrp_dump.addr + rdrp_dump.size))
			size = rdrp_text.size;
		else
			size = rdrp_dump.addr + rdrp_dump.size - addr;
	}

	rdrp_va = ioremap_cached(addr, size);
	if (rdrp_va) {
		rdrp_dump.addr -= addr;
		rdrp_text.addr -= addr;
		if (misc_register(&rdr_miscdev)) {
			pr_err("Failed to register the rdr device\n");
			iounmap(rdrp_va);
			return -ENOMEM;
		}
	}
	return 0;
}
late_initcall(ramdump_rdr_init);