/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 */

#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H

#include <asm/core.h>

#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>

/* Assertions. */

#if (XCHAL_HAVE_WINDOWED != 1)
# error Linux requires the Xtensa Windowed Registers Option.
#endif

/* The Xtensa ABI requires stack alignment to be at least 16 bytes. */

#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)

#define ARCH_SLAB_MINALIGN STACK_ALIGN
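
/*
 * For example, this evaluates to 16 unless XCHAL_DATA_WIDTH exceeds 16:
 * a hypothetical core with XCHAL_DATA_WIDTH of 32 would get 32-byte
 * stack and slab alignment.
 */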

/*
 * User space process size: 1 GB.
 * The windowed call ABI requires caller and callee to be located within the
 * same 1 GB region. The C compiler places trampoline code on the stack for
 * sources that take the address of a nested C function (a feature used by
 * glibc), so the 1 GB requirement applies to the stack as well.
 */

#ifdef CONFIG_MMU
#define TASK_SIZE	__XTENSA_UL_CONST(0x40000000)
#else
#define TASK_SIZE	__XTENSA_UL_CONST(0xffffffff)
#endif

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

/*
 * General exception cause assigned to the fake NMI. The fake NMI needs to be
 * handled differently from other interrupts, but it uses the common kernel
 * entry/exit code.
 */

#define EXCCAUSE_MAPPED_NMI	62

/*
 * General exception cause assigned to debug exceptions. Debug exceptions go
 * to their own vector, rather than the general exception vectors (user,
 * kernel, double); and their specific causes are reported via DEBUGCAUSE
 * rather than EXCCAUSE. However, it is sometimes convenient to redirect debug
 * exceptions to the general exception mechanism, so an otherwise unused
 * EXCCAUSE value was assigned to them for this purpose.
 */

#define EXCCAUSE_MAPPED_DEBUG	63

/*
 * We also use DEPC as a flag to distinguish between double and regular
 * exceptions: for performance reasons, DEPC may contain the value of
 * EXCCAUSE for regular exceptions, so this definition marks a valid
 * double exception address.
 * (Note: the value is tested with bgeui, whose immediate encoding restricts
 * it to 64, 128, or 256.)
 */

#define VALID_DOUBLE_EXCEPTION_ADDRESS	64
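
/*
 * A sketch of the intended test (label name is illustrative): after
 * reading DEPC into a0, an unsigned comparison such as
 *
 *	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Ldouble
 *
 * branches only for a valid double exception address, since EXCCAUSE
 * is a 6-bit value and therefore always below 64.
 */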

#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL

#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)

#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)

#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
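
/*
 * The two-level definitions above make sure the argument is expanded
 * before token pasting: with, say, XCHAL_PROFILING_INTERRUPT defined as
 * 10, XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT) becomes
 * _XTENSA_INT_LEVEL(10) and then XCHAL_INT10_LEVEL, whereas a direct
 * paste would yield the non-existent
 * XCHAL_INTXCHAL_PROFILING_INTERRUPT_LEVEL.
 */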

/* LOCKLEVEL defines the interrupt level that masks all
 * general-purpose interrupts.
 */
#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
#endif

#define TOPLEVEL XCHAL_EXCM_LEVEL
#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)

/* WSBITS and WBBITS are the widths of the WINDOWSTART and WINDOWBASE
 * registers.
 */
#define WSBITS  (XCHAL_NUM_AREGS / 4)      /* width of WINDOWSTART in bits */
#define WBBITS  (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
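
/*
 * For example, a core with XCHAL_NUM_AREGS = 64 (XCHAL_NUM_AREGS_LOG2 = 6)
 * has 16 windows of 4 registers each, giving WSBITS = 16 and WBBITS = 4.
 */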

#ifndef __ASSEMBLY__

/* Build a valid return address for the specified call winsize.
 * winsize must be 1 (call4), 2 (call8), or 3 (call12).
 */
#define MAKE_RA_FOR_CALL(ra, ws)   (((ra) & 0x3fffffff) | (ws) << 30)
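
/* For example, MAKE_RA_FOR_CALL(0x0040abcd, 2) yields 0x8040abcd
 * (addresses are illustrative): the a0 value a call8 returning to
 * 0x0040abcd would have, with the window size in the two top bits.
 */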

/* Convert a return address to a valid pc.
 * Note: we assume that the stack pointer is in the same 1 GB range as the ra.
 */
#define MAKE_PC_FROM_RA(ra, sp)    (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
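
/* Continuing the example above with an illustrative stack pointer of
 * 0x0047f000, MAKE_PC_FROM_RA(0x8040abcd, 0x0047f000) recovers the
 * original pc 0x0040abcd, taking the two top bits from sp rather than ra.
 */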

/* Spill slot location for the register reg in the spill area under the stack
 * pointer sp. reg must be in the range [0..4).
 */
#define SPILL_SLOT(sp, reg) (*(((unsigned long *)(sp)) - 4 + (reg)))

/* Spill slot location for the register reg in the spill area under the stack
 * pointer sp for a call8. reg must be in the range [4..8).
 */
#define SPILL_SLOT_CALL8(sp, reg) (*(((unsigned long *)(sp)) - 12 + (reg)))

/* Spill slot location for the register reg in the spill area under the stack
 * pointer sp for a call12. reg must be in the range [4..12).
 */
#define SPILL_SLOT_CALL12(sp, reg) (*(((unsigned long *)(sp)) - 16 + (reg)))
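
/* For example, SPILL_SLOT(sp, 0) names the a0 save slot 16 bytes below
 * sp (the base save area covers a0..a3), while SPILL_SLOT_CALL8(sp, 4)
 * names the a4 slot 32 bytes below sp in the larger area reserved for
 * call8 frames.
 */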

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct {

	/* kernel's return address and stack pointer for context switching */
	unsigned long ra; /* kernel's a0: return address and window call size */
	unsigned long sp; /* kernel's a1: stack pointer */

	mm_segment_t current_ds;    /* see uaccess.h for example uses */

	/* struct xtensa_cpuinfo info; */

	unsigned long bad_vaddr; /* last user fault */
	unsigned long bad_uaddr; /* last kernel fault accessing user space */
	unsigned long error_code;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
	struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
#endif
	/* Make the structure 16-byte aligned. */
	int align[0] __attribute__ ((aligned(16)));
};

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)
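
/* With CONFIG_MMU this is 0x20000000, i.e. mmap searches start halfway
 * through the 1 GB user address space.
 */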

#define INIT_THREAD						\
{								\
	.ra		= 0,					\
	.sp		= sizeof(init_stack) + (long) &init_stack, \
	.current_ds	= {0},					\
	/* .info	= {0}, */				\
	.bad_vaddr	= 0,					\
	.bad_uaddr	= 0,					\
	.error_code	= 0,					\
}


/*
 * Do necessary setup to start up a newly executed thread.
 * Note: when the windowed ABI is used for userspace, we set up PS
 * as if we had done a call4 to the new pc.
 * set_thread_state in signal.c depends on it.
 */
#if IS_ENABLED(CONFIG_USER_ABI_CALL0)
#define USER_PS_VALUE ((USER_RING << PS_RING_SHIFT) |		\
		       (1 << PS_UM_BIT) |			\
		       (1 << PS_EXCM_BIT))
#else
#define USER_PS_VALUE (PS_WOE_MASK |				\
		       (1 << PS_CALLINC_SHIFT) |		\
		       (USER_RING << PS_RING_SHIFT) |		\
		       (1 << PS_UM_BIT) |			\
		       (1 << PS_EXCM_BIT))
#endif

/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp)			\
	do {							\
		memset((regs), 0, sizeof(*(regs)));		\
		(regs)->pc = (new_pc);				\
		(regs)->ps = USER_PS_VALUE;			\
		(regs)->areg[1] = (new_sp);			\
		(regs)->areg[0] = 0;				\
		(regs)->wmask = 1;				\
		(regs)->depc = 0;				\
		(regs)->windowbase = 0;				\
		(regs)->windowstart = 1;			\
		(regs)->syscall = NO_SYSCALL;			\
	} while (0)

/* Forward declarations */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while (0)

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->areg[1])

#define cpu_relax()	barrier()

/* Special register access. */

#define xtensa_set_sr(x, sr)					\
	({							\
	 __asm__ __volatile__ ("wsr %0, "__stringify(sr) ::	\
			       "a"((unsigned int)(x)));		\
	 })

#define xtensa_get_sr(sr)					\
	({							\
	 unsigned int v;					\
	 __asm__ __volatile__ ("rsr %0, "__stringify(sr) : "=a"(v)); \
	 v;							\
	 })
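
/*
 * The sr argument is a special register name or number as accepted by
 * the assembler, e.g. (illustrative use only):
 *
 *	unsigned int icl = xtensa_get_sr(icountlevel);
 *	xtensa_set_sr(0, icountlevel);
 */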

#ifndef XCHAL_HAVE_EXTERN_REGS
#define XCHAL_HAVE_EXTERN_REGS 0
#endif

#if XCHAL_HAVE_EXTERN_REGS
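
/*
 * wer/rer access the core's external register interface; in the kernel
 * they are used e.g. by the Xtensa MX interrupt distributor
 * (drivers/irqchip/irq-xtensa-mx.c).
 */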

static inline void set_er(unsigned long value, unsigned long addr)
{
	asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
}

static inline unsigned long get_er(unsigned long addr)
{
	unsigned long value;

	asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
	return value;
}

#endif /* XCHAL_HAVE_EXTERN_REGS */

#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_PROCESSOR_H */