// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes (KProbes)
 * arch/ia64/kernel/kprobes.c
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 * <anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/exception.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

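/*
 * Background (a sketch, from the Itanium ISA): each 16-byte bundle packs
 * three 41-bit instruction slots plus a 5-bit template that assigns an
 * execution-unit type to each slot.  The table below mirrors those
 * template encodings (M = memory, I = integer, F = floating-point,
 * B = branch, L/X = long immediate and extended, u = undefined).
 * Templates 04/05 are MLX: the L slot only carries the upper immediate
 * bits of the X-unit instruction in slot 2.
 */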
enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
	{ M, I, I },	/* 00 */
	{ M, I, I },	/* 01 */
	{ M, I, I },	/* 02 */
	{ M, I, I },	/* 03 */
	{ M, L, X },	/* 04 */
	{ M, L, X },	/* 05 */
	{ u, u, u },	/* 06 */
	{ u, u, u },	/* 07 */
	{ M, M, I },	/* 08 */
	{ M, M, I },	/* 09 */
	{ M, M, I },	/* 0A */
	{ M, M, I },	/* 0B */
	{ M, F, I },	/* 0C */
	{ M, F, I },	/* 0D */
	{ M, M, F },	/* 0E */
	{ M, M, F },	/* 0F */
	{ M, I, B },	/* 10 */
	{ M, I, B },	/* 11 */
	{ M, B, B },	/* 12 */
	{ M, B, B },	/* 13 */
	{ u, u, u },	/* 14 */
	{ u, u, u },	/* 15 */
	{ B, B, B },	/* 16 */
	{ B, B, B },	/* 17 */
	{ M, M, B },	/* 18 */
	{ M, M, B },	/* 19 */
	{ u, u, u },	/* 1A */
	{ u, u, u },	/* 1B */
	{ M, F, B },	/* 1C */
	{ M, F, B },	/* 1D */
	{ u, u, u },	/* 1E */
	{ u, u, u },	/* 1F */
};

/* Insert a long branch code */
static void __kprobes set_brl_inst(void *from, void *to)
{
	s64 rel = ((s64) to - (s64) from) >> 4;
	bundle_t *brl;

	brl = (bundle_t *) ((u64) from & ~0xf);
	brl->quad0.template = 0x05;	/* [MLX](stop) */
	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
}
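
/*
 * Worked example (a sketch): brl takes a 60-bit, bundle-granular
 * displacement, hence rel = (to - from) >> 4.  In the MLX format that
 * immediate is split: imm39 (rel bits 20..58) lands in the L slot,
 * while the i bit (rel bit 59) and imm20b (rel bits 0..19) are encoded
 * into the X-unit brl in slot 2 -- which is why the code above carves
 * rel into rel >> 20, rel >> 59 and rel & 0xfffff pieces.  The L slot
 * itself straddles the two 64-bit quads, so its low 18 bits go in
 * slot1_p0 and the remainder, shifted down by (64 - 46), in slot1_p1.
 */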

/*
 * In this function we check to see if the instruction
 * is an IP-relative instruction and update the kprobe
 * inst flag accordingly.
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;
	p->ainsn.slot = slot;

	/* Check for Break instruction
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		case IP_RELATIVE_PREDICT_OPCODE:
		case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
		case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
	return;
}
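
/*
 * Note: the flags recorded above drive resume_execution().  The probed
 * instruction is single-stepped from a copy in the insn slot, so an
 * IP-relative branch or predict computes its target relative to the
 * copy's address, and a call writes the copy's address into the link
 * branch register; both must be translated back to the original
 * address once the step completes.
 */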

/*
 * In this function we check to see if the instruction
 * (qp) cmpx.crel.ctype p1,p2=r2,r3
 * on which we are inserting the kprobe is a cmp instruction
 * with ctype unc.
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
	      (bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
	      (major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
		    && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}
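
/*
 * Why unc matters: a cmp with ctype unc executes even when its
 * qualifying predicate is 0 (it unconditionally writes p1/p2), so the
 * break that replaces it cannot simply inherit the original qp.
 * unsupported_inst() below therefore forces qp to 0 so the break always
 * fires, and rejects the slot-1 case where this trick is not applied.
 */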

/*
 * In this function we check to see if the instruction
 * on which we are inserting the kprobe is supported.
 * Returns qp value if supported
 * Returns -EINVAL if unsupported
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	int qp;

	qp = kprobe_inst & 0x3f;
	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
		if (slot == 1 && qp) {
			printk(KERN_WARNING "Kprobes on cmp unc "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
			return -EINVAL;
		}
		qp = 0;
	} else if (bundle_encoding[template][slot] == I) {
		if (major_opcode == 0) {
			/*
			 * Check for Integer speculation instruction
			 * - Bit 33-35 to be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
			/*
			 * IP relative mov instruction
			 * - Bit 27-35 to be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
		} else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) &&
			   (kprobe_inst & (0x1UL << 12))) {
			/* test bit instructions, tbit, tnat, tf
			 * bit 33-36 to be equal to 0
			 * bit 12 to be equal to 1
			 */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on test bit "
						"instruction on slot 1 at <0x%lx> "
						"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
	} else if (bundle_encoding[template][slot] == B) {
		if (major_opcode == 7) {
			/* IP-Relative Predict major code is 7 */
			printk(KERN_WARNING "Kprobes on IP-Relative "
					"Predict is not supported\n");
			return -EINVAL;
		} else if (major_opcode == 2) {
			/* Indirect Predict, major code is 2
			 * bit 27-32 to be equal to 10 or 11
			 */
			int x6 = (kprobe_inst >> 27) & 0x3F;

			if ((x6 == 0x10) || (x6 == 0x11)) {
				printk(KERN_WARNING "Kprobes on "
					"Indirect Predict is not supported\n");
				return -EINVAL;
			}
		}
	} else if (unlikely(bundle_encoding[template][slot] == F)) {
		/*
		 * The kernel does not use floating-point instructions; for
		 * safety, check here whether this is an fcmp/fclass or a
		 * floating-point approximation instruction.
		 */
		if ((major_opcode == 4 || major_opcode == 5) &&
		    (kprobe_inst & (0x1 << 12))) {
			/* fcmp/fclass unc instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on fcmp/fclass "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
		if ((major_opcode == 0 || major_opcode == 1) &&
		    (kprobe_inst & (0x1UL << 33))) {
			/* float Approximation instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on float Approx "
					"instr at <0x%lx> is not supported\n",
					addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
	return qp;
}

/*
 * In this function we override the bundle with
 * the break instruction at the given slot.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p,
					 int qp)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate(qp)
	 * to the break instruction
	 */
	break_inst |= qp;

	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64-46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single step on original instruction
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}
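
/*
 * Note: copying qp into the break means the break fires exactly when
 * the original instruction would have executed; if the predicate is
 * false, the bundle falls through and the probe is never reported.
 * BREAK_INST carries the kprobe break immediate that
 * kprobe_exceptions_notify() later matches against __IA64_BREAK_KPROBE.
 */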

static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
		unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
		break;
	case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}
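
/*
 * Slot 1 of a bundle spans the two 64-bit halves: its low 18 bits sit
 * at the top of quad0 (slot1_p0) and the remaining 23 bits at the
 * bottom of quad1 (slot1_p1), which is why the two pieces are stitched
 * back together with a shift of (64 - 46) = 18 above.
 */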

/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}

static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	return 0;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;

	i = atomic_add_return(1, &kcb->prev_kprobe_index);
	kcb->prev_kprobe[i-1].kp = kprobe_running();
	kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;

	i = atomic_read(&kcb->prev_kprobe_index);
	__this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp);
	kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
	atomic_sub(1, &kcb->prev_kprobe_index);
}
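
/*
 * prev_kprobe[] together with prev_kprobe_index acts as a small per-CPU
 * stack: save_previous_kprobe() pushes the running probe when a new
 * probe is hit from within a probe handler (KPROBE_REENTER), and
 * restore_previous_kprobe() pops it once the nested single step
 * completes.
 */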

static void __kprobes set_current_kprobe(struct kprobe *p,
			struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
}

static void kretprobe_trampoline(void)
{
}
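
/*
 * kretprobe_trampoline() is deliberately empty: only its address
 * matters.  arch_prepare_kretprobe() plants that address in b0, so the
 * probed function "returns" here, hits the kprobe registered on the
 * trampoline in arch_init_kprobes(), and trampoline_probe_handler()
 * takes over.
 */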

/*
 * At this point the target function has been tricked into
 * returning into our trampoline.  Lookup the associated instance
 * and then:
 *    - call the handler function
 *    - cleanup by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)dereference_function_descriptor(kretprobe_trampoline);

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path
	 * have a return probe installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->cr_iip = orig_ret_address;

	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}
	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->b0;

	/* Replace the return addr with trampoline addr */
	regs->b0 = (unsigned long)dereference_function_descriptor(kretprobe_trampoline);
}
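
/*
 * Note: on ia64 a C function pointer is a function descriptor (struct
 * fnptr: entry ip plus gp), not a code address, which is why
 * dereference_function_descriptor() is needed above and in
 * arch_deref_entry_point() below before comparing against, or storing
 * into, an actual instruction address such as regs->b0.
 */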

/* Check the instruction in the slot is break */
static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
{
	unsigned int major_opcode;
	unsigned int template = bundle->quad0.template;
	unsigned long kprobe_inst;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get Kprobe probe instruction at given slot */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	/* For break instruction,
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}

/*
 * In this function, we check whether the target bundle modifies IP or
 * triggers an exception. If so, it cannot be boosted.
 */
static int __kprobes can_boost(bundle_t *bundle, uint slot,
			       unsigned long bundle_addr)
{
	unsigned int template = bundle->quad0.template;

	do {
		if (search_exception_tables(bundle_addr + slot) ||
		    __is_ia64_break_inst(bundle, slot))
			return 0;	/* exception may occur in this bundle */
	} while ((++slot) < 3);
	template &= 0x1e;
	if (template >= 0x10 /* including B unit */ ||
	    template == 0x04 /* including X unit */ ||
	    template == 0x06) /* undefined */
		return 0;

	return 1;
}
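
/*
 * Note: templates come in even/odd pairs that differ only in where a
 * stop bit falls, so "template &= 0x1e" folds each pair together before
 * the check.  Templates >= 0x10 contain a B unit, 0x04/0x05 are MLX,
 * and 0x06/0x07 are undefined (see bundle_encoding above); any of these
 * could change control flow or fault, so such bundles are not boosted.
 */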

/* Prepare the long jump bundle and disable other boosters if needed */
static void __kprobes prepare_booster(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
	unsigned int slot = (unsigned long)p->addr & 0xf;
	struct kprobe *other_kp;

	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
	}

	/* disable boosters in previous slots */
	for (; addr < (unsigned long)p->addr; addr++) {
		other_kp = get_kprobe((void *)addr);
		if (other_kp)
			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
	}
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;
	int qp;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
	if (qp < 0)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);

	prepare_booster(p);

	return 0;
}
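
/*
 * Layout of the per-probe insn slot after the above: insn[0] holds the
 * copied original bundle that is single-stepped (or boost-executed),
 * and insn[1] holds the brl bundle written by prepare_booster() that
 * jumps back to the bundle following the probe point.
 */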

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	src = &p->opcode.bundle;

	flush_icache_range((unsigned long)p->ainsn.insn,
			   (unsigned long)p->ainsn.insn +
			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);

	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	src = &p->ainsn.insn->bundle;
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn,
			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
		p->ainsn.insn = NULL;
	}
}
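
/*
 * Note: arming patches only the probed slot in place (plus an icache
 * flush), leaving the other slots of the bundle untouched, so probes on
 * different slots of the same bundle can coexist; disarming restores
 * the slot from the unmodified copy kept in p->ainsn.insn.
 */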

/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn->bundle).  We still need to adjust
 * the ip to point back to the original address.  To do that, handle the
 * cases where we need to fix up the relative IP address and/or fix up
 * the branch register.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix target branch register, software convention is
			 * to use either b0 or b6 or b7, so just checking
			 * only those registers
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
				    (regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
				    (regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
				    (regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			} /* end switch */
		}
		goto turn_ss_off;
	}

	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}

static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}
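
/*
 * How the single step works: cr_iip selects the bundle (the out-of-line
 * copy, or the original bundle if the probed instruction was itself a
 * break), psr.ri selects the slot within it, and psr.ss makes the CPU
 * raise a Single Step trap (vector 36) after executing that one
 * instruction, which reaches post_kprobes_handler() via DIE_FAULT.
 */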

static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));

	return __is_ia64_break_inst(&bundle, slot);
}

static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			    (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it, so no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks, so let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		ia64_psr(regs)->ri = p->ainsn.slot;
		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
		/* turn single stepping off */
		ia64_psr(regs)->ss = 0;

		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
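
/*
 * Boosting skips the single-step round trip entirely: execution resumes
 * directly in the copied bundle and falls through into the brl planted
 * in insn[1], which jumps back to the bundle after the probe point.
 * It is only attempted without CONFIG_PREEMPT, since a task preempted
 * while executing out of line in the insn slot could otherwise be left
 * running there after the slot is recycled.
 */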

static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe and point the instruction pointer back to
		 * the probe address, then allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault; this could happen
		 * if a handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

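/*
 * Entry point from the die notifier chain: DIE_BREAK with the kprobe
 * break immediate routes to pre_kprobes_handler(), and DIE_FAULT with
 * the Single Step trap vector (36) routes to post_kprobes_handler().
 */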
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is break number from ia64_bad_break() */
		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
			|| args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is vector number from ia64_fault() */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

unsigned long arch_deref_entry_point(void *entry)
{
	return ((struct fnptr *)entry)->ip;
}

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		dereference_function_descriptor(kretprobe_trampoline);
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr ==
		dereference_function_descriptor(kretprobe_trampoline))
		return 1;

	return 0;
}