/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernation support for x86-64
 *
 * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright 2005 Andi Kleen <ak@suse.de>
 * Copyright 2004 Pavel Machek <pavel@suse.cz>
 *
 * swsusp_arch_resume must not use any stack or any nonlocal variables while
 * copying pages:
 *
 * It is rewriting one kernel image with another.  What is a stack page in
 * the "old" image could very well be a data page in the "new" image, and
 * overwriting your own stack out from under yourself is a bad idea.
 */
| 16 | |
| 17 | .text |
| 18 | #include <linux/linkage.h> |
| 19 | #include <asm/segment.h> |
| 20 | #include <asm/page_types.h> |
| 21 | #include <asm/asm-offsets.h> |
| 22 | #include <asm/processor-flags.h> |
| 23 | #include <asm/frame.h> |
| 24 | |
/*
 * int swsusp_arch_suspend(void)
 *
 * Save the CPU register state into the global 'saved_context' pt_regs
 * area and stash the current CR3, then call the C routine swsusp_save
 * to create the hibernation image.  restore_registers() later reloads
 * everything saved here, so execution resumes as if this function had
 * just returned.
 *
 * %rax is used as the base pointer for the saves and is deliberately
 * not preserved -- see the matching note in restore_registers().
 */
ENTRY(swsusp_arch_suspend)
	movq	$saved_context, %rax
	movq	%rsp, pt_regs_sp(%rax)
	movq	%rbp, pt_regs_bp(%rax)
	movq	%rsi, pt_regs_si(%rax)
	movq	%rdi, pt_regs_di(%rax)
	movq	%rbx, pt_regs_bx(%rax)
	movq	%rcx, pt_regs_cx(%rax)
	movq	%rdx, pt_regs_dx(%rax)
	movq	%r8, pt_regs_r8(%rax)
	movq	%r9, pt_regs_r9(%rax)
	movq	%r10, pt_regs_r10(%rax)
	movq	%r11, pt_regs_r11(%rax)
	movq	%r12, pt_regs_r12(%rax)
	movq	%r13, pt_regs_r13(%rax)
	movq	%r14, pt_regs_r14(%rax)
	movq	%r15, pt_regs_r15(%rax)
	/* RFLAGS has no direct mov form; bounce it through the stack */
	pushfq
	popq	pt_regs_flags(%rax)

	/* save cr3 so restore_image() can hand it to the image kernel */
	movq	%cr3, %rax
	movq	%rax, restore_cr3(%rip)

	/* FRAME_BEGIN/END keep the unwinder happy around the C call */
	FRAME_BEGIN
	call	swsusp_save
	FRAME_END
	ret
ENDPROC(swsusp_arch_suspend)
| 54 | |
/*
 * restore_image - set up the register protocol for core_restore_code
 * and jump to its relocated copy.
 *
 * The registers loaded here are the implicit "arguments" consumed by
 * core_restore_code (and ultimately restore_registers) -- no stack may
 * be used once the copy starts (see the file header):
 *   %r8  = restore_jump_address (entry point in the image kernel)
 *   %r9  = restore_cr3          (image kernel's page tables)
 *   %rax = temp_pgt             (temporary page tables for the copy)
 *   %rbx = mmu_cr4_features     (CR4 value, used for the TLB flush)
 *   %rdx = restore_pblist       (head of the page-backup-entry list)
 */
ENTRY(restore_image)
	/* prepare to jump to the image kernel */
	movq	restore_jump_address(%rip), %r8
	movq	restore_cr3(%rip), %r9

	/* prepare to switch to temporary page tables */
	movq	temp_pgt(%rip), %rax
	movq	mmu_cr4_features(%rip), %rbx

	/* prepare to copy image data to their original locations */
	movq	restore_pblist(%rip), %rdx

	/* jump to the copy of core_restore_code on a safe page */
	movq	relocated_restore_code(%rip), %rcx
	jmpq	*%rcx
| 70 | |
/*
 * core_restore_code - copy the saved image pages back to their original
 * locations, then jump into the image kernel.
 *
 * This code runs from a relocated copy on a "safe" page (see
 * restore_image), because it overwrites the kernel it is running under;
 * it must not touch the stack or any nonlocal data.
 *
 * Register protocol (set up by restore_image):
 *   %rax = temporary page tables, %rbx = CR4 value,
 *   %rdx = pbe list head, %r8 = jump address in the image kernel.
 */
ENTRY(core_restore_code)
	/* switch to temporary page tables */
	movq	%rax, %cr3
	/*
	 * Flush the TLB including global pages: clearing CR4.PGE and
	 * rewriting CR3 invalidates everything, then PGE is restored.
	 */
	movq	%rbx, %rcx
	andq	$~(X86_CR4_PGE), %rcx
	movq	%rcx, %cr4;	# turn off PGE
	movq	%cr3, %rcx;	# flush TLB
	movq	%rcx, %cr3;
	movq	%rbx, %cr4;	# turn PGE back on
.Lloop:
	/* end of the pbe list is a NULL next pointer */
	testq	%rdx, %rdx
	jz	.Ldone

	/* get addresses from the pbe and copy the page */
	movq	pbe_address(%rdx), %rsi
	movq	pbe_orig_address(%rdx), %rdi
	movq	$(PAGE_SIZE >> 3), %rcx		# page size in quadwords
	rep
	movsq

	/* progress to the next pbe */
	movq	pbe_next(%rdx), %rdx
	jmp	.Lloop

.Ldone:
	/* jump to the restore_registers address from the image header */
	jmpq	*%r8
| 100 | |
/*
 * restore_registers - first code executed that belongs to the restored
 * image kernel (core_restore_code jumps here via %r8).  Page-aligned so
 * its address survives the image round-trip.
 *
 * Switches to the image kernel's page tables (%r9, loaded by
 * restore_image), flushes the TLB, and reloads the full register state
 * that swsusp_arch_suspend() stored in 'saved_context', so execution
 * continues as if swsusp_arch_suspend() had just returned.
 */
	.align PAGE_SIZE
ENTRY(restore_registers)
	/* go back to the original page tables */
	movq	%r9, %cr3

	/*
	 * Flush TLB, including "global" things (vmalloc): toggle CR4.PGE
	 * off, rewrite CR3, then turn PGE back on.
	 */
	movq	mmu_cr4_features(%rip), %rax
	movq	%rax, %rdx
	andq	$~(X86_CR4_PGE), %rdx
	movq	%rdx, %cr4;	# turn off PGE
	movq	%cr3, %rcx;	# flush TLB
	movq	%rcx, %cr3
	movq	%rax, %cr4;	# turn PGE back on

	/*
	 * We don't restore %rax, it must be 0 anyway: it served as the
	 * saved_context base in swsusp_arch_suspend and is zeroed below
	 * to report success to the caller.
	 */
	movq	$saved_context, %rax
	movq	pt_regs_sp(%rax), %rsp
	movq	pt_regs_bp(%rax), %rbp
	movq	pt_regs_si(%rax), %rsi
	movq	pt_regs_di(%rax), %rdi
	movq	pt_regs_bx(%rax), %rbx
	movq	pt_regs_cx(%rax), %rcx
	movq	pt_regs_dx(%rax), %rdx
	movq	pt_regs_r8(%rax), %r8
	movq	pt_regs_r9(%rax), %r9
	movq	pt_regs_r10(%rax), %r10
	movq	pt_regs_r11(%rax), %r11
	movq	pt_regs_r12(%rax), %r12
	movq	pt_regs_r13(%rax), %r13
	movq	pt_regs_r14(%rax), %r14
	movq	pt_regs_r15(%rax), %r15
	/* restore RFLAGS via the (now valid) stack */
	pushq	pt_regs_flags(%rax)
	popfq

	/* Saved in save_processor_state. */
	lgdt	saved_context_gdt_desc(%rax)

	xorl	%eax, %eax	# return 0 (also zeroes all of %rax)

	/* tell the hibernation core that we've just restored the memory */
	movq	%rax, in_suspend(%rip)

	ret
ENDPROC(restore_registers)