// SPDX-License-Identifier: GPL-2.0
| 2 | /* |
| 3 | * Memory preserving reboot related code. |
| 4 | * |
| 5 | * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) |
| 6 | * Copyright (C) IBM Corporation, 2004. All rights reserved |
| 7 | */ |
| 8 | |
| 9 | #include <linux/errno.h> |
| 10 | #include <linux/crash_dump.h> |
| 11 | #include <linux/uaccess.h> |
| 12 | #include <linux/io.h> |
| 13 | |
| 14 | static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
| 15 | unsigned long offset, int userbuf, |
| 16 | bool encrypted) |
| 17 | { |
| 18 | void *vaddr; |
| 19 | |
| 20 | if (!csize) |
| 21 | return 0; |
| 22 | |
| 23 | if (encrypted) |
| 24 | vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE); |
| 25 | else |
| 26 | vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); |
| 27 | |
| 28 | if (!vaddr) |
| 29 | return -ENOMEM; |
| 30 | |
| 31 | if (userbuf) { |
| 32 | if (copy_to_user((void __user *)buf, vaddr + offset, csize)) { |
| 33 | iounmap((void __iomem *)vaddr); |
| 34 | return -EFAULT; |
| 35 | } |
| 36 | } else |
| 37 | memcpy(buf, vaddr + offset, csize); |
| 38 | |
| 39 | set_iounmap_nonlazy(); |
| 40 | iounmap((void __iomem *)vaddr); |
| 41 | return csize; |
| 42 | } |
| 43 | |
| 44 | /** |
| 45 | * copy_oldmem_page - copy one page of memory |
| 46 | * @pfn: page frame number to be copied |
| 47 | * @buf: target memory address for the copy; this can be in kernel address |
| 48 | * space or user address space (see @userbuf) |
| 49 | * @csize: number of bytes to copy |
| 50 | * @offset: offset in bytes into the page (based on pfn) to begin the copy |
| 51 | * @userbuf: if set, @buf is in user address space, use copy_to_user(), |
| 52 | * otherwise @buf is in kernel address space, use memcpy(). |
| 53 | * |
| 54 | * Copy a page from the old kernel's memory. For this page, there is no pte |
| 55 | * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic. |
| 56 | */ |
| 57 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
| 58 | unsigned long offset, int userbuf) |
| 59 | { |
| 60 | return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false); |
| 61 | } |
| 62 | |
| 63 | /** |
| 64 | * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the |
| 65 | * memory with the encryption mask set to accommodate kdump on SME-enabled |
| 66 | * machines. |
| 67 | */ |
| 68 | ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, |
| 69 | unsigned long offset, int userbuf) |
| 70 | { |
| 71 | return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true); |
| 72 | } |
| 73 | |
| 74 | ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) |
| 75 | { |
| 76 | return read_from_oldmem(buf, count, ppos, 0, sev_active()); |
| 77 | } |