/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H

#include <linux/types.h>

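/*
 * Possible sources of the detected online memory information; the source
 * actually used is recorded in mem_detect.info_source.
 */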
enum mem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_SCLP_STOR_INFO,
	MEM_DETECT_DIAG260,
	MEM_DETECT_SCLP_READ_INFO,
	MEM_DETECT_BIN_SEARCH
};

struct mem_detect_block {
	u64 start;
	u64 end;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
 * According to the architecture one storage element could have as many as
 * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
 * If more mem_detect_blocks are required, a block of memory from an already
 * known mem_detect_block is taken (entries_extended points to it).
 */
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */

struct mem_detect_info {
	u32 count;
	u8 info_source;
	struct mem_detect_block entries[MEM_INLINED_ENTRIES];
	struct mem_detect_block *entries_extended;
};
extern struct mem_detect_info mem_detect;

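/* Record an additional detected online memory range; used by the early memory detection code. */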
void add_mem_detect_block(u64 start, u64 end);

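/*
 * Copy the start/end addresses of block @n into @start and @end.
 * Blocks 0..MEM_INLINED_ENTRIES-1 come from the inline entries[] array,
 * higher-numbered blocks from the entries_extended area.
 * Returns 0 on success, -1 if @n is out of range.
 */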
static inline int __get_mem_detect_block(u32 n, unsigned long *start,
					 unsigned long *end)
{
	if (n >= mem_detect.count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)mem_detect.entries[n].start;
		*end = (unsigned long)mem_detect.entries[n].end;
	} else {
		*start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
	}
	return 0;
}

/**
 * for_each_mem_detect_block - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges.
 */
#define for_each_mem_detect_block(i, p_start, p_end)		\
	for (i = 0, __get_mem_detect_block(i, p_start, p_end);	\
	     i < mem_detect.count;				\
	     i++, __get_mem_detect_block(i, p_start, p_end))

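/*
 * Illustrative sketch of a typical caller (total_mem() is a hypothetical
 * helper, not part of this interface):
 *
 *	static unsigned long total_mem(void)
 *	{
 *		unsigned long start, end, total = 0;
 *		int i;
 *
 *		for_each_mem_detect_block(i, &start, &end)
 *			total += end - start;
 *		return total;
 *	}
 */

/*
 * Report the memory area occupied by the extended entries (if any), so that
 * the caller can reserve it (e.g. via memblock_reserve()); *size is 0 when
 * all blocks fit into the inline entries[] array.
 */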
static inline void get_mem_detect_reserved(unsigned long *start,
					   unsigned long *size)
{
	*start = (unsigned long)mem_detect.entries_extended;
	if (mem_detect.count > MEM_INLINED_ENTRIES)
		*size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
	else
		*size = 0;
}

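/*
 * Return the end address of the last (highest) detected memory block,
 * or 0 if no memory has been detected yet.
 */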
static inline unsigned long get_mem_detect_end(void)
{
	unsigned long start;
	unsigned long end;

	if (mem_detect.count) {
		__get_mem_detect_block(mem_detect.count - 1, &start, &end);
		return end;
	}
	return 0;
}

#endif /* _ASM_S390_MEM_DETECT_H */