// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add NUMA support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

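/*
 * Huge pages live in the dedicated RGN_HPAGE virtual region, so the
 * address is first scaled down by HPAGE_SIZE/PAGE_SIZE via
 * htlbpage_to_page(); the ordinary page table walk then allocates
 * the intermediate levels at the scaled address.
 */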
pte_t *
huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
               unsigned long addr, unsigned long sz)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        pud = pud_alloc(mm, pgd, taddr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, taddr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, taddr);
        }
        return pte;
}

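/*
 * Look up the pte for a huge page address without allocating: each
 * level of the walk is checked with *_present() and NULL is returned
 * as soon as a level is missing.
 */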
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, taddr);
                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, taddr);
                        if (pmd_present(*pmd))
                                pte = pte_offset_map(pmd, taddr);
                }
        }

        return pte;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * No actual preparation is needed; just check that the length and
 * address are hugepage-aligned and that the address lies in the
 * huge page region.
 */
int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != RGN_HPAGE)
                return -EINVAL;

        return 0;
}

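/*
 * Translate a user address in the huge page region to its struct page.
 * The pte maps the whole huge page, so the offset within it is added
 * back in units of small pages.
 */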
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
        struct page *page;
        pte_t *ptep;

        if (REGION_NUMBER(addr) != RGN_HPAGE)
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
        if (!ptep || pte_none(*ptep))
                return NULL;
        page = pte_page(*ptep);
        page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}

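/*
 * Huge pages on ia64 are reached through the RGN_HPAGE region and
 * huge_pte_offset()/follow_huge_addr(), never through huge pmd or
 * pud entries, so both predicates always report false.
 */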
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        /*
         * This is called to free hugetlb page tables.
         *
         * The offset of these addresses from the base of the hugetlb
         * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
         * the standard free_pgd_range() will free the right page tables.
         *
         * If floor and ceiling are also in the hugetlb region, they
         * must likewise be scaled down; but if outside, left unchanged.
         */

        addr = htlbpage_to_page(addr);
        end = htlbpage_to_page(end);
        if (REGION_NUMBER(floor) == RGN_HPAGE)
                floor = htlbpage_to_page(floor);
        if (REGION_NUMBER(ceiling) == RGN_HPAGE)
                ceiling = htlbpage_to_page(ceiling);

        free_pgd_range(tlb, addr, end, floor, ceiling);
}

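/*
 * Find a free, hugepage-aligned range inside the huge page region.
 * MAP_FIXED requests are only validated; otherwise any hint outside
 * the region (or misaligned within it) falls back to the region base.
 */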
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct vm_unmapped_area_info info;

        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
        if (len & ~HPAGE_MASK)
                return -EINVAL;

        /* Handle MAP_FIXED */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        /* This code assumes that RGN_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = HPAGE_REGION_BASE;

        info.flags = 0;
        info.length = len;
        info.low_limit = addr;
        info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
        info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

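/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be
 * a power of two, supported by the hardware TLB (as reported by
 * ia64_pal_vm_page_size()), larger than the base page, and small
 * enough for the buddy allocator; the region register for RGN_HPAGE
 * is then reprogrammed with the new page shift.
 */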
static int __init hugetlb_setup_sz(char *str)
{
        u64 tr_pages;
        unsigned long long size;

        if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
                /*
                 * shouldn't happen, but just in case.
                 */
                tr_pages = 0x15557000UL;

        size = memparse(str, &str);
        if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
            size <= PAGE_SIZE ||
            size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
                printk(KERN_WARNING "Invalid huge page size specified\n");
                return 1;
        }

        hpage_shift = __ffs(size);
        /*
         * The boot cpu already executed ia64_mmu_init() with
         * HPAGE_SHIFT_DEFAULT; override the region register here with
         * the new page shift.
         */
        ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
        return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);