/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define hugetlb_free_pgd_range			free_pgd_range
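/* Huge pages require the EDAT1 facility, which provides 1 MB segment pages. */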
#define hugepages_supported()			(MACHINE_HAS_EDAT1)

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep);

static inline bool is_hugepage_only_range(struct mm_struct *mm,
					  unsigned long addr,
					  unsigned long len)
{
	return false;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

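/*
 * PG_arch_1 is used on s390 for storage-key handling; reset it when a huge
 * page is freed back to the pool.
 */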
static inline void arch_clear_hugepage_flags(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}

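/*
 * A "huge" pte on s390 is really a segment table entry (1 MB page) or a
 * region-third table entry (2 GB page), so clearing it means writing the
 * matching empty entry value instead of zero.
 */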
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long sz)
{
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
	else
		pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}

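/*
 * There is no in-place update of a huge pte: the old entry is invalidated
 * via huge_ptep_get_and_clear() and the new one installed afterwards, so
 * no stale translation can remain in the TLB.
 */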
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	int changed = !pte_same(huge_ptep_get(ptep), pte);
	if (changed) {
		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	return changed;
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

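/*
 * The helpers below operate on the software pte representation and simply
 * reuse the normal pte accessors; conversion to the segment/region entry
 * format happens in set_huge_pte_at() and huge_ptep_get().
 */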
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline int huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

static inline bool gigantic_page_runtime_supported(void)
{
	return true;
}

#endif /* _ASM_S390_HUGETLB_H */