/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void *);	/* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void);	/* flushes data-cache only (all processors) */
void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
#else
#define flush_data_cache()		flush_data_cache_local(NULL)
#define flush_instruction_cache()	flush_instruction_cache_local(NULL)
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

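/*
 * flush_kernel_dcache_page*() write the kernel mapping of a page back
 * after the kernel has modified it, so that user-space aliases of the
 * same physical page do not see stale data.
 */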
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))

void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);

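/*
 * vmap()/vunmap() create and tear down extra kernel-virtual aliases of
 * pages.  With PA-RISC's virtually-indexed caches there is no cheap way
 * to flush only those aliases, so fall back to a full cache flush.
 */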
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
		xa_lock_irqsave(&mapping->i_pages, flags)
#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
		xa_unlock_irqrestore(&mapping->i_pages, flags)

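/*
 * The I-cache does not snoop D-cache stores, so newly written code must
 * be flushed from the D-cache before the corresponding I-cache lines are
 * invalidated.
 */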
#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm(s,e);		\
	flush_kernel_icache_range_asm(s,e);		\
} while (0)

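/*
 * copy_to_user_page()/copy_from_user_page() are used when the kernel
 * accesses a user page through its kernel mapping (e.g. ptrace via
 * access_process_vm()): flush the user-space alias first so the copy
 * works on current data, and write back the kernel mapping after a
 * modification so the user mapping sees it.
 */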
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* Defined in pacache.S, exported in cache.c, used by flush_anon_page(). */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

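/*
 * Anonymous pages have no address_space, so flush_dcache_page() cannot
 * find their user-space mapping; flush the aliased user address (and its
 * TLB entry) directly instead.
 */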
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page)) {
		flush_tlb_page(vma, vmaddr);
		preempt_disable();
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
		preempt_enable();
	}
}

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

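/*
 * PA-RISC has no highmem, so kmap()/kmap_atomic() simply return the
 * existing kernel mapping.  The kunmap side flushes the kernel D-cache
 * so that data written through the kernel alias is visible to user-space
 * mappings of the same page.
 */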
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

static inline void *kmap_atomic(struct page *page)
{
	preempt_disable();
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	flush_kernel_dcache_page_addr(addr);
	pagefault_enable();
	preempt_enable();
}

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))

#endif /* _PARISC_CACHEFLUSH_H */