/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

/*
 * The cache does not need to be flushed when TLB entries change,
 * provided the cache is indexed by physical rather than virtual
 * address, since no virtual aliases can then exist.
 */
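
/*
 * Every hook below is a no-op fallback guarded by #ifndef: an
 * architecture that needs real cache maintenance provides its own
 * version and defines the matching macro (e.g.
 * "#define flush_cache_all flush_cache_all") so the stub here is
 * skipped.
 */

/* Flush the entire CPU cache. */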
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

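/* Flush all cache lines belonging to one address space, e.g. before it is torn down. */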
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

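/* Like flush_cache_mm(), but called when the address space is duplicated at fork(). */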
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

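/* Flush user cache lines for a virtual range before its page tables are changed. */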
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

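/* Single-page variant of flush_cache_range(); pfn names the backing page frame. */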
#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

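/*
 * Called when the kernel dirties a page-cache page, so architectures
 * with aliasing D-caches can reconcile the kernel's view with user
 * mappings.  ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 above advertises that
 * this generic version does nothing.
 */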
#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
#endif

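/*
 * flush_dcache_mmap_lock()/unlock() bracket updates to a file mapping's
 * list of user mappings, so that aliasing architectures can walk that
 * list safely while flushing; no-ops here.
 */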
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

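/*
 * Make the I-cache coherent with the D-cache after the kernel writes
 * instructions into memory, e.g. when loading a module.
 */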
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

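/*
 * Per-page I-cache hook for pages mapped into an executable VMA; its
 * work can usually be done in flush_dcache_page() instead.
 */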
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

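/*
 * I-cache maintenance after writing into a user page through its kernel
 * mapping; used by copy_to_user_page() below.
 */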
#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif

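/*
 * flush_cache_vmap() runs after a new vmalloc/vmap mapping is
 * installed; flush_cache_vunmap() runs before one is torn down.
 */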
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

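/*
 * Write into a user page through its kernel mapping, then flush the
 * I-cache in case the target is executable, e.g. ptrace poking a
 * breakpoint into another process's text.
 */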
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#endif

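/* Reading back needs no I-cache maintenance, only the copy itself. */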
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif

#endif /* __ASM_CACHEFLUSH_H */