// SPDX-License-Identifier: GPL-2.0
/*
 * DMA BUF page pool system
 *
 * Copyright (C) 2020 Linaro Ltd.
 *
 * Based on the ION page pool code
 * Copyright (C) 2011 Google, Inc.
 */
10
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include "page_pool.h"
18
/* Every live pool, so the shrinker can walk them; guarded by pool_list_lock. */
static LIST_HEAD(pool_list);
static DEFINE_MUTEX(pool_list_lock);
21
22static inline
23struct page *dmabuf_page_pool_alloc_pages(struct dmabuf_page_pool *pool)
24{
25 if (fatal_signal_pending(current))
26 return NULL;
27 return alloc_pages(pool->gfp_mask, pool->order);
28}
29
30static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
31 struct page *page)
32{
33 __free_pages(page, pool->order);
34}
35
36static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
37{
38 int index;
39
40 if (PageHighMem(page))
41 index = POOL_HIGHPAGE;
42 else
43 index = POOL_LOWPAGE;
44
45 mutex_lock(&pool->mutex);
46 list_add_tail(&page->lru, &pool->items[index]);
47 pool->count[index]++;
48 mutex_unlock(&pool->mutex);
49 mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
50 1 << pool->order);
51}
52
53static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
54{
55 struct page *page;
56
57 mutex_lock(&pool->mutex);
58 page = list_first_entry_or_null(&pool->items[index], struct page, lru);
59 if (page) {
60 pool->count[index]--;
61 list_del(&page->lru);
62 mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
63 -(1 << pool->order));
64 }
65 mutex_unlock(&pool->mutex);
66
67 return page;
68}
69
70static struct page *dmabuf_page_pool_fetch(struct dmabuf_page_pool *pool)
71{
72 struct page *page = NULL;
73
74 page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
75 if (!page)
76 page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
77
78 return page;
79}
80
81struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
82{
83 struct page *page = NULL;
84
85 if (WARN_ON(!pool))
86 return NULL;
87
88 page = dmabuf_page_pool_fetch(pool);
89
90 if (!page)
91 page = dmabuf_page_pool_alloc_pages(pool);
92 return page;
93}
94EXPORT_SYMBOL_GPL(dmabuf_page_pool_alloc);
95
96void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
97{
98 if (WARN_ON(pool->order != compound_order(page)))
99 return;
100
101 dmabuf_page_pool_add(pool, page);
102}
103EXPORT_SYMBOL_GPL(dmabuf_page_pool_free);
104
105static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
106{
107 int count = pool->count[POOL_LOWPAGE];
108
109 if (high)
110 count += pool->count[POOL_HIGHPAGE];
111
112 return count << pool->order;
113}
114
115struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
116{
117 struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
118 int i;
119
120 if (!pool)
121 return NULL;
122
123 for (i = 0; i < POOL_TYPE_SIZE; i++) {
124 pool->count[i] = 0;
125 INIT_LIST_HEAD(&pool->items[i]);
126 }
127 pool->gfp_mask = gfp_mask | __GFP_COMP;
128 pool->order = order;
129 mutex_init(&pool->mutex);
130
131 mutex_lock(&pool_list_lock);
132 list_add(&pool->list, &pool_list);
133 mutex_unlock(&pool_list_lock);
134
135 return pool;
136}
137EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
138
139void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
140{
141 struct page *page;
142 int i;
143
144 /* Remove us from the pool list */
145 mutex_lock(&pool_list_lock);
146 list_del(&pool->list);
147 mutex_unlock(&pool_list_lock);
148
149 /* Free any remaining pages in the pool */
150 for (i = 0; i < POOL_TYPE_SIZE; i++) {
151 while ((page = dmabuf_page_pool_remove(pool, i)))
152 dmabuf_page_pool_free_pages(pool, page);
153 }
154
155 kfree(pool);
156}
157EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
158
159static int dmabuf_page_pool_do_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
160 int nr_to_scan)
161{
162 int freed = 0;
163 bool high;
164
165 if (current_is_kswapd())
166 high = true;
167 else
168 high = !!(gfp_mask & __GFP_HIGHMEM);
169
170 if (nr_to_scan == 0)
171 return dmabuf_page_pool_total(pool, high);
172
173 while (freed < nr_to_scan) {
174 struct page *page;
175
176 /* Try to free low pages first */
177 page = dmabuf_page_pool_remove(pool, POOL_LOWPAGE);
178 if (!page)
179 page = dmabuf_page_pool_remove(pool, POOL_HIGHPAGE);
180
181 if (!page)
182 break;
183
184 dmabuf_page_pool_free_pages(pool, page);
185 freed += (1 << pool->order);
186 }
187
188 return freed;
189}
190
191static int dmabuf_page_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
192{
193 struct dmabuf_page_pool *pool;
194 int nr_total = 0;
195 int nr_freed;
196 int only_scan = 0;
197
198 if (!nr_to_scan)
199 only_scan = 1;
200
201 mutex_lock(&pool_list_lock);
202 list_for_each_entry(pool, &pool_list, list) {
203 if (only_scan) {
204 nr_total += dmabuf_page_pool_do_shrink(pool,
205 gfp_mask,
206 nr_to_scan);
207 } else {
208 nr_freed = dmabuf_page_pool_do_shrink(pool,
209 gfp_mask,
210 nr_to_scan);
211 nr_to_scan -= nr_freed;
212 nr_total += nr_freed;
213 if (nr_to_scan <= 0)
214 break;
215 }
216 }
217 mutex_unlock(&pool_list_lock);
218
219 return nr_total;
220}
221
222static unsigned long dmabuf_page_pool_shrink_count(struct shrinker *shrinker,
223 struct shrink_control *sc)
224{
225 return dmabuf_page_pool_shrink(sc->gfp_mask, 0);
226}
227
228static unsigned long dmabuf_page_pool_shrink_scan(struct shrinker *shrinker,
229 struct shrink_control *sc)
230{
231 if (sc->nr_to_scan == 0)
232 return 0;
233 return dmabuf_page_pool_shrink(sc->gfp_mask, sc->nr_to_scan);
234}
235
236struct shrinker pool_shrinker = {
237 .count_objects = dmabuf_page_pool_shrink_count,
238 .scan_objects = dmabuf_page_pool_shrink_scan,
239 .seeks = DEFAULT_SEEKS,
240 .batch = 0,
241};
242
243static int dmabuf_page_pool_init_shrinker(void)
244{
245 return register_shrinker(&pool_shrinker);
246}
247module_init(dmabuf_page_pool_init_shrinker);
248MODULE_LICENSE("GPL v2");