/*
 * Copyright (c) 2017 MediaTek Inc.
2 * Copyright (c) 2017 MediaTek Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24#include <assert.h>
25#include <err.h>
26#include <kernel/mutex.h>
27#include <lib/console.h>
28#include <lib/mempool.h>
29#include <list.h>
30#include <malloc.h>
31#include <stdlib.h>
32#include <string.h>
33#include <trace.h>
34
35#define LOCAL_TRACE 0
36
/* A contiguous span of pool memory, tracked on a pool's chunk list. */
struct mem_chunk {
    struct list_node node;  /* linkage in mempool.chunk_list */
    void *start;            /* first byte of the span */
    size_t len;             /* span length in bytes */
    bool free;              /* true when available for allocation */
};
43
/*
 * One pool type: a list of mem_chunk spans.
 * NOTE(review): start/len are only ever reset by mempool_clear() and never
 * populated by mempool_init() in this file — they appear unused; confirm
 * before relying on them.
 */
struct mempool {
    struct list_node chunk_list;  /* mem_chunk entries for this pool */
    void *start;
    size_t len;
};
49
/*
 * One pool per type; each chunk list starts empty.
 * NOTE(review): the initializer lists exactly two entries, which assumes
 * MAX_MEMPOOL_TYPE == 2 — confirm against lib/mempool.h.
 */
static struct mempool pool[MAX_MEMPOOL_TYPE] = {
    { LIST_INITIAL_VALUE(pool[0].chunk_list), NULL, 0 },
    { LIST_INITIAL_VALUE(pool[1].chunk_list), NULL, 0 }
};

/* Serializes all access to the pool chunk lists. */
static mutex_t memlock = MUTEX_INITIAL_VALUE(memlock);
56
57int mempool_init(void *mem, size_t size, uint32_t type)
58{
59 int i;
60 int ret;
61 struct mem_chunk *chunk;
62
63 LTRACEF("pool init %p, size %zx, type %d\n", mem, size, type);
64
65 /* check input arg, mem address should be aligned to a cache line */
66 if (!mem || !IS_ALIGNED(mem, CACHE_LINE) ||
67 !size || (type >= MAX_MEMPOOL_TYPE))
68 return ERR_INVALID_ARGS;
69
70 ret = NO_ERROR;
71 mutex_acquire(&memlock);
72 /* check if the mem address already inited */
73 for (i = 0; i < MAX_MEMPOOL_TYPE; i++) {
74 list_for_every_entry(&pool[i].chunk_list, chunk,
75 struct mem_chunk, node) {
76 if (chunk->start <= mem && ((chunk->start + chunk->len) > mem)) {
77 ret = ERR_ALREADY_EXISTS;
78 goto exit;
79 }
80 }
81 }
82
83 chunk = (struct mem_chunk *)malloc(sizeof(struct mem_chunk));
84 if (!chunk) {
85 ret = ERR_NO_MEMORY;
86 goto exit;
87 }
88
89 chunk->start = mem;
90 chunk->len = size;
91 chunk->free = true;
92 list_add_tail(&pool[type].chunk_list, &chunk->node);
93
94exit:
95 mutex_release(&memlock);
96 return ret;
97}
98
99void *mempool_alloc(size_t size, uint32_t type)
100{
101 bool found;
102 uint32_t i, s_type, e_type;
103 size_t alloc_size;
104 struct mem_chunk *chunk, *new_chunk;
105
106 LTRACEF("pool alloc size %zx, type %d\n", size, type);
107
108 if (!size ||
109 ((type >= MAX_MEMPOOL_TYPE) && (type != MEMPOOL_ANY)))
110 return NULL;
111
112 alloc_size = ROUNDUP(size, CACHE_LINE);
113 found = false;
114
115 s_type = e_type = type;
116 if (type == MEMPOOL_ANY) {
117 s_type = 0;
118 e_type = MAX_MEMPOOL_TYPE - 1;
119 }
120
121 mutex_acquire(&memlock);
122 for (i = s_type; i <= e_type; i++) {
123 list_for_every_entry(&pool[i].chunk_list, chunk,
124 struct mem_chunk, node) {
125 if (chunk->len < alloc_size || !chunk->free)
126 continue;
127
128 found = true;
129 break;
130 }
131
132 if (found)
133 break;
134 }
135
136 new_chunk = NULL;
137 if (found) {
138 /* if the chunk len happend to equal to alloc size, just return it */
139 if (chunk->len == alloc_size) {
140 chunk->free = false;
141 new_chunk = chunk;
142 } else {
143 new_chunk = (struct mem_chunk *)malloc(sizeof(struct mem_chunk));
144 if (new_chunk) {
145 new_chunk->start = chunk->start;
146 new_chunk->len = alloc_size;
147 new_chunk->free = false;
148 chunk->start = chunk->start + alloc_size;
149 chunk->len -= alloc_size;
150 list_add_before(&chunk->node, &new_chunk->node);
151 }
152 }
153 }
154
155 mutex_release(&memlock);
156 return new_chunk ? new_chunk->start : NULL;
157}
158
/*
 * mempool_free - return a previously allocated chunk to its pool.
 * @ptr: start address returned by mempool_alloc(); NULL is a no-op.
 *
 * Finds the in-use chunk whose start matches ptr, coalesces it with a
 * physically adjacent free predecessor and/or successor, then marks it
 * free.  An address that matches no allocated chunk is silently ignored.
 */
void mempool_free(void *ptr)
{
    int i;
    bool found;
    struct mem_chunk *chunk, *prev, *next;

    LTRACEF("pool free %p\n", ptr);

    if (NULL == ptr)
        return;

    /* walk through list to find matched chunk */
    found = false;
    mutex_acquire(&memlock);
    for (i = 0; i < MAX_MEMPOOL_TYPE; i++) {
        list_for_every_entry(&pool[i].chunk_list, chunk,
                struct mem_chunk, node) {
            if (!chunk->free && (chunk->start == ptr)) {
                found = true;
                break;
            }
        }

        if (found) {
            /* merge with adjacent chunk if possible */
            prev = list_prev_type(&pool[i].chunk_list, &chunk->node,
                    struct mem_chunk, node);
            /* prev must end exactly at ptr to be physically contiguous */
            if (prev && prev->free && ((prev->start + prev->len) == ptr)) {
                chunk->len += prev->len;
                chunk->start = prev->start;
                list_delete(&prev->node);
                free(prev);
                prev = NULL;
            }

            /*
             * Check the successor AFTER the prev merge: chunk->start and
             * chunk->len have been updated, so the adjacency test below
             * uses the coalesced extent.  Order matters here.
             */
            next = list_next_type(&pool[i].chunk_list, &chunk->node,
                    struct mem_chunk, node);
            if (next && next->free &&
                ((chunk->start + chunk->len) == next->start)) {
                chunk->len += next->len;
                list_delete(&next->node);
                free(next);
                next = NULL;
            }

            chunk->free = true;

            break;
        }
    }

    mutex_release(&memlock);
}
212
213void mempool_clear(void)
214{
215 int i;
216 struct mem_chunk *chunk;
217 struct mem_chunk *temp;
218
219 /* delete every node in the list */
220 mutex_acquire(&memlock);
221 for (i = 0; i < MAX_MEMPOOL_TYPE; i++) {
222 list_for_every_entry_safe(&pool[i].chunk_list, chunk, temp,
223 struct mem_chunk, node) {
224 list_delete(&chunk->node);
225 free(chunk);
226 chunk = NULL;
227 }
228 pool[i].start = NULL;
229 pool[i].len = 0;
230 }
231
232 mutex_release(&memlock);
233}
234
235#if LK_DEBUGLEVEL > 1
236
237#include <lib/console.h>
238
/* Forward declarations for the console command handlers below. */
static int cmd_mempool(int argc, const cmd_args *argv);
static void show_usage(const char *cmd);

/* Register the "mempool" command with the debug console. */
STATIC_COMMAND_START
STATIC_COMMAND("mempool", "mempool debug commands", &cmd_mempool)
STATIC_COMMAND_END(mempool);
245
/* Print the console help text for the mempool command. */
static void show_usage(const char *cmd)
{
    /* one format line per subcommand; each takes cmd as its sole argument */
    static const char *fmts[] = {
        "\t%s init <address> <size> <type>\n",
        "\t%s info\n",
        "\t%s alloc <size> <type>\n",
        "\t%s free <address>\n",
    };
    size_t k;

    printf("usage:\n");
    for (k = 0; k < sizeof(fmts) / sizeof(fmts[0]); k++)
        printf(fmts[k], cmd);
}
254
255static void mempool_dump(void)
256{
257 int i;
258 struct mem_chunk *chunk;
259
260 for (i = 0; i < MAX_MEMPOOL_TYPE; i++) {
261 printf("dump mempool type %d\n", i);
262 list_for_every_entry(&pool[i].chunk_list, chunk,
263 struct mem_chunk, node) {
264 printf("start %p, len %zx, free %d, type %d\n",
265 chunk->start, chunk->len, chunk->free, i);
266 }
267 }
268}
269
270static int cmd_mempool(int argc, const cmd_args *argv)
271{
272 int ret;
273 void *p;
274
275 if (argc < 2) {
276notenoughargs:
277 printf("not enough arguments\n");
278usage:
279 show_usage(argv[0].str);
280 return -1;
281 }
282
283 if (strcmp(argv[1].str, "init") == 0) {
284 if (argc < 5)
285 goto notenoughargs;
286
287 ret = mempool_init((void *)argv[2].u, argv[3].u, argv[4].i);
288 if (ret != NO_ERROR)
289 printf("mempool_init failed, ret %d\n", ret);
290 } else if (strcmp(argv[1].str, "info") == 0) {
291 mempool_dump();
292 } else if (strcmp(argv[1].str, "alloc") == 0) {
293 if (argc < 4)
294 goto notenoughargs;
295
296 p = mempool_alloc(argv[2].u, argv[3].i);
297 if (!p) {
298 printf("mempool alloc failed, size %lu, type %ld\n",
299 argv[2].u, argv[3].i);
300 mempool_dump();
301 }
302 } else if (strcmp(argv[1].str, "free") == 0) {
303 if (argc < 3)
304 goto notenoughargs;
305
306 mempool_free((void *)(uintptr_t)argv[2].u);
307 } else {
308 printf("unrecognized command\n");
309 goto usage;
310 }
311
312 return 0;
313}
314#endif