| xj | b04a402 | 2021-11-25 15:01:52 +0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 | 
|  | 2 | /* | 
|  | 3 | * drivers/base/devres.c - device resource management | 
|  | 4 | * | 
|  | 5 | * Copyright (c) 2006  SUSE Linux Products GmbH | 
|  | 6 | * Copyright (c) 2006  Tejun Heo <teheo@suse.de> | 
|  | 7 | */ | 
|  | 8 |  | 
|  | 9 | #include <linux/device.h> | 
|  | 10 | #include <linux/module.h> | 
|  | 11 | #include <linux/slab.h> | 
|  | 12 | #include <linux/percpu.h> | 
|  | 13 |  | 
|  | 14 | #include "base.h" | 
|  | 15 |  | 
/*
 * A devres node links one managed resource into a device's
 * dev->devres_head list and records how to release it on detach.
 */
struct devres_node {
	struct list_head		entry;		/* link in dev->devres_head */
	dr_release_t			release;	/* invoked when resource is released */
#ifdef CONFIG_DEBUG_DEVRES
	const char			*name;		/* debug: allocation site name */
	size_t				size;		/* debug: payload size in bytes */
#endif
};
|  | 24 |  | 
/* A devres_node plus the caller-visible payload that follows it. */
struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];	/* what devres_alloc() callers see */
};
|  | 36 |  | 
/*
 * A group is represented by two marker nodes in the same devres list:
 * node[0] opens the group, node[1] closes it.  A still-open group has
 * node[1] unlinked (its list entry is empty).
 */
struct devres_group {
	struct devres_node		node[2];	/* [0] open marker, [1] close marker */
	void				*id;		/* user-supplied ID, or the group itself */
	int				color;		/* scratch for remove_nodes() two-pass scan */
	/* -- 8 pointers */
};
|  | 43 |  | 
#ifdef CONFIG_DEBUG_DEVRES
/*
 * Set to non-zero via the "log" module parameter to trace devres
 * add/remove/release operations via dev_err().
 * Note: statics are zero-initialized; an explicit "= 0" is flagged
 * by checkpatch and has been dropped.
 */
static int log_devres;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

/* Record debug bookkeeping (allocation-site name and size) on @node. */
static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

/* Emit one trace line for operation @op on @node when logging is enabled. */
static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
			op, node, node->name, (unsigned long)node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
|  | 66 |  | 
|  | 67 | /* | 
|  | 68 | * Release functions for devres group.  These callbacks are used only | 
|  | 69 | * for identification. | 
|  | 70 | */ | 
/* Group-open marker release callback; identification only, never runs work. */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}
|  | 75 |  | 
/* Group-close marker release callback; identification only, never runs work. */
static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}
|  | 80 |  | 
|  | 81 | static struct devres_group * node_to_group(struct devres_node *node) | 
|  | 82 | { | 
|  | 83 | if (node->release == &group_open_release) | 
|  | 84 | return container_of(node, struct devres_group, node[0]); | 
|  | 85 | if (node->release == &group_close_release) | 
|  | 86 | return container_of(node, struct devres_group, node[1]); | 
|  | 87 | return NULL; | 
|  | 88 | } | 
|  | 89 |  | 
/*
 * Allocate a devres container with @size bytes of payload on NUMA node
 * @nid.  Only the header is cleared here; callers that want a zeroed
 * payload must include __GFP_ZERO in @gfp.
 *
 * RETURNS:
 * New devres on success, NULL on size overflow or allocation failure.
 */
static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres), size,
					&tot_size)))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* Zero only the header; payload initialization is the caller's choice. */
	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}
|  | 111 |  | 
/* Link @node at the tail of @dev's devres list.  Caller holds devres_lock. */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));	/* must not already be linked */
	list_add_tail(&node->entry, &dev->devres_head);
}
|  | 118 |  | 
#ifdef CONFIG_DEBUG_DEVRES
/*
 * Debug variant of devres_alloc_node(): identical behavior, but also
 * records @name so the allocation shows up in devres trace output.
 */
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			   const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif
|  | 158 |  | 
/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * Note that @fn runs with dev->devres_lock held and IRQs disabled,
 * so it must not sleep.
 *
 * RETURNS:
 * 	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	/* _safe_ variant: @fn is allowed to remove the current entry. */
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
|  | 200 |  | 
|  | 201 | /** | 
|  | 202 | * devres_free - Free device resource data | 
|  | 203 | * @res: Pointer to devres data to free | 
|  | 204 | * | 
|  | 205 | * Free devres created with devres_alloc(). | 
|  | 206 | */ | 
|  | 207 | void devres_free(void *res) | 
|  | 208 | { | 
|  | 209 | if (res) { | 
|  | 210 | struct devres *dr = container_of(res, struct devres, data); | 
|  | 211 |  | 
|  | 212 | BUG_ON(!list_empty(&dr->node.entry)); | 
|  | 213 | kfree(dr); | 
|  | 214 | } | 
|  | 215 | } | 
|  | 216 | EXPORT_SYMBOL_GPL(devres_free); | 
|  | 217 |  | 
/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	/* recover the container from the payload pointer handed to the caller */
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
|  | 237 |  | 
|  | 238 | static struct devres *find_dr(struct device *dev, dr_release_t release, | 
|  | 239 | dr_match_t match, void *match_data) | 
|  | 240 | { | 
|  | 241 | struct devres_node *node; | 
|  | 242 |  | 
|  | 243 | list_for_each_entry_reverse(node, &dev->devres_head, entry) { | 
|  | 244 | struct devres *dr = container_of(node, struct devres, node); | 
|  | 245 |  | 
|  | 246 | if (node->release != release) | 
|  | 247 | continue; | 
|  | 248 | if (match && !match(dev, dr->data, match_data)) | 
|  | 249 | continue; | 
|  | 250 | return dr; | 
|  | 251 | } | 
|  | 252 |  | 
|  | 253 | return NULL; | 
|  | 254 | } | 
|  | 255 |  | 
|  | 256 | /** | 
|  | 257 | * devres_find - Find device resource | 
|  | 258 | * @dev: Device to lookup resource from | 
|  | 259 | * @release: Look for resources associated with this release function | 
|  | 260 | * @match: Match function (optional) | 
|  | 261 | * @match_data: Data for the match function | 
|  | 262 | * | 
|  | 263 | * Find the latest devres of @dev which is associated with @release | 
|  | 264 | * and for which @match returns 1.  If @match is NULL, it's considered | 
|  | 265 | * to match all. | 
|  | 266 | * | 
|  | 267 | * RETURNS: | 
|  | 268 | * Pointer to found devres, NULL if not found. | 
|  | 269 | */ | 
|  | 270 | void * devres_find(struct device *dev, dr_release_t release, | 
|  | 271 | dr_match_t match, void *match_data) | 
|  | 272 | { | 
|  | 273 | struct devres *dr; | 
|  | 274 | unsigned long flags; | 
|  | 275 |  | 
|  | 276 | spin_lock_irqsave(&dev->devres_lock, flags); | 
|  | 277 | dr = find_dr(dev, release, match, match_data); | 
|  | 278 | spin_unlock_irqrestore(&dev->devres_lock, flags); | 
|  | 279 |  | 
|  | 280 | if (dr) | 
|  | 281 | return dr->data; | 
|  | 282 | return NULL; | 
|  | 283 | } | 
|  | 284 | EXPORT_SYMBOL_GPL(devres_find); | 
|  | 285 |  | 
/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;	/* ownership moved to @dev; skip the free below */
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);	/* no-op when @new_res was just added */

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
|  | 320 |  | 
|  | 321 | /** | 
|  | 322 | * devres_remove - Find a device resource and remove it | 
|  | 323 | * @dev: Device to find resource from | 
|  | 324 | * @release: Look for resources associated with this release function | 
|  | 325 | * @match: Match function (optional) | 
|  | 326 | * @match_data: Data for the match function | 
|  | 327 | * | 
|  | 328 | * Find the latest devres of @dev associated with @release and for | 
|  | 329 | * which @match returns 1.  If @match is NULL, it's considered to | 
|  | 330 | * match all.  If found, the resource is removed atomically and | 
|  | 331 | * returned. | 
|  | 332 | * | 
|  | 333 | * RETURNS: | 
|  | 334 | * Pointer to removed devres on success, NULL if not found. | 
|  | 335 | */ | 
|  | 336 | void * devres_remove(struct device *dev, dr_release_t release, | 
|  | 337 | dr_match_t match, void *match_data) | 
|  | 338 | { | 
|  | 339 | struct devres *dr; | 
|  | 340 | unsigned long flags; | 
|  | 341 |  | 
|  | 342 | spin_lock_irqsave(&dev->devres_lock, flags); | 
|  | 343 | dr = find_dr(dev, release, match, match_data); | 
|  | 344 | if (dr) { | 
|  | 345 | list_del_init(&dr->node.entry); | 
|  | 346 | devres_log(dev, &dr->node, "REM"); | 
|  | 347 | } | 
|  | 348 | spin_unlock_irqrestore(&dev->devres_lock, flags); | 
|  | 349 |  | 
|  | 350 | if (dr) | 
|  | 351 | return dr->data; | 
|  | 352 | return NULL; | 
|  | 353 | } | 
|  | 354 | EXPORT_SYMBOL_GPL(devres_remove); | 
|  | 355 |  | 
|  | 356 | /** | 
|  | 357 | * devres_destroy - Find a device resource and destroy it | 
|  | 358 | * @dev: Device to find resource from | 
|  | 359 | * @release: Look for resources associated with this release function | 
|  | 360 | * @match: Match function (optional) | 
|  | 361 | * @match_data: Data for the match function | 
|  | 362 | * | 
|  | 363 | * Find the latest devres of @dev associated with @release and for | 
|  | 364 | * which @match returns 1.  If @match is NULL, it's considered to | 
|  | 365 | * match all.  If found, the resource is removed atomically and freed. | 
|  | 366 | * | 
|  | 367 | * Note that the release function for the resource will not be called, | 
|  | 368 | * only the devres-allocated data will be freed.  The caller becomes | 
|  | 369 | * responsible for freeing any other data. | 
|  | 370 | * | 
|  | 371 | * RETURNS: | 
|  | 372 | * 0 if devres is found and freed, -ENOENT if not found. | 
|  | 373 | */ | 
|  | 374 | int devres_destroy(struct device *dev, dr_release_t release, | 
|  | 375 | dr_match_t match, void *match_data) | 
|  | 376 | { | 
|  | 377 | void *res; | 
|  | 378 |  | 
|  | 379 | res = devres_remove(dev, release, match, match_data); | 
|  | 380 | if (unlikely(!res)) | 
|  | 381 | return -ENOENT; | 
|  | 382 |  | 
|  | 383 | devres_free(res); | 
|  | 384 | return 0; | 
|  | 385 | } | 
|  | 386 | EXPORT_SYMBOL_GPL(devres_destroy); | 
|  | 387 |  | 
|  | 388 |  | 
|  | 389 | /** | 
|  | 390 | * devres_release - Find a device resource and destroy it, calling release | 
|  | 391 | * @dev: Device to find resource from | 
|  | 392 | * @release: Look for resources associated with this release function | 
|  | 393 | * @match: Match function (optional) | 
|  | 394 | * @match_data: Data for the match function | 
|  | 395 | * | 
|  | 396 | * Find the latest devres of @dev associated with @release and for | 
|  | 397 | * which @match returns 1.  If @match is NULL, it's considered to | 
|  | 398 | * match all.  If found, the resource is removed atomically, the | 
|  | 399 | * release function called and the resource freed. | 
|  | 400 | * | 
|  | 401 | * RETURNS: | 
|  | 402 | * 0 if devres is found and freed, -ENOENT if not found. | 
|  | 403 | */ | 
|  | 404 | int devres_release(struct device *dev, dr_release_t release, | 
|  | 405 | dr_match_t match, void *match_data) | 
|  | 406 | { | 
|  | 407 | void *res; | 
|  | 408 |  | 
|  | 409 | res = devres_remove(dev, release, match, match_data); | 
|  | 410 | if (unlikely(!res)) | 
|  | 411 | return -ENOENT; | 
|  | 412 |  | 
|  | 413 | (*release)(dev, res); | 
|  | 414 | devres_free(res); | 
|  | 415 | return 0; | 
|  | 416 | } | 
|  | 417 | EXPORT_SYMBOL_GPL(devres_release); | 
|  | 418 |  | 
/*
 * Detach every devres entry in [@first, @end) — and every group wholly
 * contained in that range — moving them onto @todo for later release.
 * Group markers use ->color as scratch state across the two passes.
 * Caller must hold dev->devres_lock.
 *
 * RETURNS:
 * Number of non-group devres entries moved to @todo.
 */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;	/* keep @first valid after the move */
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end).  That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		/* only group markers remain after pass one */
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;	/* open group: the single marker suffices */

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
|  | 487 |  | 
/*
 * Remove everything in [@first, @end) from @dev's list and run the
 * release callbacks.  Entered with dev->devres_lock held; the lock is
 * dropped (restoring @flags) before the callbacks are invoked.
 *
 * RETURNS:
 * Number of non-group devres entries released.
 */
static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}
|  | 511 |  | 
|  | 512 | /** | 
|  | 513 | * devres_release_all - Release all managed resources | 
|  | 514 | * @dev: Device to release resources for | 
|  | 515 | * | 
|  | 516 | * Release all resources associated with @dev.  This function is | 
|  | 517 | * called on driver detach. | 
|  | 518 | */ | 
|  | 519 | int devres_release_all(struct device *dev) | 
|  | 520 | { | 
|  | 521 | unsigned long flags; | 
|  | 522 |  | 
|  | 523 | /* Looks like an uninitialized device structure */ | 
|  | 524 | if (WARN_ON(dev->devres_head.next == NULL)) | 
|  | 525 | return -ENODEV; | 
|  | 526 | spin_lock_irqsave(&dev->devres_lock, flags); | 
|  | 527 | return release_nodes(dev, dev->devres_head.next, &dev->devres_head, | 
|  | 528 | flags); | 
|  | 529 | } | 
|  | 530 |  | 
|  | 531 | /** | 
|  | 532 | * devres_open_group - Open a new devres group | 
|  | 533 | * @dev: Device to open devres group for | 
|  | 534 | * @id: Separator ID | 
|  | 535 | * @gfp: Allocation flags | 
|  | 536 | * | 
|  | 537 | * Open a new devres group for @dev with @id.  For @id, using a | 
|  | 538 | * pointer to an object which won't be used for another group is | 
|  | 539 | * recommended.  If @id is NULL, address-wise unique ID is created. | 
|  | 540 | * | 
|  | 541 | * RETURNS: | 
|  | 542 | * ID of the new group, NULL on failure. | 
|  | 543 | */ | 
|  | 544 | void * devres_open_group(struct device *dev, void *id, gfp_t gfp) | 
|  | 545 | { | 
|  | 546 | struct devres_group *grp; | 
|  | 547 | unsigned long flags; | 
|  | 548 |  | 
|  | 549 | grp = kmalloc(sizeof(*grp), gfp); | 
|  | 550 | if (unlikely(!grp)) | 
|  | 551 | return NULL; | 
|  | 552 |  | 
|  | 553 | grp->node[0].release = &group_open_release; | 
|  | 554 | grp->node[1].release = &group_close_release; | 
|  | 555 | INIT_LIST_HEAD(&grp->node[0].entry); | 
|  | 556 | INIT_LIST_HEAD(&grp->node[1].entry); | 
|  | 557 | set_node_dbginfo(&grp->node[0], "grp<", 0); | 
|  | 558 | set_node_dbginfo(&grp->node[1], "grp>", 0); | 
|  | 559 | grp->id = grp; | 
|  | 560 | if (id) | 
|  | 561 | grp->id = id; | 
|  | 562 |  | 
|  | 563 | spin_lock_irqsave(&dev->devres_lock, flags); | 
|  | 564 | add_dr(dev, &grp->node[0]); | 
|  | 565 | spin_unlock_irqrestore(&dev->devres_lock, flags); | 
|  | 566 | return grp->id; | 
|  | 567 | } | 
|  | 568 | EXPORT_SYMBOL_GPL(devres_open_group); | 
|  | 569 |  | 
/* Find devres group with ID @id.  If @id is NULL, look for the latest
 * open group, i.e. the most recent one whose close marker (node[1])
 * has not been added to the list.  Caller holds dev->devres_lock.
 */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		/* only open markers identify a group's head */
		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}
|  | 592 |  | 
|  | 593 | /** | 
|  | 594 | * devres_close_group - Close a devres group | 
|  | 595 | * @dev: Device to close devres group for | 
|  | 596 | * @id: ID of target group, can be NULL | 
|  | 597 | * | 
|  | 598 | * Close the group identified by @id.  If @id is NULL, the latest open | 
|  | 599 | * group is selected. | 
|  | 600 | */ | 
|  | 601 | void devres_close_group(struct device *dev, void *id) | 
|  | 602 | { | 
|  | 603 | struct devres_group *grp; | 
|  | 604 | unsigned long flags; | 
|  | 605 |  | 
|  | 606 | spin_lock_irqsave(&dev->devres_lock, flags); | 
|  | 607 |  | 
|  | 608 | grp = find_group(dev, id); | 
|  | 609 | if (grp) | 
|  | 610 | add_dr(dev, &grp->node[1]); | 
|  | 611 | else | 
|  | 612 | WARN_ON(1); | 
|  | 613 |  | 
|  | 614 | spin_unlock_irqrestore(&dev->devres_lock, flags); | 
|  | 615 | } | 
|  | 616 | EXPORT_SYMBOL_GPL(devres_close_group); | 
|  | 617 |  | 
/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		/* unlink both markers; the group members stay registered */
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);	/* kfree(NULL) is a no-op on the WARN path */
}
EXPORT_SYMBOL_GPL(devres_remove_group);
|  | 647 |  | 
/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		/* a closed group ends just past its close marker */
		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		/* NOTE: release_nodes() drops devres_lock for us here */
		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
|  | 685 |  | 
/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */
|  | 690 |  | 
/* Payload stored for devm_add_action(): the callback and its argument. */
struct action_devres {
	void *data;		/* argument passed to @action */
	void (*action)(void *);	/* callback run at teardown */
};
|  | 695 |  | 
|  | 696 | static int devm_action_match(struct device *dev, void *res, void *p) | 
|  | 697 | { | 
|  | 698 | struct action_devres *devres = res; | 
|  | 699 | struct action_devres *target = p; | 
|  | 700 |  | 
|  | 701 | return devres->action == target->action && | 
|  | 702 | devres->data == target->data; | 
|  | 703 | } | 
|  | 704 |  | 
/* Release callback: invoke the user-supplied action with its argument. */
static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}
|  | 711 |  | 
/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 *
 * RETURNS:
 * 0 on success, -ENOMEM if the bookkeeping allocation fails.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
|  | 737 |  | 
|  | 738 | /** | 
|  | 739 | * devm_remove_action() - removes previously added custom action | 
|  | 740 | * @dev: Device that owns the action | 
|  | 741 | * @action: Function implementing the action | 
|  | 742 | * @data: Pointer to data passed to @action implementation | 
|  | 743 | * | 
|  | 744 | * Removes instance of @action previously added by devm_add_action(). | 
|  | 745 | * Both action and data should match one of the existing entries. | 
|  | 746 | */ | 
|  | 747 | void devm_remove_action(struct device *dev, void (*action)(void *), void *data) | 
|  | 748 | { | 
|  | 749 | struct action_devres devres = { | 
|  | 750 | .data = data, | 
|  | 751 | .action = action, | 
|  | 752 | }; | 
|  | 753 |  | 
|  | 754 | WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match, | 
|  | 755 | &devres)); | 
|  | 756 |  | 
|  | 757 | } | 
|  | 758 | EXPORT_SYMBOL_GPL(devm_remove_action); | 
|  | 759 |  | 
/*
 * Managed kmalloc/kfree
 */
/* No extra teardown: the devres core frees the buffer itself. */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}
|  | 767 |  | 
/* Match by payload address: @data is the pointer devm_kmalloc() returned. */
static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}
|  | 772 |  | 
/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
|  | 804 |  | 
|  | 805 | /** | 
|  | 806 | * devm_kstrdup - Allocate resource managed space and | 
|  | 807 | *                copy an existing string into that. | 
|  | 808 | * @dev: Device to allocate memory for | 
|  | 809 | * @s: the string to duplicate | 
|  | 810 | * @gfp: the GFP mask used in the devm_kmalloc() call when | 
|  | 811 | *       allocating memory | 
|  | 812 | * RETURNS: | 
|  | 813 | * Pointer to allocated string on success, NULL on failure. | 
|  | 814 | */ | 
|  | 815 | char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) | 
|  | 816 | { | 
|  | 817 | size_t size; | 
|  | 818 | char *buf; | 
|  | 819 |  | 
|  | 820 | if (!s) | 
|  | 821 | return NULL; | 
|  | 822 |  | 
|  | 823 | size = strlen(s) + 1; | 
|  | 824 | buf = devm_kmalloc(dev, size, gfp); | 
|  | 825 | if (buf) | 
|  | 826 | memcpy(buf, s, size); | 
|  | 827 | return buf; | 
|  | 828 | } | 
|  | 829 | EXPORT_SYMBOL_GPL(devm_kstrdup); | 
|  | 830 |  | 
|  | 831 | /** | 
|  | 832 | * devm_kvasprintf - Allocate resource managed space and format a string | 
|  | 833 | *		     into that. | 
|  | 834 | * @dev: Device to allocate memory for | 
|  | 835 | * @gfp: the GFP mask used in the devm_kmalloc() call when | 
|  | 836 | *       allocating memory | 
|  | 837 | * @fmt: The printf()-style format string | 
|  | 838 | * @ap: Arguments for the format string | 
|  | 839 | * RETURNS: | 
|  | 840 | * Pointer to allocated string on success, NULL on failure. | 
|  | 841 | */ | 
|  | 842 | char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, | 
|  | 843 | va_list ap) | 
|  | 844 | { | 
|  | 845 | unsigned int len; | 
|  | 846 | char *p; | 
|  | 847 | va_list aq; | 
|  | 848 |  | 
|  | 849 | va_copy(aq, ap); | 
|  | 850 | len = vsnprintf(NULL, 0, fmt, aq); | 
|  | 851 | va_end(aq); | 
|  | 852 |  | 
|  | 853 | p = devm_kmalloc(dev, len+1, gfp); | 
|  | 854 | if (!p) | 
|  | 855 | return NULL; | 
|  | 856 |  | 
|  | 857 | vsnprintf(p, len+1, fmt, ap); | 
|  | 858 |  | 
|  | 859 | return p; | 
|  | 860 | } | 
|  | 861 | EXPORT_SYMBOL(devm_kvasprintf); | 
|  | 862 |  | 
|  | 863 | /** | 
|  | 864 | * devm_kasprintf - Allocate resource managed space and format a string | 
|  | 865 | *		    into that. | 
|  | 866 | * @dev: Device to allocate memory for | 
|  | 867 | * @gfp: the GFP mask used in the devm_kmalloc() call when | 
|  | 868 | *       allocating memory | 
|  | 869 | * @fmt: The printf()-style format string | 
|  | 870 | * @...: Arguments for the format string | 
|  | 871 | * RETURNS: | 
|  | 872 | * Pointer to allocated string on success, NULL on failure. | 
|  | 873 | */ | 
|  | 874 | char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) | 
|  | 875 | { | 
|  | 876 | va_list ap; | 
|  | 877 | char *p; | 
|  | 878 |  | 
|  | 879 | va_start(ap, fmt); | 
|  | 880 | p = devm_kvasprintf(dev, gfp, fmt, ap); | 
|  | 881 | va_end(ap); | 
|  | 882 |  | 
|  | 883 | return p; | 
|  | 884 | } | 
|  | 885 | EXPORT_SYMBOL_GPL(devm_kasprintf); | 
|  | 886 |  | 
|  | 887 | /** | 
|  | 888 | * devm_kfree - Resource-managed kfree | 
|  | 889 | * @dev: Device this memory belongs to | 
|  | 890 | * @p: Memory to free | 
|  | 891 | * | 
|  | 892 | * Free memory allocated with devm_kmalloc(). | 
|  | 893 | */ | 
|  | 894 | void devm_kfree(struct device *dev, void *p) | 
|  | 895 | { | 
|  | 896 | int rc; | 
|  | 897 |  | 
|  | 898 | rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p); | 
|  | 899 | WARN_ON(rc); | 
|  | 900 | } | 
|  | 901 | EXPORT_SYMBOL_GPL(devm_kfree); | 
|  | 902 |  | 
|  | 903 | /** | 
|  | 904 | * devm_kmemdup - Resource-managed kmemdup | 
|  | 905 | * @dev: Device this memory belongs to | 
|  | 906 | * @src: Memory region to duplicate | 
|  | 907 | * @len: Memory region length | 
|  | 908 | * @gfp: GFP mask to use | 
|  | 909 | * | 
|  | 910 | * Duplicate region of a memory using resource managed kmalloc | 
|  | 911 | */ | 
|  | 912 | void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) | 
|  | 913 | { | 
|  | 914 | void *p; | 
|  | 915 |  | 
|  | 916 | p = devm_kmalloc(dev, len, gfp); | 
|  | 917 | if (p) | 
|  | 918 | memcpy(p, src, len); | 
|  | 919 |  | 
|  | 920 | return p; | 
|  | 921 | } | 
|  | 922 | EXPORT_SYMBOL_GPL(devm_kmemdup); | 
|  | 923 |  | 
/* Bookkeeping payload for a devm-managed __get_free_pages() allocation. */
struct pages_devres {
	unsigned long addr;	/* base address returned by __get_free_pages() */
	unsigned int order;	/* allocation is (1 << order) pages */
};
|  | 928 |  | 
|  | 929 | static int devm_pages_match(struct device *dev, void *res, void *p) | 
|  | 930 | { | 
|  | 931 | struct pages_devres *devres = res; | 
|  | 932 | struct pages_devres *target = p; | 
|  | 933 |  | 
|  | 934 | return devres->addr == target->addr; | 
|  | 935 | } | 
|  | 936 |  | 
|  | 937 | static void devm_pages_release(struct device *dev, void *res) | 
|  | 938 | { | 
|  | 939 | struct pages_devres *devres = res; | 
|  | 940 |  | 
|  | 941 | free_pages(devres->addr, devres->order); | 
|  | 942 | } | 
|  | 943 |  | 
|  | 944 | /** | 
|  | 945 | * devm_get_free_pages - Resource-managed __get_free_pages | 
|  | 946 | * @dev: Device to allocate memory for | 
|  | 947 | * @gfp_mask: Allocation gfp flags | 
|  | 948 | * @order: Allocation size is (1 << order) pages | 
|  | 949 | * | 
|  | 950 | * Managed get_free_pages.  Memory allocated with this function is | 
|  | 951 | * automatically freed on driver detach. | 
|  | 952 | * | 
|  | 953 | * RETURNS: | 
|  | 954 | * Address of allocated memory on success, 0 on failure. | 
|  | 955 | */ | 
|  | 956 |  | 
|  | 957 | unsigned long devm_get_free_pages(struct device *dev, | 
|  | 958 | gfp_t gfp_mask, unsigned int order) | 
|  | 959 | { | 
|  | 960 | struct pages_devres *devres; | 
|  | 961 | unsigned long addr; | 
|  | 962 |  | 
|  | 963 | addr = __get_free_pages(gfp_mask, order); | 
|  | 964 |  | 
|  | 965 | if (unlikely(!addr)) | 
|  | 966 | return 0; | 
|  | 967 |  | 
|  | 968 | devres = devres_alloc(devm_pages_release, | 
|  | 969 | sizeof(struct pages_devres), GFP_KERNEL); | 
|  | 970 | if (unlikely(!devres)) { | 
|  | 971 | free_pages(addr, order); | 
|  | 972 | return 0; | 
|  | 973 | } | 
|  | 974 |  | 
|  | 975 | devres->addr = addr; | 
|  | 976 | devres->order = order; | 
|  | 977 |  | 
|  | 978 | devres_add(dev, devres); | 
|  | 979 | return addr; | 
|  | 980 | } | 
|  | 981 | EXPORT_SYMBOL_GPL(devm_get_free_pages); | 
|  | 982 |  | 
|  | 983 | /** | 
|  | 984 | * devm_free_pages - Resource-managed free_pages | 
|  | 985 | * @dev: Device this memory belongs to | 
|  | 986 | * @addr: Memory to free | 
|  | 987 | * | 
|  | 988 | * Free memory allocated with devm_get_free_pages(). Unlike free_pages, | 
|  | 989 | * there is no need to supply the @order. | 
|  | 990 | */ | 
|  | 991 | void devm_free_pages(struct device *dev, unsigned long addr) | 
|  | 992 | { | 
|  | 993 | struct pages_devres devres = { .addr = addr }; | 
|  | 994 |  | 
|  | 995 | WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, | 
|  | 996 | &devres)); | 
|  | 997 | } | 
|  | 998 | EXPORT_SYMBOL_GPL(devm_free_pages); | 
|  | 999 |  | 
|  | 1000 | static void devm_percpu_release(struct device *dev, void *pdata) | 
|  | 1001 | { | 
|  | 1002 | void __percpu *p; | 
|  | 1003 |  | 
|  | 1004 | p = *(void __percpu **)pdata; | 
|  | 1005 | free_percpu(p); | 
|  | 1006 | } | 
|  | 1007 |  | 
/*
 * Match callback for devm_free_percpu(): @data is a devres payload that
 * stores a per-cpu cookie, @p is the cookie being searched for.
 */
static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	/*
	 * NOTE(review): container_of() recovers the enclosing struct devres,
	 * but devr->data is then the same address as @data itself, so this
	 * is a round-trip; kept byte-identical to avoid churn.
	 */
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}
|  | 1014 |  | 
|  | 1015 | /** | 
|  | 1016 | * __devm_alloc_percpu - Resource-managed alloc_percpu | 
|  | 1017 | * @dev: Device to allocate per-cpu memory for | 
|  | 1018 | * @size: Size of per-cpu memory to allocate | 
|  | 1019 | * @align: Alignment of per-cpu memory to allocate | 
|  | 1020 | * | 
|  | 1021 | * Managed alloc_percpu. Per-cpu memory allocated with this function is | 
|  | 1022 | * automatically freed on driver detach. | 
|  | 1023 | * | 
|  | 1024 | * RETURNS: | 
|  | 1025 | * Pointer to allocated memory on success, NULL on failure. | 
|  | 1026 | */ | 
|  | 1027 | void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, | 
|  | 1028 | size_t align) | 
|  | 1029 | { | 
|  | 1030 | void *p; | 
|  | 1031 | void __percpu *pcpu; | 
|  | 1032 |  | 
|  | 1033 | pcpu = __alloc_percpu(size, align); | 
|  | 1034 | if (!pcpu) | 
|  | 1035 | return NULL; | 
|  | 1036 |  | 
|  | 1037 | p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL); | 
|  | 1038 | if (!p) { | 
|  | 1039 | free_percpu(pcpu); | 
|  | 1040 | return NULL; | 
|  | 1041 | } | 
|  | 1042 |  | 
|  | 1043 | *(void __percpu **)p = pcpu; | 
|  | 1044 |  | 
|  | 1045 | devres_add(dev, p); | 
|  | 1046 |  | 
|  | 1047 | return pcpu; | 
|  | 1048 | } | 
|  | 1049 | EXPORT_SYMBOL_GPL(__devm_alloc_percpu); | 
|  | 1050 |  | 
|  | 1051 | /** | 
|  | 1052 | * devm_free_percpu - Resource-managed free_percpu | 
|  | 1053 | * @dev: Device this memory belongs to | 
|  | 1054 | * @pdata: Per-cpu memory to free | 
|  | 1055 | * | 
|  | 1056 | * Free memory allocated with devm_alloc_percpu(). | 
|  | 1057 | */ | 
|  | 1058 | void devm_free_percpu(struct device *dev, void __percpu *pdata) | 
|  | 1059 | { | 
|  | 1060 | WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, | 
|  | 1061 | (void *)pdata)); | 
|  | 1062 | } | 
|  | 1063 | EXPORT_SYMBOL_GPL(devm_free_percpu); |