/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_MTK_IOMMU_MISC_DBG
#include "m4u_debug.h"
#endif

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
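
/*
 * Example (illustrative sketch only, not part of the API contract): an
 * IOMMU driver would typically pair the cookie calls with its domain
 * allocation/teardown callbacks, roughly as below. The my_domain wrapper
 * and callback name are hypothetical.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 * The matching domain_free callback calls iommu_put_dma_cookie() on the
 * domain before freeing it.
 */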

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
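
/*
 * Example (sketch, with a made-up base address): a caller that manages
 * its own IOVA space on an unmanaged domain could set aside a window at
 * 0x8000000 for MSI doorbells and hand it over like so:
 *
 *	if (iommu_get_msi_cookie(domain, 0x8000000))
 *		goto out_err;
 *
 * The caller must then keep that region out of its own allocator, since
 * iommu_dma_get_msi_page() will carve PAGE_SIZE mappings linearly
 * upwards from @base.
 */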

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
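
/*
 * Example (sketch): a driver with additional reservations of its own can
 * chain this helper from its callback; my_get_resv_regions and
 * my_add_driver_specific_regions are hypothetical names.
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		my_add_driver_specific_regions(dev, head);
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */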

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
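
/*
 * Example (sketch): once a device's default DMA domain is known, arch
 * code would initialise it along these lines, with dma_base and size
 * describing the usable bus address window (error handling trimmed):
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev))
 *		return false;
 *
 * with a false return meaning the caller falls back to its non-IOMMU
 * dma_map_ops. iommu_dma_init() is expected to have been called once at
 * init time so that the IOVA cache exists before any allocations.
 */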

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
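
/*
 * For example, a cache-coherent device doing a DMA_TO_DEVICE transfer
 * with no special attributes gets IOMMU_READ | IOMMU_CACHE, while a
 * non-coherent device doing DMA_FROM_DEVICE with DMA_ATTR_PRIVILEGED
 * gets IOMMU_WRITE | IOMMU_PRIV.
 */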

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);
#ifdef CONFIG_MTK_IOMMU_MISC_DBG
	if (iova)
		mtk_iova_dbg_alloc(dev, ((dma_addr_t)iova << shift), size);
#endif
	return (dma_addr_t)iova << shift;
}
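
/*
 * Worked example of the rounding above, assuming a 4KB IOVA granule: a
 * 20KB request is 5 granules, which is under the range-cache limit and
 * so is padded to an 8-granule IOVA allocation. The matching free path
 * passes the unrounded size, but both 5 and 8 granules resolve to the
 * same power-of-two cache bucket, which is all that matters for reuse.
 */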

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
#ifdef CONFIG_MTK_IOMMU_MISC_DBG
	mtk_iova_dbg_free(iova, size);
#endif
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	pr_notice("%s, %d\n", __func__, __LINE__);
	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;
	array_size = PAGE_ALIGN(array_size);
	pr_notice("%s, %d, count:%u, size:%u\n",
		  __func__, __LINE__, count, array_size);

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	pr_notice("%s, %d\n", __func__, __LINE__);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
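
/*
 * Example (sketch): an arch dma_map_ops .alloc for a non-coherent device
 * might drive this roughly as follows, where __dma_flush_area() stands
 * in for whatever cache maintenance routine the architecture provides:
 *
 *	static void flush_page(struct device *dev, const void *virt,
 *			       phys_addr_t phys)
 *	{
 *		__dma_flush_area(virt, PAGE_SIZE);
 *	}
 *
 *	pages = iommu_dma_alloc(dev, iosize, gfp, attrs,
 *				dma_info_to_prot(DMA_BIDIRECTIONAL,
 *						 coherent, attrs),
 *				handle, flush_page);
 *
 * followed by dma_common_pages_remap() or similar to obtain a kernel VA
 * covering the returned page array.
 */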

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
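
/*
 * Example (sketch): an arch .mmap implementation that keeps the page
 * array behind a vmalloc-style kernel mapping can recover it and hand it
 * straight to the helper above, once it has checked the VMA bounds and
 * set vma->vm_page_prot itself:
 *
 *	struct vm_struct *area = find_vm_area(cpu_addr);
 *
 *	if (area && area->pages)
 *		return iommu_dma_mmap(area->pages, size, vma);
 */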

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
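
/*
 * Concrete illustration of the merging above: with a 4KB granule, two
 * page-aligned 4KB segments mapped back-to-back in IOVA space will
 * typically be handed back as one 8KB DMA segment, provided the device's
 * max segment size and boundary mask allow it; a segment whose original
 * offset was not granule-aligned (s_iova_off != 0) always starts a new
 * DMA segment instead.
 */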

/*
 * If mapping failed, then just restore the original list, while making
 * sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
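
/*
 * Example (sketch): an arch .map_sg wrapper is expected to be thin,
 * e.g. (is_device_dma_coherent() standing in for however the arch
 * tracks coherency):
 *
 *	return iommu_dma_map_sg(dev, sgl, nents,
 *			dma_info_to_prot(dir, is_device_dma_coherent(dev),
 *					 attrs));
 *
 * with any CPU cache maintenance for a non-coherent device done before
 * the call, and a return value of 0 treated as failure.
 */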

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}