1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 *
22 * This file implements early detection/parsing of Remapping Devices
23 * reported to the OS through BIOS via DMA remapping reporting (DMAR) ACPI
24 * tables.
25 *
26 * These routines are used by both DMA-remapping and Interrupt-remapping
27 */
28
29#define pr_fmt(fmt) "DMAR: " fmt
30
31#include <linux/pci.h>
32#include <linux/dmar.h>
33#include <linux/iova.h>
34#include <linux/intel-iommu.h>
35#include <linux/timer.h>
36#include <linux/irq.h>
37#include <linux/interrupt.h>
38#include <linux/tboot.h>
39#include <linux/dmi.h>
40#include <linux/slab.h>
41#include <linux/iommu.h>
42#include <linux/limits.h>
43#include <asm/irq_remapping.h>
44#include <asm/iommu_table.h>
45
46#include "irq_remapping.h"
47
48typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
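/*
 * Per-type callback table used while walking the remapping structures of a
 * DMAR table: cb[]/arg[] are indexed by structure type, ignore_unhandled
 * suppresses the error for types that have no handler, and print_entry
 * logs each entry before it is handled.
 */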
49struct dmar_res_callback {
50 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
51 void *arg[ACPI_DMAR_TYPE_RESERVED];
52 bool ignore_unhandled;
53 bool print_entry;
54};
55
56/*
57 * Assumptions:
58 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
59 * before IO devices managed by that unit.
60 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
61 * after IO devices managed by that unit.
62 * 3) Hotplug events are rare.
63 *
64 * Locking rules for DMA and interrupt remapping related global data structures:
65 * 1) Use dmar_global_lock in process context
66 * 2) Use RCU in interrupt context
67 */
68DECLARE_RWSEM(dmar_global_lock);
69LIST_HEAD(dmar_drhd_units);
70
71struct acpi_table_header * __initdata dmar_tbl;
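/* 1: device scopes not yet initialized, 0: initialized, < 0: initialization failed */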
72static int dmar_dev_scope_status = 1;
73static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
74
75static int alloc_iommu(struct dmar_drhd_unit *drhd);
76static void free_iommu(struct intel_iommu *iommu);
77
78extern const struct iommu_ops intel_iommu_ops;
79
80static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
81{
82 /*
83 * add INCLUDE_ALL at the tail, so that a scan of the list will
84 * find it at the very end.
85 */
86 if (drhd->include_all)
87 list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
88 else
89 list_add_rcu(&drhd->list, &dmar_drhd_units);
90}
91
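/*
 * Count the scope entries that can be bound to a struct device (ACPI
 * namespace devices, PCI endpoints and bridges) and allocate a matching
 * array of dmar_dev_scope slots. IOAPIC and HPET scopes are skipped
 * silently; any other type only triggers a warning.
 */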
92void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
93{
94 struct acpi_dmar_device_scope *scope;
95
96 *cnt = 0;
97 while (start < end) {
98 scope = start;
99 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
100 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
101 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
102 (*cnt)++;
103 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
104 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
105 pr_warn("Unsupported device scope\n");
106 }
107 start += scope->length;
108 }
109 if (*cnt == 0)
110 return NULL;
111
112 return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
113}
114
115void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
116{
117 int i;
118 struct device *tmp_dev;
119
120 if (*devices && *cnt) {
121 for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
122 put_device(tmp_dev);
123 kfree(*devices);
124 }
125
126 *devices = NULL;
127 *cnt = 0;
128}
129
130/* Optimize out kzalloc()/kfree() for normal cases */
131static char dmar_pci_notify_info_buf[64];
132
133static struct dmar_pci_notify_info *
134dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
135{
136 int level = 0;
137 size_t size;
138 struct pci_dev *tmp;
139 struct dmar_pci_notify_info *info;
140
141 BUG_ON(dev->is_virtfn);
142
143 /*
144 * Ignore devices that have a domain number higher than what can
145 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
146 */
147 if (pci_domain_nr(dev->bus) > U16_MAX)
148 return NULL;
149
150 /* Only generate path[] for device addition event */
151 if (event == BUS_NOTIFY_ADD_DEVICE)
152 for (tmp = dev; tmp; tmp = tmp->bus->self)
153 level++;
154
155 size = sizeof(*info) + level * sizeof(info->path[0]);
156 if (size <= sizeof(dmar_pci_notify_info_buf)) {
157 info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
158 } else {
159 info = kzalloc(size, GFP_KERNEL);
160 if (!info) {
161 pr_warn("Out of memory when allocating notify_info "
162 "for %s.\n", pci_name(dev));
163 if (dmar_dev_scope_status == 0)
164 dmar_dev_scope_status = -ENOMEM;
165 return NULL;
166 }
167 }
168
169 info->event = event;
170 info->dev = dev;
171 info->seg = pci_domain_nr(dev->bus);
172 info->level = level;
173 if (event == BUS_NOTIFY_ADD_DEVICE) {
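		/*
		 * Walk from the device up to the root bus, filling path[] in
		 * reverse so that path[0] describes the hop closest to the
		 * root and the last entry the device itself.
		 */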
174 for (tmp = dev; tmp; tmp = tmp->bus->self) {
175 level--;
176 info->path[level].bus = tmp->bus->number;
177 info->path[level].device = PCI_SLOT(tmp->devfn);
178 info->path[level].function = PCI_FUNC(tmp->devfn);
179 if (pci_is_root_bus(tmp->bus))
180 info->bus = tmp->bus->number;
181 }
182 }
183
184 return info;
185}
186
187static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
188{
189 if ((void *)info != dmar_pci_notify_info_buf)
190 kfree(info);
191}
192
193static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
194 struct acpi_dmar_pci_path *path, int count)
195{
196 int i;
197
198 if (info->bus != bus)
199 goto fallback;
200 if (info->level != count)
201 goto fallback;
202
203 for (i = 0; i < count; i++) {
204 if (path[i].device != info->path[i].device ||
205 path[i].function != info->path[i].function)
206 goto fallback;
207 }
208
209 return true;
210
211fallback:
212
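	/*
	 * Work around firmware (typically broken RMRR entries) that records
	 * only the device's immediate bus/device/function instead of the
	 * full path from the host bridge: accept a single-entry path if it
	 * matches the last hop of the device's real path.
	 */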
213 if (count != 1)
214 return false;
215
216 i = info->level - 1;
217 if (bus == info->path[i].bus &&
218 path[0].device == info->path[i].device &&
219 path[0].function == info->path[i].function) {
220 pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
221 bus, path[0].device, path[0].function);
222 return true;
223 }
224
225 return false;
226}
227
228/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
229int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
230 void *start, void *end, u16 segment,
231 struct dmar_dev_scope *devices,
232 int devices_cnt)
233{
234 int i, level;
235 struct device *tmp, *dev = &info->dev->dev;
236 struct acpi_dmar_device_scope *scope;
237 struct acpi_dmar_pci_path *path;
238
239 if (segment != info->seg)
240 return 0;
241
242 for (; start < end; start += scope->length) {
243 scope = start;
244 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
245 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
246 continue;
247
248 path = (struct acpi_dmar_pci_path *)(scope + 1);
249 level = (scope->length - sizeof(*scope)) / sizeof(*path);
250 if (!dmar_match_pci_path(info, scope->bus, path, level))
251 continue;
252
253 /*
254 * We expect devices with endpoint scope to have normal PCI
255 * headers, and devices with bridge scope to have bridge PCI
256 * headers. However PCI NTB devices may be listed in the
257 * DMAR table with bridge scope, even though they have a
258 * normal PCI header. NTB devices are identified by class
259 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
260 * for this special case.
261 */
262 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
263 info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
264 (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
265 (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
266 info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
267 pr_warn("Device scope type does not match for %s\n",
268 pci_name(info->dev));
269 return -EINVAL;
270 }
271
272 for_each_dev_scope(devices, devices_cnt, i, tmp)
273 if (tmp == NULL) {
274 devices[i].bus = info->dev->bus->number;
275 devices[i].devfn = info->dev->devfn;
276 rcu_assign_pointer(devices[i].dev,
277 get_device(dev));
278 return 1;
279 }
280 BUG_ON(i >= devices_cnt);
281 }
282
283 return 0;
284}
285
286int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
287 struct dmar_dev_scope *devices, int count)
288{
289 int index;
290 struct device *tmp;
291
292 if (info->seg != segment)
293 return 0;
294
295 for_each_active_dev_scope(devices, count, index, tmp)
296 if (tmp == &info->dev->dev) {
297 RCU_INIT_POINTER(devices[index].dev, NULL);
298 synchronize_rcu();
299 put_device(tmp);
300 return 1;
301 }
302
303 return 0;
304}
305
306static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
307{
308 int ret = 0;
309 struct dmar_drhd_unit *dmaru;
310 struct acpi_dmar_hardware_unit *drhd;
311
312 for_each_drhd_unit(dmaru) {
313 if (dmaru->include_all)
314 continue;
315
316 drhd = container_of(dmaru->hdr,
317 struct acpi_dmar_hardware_unit, header);
318 ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
319 ((void *)drhd) + drhd->header.length,
320 dmaru->segment,
321 dmaru->devices, dmaru->devices_cnt);
322 if (ret)
323 break;
324 }
325 if (ret >= 0)
326 ret = dmar_iommu_notify_scope_dev(info);
327 if (ret < 0 && dmar_dev_scope_status == 0)
328 dmar_dev_scope_status = ret;
329
330 return ret;
331}
332
333static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
334{
335 struct dmar_drhd_unit *dmaru;
336
337 for_each_drhd_unit(dmaru)
338 if (dmar_remove_dev_scope(info, dmaru->segment,
339 dmaru->devices, dmaru->devices_cnt))
340 break;
341 dmar_iommu_notify_scope_dev(info);
342}
343
344static int dmar_pci_bus_notifier(struct notifier_block *nb,
345 unsigned long action, void *data)
346{
347 struct pci_dev *pdev = to_pci_dev(data);
348 struct dmar_pci_notify_info *info;
349
350 /* Only care about add/remove events for physical functions.
351 * For VFs we actually do the lookup based on the corresponding
352 * PF in device_to_iommu() anyway. */
353 if (pdev->is_virtfn)
354 return NOTIFY_DONE;
355 if (action != BUS_NOTIFY_ADD_DEVICE &&
356 action != BUS_NOTIFY_REMOVED_DEVICE)
357 return NOTIFY_DONE;
358
359 info = dmar_alloc_pci_notify_info(pdev, action);
360 if (!info)
361 return NOTIFY_DONE;
362
363 down_write(&dmar_global_lock);
364 if (action == BUS_NOTIFY_ADD_DEVICE)
365 dmar_pci_bus_add_dev(info);
366 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
367 dmar_pci_bus_del_dev(info);
368 up_write(&dmar_global_lock);
369
370 dmar_free_pci_notify_info(info);
371
372 return NOTIFY_OK;
373}
374
375static struct notifier_block dmar_pci_bus_nb = {
376 .notifier_call = dmar_pci_bus_notifier,
377 .priority = INT_MIN,
378};
379
380static struct dmar_drhd_unit *
381dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
382{
383 struct dmar_drhd_unit *dmaru;
384
385 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
386 if (dmaru->segment == drhd->segment &&
387 dmaru->reg_base_addr == drhd->address)
388 return dmaru;
389
390 return NULL;
391}
392
393/**
394 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
395 * structure which uniquely represents one DMA remapping hardware unit
396 * present in the platform
397 */
398static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
399{
400 struct acpi_dmar_hardware_unit *drhd;
401 struct dmar_drhd_unit *dmaru;
402 int ret;
403
404 drhd = (struct acpi_dmar_hardware_unit *)header;
405 dmaru = dmar_find_dmaru(drhd);
406 if (dmaru)
407 goto out;
408
409 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
410 if (!dmaru)
411 return -ENOMEM;
412
413 /*
414 * If header is allocated from slab by ACPI _DSM method, we need to
415 * copy the content because the memory buffer will be freed on return.
416 */
417 dmaru->hdr = (void *)(dmaru + 1);
418 memcpy(dmaru->hdr, header, header->length);
419 dmaru->reg_base_addr = drhd->address;
420 dmaru->segment = drhd->segment;
421 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
422 dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
423 ((void *)drhd) + drhd->header.length,
424 &dmaru->devices_cnt);
425 if (dmaru->devices_cnt && dmaru->devices == NULL) {
426 kfree(dmaru);
427 return -ENOMEM;
428 }
429
430 ret = alloc_iommu(dmaru);
431 if (ret) {
432 dmar_free_dev_scope(&dmaru->devices,
433 &dmaru->devices_cnt);
434 kfree(dmaru);
435 return ret;
436 }
437 dmar_register_drhd_unit(dmaru);
438
439out:
440 if (arg)
441 (*(int *)arg)++;
442
443 return 0;
444}
445
446static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
447{
448 if (dmaru->devices && dmaru->devices_cnt)
449 dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
450 if (dmaru->iommu)
451 free_iommu(dmaru->iommu);
452 kfree(dmaru);
453}
454
455static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
456 void *arg)
457{
458 struct acpi_dmar_andd *andd = (void *)header;
459
460 /* Check for NUL termination within the designated length */
461 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
462 pr_warn(FW_BUG
463 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
464 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
465 dmi_get_system_info(DMI_BIOS_VENDOR),
466 dmi_get_system_info(DMI_BIOS_VERSION),
467 dmi_get_system_info(DMI_PRODUCT_VERSION));
468 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
469 return -EINVAL;
470 }
471 pr_info("ANDD device: %x name: %s\n", andd->device_number,
472 andd->device_name);
473
474 return 0;
475}
476
477#ifdef CONFIG_ACPI_NUMA
478static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
479{
480 struct acpi_dmar_rhsa *rhsa;
481 struct dmar_drhd_unit *drhd;
482
483 rhsa = (struct acpi_dmar_rhsa *)header;
484 for_each_drhd_unit(drhd) {
485 if (drhd->reg_base_addr == rhsa->base_address) {
486 int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
487
488 if (!node_online(node))
489 node = -1;
490 drhd->iommu->node = node;
491 return 0;
492 }
493 }
494 pr_warn(FW_BUG
495 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
496 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
497 rhsa->base_address,
498 dmi_get_system_info(DMI_BIOS_VENDOR),
499 dmi_get_system_info(DMI_BIOS_VERSION),
500 dmi_get_system_info(DMI_PRODUCT_VERSION));
501 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
502
503 return 0;
504}
505#else
506#define dmar_parse_one_rhsa dmar_res_noop
507#endif
508
509static void
510dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
511{
512 struct acpi_dmar_hardware_unit *drhd;
513 struct acpi_dmar_reserved_memory *rmrr;
514 struct acpi_dmar_atsr *atsr;
515 struct acpi_dmar_rhsa *rhsa;
516
517 switch (header->type) {
518 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
519 drhd = container_of(header, struct acpi_dmar_hardware_unit,
520 header);
521 pr_info("DRHD base: %#016Lx flags: %#x\n",
522 (unsigned long long)drhd->address, drhd->flags);
523 break;
524 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
525 rmrr = container_of(header, struct acpi_dmar_reserved_memory,
526 header);
527 pr_info("RMRR base: %#016Lx end: %#016Lx\n",
528 (unsigned long long)rmrr->base_address,
529 (unsigned long long)rmrr->end_address);
530 break;
531 case ACPI_DMAR_TYPE_ROOT_ATS:
532 atsr = container_of(header, struct acpi_dmar_atsr, header);
533 pr_info("ATSR flags: %#x\n", atsr->flags);
534 break;
535 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
536 rhsa = container_of(header, struct acpi_dmar_rhsa, header);
537 pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
538 (unsigned long long)rhsa->base_address,
539 rhsa->proximity_domain);
540 break;
541 case ACPI_DMAR_TYPE_NAMESPACE:
542 /* We don't print this here because we need to sanity-check
543 it first. So print it in dmar_parse_one_andd() instead. */
544 break;
545 }
546}
547
548/**
549 * dmar_table_detect - checks to see if the platform supports DMAR devices
550 */
551static int __init dmar_table_detect(void)
552{
553 acpi_status status = AE_OK;
554
555 /* if we can find the DMAR table, then there are DMAR devices */
556 status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
557
558 if (ACPI_SUCCESS(status) && !dmar_tbl) {
559 pr_warn("Unable to map DMAR\n");
560 status = AE_NOT_FOUND;
561 }
562
563 return ACPI_SUCCESS(status) ? 0 : -ENOENT;
564}
565
566static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
567 size_t len, struct dmar_res_callback *cb)
568{
569 struct acpi_dmar_header *iter, *next;
570 struct acpi_dmar_header *end = ((void *)start) + len;
571
572 for (iter = start; iter < end; iter = next) {
573 next = (void *)iter + iter->length;
574 if (iter->length == 0) {
575 /* Avoid looping forever on bad ACPI tables */
576 pr_debug(FW_BUG "Invalid 0-length structure\n");
577 break;
578 } else if (next > end) {
579 /* Avoid passing table end */
580 pr_warn(FW_BUG "Record passes table end\n");
581 return -EINVAL;
582 }
583
584 if (cb->print_entry)
585 dmar_table_print_dmar_entry(iter);
586
587 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
588 /* continue for forward compatibility */
589 pr_debug("Unknown DMAR structure type %d\n",
590 iter->type);
591 } else if (cb->cb[iter->type]) {
592 int ret;
593
594 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
595 if (ret)
596 return ret;
597 } else if (!cb->ignore_unhandled) {
598 pr_warn("No handler for DMAR structure type %d\n",
599 iter->type);
600 return -EINVAL;
601 }
602 }
603
604 return 0;
605}
606
607static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
608 struct dmar_res_callback *cb)
609{
610 return dmar_walk_remapping_entries((void *)(dmar + 1),
611 dmar->header.length - sizeof(*dmar), cb);
612}
613
614/**
615 * parse_dmar_table - parses the DMA remapping reporting (DMAR) table
616 */
617static int __init
618parse_dmar_table(void)
619{
620 struct acpi_table_dmar *dmar;
621 int drhd_count = 0;
622 int ret;
623 struct dmar_res_callback cb = {
624 .print_entry = true,
625 .ignore_unhandled = true,
626 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
627 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
628 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
629 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
630 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
631 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
632 };
633
634 /*
635 * Do it again; the earlier dmar_tbl mapping could have been done
636 * with a fixed map.
637 */
638 dmar_table_detect();
639
640 /*
641 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
642 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
643 */
644 dmar_tbl = tboot_get_dmar_table(dmar_tbl);
645
646 dmar = (struct acpi_table_dmar *)dmar_tbl;
647 if (!dmar)
648 return -ENODEV;
649
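	/*
	 * 'width' holds the host address width minus one; reject values too
	 * small to cover even a single page of DMA address space.
	 */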
650 if (dmar->width < PAGE_SHIFT - 1) {
651 pr_warn("Invalid DMAR haw\n");
652 return -EINVAL;
653 }
654
655 pr_info("Host address width %d\n", dmar->width + 1);
656 ret = dmar_walk_dmar_table(dmar, &cb);
657 if (ret == 0 && drhd_count == 0)
658 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
659
660 return ret;
661}
662
663static int dmar_pci_device_match(struct dmar_dev_scope devices[],
664 int cnt, struct pci_dev *dev)
665{
666 int index;
667 struct device *tmp;
668
669 while (dev) {
670 for_each_active_dev_scope(devices, cnt, index, tmp)
671 if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
672 return 1;
673
674 /* Check our parent */
675 dev = dev->bus->self;
676 }
677
678 return 0;
679}
680
681struct dmar_drhd_unit *
682dmar_find_matched_drhd_unit(struct pci_dev *dev)
683{
684 struct dmar_drhd_unit *dmaru;
685 struct acpi_dmar_hardware_unit *drhd;
686
687 dev = pci_physfn(dev);
688
689 rcu_read_lock();
690 for_each_drhd_unit(dmaru) {
691 drhd = container_of(dmaru->hdr,
692 struct acpi_dmar_hardware_unit,
693 header);
694
695 if (dmaru->include_all &&
696 drhd->segment == pci_domain_nr(dev->bus))
697 goto out;
698
699 if (dmar_pci_device_match(dmaru->devices,
700 dmaru->devices_cnt, dev))
701 goto out;
702 }
703 dmaru = NULL;
704out:
705 rcu_read_unlock();
706
707 return dmaru;
708}
709
710static void __init dmar_acpi_insert_dev_scope(u8 device_number,
711 struct acpi_device *adev)
712{
713 struct dmar_drhd_unit *dmaru;
714 struct acpi_dmar_hardware_unit *drhd;
715 struct acpi_dmar_device_scope *scope;
716 struct device *tmp;
717 int i;
718 struct acpi_dmar_pci_path *path;
719
720 for_each_drhd_unit(dmaru) {
721 drhd = container_of(dmaru->hdr,
722 struct acpi_dmar_hardware_unit,
723 header);
724
725 for (scope = (void *)(drhd + 1);
726 (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
727 scope = ((void *)scope) + scope->length) {
728 if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
729 continue;
730 if (scope->enumeration_id != device_number)
731 continue;
732
733 path = (void *)(scope + 1);
734 pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
735 dev_name(&adev->dev), dmaru->reg_base_addr,
736 scope->bus, path->device, path->function);
737 for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
738 if (tmp == NULL) {
739 dmaru->devices[i].bus = scope->bus;
740 dmaru->devices[i].devfn = PCI_DEVFN(path->device,
741 path->function);
742 rcu_assign_pointer(dmaru->devices[i].dev,
743 get_device(&adev->dev));
744 return;
745 }
746 BUG_ON(i >= dmaru->devices_cnt);
747 }
748 }
749 pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
750 device_number, dev_name(&adev->dev));
751}
752
753static int __init dmar_acpi_dev_scope_init(void)
754{
755 struct acpi_dmar_andd *andd;
756
757 if (dmar_tbl == NULL)
758 return -ENODEV;
759
760 for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
761 ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
762 andd = ((void *)andd) + andd->header.length) {
763 if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
764 acpi_handle h;
765 struct acpi_device *adev;
766
767 if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
768 andd->device_name,
769 &h))) {
770 pr_err("Failed to find handle for ACPI object %s\n",
771 andd->device_name);
772 continue;
773 }
774 if (acpi_bus_get_device(h, &adev)) {
775 pr_err("Failed to get device for ACPI object %s\n",
776 andd->device_name);
777 continue;
778 }
779 dmar_acpi_insert_dev_scope(andd->device_number, adev);
780 }
781 }
782 return 0;
783}
784
785int __init dmar_dev_scope_init(void)
786{
787 struct pci_dev *dev = NULL;
788 struct dmar_pci_notify_info *info;
789
790 if (dmar_dev_scope_status != 1)
791 return dmar_dev_scope_status;
792
793 if (list_empty(&dmar_drhd_units)) {
794 dmar_dev_scope_status = -ENODEV;
795 } else {
796 dmar_dev_scope_status = 0;
797
798 dmar_acpi_dev_scope_init();
799
800 for_each_pci_dev(dev) {
801 if (dev->is_virtfn)
802 continue;
803
804 info = dmar_alloc_pci_notify_info(dev,
805 BUS_NOTIFY_ADD_DEVICE);
806 if (!info) {
807 return dmar_dev_scope_status;
808 } else {
809 dmar_pci_bus_add_dev(info);
810 dmar_free_pci_notify_info(info);
811 }
812 }
813
814 bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
815 }
816
817 return dmar_dev_scope_status;
818}
819
820
821int __init dmar_table_init(void)
822{
823 static int dmar_table_initialized;
824 int ret;
825
826 if (dmar_table_initialized == 0) {
827 ret = parse_dmar_table();
828 if (ret < 0) {
829 if (ret != -ENODEV)
830 pr_info("Parse DMAR table failure.\n");
831 } else if (list_empty(&dmar_drhd_units)) {
832 pr_info("No DMAR devices found\n");
833 ret = -ENODEV;
834 }
835
836 if (ret < 0)
837 dmar_table_initialized = ret;
838 else
839 dmar_table_initialized = 1;
840 }
841
842 return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
843}
844
845static void warn_invalid_dmar(u64 addr, const char *message)
846{
847 pr_warn_once(FW_BUG
848 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
849 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
850 addr, message,
851 dmi_get_system_info(DMI_BIOS_VENDOR),
852 dmi_get_system_info(DMI_BIOS_VERSION),
853 dmi_get_system_info(DMI_PRODUCT_VERSION));
854 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
855}
856
857static int __ref
858dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
859{
860 struct acpi_dmar_hardware_unit *drhd;
861 void __iomem *addr;
862 u64 cap, ecap;
863
864 drhd = (void *)entry;
865 if (!drhd->address) {
866 warn_invalid_dmar(0, "");
867 return -EINVAL;
868 }
869
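	/*
	 * arg is non-NULL on the hotplug path, where the regular ioremap()
	 * is available; the early boot detection path passes NULL and must
	 * use the early fixmap mapping instead.
	 */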
870 if (arg)
871 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
872 else
873 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
874 if (!addr) {
875 pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
876 return -EINVAL;
877 }
878
879 cap = dmar_readq(addr + DMAR_CAP_REG);
880 ecap = dmar_readq(addr + DMAR_ECAP_REG);
881
882 if (arg)
883 iounmap(addr);
884 else
885 early_iounmap(addr, VTD_PAGE_SIZE);
886
887 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
888 warn_invalid_dmar(drhd->address, " returns all ones");
889 return -EINVAL;
890 }
891
892 return 0;
893}
894
895int __init detect_intel_iommu(void)
896{
897 int ret;
898 struct dmar_res_callback validate_drhd_cb = {
899 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
900 .ignore_unhandled = true,
901 };
902
903 down_write(&dmar_global_lock);
904 ret = dmar_table_detect();
905 if (!ret)
906 ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
907 &validate_drhd_cb);
908 if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
909 iommu_detected = 1;
910 /* Make sure ACS will be enabled */
911 pci_request_acs();
912 }
913
914#ifdef CONFIG_X86
915 if (!ret)
916 x86_init.iommu.iommu_init = intel_iommu_init;
917#endif
918
919 if (dmar_tbl) {
920 acpi_put_table(dmar_tbl);
921 dmar_tbl = NULL;
922 }
923 up_write(&dmar_global_lock);
924
925 return ret ? ret : 1;
926}
927
928static void unmap_iommu(struct intel_iommu *iommu)
929{
930 iounmap(iommu->reg);
931 release_mem_region(iommu->reg_phys, iommu->reg_size);
932}
933
934/**
935 * map_iommu: map the iommu's registers
936 * @iommu: the iommu to map
937 * @phys_addr: the physical address of the base register
938 *
939 * Memory map the iommu's registers. Start with a single page, and
940 * possibly expand if that turns out to be insufficient.
941 */
942static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
943{
944 int map_size, err = 0;
945
946 iommu->reg_phys = phys_addr;
947 iommu->reg_size = VTD_PAGE_SIZE;
948
949 if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
950 pr_err("Can't reserve memory\n");
951 err = -EBUSY;
952 goto out;
953 }
954
955 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
956 if (!iommu->reg) {
957 pr_err("Can't map the region\n");
958 err = -ENOMEM;
959 goto release;
960 }
961
962 iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
963 iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
964
965 if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
966 err = -EINVAL;
967 warn_invalid_dmar(phys_addr, " returns all ones");
968 goto unmap;
969 }
970
971 /* the registers might be more than one page */
972 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
973 cap_max_fault_reg_offset(iommu->cap));
974 map_size = VTD_PAGE_ALIGN(map_size);
975 if (map_size > iommu->reg_size) {
976 iounmap(iommu->reg);
977 release_mem_region(iommu->reg_phys, iommu->reg_size);
978 iommu->reg_size = map_size;
979 if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
980 iommu->name)) {
981 pr_err("Can't reserve memory\n");
982 err = -EBUSY;
983 goto out;
984 }
985 iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
986 if (!iommu->reg) {
987 pr_err("Can't map the region\n");
988 err = -ENOMEM;
989 goto release;
990 }
991 }
992 err = 0;
993 goto out;
994
995unmap:
996 iounmap(iommu->reg);
997release:
998 release_mem_region(iommu->reg_phys, iommu->reg_size);
999out:
1000 return err;
1001}
1002
1003static int dmar_alloc_seq_id(struct intel_iommu *iommu)
1004{
1005 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
1006 DMAR_UNITS_SUPPORTED);
1007 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
1008 iommu->seq_id = -1;
1009 } else {
1010 set_bit(iommu->seq_id, dmar_seq_ids);
1011 sprintf(iommu->name, "dmar%d", iommu->seq_id);
1012 }
1013
1014 return iommu->seq_id;
1015}
1016
1017static void dmar_free_seq_id(struct intel_iommu *iommu)
1018{
1019 if (iommu->seq_id >= 0) {
1020 clear_bit(iommu->seq_id, dmar_seq_ids);
1021 iommu->seq_id = -1;
1022 }
1023}
1024
1025static int alloc_iommu(struct dmar_drhd_unit *drhd)
1026{
1027 struct intel_iommu *iommu;
1028 u32 ver, sts;
1029 int agaw = 0;
1030 int msagaw = 0;
1031 int err;
1032
1033 if (!drhd->reg_base_addr) {
1034 warn_invalid_dmar(0, "");
1035 return -EINVAL;
1036 }
1037
1038 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1039 if (!iommu)
1040 return -ENOMEM;
1041
1042 if (dmar_alloc_seq_id(iommu) < 0) {
1043 pr_err("Failed to allocate seq_id\n");
1044 err = -ENOSPC;
1045 goto error;
1046 }
1047
1048 err = map_iommu(iommu, drhd->reg_base_addr);
1049 if (err) {
1050 pr_err("Failed to map %s\n", iommu->name);
1051 goto error_free_seq_id;
1052 }
1053
1054 err = -EINVAL;
1055 agaw = iommu_calculate_agaw(iommu);
1056 if (agaw < 0) {
1057 pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1058 iommu->seq_id);
1059 goto err_unmap;
1060 }
1061 msagaw = iommu_calculate_max_sagaw(iommu);
1062 if (msagaw < 0) {
1063 pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1064 iommu->seq_id);
1065 goto err_unmap;
1066 }
1067 iommu->agaw = agaw;
1068 iommu->msagaw = msagaw;
1069 iommu->segment = drhd->segment;
1070
1071 iommu->node = -1;
1072
1073 ver = readl(iommu->reg + DMAR_VER_REG);
1074 pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1075 iommu->name,
1076 (unsigned long long)drhd->reg_base_addr,
1077 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1078 (unsigned long long)iommu->cap,
1079 (unsigned long long)iommu->ecap);
1080
1081 /* Reflect status in gcmd */
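	/*
	 * Translation, interrupt remapping or queued invalidation may
	 * already be enabled (for example left on by firmware or by a
	 * previous kernel across kexec); mirror that state into the gcmd
	 * shadow so that later writes to the global command register
	 * preserve it.
	 */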
1082 sts = readl(iommu->reg + DMAR_GSTS_REG);
1083 if (sts & DMA_GSTS_IRES)
1084 iommu->gcmd |= DMA_GCMD_IRE;
1085 if (sts & DMA_GSTS_TES)
1086 iommu->gcmd |= DMA_GCMD_TE;
1087 if (sts & DMA_GSTS_QIES)
1088 iommu->gcmd |= DMA_GCMD_QIE;
1089
1090 raw_spin_lock_init(&iommu->register_lock);
1091
1092 if (intel_iommu_enabled) {
1093 err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1094 intel_iommu_groups,
1095 "%s", iommu->name);
1096 if (err)
1097 goto err_unmap;
1098
1099 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
1100
1101 err = iommu_device_register(&iommu->iommu);
1102 if (err)
1103 goto err_unmap;
1104 }
1105
1106 drhd->iommu = iommu;
1107
1108 return 0;
1109
1110err_unmap:
1111 unmap_iommu(iommu);
1112error_free_seq_id:
1113 dmar_free_seq_id(iommu);
1114error:
1115 kfree(iommu);
1116 return err;
1117}
1118
1119static void free_iommu(struct intel_iommu *iommu)
1120{
1121 if (intel_iommu_enabled) {
1122 iommu_device_unregister(&iommu->iommu);
1123 iommu_device_sysfs_remove(&iommu->iommu);
1124 }
1125
1126 if (iommu->irq) {
1127 if (iommu->pr_irq) {
1128 free_irq(iommu->pr_irq, iommu);
1129 dmar_free_hwirq(iommu->pr_irq);
1130 iommu->pr_irq = 0;
1131 }
1132 free_irq(iommu->irq, iommu);
1133 dmar_free_hwirq(iommu->irq);
1134 iommu->irq = 0;
1135 }
1136
1137 if (iommu->qi) {
1138 free_page((unsigned long)iommu->qi->desc);
1139 kfree(iommu->qi->desc_status);
1140 kfree(iommu->qi);
1141 }
1142
1143 if (iommu->reg)
1144 unmap_iommu(iommu);
1145
1146 dmar_free_seq_id(iommu);
1147 kfree(iommu);
1148}
1149
1150/*
1151 * Reclaim all the submitted descriptors which have completed their work.
1152 */
1153static inline void reclaim_free_desc(struct q_inval *qi)
1154{
1155 while (qi->desc_status[qi->free_tail] == QI_DONE ||
1156 qi->desc_status[qi->free_tail] == QI_ABORT) {
1157 qi->desc_status[qi->free_tail] = QI_FREE;
1158 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1159 qi->free_cnt++;
1160 }
1161}
1162
1163static int qi_check_fault(struct intel_iommu *iommu, int index)
1164{
1165 u32 fault;
1166 int head, tail;
1167 struct q_inval *qi = iommu->qi;
1168 int wait_index = (index + 1) % QI_LENGTH;
1169
1170 if (qi->desc_status[wait_index] == QI_ABORT)
1171 return -EAGAIN;
1172
1173 fault = readl(iommu->reg + DMAR_FSTS_REG);
1174
1175 /*
1176 * If IQE happens, the head points to the descriptor associated
1177 * with the error. No new descriptors are fetched until the IQE
1178 * is cleared.
1179 */
1180 if (fault & DMA_FSTS_IQE) {
1181 head = readl(iommu->reg + DMAR_IQH_REG);
1182 if ((head >> DMAR_IQ_SHIFT) == index) {
1183 pr_err("VT-d detected invalid descriptor: "
1184 "low=%llx, high=%llx\n",
1185 (unsigned long long)qi->desc[index].low,
1186 (unsigned long long)qi->desc[index].high);
1187 memcpy(&qi->desc[index], &qi->desc[wait_index],
1188 sizeof(struct qi_desc));
1189 writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1190 return -EINVAL;
1191 }
1192 }
1193
1194 /*
1195 * If ITE happens, all pending wait_desc commands are aborted.
1196 * No new descriptors are fetched until the ITE is cleared.
1197 */
1198 if (fault & DMA_FSTS_ITE) {
1199 head = readl(iommu->reg + DMAR_IQH_REG);
1200 head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1201 head |= 1;
1202 tail = readl(iommu->reg + DMAR_IQT_REG);
1203 tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1204
1205 writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1206
1207 do {
1208 if (qi->desc_status[head] == QI_IN_USE)
1209 qi->desc_status[head] = QI_ABORT;
1210 head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1211 } while (head != tail);
1212
1213 if (qi->desc_status[wait_index] == QI_ABORT)
1214 return -EAGAIN;
1215 }
1216
1217 if (fault & DMA_FSTS_ICE)
1218 writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1219
1220 return 0;
1221}
1222
1223/*
1224 * Submit the queued invalidation descriptor to the remapping
1225 * hardware unit and wait for its completion.
1226 */
1227int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
1228{
1229 int rc;
1230 struct q_inval *qi = iommu->qi;
1231 struct qi_desc *hw, wait_desc;
1232 int wait_index, index;
1233 unsigned long flags;
1234
1235 if (!qi)
1236 return 0;
1237
1238 hw = qi->desc;
1239
1240restart:
1241 rc = 0;
1242
1243 raw_spin_lock_irqsave(&qi->q_lock, flags);
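	/*
	 * Each submission consumes two slots: the descriptor itself plus a
	 * wait descriptor used to detect completion. Busy-wait until at
	 * least that many (plus one spare) slots are free.
	 */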
1244 while (qi->free_cnt < 3) {
1245 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1246 cpu_relax();
1247 raw_spin_lock_irqsave(&qi->q_lock, flags);
1248 }
1249
1250 index = qi->free_head;
1251 wait_index = (index + 1) % QI_LENGTH;
1252
1253 qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1254
1255 hw[index] = *desc;
1256
1257 wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1258 QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1259 wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1260
1261 hw[wait_index] = wait_desc;
1262
1263 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1264 qi->free_cnt -= 2;
1265
1266 /*
1267 * update the HW tail register indicating the presence of
1268 * new descriptors.
1269 */
1270 writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
1271
1272 while (qi->desc_status[wait_index] != QI_DONE) {
1273 /*
1274 * We will leave the interrupts disabled, to prevent interrupt
1275 * context from queueing another cmd while a cmd is already submitted
1276 * and waiting for completion on this cpu. This is to avoid
1277 * a deadlock where the interrupt context can wait indefinitely
1278 * for free slots in the queue.
1279 */
1280 rc = qi_check_fault(iommu, index);
1281 if (rc)
1282 break;
1283
1284 raw_spin_unlock(&qi->q_lock);
1285 cpu_relax();
1286 raw_spin_lock(&qi->q_lock);
1287 }
1288
1289 qi->desc_status[index] = QI_DONE;
1290
1291 reclaim_free_desc(qi);
1292 raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1293
1294 if (rc == -EAGAIN)
1295 goto restart;
1296
1297 return rc;
1298}
1299
1300/*
1301 * Flush the global interrupt entry cache.
1302 */
1303void qi_global_iec(struct intel_iommu *iommu)
1304{
1305 struct qi_desc desc;
1306
1307 desc.low = QI_IEC_TYPE;
1308 desc.high = 0;
1309
1310 /* should never fail */
1311 qi_submit_sync(&desc, iommu);
1312}
1313
1314void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1315 u64 type)
1316{
1317 struct qi_desc desc;
1318
1319 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1320 | QI_CC_GRAN(type) | QI_CC_TYPE;
1321 desc.high = 0;
1322
1323 qi_submit_sync(&desc, iommu);
1324}
1325
1326void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1327 unsigned int size_order, u64 type)
1328{
1329 u8 dw = 0, dr = 0;
1330
1331 struct qi_desc desc;
1332 int ih = 0;
1333
1334 if (cap_write_drain(iommu->cap))
1335 dw = 1;
1336
1337 if (cap_read_drain(iommu->cap))
1338 dr = 1;
1339
1340 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1341 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1342 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1343 | QI_IOTLB_AM(size_order);
1344
1345 qi_submit_sync(&desc, iommu);
1346}
1347
1348void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1349 u16 qdep, u64 addr, unsigned mask)
1350{
1351 struct qi_desc desc;
1352
1353 if (mask) {
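		/*
		 * A multi-page range is encoded in the address itself: all
		 * address bits below the size bit are set, so the lowest
		 * clear bit above bit 11 marks a 2^mask page range (hence
		 * the alignment check below).
		 */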
1354 BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
1355 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1356 desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1357 } else
1358 desc.high = QI_DEV_IOTLB_ADDR(addr);
1359
1360 if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1361 qdep = 0;
1362
1363 desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1364 QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1365
1366 qi_submit_sync(&desc, iommu);
1367}
1368
1369/*
1370 * Disable Queued Invalidation interface.
1371 */
1372void dmar_disable_qi(struct intel_iommu *iommu)
1373{
1374 unsigned long flags;
1375 u32 sts;
1376 cycles_t start_time = get_cycles();
1377
1378 if (!ecap_qis(iommu->ecap))
1379 return;
1380
1381 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1382
1383 sts = readl(iommu->reg + DMAR_GSTS_REG);
1384 if (!(sts & DMA_GSTS_QIES))
1385 goto end;
1386
1387 /*
1388 * Give the HW a chance to complete the pending invalidation requests.
1389 */
1390 while ((readl(iommu->reg + DMAR_IQT_REG) !=
1391 readl(iommu->reg + DMAR_IQH_REG)) &&
1392 (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1393 cpu_relax();
1394
1395 iommu->gcmd &= ~DMA_GCMD_QIE;
1396 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1397
1398 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1399 !(sts & DMA_GSTS_QIES), sts);
1400end:
1401 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1402}
1403
1404/*
1405 * Enable queued invalidation.
1406 */
1407static void __dmar_enable_qi(struct intel_iommu *iommu)
1408{
1409 u32 sts;
1410 unsigned long flags;
1411 struct q_inval *qi = iommu->qi;
1412
1413 qi->free_head = qi->free_tail = 0;
1414 qi->free_cnt = QI_LENGTH;
1415
1416 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1417
1418 /* write zero to the tail reg */
1419 writel(0, iommu->reg + DMAR_IQT_REG);
1420
1421 dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1422
1423 iommu->gcmd |= DMA_GCMD_QIE;
1424 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1425
1426 /* Make sure hardware complete it */
1427 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1428
1429 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1430}
1431
1432/*
1433 * Enable Queued Invalidation interface. This is a must to support
1434 * interrupt-remapping. Also used by DMA-remapping, which replaces
1435 * register-based IOTLB invalidation.
1436 */
1437int dmar_enable_qi(struct intel_iommu *iommu)
1438{
1439 struct q_inval *qi;
1440 struct page *desc_page;
1441
1442 if (!ecap_qis(iommu->ecap))
1443 return -ENOENT;
1444
1445 /*
1446 * queued invalidation is already set up and enabled.
1447 */
1448 if (iommu->qi)
1449 return 0;
1450
1451 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1452 if (!iommu->qi)
1453 return -ENOMEM;
1454
1455 qi = iommu->qi;
1456
1457
1458 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1459 if (!desc_page) {
1460 kfree(qi);
1461 iommu->qi = NULL;
1462 return -ENOMEM;
1463 }
1464
1465 qi->desc = page_address(desc_page);
1466
1467 qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1468 if (!qi->desc_status) {
1469 free_page((unsigned long) qi->desc);
1470 kfree(qi);
1471 iommu->qi = NULL;
1472 return -ENOMEM;
1473 }
1474
1475 raw_spin_lock_init(&qi->q_lock);
1476
1477 __dmar_enable_qi(iommu);
1478
1479 return 0;
1480}
1481
1482/* iommu interrupt handling. Most stuff is MSI-like. */
1483
1484enum faulttype {
1485 DMA_REMAP,
1486 INTR_REMAP,
1487 UNKNOWN,
1488};
1489
1490static const char *dma_remap_fault_reasons[] =
1491{
1492 "Software",
1493 "Present bit in root entry is clear",
1494 "Present bit in context entry is clear",
1495 "Invalid context entry",
1496 "Access beyond MGAW",
1497 "PTE Write access is not set",
1498 "PTE Read access is not set",
1499 "Next page table ptr is invalid",
1500 "Root table address invalid",
1501 "Context table ptr is invalid",
1502 "non-zero reserved fields in RTP",
1503 "non-zero reserved fields in CTP",
1504 "non-zero reserved fields in PTE",
1505 "PCE for translation request specifies blocking",
1506};
1507
1508static const char *irq_remap_fault_reasons[] =
1509{
1510 "Detected reserved fields in the decoded interrupt-remapped request",
1511 "Interrupt index exceeded the interrupt-remapping table size",
1512 "Present field in the IRTE entry is clear",
1513 "Error accessing interrupt-remapping table pointed by IRTA_REG",
1514 "Detected reserved fields in the IRTE entry",
1515 "Blocked a compatibility format interrupt request",
1516 "Blocked an interrupt request due to source-id verification failure",
1517};
1518
1519static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1520{
1521 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1522 ARRAY_SIZE(irq_remap_fault_reasons))) {
1523 *fault_type = INTR_REMAP;
1524 return irq_remap_fault_reasons[fault_reason - 0x20];
1525 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1526 *fault_type = DMA_REMAP;
1527 return dma_remap_fault_reasons[fault_reason];
1528 } else {
1529 *fault_type = UNKNOWN;
1530 return "Unknown";
1531 }
1532}
1533
1534
1535static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1536{
1537 if (iommu->irq == irq)
1538 return DMAR_FECTL_REG;
1539 else if (iommu->pr_irq == irq)
1540 return DMAR_PECTL_REG;
1541 else
1542 BUG();
1543}
1544
1545void dmar_msi_unmask(struct irq_data *data)
1546{
1547 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1548 int reg = dmar_msi_reg(iommu, data->irq);
1549 unsigned long flag;
1550
1551 /* unmask it */
1552 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1553 writel(0, iommu->reg + reg);
1554 /* Read a reg to force flush the post write */
1555 /* Read a reg to force flush the posted write */
1556 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1557}
1558
1559void dmar_msi_mask(struct irq_data *data)
1560{
1561 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1562 int reg = dmar_msi_reg(iommu, data->irq);
1563 unsigned long flag;
1564
1565 /* mask it */
1566 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1567 writel(DMA_FECTL_IM, iommu->reg + reg);
1568 /* Read a reg to force flush the post write */
1569 /* Read a reg to force flush the posted write */
1570 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1571}
1572
1573void dmar_msi_write(int irq, struct msi_msg *msg)
1574{
1575 struct intel_iommu *iommu = irq_get_handler_data(irq);
1576 int reg = dmar_msi_reg(iommu, irq);
1577 unsigned long flag;
1578
1579 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1580 writel(msg->data, iommu->reg + reg + 4);
1581 writel(msg->address_lo, iommu->reg + reg + 8);
1582 writel(msg->address_hi, iommu->reg + reg + 12);
1583 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1584}
1585
1586void dmar_msi_read(int irq, struct msi_msg *msg)
1587{
1588 struct intel_iommu *iommu = irq_get_handler_data(irq);
1589 int reg = dmar_msi_reg(iommu, irq);
1590 unsigned long flag;
1591
1592 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1593 msg->data = readl(iommu->reg + reg + 4);
1594 msg->address_lo = readl(iommu->reg + reg + 8);
1595 msg->address_hi = readl(iommu->reg + reg + 12);
1596 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1597}
1598
1599static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1600 u8 fault_reason, u16 source_id, unsigned long long addr)
1601{
1602 const char *reason;
1603 int fault_type;
1604
1605 reason = dmar_get_fault_reason(fault_reason, &fault_type);
1606
1607 if (fault_type == INTR_REMAP)
1608 pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
1609 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1610 PCI_FUNC(source_id & 0xFF), addr >> 48,
1611 fault_reason, reason);
1612 else
1613 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
1614 type ? "DMA Read" : "DMA Write",
1615 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1616 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1617 return 0;
1618}
1619
1620#define PRIMARY_FAULT_REG_LEN (16)
1621irqreturn_t dmar_fault(int irq, void *dev_id)
1622{
1623 struct intel_iommu *iommu = dev_id;
1624 int reg, fault_index;
1625 u32 fault_status;
1626 unsigned long flag;
1627 bool ratelimited;
1628 static DEFINE_RATELIMIT_STATE(rs,
1629 DEFAULT_RATELIMIT_INTERVAL,
1630 DEFAULT_RATELIMIT_BURST);
1631
1632 /* Disable printing, simply clear the fault when ratelimited */
1633 ratelimited = !__ratelimit(&rs);
1634
1635 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1636 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1637 if (fault_status && !ratelimited)
1638 pr_err("DRHD: handling fault status reg %x\n", fault_status);
1639
1640 /* TBD: ignore advanced fault log currently */
1641 if (!(fault_status & DMA_FSTS_PPF))
1642 goto unlock_exit;
1643
1644 fault_index = dma_fsts_fault_record_index(fault_status);
1645 reg = cap_fault_reg_offset(iommu->cap);
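	/*
	 * Walk the fault recording registers: each record is 16 bytes and
	 * its Fault (F) bit lives in the topmost dword; the record is
	 * cleared by writing that bit back once it has been logged.
	 */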
1646 while (1) {
1647 u8 fault_reason;
1648 u16 source_id;
1649 u64 guest_addr;
1650 int type;
1651 u32 data;
1652
1653 /* highest 32 bits */
1654 data = readl(iommu->reg + reg +
1655 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1656 if (!(data & DMA_FRCD_F))
1657 break;
1658
1659 if (!ratelimited) {
1660 fault_reason = dma_frcd_fault_reason(data);
1661 type = dma_frcd_type(data);
1662
1663 data = readl(iommu->reg + reg +
1664 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1665 source_id = dma_frcd_source_id(data);
1666
1667 guest_addr = dmar_readq(iommu->reg + reg +
1668 fault_index * PRIMARY_FAULT_REG_LEN);
1669 guest_addr = dma_frcd_page_addr(guest_addr);
1670 }
1671
1672 /* clear the fault */
1673 writel(DMA_FRCD_F, iommu->reg + reg +
1674 fault_index * PRIMARY_FAULT_REG_LEN + 12);
1675
1676 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1677
1678 if (!ratelimited)
1679 dmar_fault_do_one(iommu, type, fault_reason,
1680 source_id, guest_addr);
1681
1682 fault_index++;
1683 if (fault_index >= cap_num_fault_regs(iommu->cap))
1684 fault_index = 0;
1685 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1686 }
1687
1688 writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1689
1690unlock_exit:
1691 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1692 return IRQ_HANDLED;
1693}
1694
1695int dmar_set_interrupt(struct intel_iommu *iommu)
1696{
1697 int irq, ret;
1698
1699 /*
1700 * Check if the fault interrupt is already initialized.
1701 */
1702 if (iommu->irq)
1703 return 0;
1704
1705 irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1706 if (irq > 0) {
1707 iommu->irq = irq;
1708 } else {
1709 pr_err("No free IRQ vectors\n");
1710 return -EINVAL;
1711 }
1712
1713 ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1714 if (ret)
1715 pr_err("Can't request irq\n");
1716 return ret;
1717}
1718
1719int __init enable_drhd_fault_handling(void)
1720{
1721 struct dmar_drhd_unit *drhd;
1722 struct intel_iommu *iommu;
1723
1724 /*
1725 * Enable fault control interrupt.
1726 */
1727 for_each_iommu(iommu, drhd) {
1728 u32 fault_status;
1729 int ret = dmar_set_interrupt(iommu);
1730
1731 if (ret) {
1732 pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1733 (unsigned long long)drhd->reg_base_addr, ret);
1734 return -1;
1735 }
1736
1737 /*
1738 * Clear any previous faults.
1739 */
1740 dmar_fault(iommu->irq, iommu);
1741 fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1742 writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1743 }
1744
1745 return 0;
1746}
1747
1748/*
1749 * Re-enable Queued Invalidation interface.
1750 */
1751int dmar_reenable_qi(struct intel_iommu *iommu)
1752{
1753 if (!ecap_qis(iommu->ecap))
1754 return -ENOENT;
1755
1756 if (!iommu->qi)
1757 return -ENOENT;
1758
1759 /*
1760 * First disable queued invalidation.
1761 */
1762 dmar_disable_qi(iommu);
1763 /*
1764 * Then enable queued invalidation again. Since there are no pending
1765 * invalidation requests now, it's safe to re-enable queued
1766 * invalidation.
1767 */
1768 __dmar_enable_qi(iommu);
1769
1770 return 0;
1771}
1772
1773/*
1774 * Check interrupt remapping support in DMAR table description.
1775 */
1776int __init dmar_ir_support(void)
1777{
1778 struct acpi_table_dmar *dmar;
1779 dmar = (struct acpi_table_dmar *)dmar_tbl;
1780 if (!dmar)
1781 return 0;
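	/* Bit 0 of the DMAR table flags is INTR_REMAP */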
1782 return dmar->flags & 0x1;
1783}
1784
1785/* Check whether DMAR units are in use */
1786static inline bool dmar_in_use(void)
1787{
1788 return irq_remapping_enabled || intel_iommu_enabled;
1789}
1790
1791static int __init dmar_free_unused_resources(void)
1792{
1793 struct dmar_drhd_unit *dmaru, *dmaru_n;
1794
1795 if (dmar_in_use())
1796 return 0;
1797
1798 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1799 bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1800
1801 down_write(&dmar_global_lock);
1802 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1803 list_del(&dmaru->list);
1804 dmar_free_drhd(dmaru);
1805 }
1806 up_write(&dmar_global_lock);
1807
1808 return 0;
1809}
1810
1811late_initcall(dmar_free_unused_resources);
1812IOMMU_INIT_POST(detect_intel_iommu);
1813
1814/*
1815 * DMAR Hotplug Support
1816 * For more details, please refer to Intel(R) Virtualization Technology
1817 * for Directed I/O Architecture Specification, Rev 2.2, Section 8.8
1818 * "Remapping Hardware Unit Hot Plug".
1819 */
1820static guid_t dmar_hp_guid =
1821 GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
1822 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
1823
1824/*
1825 * Currently there's only one revision and BIOS will not check the revision id,
1826 * so use 0 for safety.
1827 */
1828#define DMAR_DSM_REV_ID 0
1829#define DMAR_DSM_FUNC_DRHD 1
1830#define DMAR_DSM_FUNC_ATSR 2
1831#define DMAR_DSM_FUNC_RHSA 3
1832
1833static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1834{
1835 return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
1836}
1837
1838static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1839 dmar_res_handler_t handler, void *arg)
1840{
1841 int ret = -ENODEV;
1842 union acpi_object *obj;
1843 struct acpi_dmar_header *start;
1844 struct dmar_res_callback callback;
1845 static int res_type[] = {
1846 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1847 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1848 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1849 };
1850
1851 if (!dmar_detect_dsm(handle, func))
1852 return 0;
1853
1854 obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
1855 func, NULL, ACPI_TYPE_BUFFER);
1856 if (!obj)
1857 return -ENODEV;
1858
1859 memset(&callback, 0, sizeof(callback));
1860 callback.cb[res_type[func]] = handler;
1861 callback.arg[res_type[func]] = arg;
1862 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1863 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1864
1865 ACPI_FREE(obj);
1866
1867 return ret;
1868}
1869
1870static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1871{
1872 int ret;
1873 struct dmar_drhd_unit *dmaru;
1874
1875 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1876 if (!dmaru)
1877 return -ENODEV;
1878
1879 ret = dmar_ir_hotplug(dmaru, true);
1880 if (ret == 0)
1881 ret = dmar_iommu_hotplug(dmaru, true);
1882
1883 return ret;
1884}
1885
1886static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1887{
1888 int i, ret;
1889 struct device *dev;
1890 struct dmar_drhd_unit *dmaru;
1891
1892 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1893 if (!dmaru)
1894 return 0;
1895
1896 /*
1897 * All PCI devices managed by this unit should have been destroyed.
1898 */
1899 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
1900 for_each_active_dev_scope(dmaru->devices,
1901 dmaru->devices_cnt, i, dev)
1902 return -EBUSY;
1903 }
1904
1905 ret = dmar_ir_hotplug(dmaru, false);
1906 if (ret == 0)
1907 ret = dmar_iommu_hotplug(dmaru, false);
1908
1909 return ret;
1910}
1911
1912static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1913{
1914 struct dmar_drhd_unit *dmaru;
1915
1916 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1917 if (dmaru) {
1918 list_del_rcu(&dmaru->list);
1919 synchronize_rcu();
1920 dmar_free_drhd(dmaru);
1921 }
1922
1923 return 0;
1924}
1925
1926static int dmar_hotplug_insert(acpi_handle handle)
1927{
1928 int ret;
1929 int drhd_count = 0;
1930
1931 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1932 &dmar_validate_one_drhd, (void *)1);
1933 if (ret)
1934 goto out;
1935
1936 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1937 &dmar_parse_one_drhd, (void *)&drhd_count);
1938 if (ret == 0 && drhd_count == 0) {
1939 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1940 goto out;
1941 } else if (ret) {
1942 goto release_drhd;
1943 }
1944
1945 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1946 &dmar_parse_one_rhsa, NULL);
1947 if (ret)
1948 goto release_drhd;
1949
1950 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1951 &dmar_parse_one_atsr, NULL);
1952 if (ret)
1953 goto release_atsr;
1954
1955 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1956 &dmar_hp_add_drhd, NULL);
1957 if (!ret)
1958 return 0;
1959
1960 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1961 &dmar_hp_remove_drhd, NULL);
1962release_atsr:
1963 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1964 &dmar_release_one_atsr, NULL);
1965release_drhd:
1966 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1967 &dmar_hp_release_drhd, NULL);
1968out:
1969 return ret;
1970}
1971
1972static int dmar_hotplug_remove(acpi_handle handle)
1973{
1974 int ret;
1975
1976 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1977 &dmar_check_one_atsr, NULL);
1978 if (ret)
1979 return ret;
1980
1981 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1982 &dmar_hp_remove_drhd, NULL);
1983 if (ret == 0) {
1984 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1985 &dmar_release_one_atsr, NULL));
1986 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1987 &dmar_hp_release_drhd, NULL));
1988 } else {
1989 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1990 &dmar_hp_add_drhd, NULL);
1991 }
1992
1993 return ret;
1994}
1995
1996static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
1997 void *context, void **retval)
1998{
1999 acpi_handle *phdl = retval;
2000
2001 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2002 *phdl = handle;
2003 return AE_CTRL_TERMINATE;
2004 }
2005
2006 return AE_OK;
2007}
2008
2009static int dmar_device_hotplug(acpi_handle handle, bool insert)
2010{
2011 int ret;
2012 acpi_handle tmp = NULL;
2013 acpi_status status;
2014
2015 if (!dmar_in_use())
2016 return 0;
2017
2018 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2019 tmp = handle;
2020 } else {
2021 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2022 ACPI_UINT32_MAX,
2023 dmar_get_dsm_handle,
2024 NULL, NULL, &tmp);
2025 if (ACPI_FAILURE(status)) {
2026 pr_warn("Failed to locate _DSM method.\n");
2027 return -ENXIO;
2028 }
2029 }
2030 if (tmp == NULL)
2031 return 0;
2032
2033 down_write(&dmar_global_lock);
2034 if (insert)
2035 ret = dmar_hotplug_insert(tmp);
2036 else
2037 ret = dmar_hotplug_remove(tmp);
2038 up_write(&dmar_global_lock);
2039
2040 return ret;
2041}
2042
2043int dmar_device_add(acpi_handle handle)
2044{
2045 return dmar_device_hotplug(handle, true);
2046}
2047
2048int dmar_device_remove(acpi_handle handle)
2049{
2050 return dmar_device_hotplug(handle, false);
2051}