// SPDX-License-Identifier: GPL-2.0
/**
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "intel-pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
static DEFINE_IDR(pasid_idr);

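/*
 * Allocate a PASID from the system wide name space. The allocation is
 * backed by the idr above and serialized by pasid_lock; the caller's
 * gfp flags are only used to preload the idr, the allocation itself
 * runs under the spinlock with GFP_ATOMIC. Returns the allocated PASID
 * on success or a negative errno from idr_alloc() on failure.
 *
 * Illustrative caller sketch (the svm pointer and error label are
 * hypothetical, not taken from this file):
 *
 *	int pasid = intel_pasid_alloc_id(svm, PASID_MIN,
 *					 intel_pasid_max_id, GFP_KERNEL);
 *	if (pasid < 0)
 *		goto out_err;
 *	...
 *	intel_pasid_free_id(pasid);
 */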
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
{
	int ret, min, max;

	min = max_t(int, start, PASID_MIN);
	max = min_t(int, end, intel_pasid_max_id);

	WARN_ON(in_interrupt());
	idr_preload(gfp);
	spin_lock(&pasid_lock);
	ret = idr_alloc(&pasid_idr, ptr, min, max, GFP_ATOMIC);
	spin_unlock(&pasid_lock);
	idr_preload_end();

	return ret;
}

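/*
 * Free a PASID previously allocated with intel_pasid_alloc_id(); the
 * entry is removed from the system wide idr under pasid_lock.
 */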
void intel_pasid_free_id(int pasid)
{
	spin_lock(&pasid_lock);
	idr_remove(&pasid_idr, pasid);
	spin_unlock(&pasid_lock);
}

void *intel_pasid_lookup_id(int pasid)
{
	void *p;

	spin_lock(&pasid_lock);
	p = idr_find(&pasid_idr, pasid);
	spin_unlock(&pasid_lock);

	return p;
}

/*
 * Per device pasid table management:
 */
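/*
 * A pasid table can be shared by all devices that alias to the same
 * requester ID (see intel_pasid_alloc_table() below). Attaching links
 * the device_domain_info into the table's device list, so the table is
 * freed only after the last user has detached.
 */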
static inline void
device_attach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = pasid_table;
	list_add(&info->table, &pasid_table->dev);
}

static inline void
device_detach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = NULL;
	list_del(&info->table);
}

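/*
 * Helper context and callbacks for finding the pasid table of a DMA
 * alias. get_alias_pasid_table() is invoked by pci_for_each_dma_alias();
 * for each alias it records segment/bus/devfn and walks the global
 * device domain list via for_each_device_domain(), which stops as soon
 * as search_pasid_table() finds a matching device that already owns a
 * pasid table.
 */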
struct pasid_table_opaque {
	struct pasid_table	**pasid_table;
	int			segment;
	int			bus;
	int			devfn;
};

static int search_pasid_table(struct device_domain_info *info, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	if (info->iommu->segment == data->segment &&
	    info->bus == data->bus &&
	    info->devfn == data->devfn &&
	    info->pasid_table) {
		*data->pasid_table = info->pasid_table;
		return 1;
	}

	return 0;
}

static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	data->segment = pci_domain_nr(pdev->bus);
	data->bus = PCI_BUS_NUM(alias);
	data->devfn = alias & 0xff;

	return for_each_device_domain(&search_pasid_table, data);
}

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
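/*
 * If a DMA alias of @dev already owns a pasid table, that table is
 * reused; otherwise a new one is allocated. The number of entries is
 * bounded by both pci_max_pasids() and intel_pasid_max_id, and the
 * backing pages are allocated on the IOMMU's NUMA node.
 */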
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_table_opaque data;
	struct page *pages;
	size_t size, count;
	int ret, order;

	info = dev->archdata.iommu;
	if (WARN_ON(!info || !dev_is_pci(dev) ||
		    !info->pasid_supported || info->pasid_table))
		return -EINVAL;

	/* DMA alias device already has a pasid table, use it: */
	data.pasid_table = &pasid_table;
	ret = pci_for_each_dma_alias(to_pci_dev(dev),
				     &get_alias_pasid_table, &data);
	if (ret)
		goto attach_out;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_ATOMIC);
	if (!pasid_table)
		return -ENOMEM;
	INIT_LIST_HEAD(&pasid_table->dev);

	size = sizeof(struct pasid_entry);
	count = min_t(int, pci_max_pasids(to_pci_dev(dev)), intel_pasid_max_id);
	order = get_order(size * count);
	pages = alloc_pages_node(info->iommu->node,
				 GFP_ATOMIC | __GFP_ZERO,
				 order);
	if (!pages) {
		/* Don't leak the descriptor allocated above. */
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = count;

attach_out:
	device_attach_pasid_table(info, pasid_table);

	return 0;
}

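/*
 * Detach @dev from its pasid table. The table and its backing pages
 * are freed only when no other device is using them any more.
 */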
void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;

	info = dev->archdata.iommu;
	if (!info || !dev_is_pci(dev) ||
	    !info->pasid_supported || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	device_detach_pasid_table(info, pasid_table);

	if (!list_empty(&pasid_table->dev))
		return;

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev->archdata.iommu;
	if (!info)
		return NULL;

	return info->pasid_table;
}

int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev->archdata.iommu;
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

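/*
 * Return a pointer to the pasid entry of @dev for @pasid, or NULL
 * (with a warning) when the device has no pasid table or @pasid is out
 * of range. The table is a flat array of pasid entries, so the entry
 * is indexed directly.
 */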
struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
{
	struct pasid_table *pasid_table;
	struct pasid_entry *entries;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid < 0 ||
		    pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	entries = pasid_table->table;

	return &entries[pasid];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
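/*
 * Clearing an entry zeroes pe->val. WRITE_ONCE() keeps the compiler
 * from tearing or reordering the store, as the hardware may be walking
 * the table concurrently.
 */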
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val, 0);
}

void intel_pasid_clear_entry(struct device *dev, int pasid)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	pasid_clear_entry(pe);
}