/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "4.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

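/*
 * Each channel's registers occupy a 0x80-byte bank above the device
 * MMIO base, so the distance between the channel and device register
 * bases divided by 0x80 yields the channel index.
 */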
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
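
/*
 * Example: a 4-source xor is programmed into the descriptor as
 * src_cnt_to_hw(4) == 2, and a source-count field read back as 2
 * describes src_cnt_to_sw(2) == 4 sources.
 */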

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: MSI-X entries, one per channel
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: cached value of the DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

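/*
 * One contiguous chunk of descriptor memory: the kernel virtual address
 * the driver writes through, and the bus address the hardware reads.
 */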
struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[2];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};
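
/*
 * Ring index invariant (modulo ring size): tail <= issued <= head.
 * ioat_ring_active() counts [tail, head), ioat_ring_pending() counts
 * [issued, head); see the helpers further down.
 */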

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
	#ifdef DEBUG
	int id;
	#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n",
		id, (unsigned long long)tx->phys,
		(unsigned long long)hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

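/*
 * GNU statement expression: evaluates to 0 whether or not 'd' is set,
 * so the macro is safe to use in expression context.
 */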
#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

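/*
 * CHANSTS packs the address of the last completed descriptor into its
 * upper bits and channel status into the low bits; masking with the
 * address mask recovers the completion address.
 */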
static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

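/*
 * Ring sizing: 1 << IOAT_MAX_ORDER == IOAT_MAX_DESCS, and with 64-byte
 * hardware descriptors a 2 MB chunk holds 2M / 64 == 32768 of them.
 */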
#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS 65536
#define IOAT_DESCS_PER_2M 32768

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
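
/*
 * Example: with head == 5, issued == 4, tail == 2 and an 8-entry ring,
 * 3 descriptors are active, 1 is pending submission and 5 slots are
 * free.
 */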

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
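
/*
 * Example: with xfercap_log == 20 (1 MB max per descriptor), a 2.5 MB
 * transfer takes two full descriptors plus one for the remainder, so
 * this returns 3.
 */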

/* the ring size is a power of two, so masking with size - 1 wraps idx */
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags);

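/*
 * Sketch of how a dmaengine client typically drives one of the prep
 * routines above (error handling omitted; chan, dst, src and len are
 * illustrative):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = ioat_dma_prep_memcpy_lock(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
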
/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */