/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION "4.00"

#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

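/*
 * Worked examples (illustrative only): the hardware fields encode counts
 * relative to the implicit minimums above, and each _to_sw macro inverts
 * its _to_hw counterpart:
 *
 *	src_cnt_to_hw(4)    == 2	4-source xor/pq operation
 *	src_cnt_to_sw(2)    == 4	recovered software count
 *	src16_cnt_to_hw(12) == 3	16-source descriptor format
 */
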
/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

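/*
 * A minimal sketch of how the workaround is applied when a null
 * descriptor is built (field names as declared for struct
 * ioat_dma_descriptor in "hw.h"; illustrative, not the driver's exact
 * code):
 *
 *	hw->ctl = 0;
 *	hw->ctl_f.null = 1;
 *	hw->size = NULL_DESC_BUFFER_SIZE;	never 0 on ver.3.0
 *	hw->src_addr = 0;
 *	hw->dst_addr = 0;
 */
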
enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: cached contents of the DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[2];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
	#ifdef DEBUG
	int id;
	#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

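/*
 * A minimal polling sketch (illustrative only; loosely modeled on the
 * driver's quiesce behavior, not its exact code): read CHANSTS via
 * ioat_chansts() and spin until the channel leaves the ACTIVE state,
 * after which the predicates above classify the final state.
 */
#if 0	/* example, not built */
static inline u64 ioat_example_wait_inactive(struct ioatdma_chan *ioat_chan)
{
	u64 status = ioat_chansts(ioat_chan);

	while (is_ioat_active(status)) {
		cpu_relax();
		status = ioat_chansts(ioat_chan);
	}

	return status;	/* now reports DONE, SUSPENDED, or HALTED */
}
#endif
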
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS 65536
#define IOAT_DESCS_PER_2M 32768

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}

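/*
 * Worked example (hypothetical values): with alloc_order = 4 the ring
 * holds 16 entries.  For head = 10, issued = 7, tail = 4:
 *
 *	ioat_ring_active()  == CIRC_CNT(10, 4, 16) == 6
 *	ioat_ring_pending() == CIRC_CNT(10, 7, 16) == 3
 *	ioat_ring_space()   == 16 - 6              == 10
 *
 * i.e. six descriptors are outstanding, three of those have not yet been
 * handed to the hardware, and ten slots remain for new submissions.
 */
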
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}

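/*
 * Worked example (hypothetical xfercap): with xfercap_log = 20 a single
 * descriptor moves at most 1 MiB, so a 2.5 MiB request needs
 * (len >> 20) == 2 full descriptors plus 1 for the 0.5 MiB remainder,
 * i.e. ioat_xferlen_to_descs() returns 3.
 */
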
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);

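/*
 * Client-side sketch (illustrative only): the prep routines above are
 * installed in dma_dev, so a dmaengine consumer reaches
 * ioat_dma_prep_memcpy_lock() through the generic dmaengine API rather
 * than by calling it directly.  The helper below is hypothetical and
 * assumes dst/src are already DMA-mapped; error handling is minimal.
 */
#if 0	/* example, not built */
static int ioat_example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return dma_submit_error(cookie);
}
#endif
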
/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(unsigned long data);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */