rjw | 1f88458 | 2022-01-06 17:20:42 +0800 | [diff] [blame^] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* |
| 3 | * Thunderbolt Cactus Ridge driver - NHI driver |
| 4 | * |
| 5 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> |
| 6 | */ |
| 7 | |
| 8 | #ifndef DSL3510_H_ |
| 9 | #define DSL3510_H_ |
| 10 | |
| 11 | #include <linux/idr.h> |
| 12 | #include <linux/mutex.h> |
| 13 | #include <linux/workqueue.h> |
| 14 | |
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 *	      (presumably both arrays hold @hop_count entries, indexed by
 *	      ring hop — verify against the allocation site in nhi.c)
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	struct mutex lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};
| 41 | |
/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 */
struct tb_ring {
	struct mutex lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
};
| 79 | |
| 80 | /* Leave ring interrupt enabled on suspend */ |
| 81 | #define RING_FLAG_NO_SUSPEND BIT(0) |
| 82 | |
struct ring_frame;
typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);

/**
 * struct ring_frame - for use with ring_rx/ring_tx
 * @buffer_phy: DMA address of the data buffer the frame is transferred
 *		to/from (the caller sets this before enqueueing)
 * @callback: Invoked when the frame completes; called with canceled set
 *	      to true if the ring is stopped while the frame is queued
 *	      (see ring_rx()/ring_tx() documentation)
 * @list: Linkage used to queue the frame on a ring — presumably the
 *	  ring's @queue/@in_flight lists; verify in nhi.c
 * @size: Size of the frame in bytes (TX: in, RX: out)
 * @flags: Frame flags (RX: out)
 * @eof: EOF value (TX: in, RX: out)
 * @sof: SOF value (TX: in, RX: out)
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;	/* TX: in, RX: out */
	u32 flags:12;	/* RX: out */
	u32 eof:4;	/* TX: in, RX: out */
	u32 sof:4;	/* TX: in, RX: out */
};
| 98 | |
| 99 | #define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */ |
| 100 | |
| 101 | struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, |
| 102 | unsigned int flags); |
| 103 | struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, |
| 104 | unsigned int flags); |
| 105 | void ring_start(struct tb_ring *ring); |
| 106 | void ring_stop(struct tb_ring *ring); |
| 107 | void ring_free(struct tb_ring *ring); |
| 108 | |
| 109 | int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame); |
| 110 | |
| 111 | /** |
| 112 | * ring_rx() - enqueue a frame on an RX ring |
| 113 | * |
| 114 | * frame->buffer, frame->buffer_phy and frame->callback have to be set. The |
| 115 | * buffer must contain at least TB_FRAME_SIZE bytes. |
| 116 | * |
| 117 | * frame->callback will be invoked with frame->size, frame->flags, frame->eof, |
| 118 | * frame->sof set once the frame has been received. |
| 119 | * |
| 120 | * If ring_stop is called after the packet has been enqueued frame->callback |
| 121 | * will be called with canceled set to true. |
| 122 | * |
| 123 | * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise. |
| 124 | */ |
| 125 | static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame) |
| 126 | { |
| 127 | WARN_ON(ring->is_tx); |
| 128 | return __ring_enqueue(ring, frame); |
| 129 | } |
| 130 | |
| 131 | /** |
| 132 | * ring_tx() - enqueue a frame on an TX ring |
| 133 | * |
| 134 | * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof |
| 135 | * and frame->sof have to be set. |
| 136 | * |
| 137 | * frame->callback will be invoked with once the frame has been transmitted. |
| 138 | * |
| 139 | * If ring_stop is called after the packet has been enqueued frame->callback |
| 140 | * will be called with canceled set to true. |
| 141 | * |
| 142 | * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise. |
| 143 | */ |
| 144 | static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame) |
| 145 | { |
| 146 | WARN_ON(!ring->is_tx); |
| 147 | return __ring_enqueue(ring, frame); |
| 148 | } |
| 149 | |
/*
 * Operating mode of the NHI firmware, as decoded by nhi_mailbox_mode().
 * The exact semantics of each mode are defined by the firmware (ICM)
 * interface — verify against the connection manager documentation.
 */
enum nhi_fw_mode {
	NHI_FW_SAFE_MODE,
	NHI_FW_AUTH_MODE,
	NHI_FW_EP_MODE,
	NHI_FW_CM_MODE,
};
| 156 | |
/*
 * Command codes sent to the firmware through the NHI mailbox (see
 * nhi_mailbox_cmd()). The numeric values are fixed by the firmware
 * interface and must not be changed.
 */
enum nhi_mailbox_cmd {
	NHI_MAILBOX_SAVE_DEVS = 0x05,
	NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
	NHI_MAILBOX_DRV_UNLOADS = 0x07,
	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
};
| 163 | |
| 164 | int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data); |
| 165 | enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi); |
| 166 | |
| 167 | /* |
| 168 | * PCI IDs used in this driver from Win Ridge forward. There is no |
| 169 | * need for the PCI quirk anymore as we will use ICM also on Apple |
| 170 | * hardware. |
| 171 | */ |
| 172 | #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d |
| 173 | #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e |
| 174 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf |
| 175 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE 0x15c0 |
| 176 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI 0x15d2 |
| 177 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE 0x15d3 |
| 178 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI 0x15d9 |
| 179 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE 0x15da |
| 180 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI 0x15dc |
| 181 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI 0x15dd |
| 182 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI 0x15de |
| 183 | |
| 184 | #endif |