/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS = 1000,
	SRP_ABORT_TIMEOUT_MS = 5000,

	SRP_PORT_REDIRECT = 1,
	SRP_DLID_REDIRECT = 2,
	SRP_STALE_CONN = 3,

	SRP_DEF_SG_TABLESIZE = 12,

	SRP_DEFAULT_QUEUE_SIZE = 1 << 6,
	SRP_RSP_SQ_SIZE = 1,
	SRP_TSK_MGMT_SQ_SIZE = 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_MAX_PAGES_PER_MR = 512,

	SRP_MAX_ADD_CDB_LEN = 16,

	SRP_MAX_IMM_SGE = 2,
	SRP_MAX_SGE = SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32 byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET = sizeof(struct srp_cmd) +
			      SRP_MAX_ADD_CDB_LEN +
			      sizeof(struct srp_imm_buf),
};

enum {
	SRP_TAG_NO_REQ = ~0U,
	SRP_TAG_TSK_MGMT = BIT(31),
};

enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *   request.
 */
struct srp_device {
	struct list_head dev_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	u32 global_rkey;
	u64 mr_page_mask;
	int mr_page_size;
	int mr_max_size;
	int max_pages_per_mr;
	bool has_fmr;
	bool has_fr;
	bool use_fmr;
	bool use_fast_reg;
};
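
/*
 * Illustrative note (editorial addition, not part of the original header):
 * mr_page_size is the HCA memory registration page size and mr_page_mask the
 * matching alignment mask, so the two fields are expected to relate as in the
 * sketch below. The derivation shown here is an assumption based on the field
 * names, not a quote of the driver:
 *
 *	srp_dev->mr_page_size = 1 << mr_page_shift;
 *	srp_dev->mr_page_mask = ~((u64)srp_dev->mr_page_size - 1);
 *
 * A DMA address can then be rounded down to its registration page boundary
 * with "dma_addr & srp_dev->mr_page_mask".
 */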

struct srp_host {
	struct srp_device *srp_dev;
	u8 port;
	struct device dev;
	struct list_head target_list;
	spinlock_t target_lock;
	struct completion released;
	struct list_head list;
	struct mutex add_target_mutex;
};

struct srp_request {
	struct scsi_cmnd *scmnd;
	struct srp_iu *cmd;
	union {
		struct ib_pool_fmr **fmr_list;
		struct srp_fr_desc **fr_list;
	};
	u64 *map_page;
	struct srp_direct_buf *indirect_desc;
	dma_addr_t indirect_dma_addr;
	short nmdesc;
	struct ib_cqe reg_cqe;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head free_tx;
	spinlock_t lock;
	s32 req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_qp *qp;
	union {
		struct ib_fmr_pool *fmr_pool;
		struct srp_fr_pool *fr_pool;
	};
	uint32_t max_it_iu_len;
	uint32_t max_ti_iu_len;
	bool use_imm_data;

	/*
	 * Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	struct completion done;
	int status;

	union {
		struct ib_cm {
			struct sa_path_rec path;
			struct ib_sa_query *path_query;
			int path_query_id;
			struct ib_cm_id *cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id *cm_id;
		} rdma_cm;
	};

	struct srp_iu **tx_ring;
	struct srp_iu **rx_ring;
	struct srp_request *req_ring;
	int comp_vector;

	u64 tsk_mgmt_tag;
	struct completion tsk_mgmt_done;
	u8 tsk_mgmt_status;
	bool connected;
};

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t lock;

	/* read only in the hot path */
	u32 global_rkey;
	struct srp_rdma_ch *ch;
	struct net *net;
	u32 ch_count;
	u32 lkey;
	enum srp_target_state state;
	unsigned int cmd_sg_cnt;
	unsigned int indirect_size;
	bool allow_ext_sg;

	/* other member variables */
	union ib_gid sgid;
	__be64 id_ext;
	__be64 ioc_guid;
	__be64 initiator_ext;
	u16 io_class;
	struct srp_host *srp_host;
	struct Scsi_Host *scsi_host;
	struct srp_rport *rport;
	char target_name[32];
	unsigned int scsi_id;
	unsigned int sg_tablesize;
	unsigned int target_can_queue;
	int mr_pool_size;
	int mr_per_cmd;
	int queue_size;
	int req_ring_size;
	int comp_vector;
	int tl_retry_count;

	bool using_rdma_cm;

	union {
		struct {
			__be64 service_id;
			union ib_gid orig_dgid;
			__be16 pkey;
		} ib_cm;
		struct {
			union {
				struct sockaddr_in ip4;
				struct sockaddr_in6 ip6;
				struct sockaddr_storage ss;
			} src;
			union {
				struct sockaddr_in ip4;
				struct sockaddr_in6 ip6;
				struct sockaddr_storage ss;
			} dst;
			bool src_specified;
		} rdma_cm;
	};

	u32 rq_tmo_jiffies;

	int zero_req_lim;

	struct work_struct tl_err_work;
	struct work_struct remove_work;

	struct list_head list;
	bool qp_in_error;
};

struct srp_iu {
	struct list_head list;
	u64 dma;
	void *buf;
	size_t size;
	enum dma_data_direction direction;
	u32 num_sge;
	struct ib_sge sge[SRP_MAX_SGE];
	struct ib_cqe cqe;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr: Memory region.
 */
struct srp_fr_desc {
	struct list_head entry;
	struct ib_mr *mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size: Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock: Protects free_list.
 * @free_list: List of free descriptors.
 * @desc: Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int size;
	int max_page_list_len;
	spinlock_t lock;
	struct list_head free_list;
	struct srp_fr_desc desc[0];
};
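
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * the "available iff on @free_list" invariant means that taking and returning
 * a descriptor reduces to a locked list_del()/list_add() pair. The helpers
 * below are an assumption about how the pool is used; the driver's real
 * helpers live in ib_srp.c and may differ in detail:
 *
 *	static struct srp_fr_desc *example_fr_pool_get(struct srp_fr_pool *pool)
 *	{
 *		struct srp_fr_desc *d = NULL;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&pool->lock, flags);
 *		if (!list_empty(&pool->free_list)) {
 *			d = list_first_entry(&pool->free_list,
 *					     struct srp_fr_desc, entry);
 *			list_del(&d->entry);
 *		}
 *		spin_unlock_irqrestore(&pool->lock, flags);
 *		return d;
 *	}
 *
 *	static void example_fr_pool_put(struct srp_fr_pool *pool,
 *					struct srp_fr_desc *d)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&pool->lock, flags);
 *		list_add(&d->entry, &pool->free_list);
 *		spin_unlock_irqrestore(&pool->lock, flags);
 *	}
 */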

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc: Pointer to the element of the SRP buffer descriptor array
 *   that is being filled in.
 * @pages: Array with DMA addresses of pages being considered for
 *   memory registration.
 * @base_dma_addr: DMA address of the first page that has not yet been mapped.
 * @dma_len: Number of bytes that will be registered with the next
 *   FMR or FR memory registration call.
 * @total_len: Total number of bytes in the sg-list being mapped.
 * @npages: Number of page addresses in the pages[] array.
 * @nmdesc: Number of FMR or FR memory descriptors used for mapping.
 * @ndesc: Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct ib_pool_fmr **next;
			struct ib_pool_fmr **end;
		} fmr;
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void **next;
			void **end;
		} gen;
	};
	struct srp_direct_buf *desc;
	union {
		u64 *pages;
		struct scatterlist *sg;
	};
	dma_addr_t base_dma_addr;
	u32 dma_len;
	u32 total_len;
	unsigned int npages;
	unsigned int nmdesc;
	unsigned int ndesc;
};
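
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * when a mapped region is ready, the mapping code fills in the SRP buffer
 * descriptor pointed to by @desc and advances the counters roughly as below.
 * The helper name is made up for illustration; the actual implementation
 * lives in ib_srp.c:
 *
 *	static void example_map_desc(struct srp_map_state *state,
 *				     dma_addr_t dma_addr, unsigned int dma_len,
 *				     u32 rkey)
 *	{
 *		struct srp_direct_buf *desc = state->desc;
 *
 *		desc->va = cpu_to_be64(dma_addr);
 *		desc->key = cpu_to_be32(rkey);
 *		desc->len = cpu_to_be32(dma_len);
 *
 *		state->total_len += dma_len;
 *		state->desc++;
 *		state->ndesc++;
 *	}
 */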

#endif /* IB_SRP_H */