1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/export.h>
27#include <linux/delay.h>
28#include <asm/unaligned.h>
29#include <linux/t10-pi.h>
30#include <linux/crc-t10dif.h>
31#include <net/checksum.h>
32
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_eh.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_tcq.h>
38#include <scsi/scsi_transport_fc.h>
39
40#include "lpfc_version.h"
41#include "lpfc_hw4.h"
42#include "lpfc_hw.h"
43#include "lpfc_sli.h"
44#include "lpfc_sli4.h"
45#include "lpfc_nl.h"
46#include "lpfc_disc.h"
47#include "lpfc.h"
48#include "lpfc_scsi.h"
49#include "lpfc_logmsg.h"
50#include "lpfc_crtn.h"
51#include "lpfc_vport.h"
52
53#define LPFC_RESET_WAIT 2
54#define LPFC_ABORT_WAIT 2
55
56static char *dif_op_str[] = {
57 "PROT_NORMAL",
58 "PROT_READ_INSERT",
59 "PROT_WRITE_STRIP",
60 "PROT_READ_STRIP",
61 "PROT_WRITE_INSERT",
62 "PROT_READ_PASS",
63 "PROT_WRITE_PASS",
64};
65
66struct scsi_dif_tuple {
67 __be16 guard_tag; /* Checksum */
68 __be16 app_tag; /* Opaque storage */
69 __be32 ref_tag; /* Target LBA or indirect LBA */
70};
71
72static struct lpfc_rport_data *
73lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
74{
75 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
76
77 if (vport->phba->cfg_fof)
78 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
79 else
80 return (struct lpfc_rport_data *)sdev->hostdata;
81}
82
83static void
84lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
85static void
86lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87static int
88lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
89
90static inline unsigned
91lpfc_cmd_blksize(struct scsi_cmnd *sc)
92{
93 return sc->device->sector_size;
94}
95
96#define LPFC_CHECK_PROTECT_GUARD 1
97#define LPFC_CHECK_PROTECT_REF 2
98static inline unsigned
99lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
100{
101 return 1;
102}
103
104static inline unsigned
105lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
106{
107 if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
108 return 0;
109 if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
110 return 1;
111 return 0;
112}
113
114/**
115 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
116 * @phba: Pointer to HBA object.
117 * @lpfc_cmd: lpfc scsi command object pointer.
118 *
119 * This function is called from the lpfc_prep_task_mgmt_cmd function to
120 * set the last bit in the response sge entry.
121 **/
122static void
123lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
124 struct lpfc_io_buf *lpfc_cmd)
125{
126 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
127 if (sgl) {
128 sgl += 1;
129 sgl->word2 = le32_to_cpu(sgl->word2);
130 bf_set(lpfc_sli4_sge_last, sgl, 1);
131 sgl->word2 = cpu_to_le32(sgl->word2);
132 }
133}
134
135/**
136 * lpfc_update_stats - Update statistical data for the command completion
137 * @phba: Pointer to HBA object.
138 * @lpfc_cmd: lpfc scsi command object pointer.
139 *
140 * This function is called when there is a command completion and this
141 * function updates the statistical data for the command completion.
142 **/
143static void
144lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
145{
146 struct lpfc_rport_data *rdata;
147 struct lpfc_nodelist *pnode;
148 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
149 unsigned long flags;
150 struct Scsi_Host *shost = cmd->device->host;
151 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
152 unsigned long latency;
153 int i;
154
155 if (!vport->stat_data_enabled ||
156 vport->stat_data_blocked ||
157 (cmd->result))
158 return;
159
160 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
161 rdata = lpfc_cmd->rdata;
162 pnode = rdata->pnode;
163
164 spin_lock_irqsave(shost->host_lock, flags);
165 if (!pnode ||
166 !pnode->lat_data ||
167 (phba->bucket_type == LPFC_NO_BUCKET)) {
168 spin_unlock_irqrestore(shost->host_lock, flags);
169 return;
170 }
171
172 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
173 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
174 phba->bucket_step;
175 /* check array subscript bounds */
176 if (i < 0)
177 i = 0;
178 else if (i >= LPFC_MAX_BUCKET_COUNT)
179 i = LPFC_MAX_BUCKET_COUNT - 1;
180 } else {
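 /* Power-of-2 buckets: pick the first bucket whose upper bound
 * (bucket_base + (1 << i) * bucket_step) covers the measured latency.
 */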
181 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
182 if (latency <= (phba->bucket_base +
183 ((1<<i)*phba->bucket_step)))
184 break;
185 }
186
187 pnode->lat_data[i].cmd_count++;
188 spin_unlock_irqrestore(shost->host_lock, flags);
189}
190
191/**
192 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
193 * @phba: The Hba for which this call is being executed.
194 *
195 * This routine is called when there is a resource error in the driver or firmware.
196 * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
197 * posts at most 1 event each second. This routine wakes up the worker thread of
198 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
199 *
200 * This routine should be called with no lock held.
201 **/
202void
203lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
204{
205 unsigned long flags;
206 uint32_t evt_posted;
207 unsigned long expires;
208
209 spin_lock_irqsave(&phba->hbalock, flags);
210 atomic_inc(&phba->num_rsrc_err);
211 phba->last_rsrc_error_time = jiffies;
212
213 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
214 if (time_after(expires, jiffies)) {
215 spin_unlock_irqrestore(&phba->hbalock, flags);
216 return;
217 }
218
219 phba->last_ramp_down_time = jiffies;
220
221 spin_unlock_irqrestore(&phba->hbalock, flags);
222
223 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
224 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
225 if (!evt_posted)
226 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
227 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
228
229 if (!evt_posted)
230 lpfc_worker_wake_up(phba);
231 return;
232}
233
234/**
235 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
236 * @phba: The Hba for which this call is being executed.
237 *
238 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
239 * thread. This routine reduces the queue depth for all scsi devices on each vport
240 * associated with @phba.
241 **/
242void
243lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
244{
245 struct lpfc_vport **vports;
246 struct Scsi_Host *shost;
247 struct scsi_device *sdev;
248 unsigned long new_queue_depth;
249 unsigned long num_rsrc_err;
250 int i;
251
252 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
253
254 /*
255 * The error and success command counters are global per
256 * driver instance. If another handler has already
257 * operated on this error event, just exit.
258 */
259 if (num_rsrc_err == 0)
260 return;
261
262 vports = lpfc_create_vport_work_array(phba);
263 if (vports != NULL)
264 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
265 shost = lpfc_shost_from_vport(vports[i]);
266 shost_for_each_device(sdev, shost) {
267 if (num_rsrc_err >= sdev->queue_depth)
268 new_queue_depth = 1;
269 else
270 new_queue_depth = sdev->queue_depth -
271 num_rsrc_err;
272 scsi_change_queue_depth(sdev, new_queue_depth);
273 }
274 }
275 lpfc_destroy_vport_work_array(phba, vports);
276 atomic_set(&phba->num_rsrc_err, 0);
277}
278
279/**
280 * lpfc_scsi_dev_block - set all scsi hosts to block state
281 * @phba: Pointer to HBA context object.
282 *
283 * This function walks the vport list and sets each SCSI host to block state
284 * by invoking the fc_remote_port_delete() routine. This function is invoked
285 * with EEH when the device's PCI slot has been permanently disabled.
286 **/
287void
288lpfc_scsi_dev_block(struct lpfc_hba *phba)
289{
290 struct lpfc_vport **vports;
291 struct Scsi_Host *shost;
292 struct scsi_device *sdev;
293 struct fc_rport *rport;
294 int i;
295
296 vports = lpfc_create_vport_work_array(phba);
297 if (vports != NULL)
298 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
299 shost = lpfc_shost_from_vport(vports[i]);
300 shost_for_each_device(sdev, shost) {
301 rport = starget_to_rport(scsi_target(sdev));
302 fc_remote_port_delete(rport);
303 }
304 }
305 lpfc_destroy_vport_work_array(phba, vports);
306}
307
308/**
309 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
310 * @vport: The virtual port for which this call is being executed.
311 * @num_to_alloc: The requested number of buffers to allocate.
312 *
313 * This routine allocates a scsi buffer for a device with SLI-3 interface spec.
314 * The scsi buffer contains all the necessary information needed to initiate
315 * a SCSI I/O. The non-DMAable buffer region contains information to build
316 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
317 * and the initial BPL. In addition to allocating memory, the FCP CMND and
318 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
319 *
320 * Return codes:
321 * int - number of scsi buffers that were allocated.
322 * 0 = failure, less than num_to_alloc is a partial failure.
323 **/
324static int
325lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
326{
327 struct lpfc_hba *phba = vport->phba;
328 struct lpfc_io_buf *psb;
329 struct ulp_bde64 *bpl;
330 IOCB_t *iocb;
331 dma_addr_t pdma_phys_fcp_cmd;
332 dma_addr_t pdma_phys_fcp_rsp;
333 dma_addr_t pdma_phys_sgl;
334 uint16_t iotag;
335 int bcnt, bpl_size;
336
337 bpl_size = phba->cfg_sg_dma_buf_size -
338 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
339
340 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
341 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
342 num_to_alloc, phba->cfg_sg_dma_buf_size,
343 (int)sizeof(struct fcp_cmnd),
344 (int)sizeof(struct fcp_rsp), bpl_size);
345
346 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
347 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
348 if (!psb)
349 break;
350
351 /*
352 * Get memory from the pci pool to map the virt space to pci
353 * bus space for an I/O. The DMA buffer includes space for the
354 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
355 * necessary to support the sg_tablesize.
356 */
357 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
358 GFP_KERNEL, &psb->dma_handle);
359 if (!psb->data) {
360 kfree(psb);
361 break;
362 }
363
364
365 /* Allocate iotag for psb->cur_iocbq. */
366 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
367 if (iotag == 0) {
368 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
369 psb->data, psb->dma_handle);
370 kfree(psb);
371 break;
372 }
373 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
374
375 psb->fcp_cmnd = psb->data;
376 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
377 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
378 sizeof(struct fcp_rsp);
379
380 /* Initialize local short-hand pointers. */
381 bpl = (struct ulp_bde64 *)psb->dma_sgl;
382 pdma_phys_fcp_cmd = psb->dma_handle;
383 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
384 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
385 sizeof(struct fcp_rsp);
386
387 /*
388 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
389 * are sg list bdes. Initialize the first two and leave the
390 * rest for queuecommand.
391 */
392 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
393 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
394 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
395 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
396 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
397
398 /* Setup the physical region for the FCP RSP */
399 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
400 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
401 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
402 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
403 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
404
405 /*
406 * Since the IOCB for the FCP I/O is built into this
407 * lpfc_scsi_buf, initialize it with all known data now.
408 */
409 iocb = &psb->cur_iocbq.iocb;
410 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
411 if ((phba->sli_rev == 3) &&
412 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
413 /* fill in immediate fcp command BDE */
414 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
415 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
416 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
417 unsli3.fcp_ext.icd);
418 iocb->un.fcpi64.bdl.addrHigh = 0;
419 iocb->ulpBdeCount = 0;
420 iocb->ulpLe = 0;
421 /* fill in response BDE */
422 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
423 BUFF_TYPE_BDE_64;
424 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
425 sizeof(struct fcp_rsp);
426 iocb->unsli3.fcp_ext.rbde.addrLow =
427 putPaddrLow(pdma_phys_fcp_rsp);
428 iocb->unsli3.fcp_ext.rbde.addrHigh =
429 putPaddrHigh(pdma_phys_fcp_rsp);
430 } else {
431 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
432 iocb->un.fcpi64.bdl.bdeSize =
433 (2 * sizeof(struct ulp_bde64));
434 iocb->un.fcpi64.bdl.addrLow =
435 putPaddrLow(pdma_phys_sgl);
436 iocb->un.fcpi64.bdl.addrHigh =
437 putPaddrHigh(pdma_phys_sgl);
438 iocb->ulpBdeCount = 1;
439 iocb->ulpLe = 1;
440 }
441 iocb->ulpClass = CLASS3;
442 psb->status = IOSTAT_SUCCESS;
443 /* Put it back into the SCSI buffer list */
444 psb->cur_iocbq.context1 = psb;
445 spin_lock_init(&psb->buf_lock);
446 lpfc_release_scsi_buf_s3(phba, psb);
447
448 }
449
450 return bcnt;
451}
452
453/**
454 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
455 * @vport: pointer to lpfc vport data structure.
456 *
457 * This routine is invoked by the vport cleanup for deletions and the cleanup
458 * for an ndlp on removal.
459 **/
460void
461lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
462{
463 struct lpfc_hba *phba = vport->phba;
464 struct lpfc_io_buf *psb, *next_psb;
465 struct lpfc_sli4_hdw_queue *qp;
466 unsigned long iflag = 0;
467 int idx;
468
469 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
470 return;
471
472 spin_lock_irqsave(&phba->hbalock, iflag);
473 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
474 qp = &phba->sli4_hba.hdwq[idx];
475
476 spin_lock(&qp->abts_io_buf_list_lock);
477 list_for_each_entry_safe(psb, next_psb,
478 &qp->lpfc_abts_io_buf_list, list) {
479 if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
480 continue;
481
482 if (psb->rdata && psb->rdata->pnode &&
483 psb->rdata->pnode->vport == vport)
484 psb->rdata = NULL;
485 }
486 spin_unlock(&qp->abts_io_buf_list_lock);
487 }
488 spin_unlock_irqrestore(&phba->hbalock, iflag);
489}
490
491/**
492 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
493 * @phba: pointer to lpfc hba data structure.
494 * @axri: pointer to the fcp xri abort wcqe structure.
495 *
496 * This routine is invoked by the worker thread to process a SLI4 fast-path
497 * FCP or NVME aborted xri.
498 **/
499void
500lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
501 struct sli4_wcqe_xri_aborted *axri, int idx)
502{
503 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
504 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
505 struct lpfc_io_buf *psb, *next_psb;
506 struct lpfc_sli4_hdw_queue *qp;
507 unsigned long iflag = 0;
508 struct lpfc_iocbq *iocbq;
509 int i;
510 struct lpfc_nodelist *ndlp;
511 int rrq_empty = 0;
512 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
513
514 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
515 return;
516
517 qp = &phba->sli4_hba.hdwq[idx];
518 spin_lock_irqsave(&phba->hbalock, iflag);
519 spin_lock(&qp->abts_io_buf_list_lock);
520 list_for_each_entry_safe(psb, next_psb,
521 &qp->lpfc_abts_io_buf_list, list) {
522 if (psb->cur_iocbq.sli4_xritag == xri) {
523 list_del_init(&psb->list);
524 psb->flags &= ~LPFC_SBUF_XBUSY;
525 psb->status = IOSTAT_SUCCESS;
526 if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
527 qp->abts_nvme_io_bufs--;
528 spin_unlock(&qp->abts_io_buf_list_lock);
529 spin_unlock_irqrestore(&phba->hbalock, iflag);
530 lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
531 return;
532 }
533 qp->abts_scsi_io_bufs--;
534 spin_unlock(&qp->abts_io_buf_list_lock);
535
536 if (psb->rdata && psb->rdata->pnode)
537 ndlp = psb->rdata->pnode;
538 else
539 ndlp = NULL;
540
541 rrq_empty = list_empty(&phba->active_rrq_list);
542 spin_unlock_irqrestore(&phba->hbalock, iflag);
543 if (ndlp) {
544 lpfc_set_rrq_active(phba, ndlp,
545 psb->cur_iocbq.sli4_lxritag, rxid, 1);
546 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
547 }
548 lpfc_release_scsi_buf_s4(phba, psb);
549 if (rrq_empty)
550 lpfc_worker_wake_up(phba);
551 return;
552 }
553 }
554 spin_unlock(&qp->abts_io_buf_list_lock);
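 /* The aborted XRI was not found on the abts_io_buf_list; fall back to
 * scanning the active iotag table for a matching FCP iocb and just
 * clear its exchange-busy flag.
 */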
555 for (i = 1; i <= phba->sli.last_iotag; i++) {
556 iocbq = phba->sli.iocbq_lookup[i];
557
558 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
559 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
560 continue;
561 if (iocbq->sli4_xritag != xri)
562 continue;
563 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
564 psb->flags &= ~LPFC_SBUF_XBUSY;
565 spin_unlock_irqrestore(&phba->hbalock, iflag);
566 if (!list_empty(&pring->txq))
567 lpfc_worker_wake_up(phba);
568 return;
569
570 }
571 spin_unlock_irqrestore(&phba->hbalock, iflag);
572}
573
574/**
575 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
576 * @phba: The HBA for which this call is being executed.
577 *
578 * This routine removes a scsi buffer from the head of the @phba lpfc_scsi_buf_list
579 * and returns it to the caller.
580 *
581 * Return codes:
582 * NULL - Error
583 * Pointer to lpfc_scsi_buf - Success
584 **/
585static struct lpfc_io_buf *
586lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
587 struct scsi_cmnd *cmnd)
588{
589 struct lpfc_io_buf *lpfc_cmd = NULL;
590 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
591 unsigned long iflag = 0;
592
593 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
594 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
595 list);
596 if (!lpfc_cmd) {
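 /* The get list is empty; under the put-list lock, splice the
 * put list (buffers released elsewhere) onto the get list and
 * retry the removal.
 */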
597 spin_lock(&phba->scsi_buf_list_put_lock);
598 list_splice(&phba->lpfc_scsi_buf_list_put,
599 &phba->lpfc_scsi_buf_list_get);
600 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
601 list_remove_head(scsi_buf_list_get, lpfc_cmd,
602 struct lpfc_io_buf, list);
603 spin_unlock(&phba->scsi_buf_list_put_lock);
604 }
605 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
606
607 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
608 atomic_inc(&ndlp->cmd_pending);
609 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
610 }
611 return lpfc_cmd;
612}
613/**
614 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
615 * @phba: The HBA for which this call is being executed.
616 *
617 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
618 * and returns it to the caller.
619 *
620 * Return codes:
621 * NULL - Error
622 * Pointer to lpfc_scsi_buf - Success
623 **/
624static struct lpfc_io_buf *
625lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
626 struct scsi_cmnd *cmnd)
627{
628 struct lpfc_io_buf *lpfc_cmd;
629 struct lpfc_sli4_hdw_queue *qp;
630 struct sli4_sge *sgl;
631 IOCB_t *iocb;
632 dma_addr_t pdma_phys_fcp_rsp;
633 dma_addr_t pdma_phys_fcp_cmd;
634 uint32_t cpu, idx;
635 int tag;
636 struct fcp_cmd_rsp_buf *tmp = NULL;
637
638 cpu = raw_smp_processor_id();
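 /* Pick the hardware queue either from the block layer's hctx mapping
 * (when scheduling by HDWQ) or from this CPU's affinity map.
 */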
639 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
640 tag = blk_mq_unique_tag(cmnd->request);
641 idx = blk_mq_unique_tag_to_hwq(tag);
642 } else {
643 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
644 }
645
646 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
647 !phba->cfg_xri_rebalancing);
648 if (!lpfc_cmd) {
649 qp = &phba->sli4_hba.hdwq[idx];
650 qp->empty_io_bufs++;
651 return NULL;
652 }
653
654 /* Setup key fields in buffer that may have been changed
655 * if other protocols used this buffer.
656 */
657 lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
658 lpfc_cmd->prot_seg_cnt = 0;
659 lpfc_cmd->seg_cnt = 0;
660 lpfc_cmd->timeout = 0;
661 lpfc_cmd->flags = 0;
662 lpfc_cmd->start_time = jiffies;
663 lpfc_cmd->waitq = NULL;
664 lpfc_cmd->cpu = cpu;
665#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
666 lpfc_cmd->prot_data_type = 0;
667#endif
668 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
669 if (!tmp) {
670 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
671 return NULL;
672 }
673
674 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
675 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
676
677 /*
678 * The first two SGEs are the FCP_CMD and FCP_RSP.
679 * The balance are sg list bdes. Initialize the
680 * first two and leave the rest for queuecommand.
681 */
682 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
683 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
684 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
685 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
686 sgl->word2 = le32_to_cpu(sgl->word2);
687 bf_set(lpfc_sli4_sge_last, sgl, 0);
688 sgl->word2 = cpu_to_le32(sgl->word2);
689 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
690 sgl++;
691
692 /* Setup the physical region for the FCP RSP */
693 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
694 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
695 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
696 sgl->word2 = le32_to_cpu(sgl->word2);
697 bf_set(lpfc_sli4_sge_last, sgl, 1);
698 sgl->word2 = cpu_to_le32(sgl->word2);
699 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
700
701 /*
702 * Since the IOCB for the FCP I/O is built into this
703 * lpfc_io_buf, initialize it with all known data now.
704 */
705 iocb = &lpfc_cmd->cur_iocbq.iocb;
706 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
707 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
708 /* setting the BLP size to 2 * sizeof BDE may not be correct.
709 * We are setting the bpl to point to our sgl. An sgl's
710 * entries are 16 bytes, a bpl's entries are 12 bytes.
711 */
712 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
713 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
714 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
715 iocb->ulpBdeCount = 1;
716 iocb->ulpLe = 1;
717 iocb->ulpClass = CLASS3;
718
719 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
720 atomic_inc(&ndlp->cmd_pending);
721 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
722 }
723 return lpfc_cmd;
724}
725/**
726 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
727 * @phba: The HBA for which this call is being executed.
728 *
729 * This routine removes a scsi buffer from the head of the @phba lpfc_scsi_buf_list
730 * and returns it to the caller.
731 *
732 * Return codes:
733 * NULL - Error
734 * Pointer to lpfc_scsi_buf - Success
735 **/
736static struct lpfc_io_buf*
737lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
738 struct scsi_cmnd *cmnd)
739{
740 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
741}
742
743/**
744 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
745 * @phba: The Hba for which this call is being executed.
746 * @psb: The scsi buffer which is being released.
747 *
748 * This routine releases @psb scsi buffer by adding it to tail of @phba
749 * lpfc_scsi_buf_list list.
750 **/
751static void
752lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
753{
754 unsigned long iflag = 0;
755
756 psb->seg_cnt = 0;
757 psb->prot_seg_cnt = 0;
758
759 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
760 psb->pCmd = NULL;
761 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
762 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
763 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
764}
765
766/**
767 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
768 * @phba: The Hba for which this call is being executed.
769 * @psb: The scsi buffer which is being released.
770 *
771 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
772 * io_buf_list. For SLI4, XRIs are tied to the scsi buffer
773 * and cannot be reused for at least RA_TOV amount of time if the I/O was
774 * aborted.
775 **/
776static void
777lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
778{
779 struct lpfc_sli4_hdw_queue *qp;
780 unsigned long iflag = 0;
781
782 psb->seg_cnt = 0;
783 psb->prot_seg_cnt = 0;
784
785 qp = psb->hdwq;
786 if (psb->flags & LPFC_SBUF_XBUSY) {
787 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
788 psb->pCmd = NULL;
789 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
790 qp->abts_scsi_io_bufs++;
791 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
792 } else {
793 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
794 }
795}
796
797/**
798 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
799 * @phba: The Hba for which this call is being executed.
800 * @psb: The scsi buffer which is being released.
801 *
802 * This routine releases @psb scsi buffer by adding it to tail of @phba
803 * lpfc_scsi_buf_list list.
804 **/
805static void
806lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
807{
808 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
809 atomic_dec(&psb->ndlp->cmd_pending);
810
811 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
812 phba->lpfc_release_scsi_buf(phba, psb);
813}
814
815/**
816 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
817 * @phba: The Hba for which this call is being executed.
818 * @lpfc_cmd: The scsi buffer which is going to be mapped.
819 *
820 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
821 * field of @lpfc_cmd for a device with SLI-3 interface spec. This routine scans
822 * through sg elements and formats the bde. This routine also initializes all
823 * IOCB fields which are dependent on scsi command request buffer.
824 *
825 * Return codes:
826 * 1 - Error
827 * 0 - Success
828 **/
829static int
830lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
831{
832 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
833 struct scatterlist *sgel = NULL;
834 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
835 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
836 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
837 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
838 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
839 dma_addr_t physaddr;
840 uint32_t num_bde = 0;
841 int nseg, datadir = scsi_cmnd->sc_data_direction;
842
843 /*
844 * There are three possibilities here - use scatter-gather segment, use
845 * the single mapping, or neither. Start the lpfc command prep by
846 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
847 * data bde entry.
848 */
849 bpl += 2;
850 if (scsi_sg_count(scsi_cmnd)) {
851 /*
852 * The driver stores the segment count returned from dma_map_sg
853 * because this is a count of dma-mappings used to map the use_sg
854 * pages. They are not guaranteed to be the same for those
855 * architectures that implement an IOMMU.
856 */
857
858 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
859 scsi_sg_count(scsi_cmnd), datadir);
860 if (unlikely(!nseg))
861 return 1;
862
863 lpfc_cmd->seg_cnt = nseg;
864 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
865 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
866 "9064 BLKGRD: %s: Too many sg segments from "
867 "dma_map_sg. Config %d, seg_cnt %d\n",
868 __func__, phba->cfg_sg_seg_cnt,
869 lpfc_cmd->seg_cnt);
870 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
871 lpfc_cmd->seg_cnt = 0;
872 scsi_dma_unmap(scsi_cmnd);
873 return 2;
874 }
875
876 /*
877 * The driver established a maximum scatter-gather segment count
878 * during probe that limits the number of sg elements in any
879 * single scsi command. Just run through the seg_cnt and format
880 * the bde's.
881 * When using SLI-3 the driver will try to fit all the BDEs into
882 * the IOCB. If it can't then the BDEs get added to a BPL as it
883 * does for SLI-2 mode.
884 */
885 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
886 physaddr = sg_dma_address(sgel);
887 if (phba->sli_rev == 3 &&
888 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
889 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
890 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
891 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
892 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
893 data_bde->addrLow = putPaddrLow(physaddr);
894 data_bde->addrHigh = putPaddrHigh(physaddr);
895 data_bde++;
896 } else {
897 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
898 bpl->tus.f.bdeSize = sg_dma_len(sgel);
899 bpl->tus.w = le32_to_cpu(bpl->tus.w);
900 bpl->addrLow =
901 le32_to_cpu(putPaddrLow(physaddr));
902 bpl->addrHigh =
903 le32_to_cpu(putPaddrHigh(physaddr));
904 bpl++;
905 }
906 }
907 }
908
909 /*
910 * Finish initializing those IOCB fields that are dependent on the
911 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
912 * explicitly reinitialized and for SLI-3 the extended bde count is
913 * explicitly reinitialized since all iocb memory resources are reused.
914 */
915 if (phba->sli_rev == 3 &&
916 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
917 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
918 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
919 /*
920 * The extended IOCB format can only fit 3 BDE or a BPL.
921 * This I/O has more than 3 BDE so the 1st data bde will
922 * be a BPL that is filled in here.
923 */
924 physaddr = lpfc_cmd->dma_handle;
925 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
926 data_bde->tus.f.bdeSize = (num_bde *
927 sizeof(struct ulp_bde64));
928 physaddr += (sizeof(struct fcp_cmnd) +
929 sizeof(struct fcp_rsp) +
930 (2 * sizeof(struct ulp_bde64)));
931 data_bde->addrHigh = putPaddrHigh(physaddr);
932 data_bde->addrLow = putPaddrLow(physaddr);
933 /* ebde count includes the response bde and data bpl */
934 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
935 } else {
936 /* ebde count includes the response bde and data bdes */
937 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
938 }
939 } else {
940 iocb_cmd->un.fcpi64.bdl.bdeSize =
941 ((num_bde + 2) * sizeof(struct ulp_bde64));
942 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
943 }
944 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
945
946 /*
947 * Due to difference in data length between DIF/non-DIF paths,
948 * we need to set word 4 of IOCB here
949 */
950 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
951 return 0;
952}
953
954#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
955
956/* Return BG_ERR_INIT if error injection is detected by Initiator */
957#define BG_ERR_INIT 0x1
958/* Return BG_ERR_TGT if error injection is detected by Target */
959#define BG_ERR_TGT 0x2
960/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
961#define BG_ERR_SWAP 0x10
962/**
963 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
964 * error injection
965 **/
966#define BG_ERR_CHECK 0x20
967
968/**
969 * lpfc_bg_err_inject - Determine if we should inject an error
970 * @phba: The Hba for which this call is being executed.
971 * @sc: The SCSI command to examine
972 * @reftag: (out) BlockGuard reference tag for transmitted data
973 * @apptag: (out) BlockGuard application tag for transmitted data
974 * @new_guard (in) Value to replace CRC with if needed
975 *
976 * Returns BG_ERR_* bit mask or 0 if request ignored
977 **/
978static int
979lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
980 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
981{
982 struct scatterlist *sgpe; /* s/g prot entry */
983 struct lpfc_io_buf *lpfc_cmd = NULL;
984 struct scsi_dif_tuple *src = NULL;
985 struct lpfc_nodelist *ndlp;
986 struct lpfc_rport_data *rdata;
987 uint32_t op = scsi_get_prot_op(sc);
988 uint32_t blksize;
989 uint32_t numblks;
990 sector_t lba;
991 int rc = 0;
992 int blockoff = 0;
993
994 if (op == SCSI_PROT_NORMAL)
995 return 0;
996
997 sgpe = scsi_prot_sglist(sc);
998 lba = scsi_get_lba(sc);
999
1000 /* First check if we need to match the LBA */
1001 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1002 blksize = lpfc_cmd_blksize(sc);
1003 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1004
1005 /* Make sure we have the right LBA if one is specified */
1006 if ((phba->lpfc_injerr_lba < lba) ||
1007 (phba->lpfc_injerr_lba >= (lba + numblks)))
1008 return 0;
1009 if (sgpe) {
1010 blockoff = phba->lpfc_injerr_lba - lba;
1011 numblks = sg_dma_len(sgpe) /
1012 sizeof(struct scsi_dif_tuple);
1013 if (numblks < blockoff)
1014 blockoff = numblks;
1015 }
1016 }
1017
1018 /* Next check if we need to match the remote NPortID or WWPN */
1019 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1020 if (rdata && rdata->pnode) {
1021 ndlp = rdata->pnode;
1022
1023 /* Make sure we have the right NPortID if one is specified */
1024 if (phba->lpfc_injerr_nportid &&
1025 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1026 return 0;
1027
1028 /*
1029 * Make sure we have the right WWPN if one is specified.
1030 * wwn[0] should be a non-zero NAA in a good WWPN.
1031 */
1032 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1033 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1034 sizeof(struct lpfc_name)) != 0))
1035 return 0;
1036 }
1037
1038 /* Setup a ptr to the protection data if the SCSI host provides it */
1039 if (sgpe) {
1040 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1041 src += blockoff;
1042 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1043 }
1044
1045 /* Should we change the Reference Tag */
1046 if (reftag) {
1047 if (phba->lpfc_injerr_wref_cnt) {
1048 switch (op) {
1049 case SCSI_PROT_WRITE_PASS:
1050 if (src) {
1051 /*
1052 * For WRITE_PASS, force the error
1053 * to be sent on the wire. It should
1054 * be detected by the Target.
1055 * If blockoff != 0 error will be
1056 * inserted in middle of the IO.
1057 */
1058
1059 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1060 "9076 BLKGRD: Injecting reftag error: "
1061 "write lba x%lx + x%x oldrefTag x%x\n",
1062 (unsigned long)lba, blockoff,
1063 be32_to_cpu(src->ref_tag));
1064
1065 /*
1066 * Save the old ref_tag so we can
1067 * restore it on completion.
1068 */
1069 if (lpfc_cmd) {
1070 lpfc_cmd->prot_data_type =
1071 LPFC_INJERR_REFTAG;
1072 lpfc_cmd->prot_data_segment =
1073 src;
1074 lpfc_cmd->prot_data =
1075 src->ref_tag;
1076 }
1077 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1078 phba->lpfc_injerr_wref_cnt--;
1079 if (phba->lpfc_injerr_wref_cnt == 0) {
1080 phba->lpfc_injerr_nportid = 0;
1081 phba->lpfc_injerr_lba =
1082 LPFC_INJERR_LBA_OFF;
1083 memset(&phba->lpfc_injerr_wwpn,
1084 0, sizeof(struct lpfc_name));
1085 }
1086 rc = BG_ERR_TGT | BG_ERR_CHECK;
1087
1088 break;
1089 }
1090 /* fall through */
1091 case SCSI_PROT_WRITE_INSERT:
1092 /*
1093 * For WRITE_INSERT, force the error
1094 * to be sent on the wire. It should be
1095 * detected by the Target.
1096 */
1097 /* DEADBEEF will be the reftag on the wire */
1098 *reftag = 0xDEADBEEF;
1099 phba->lpfc_injerr_wref_cnt--;
1100 if (phba->lpfc_injerr_wref_cnt == 0) {
1101 phba->lpfc_injerr_nportid = 0;
1102 phba->lpfc_injerr_lba =
1103 LPFC_INJERR_LBA_OFF;
1104 memset(&phba->lpfc_injerr_wwpn,
1105 0, sizeof(struct lpfc_name));
1106 }
1107 rc = BG_ERR_TGT | BG_ERR_CHECK;
1108
1109 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1110 "9078 BLKGRD: Injecting reftag error: "
1111 "write lba x%lx\n", (unsigned long)lba);
1112 break;
1113 case SCSI_PROT_WRITE_STRIP:
1114 /*
1115 * For WRITE_STRIP and WRITE_PASS,
1116 * force the error on data
1117 * being copied from SLI-Host to SLI-Port.
1118 */
1119 *reftag = 0xDEADBEEF;
1120 phba->lpfc_injerr_wref_cnt--;
1121 if (phba->lpfc_injerr_wref_cnt == 0) {
1122 phba->lpfc_injerr_nportid = 0;
1123 phba->lpfc_injerr_lba =
1124 LPFC_INJERR_LBA_OFF;
1125 memset(&phba->lpfc_injerr_wwpn,
1126 0, sizeof(struct lpfc_name));
1127 }
1128 rc = BG_ERR_INIT;
1129
1130 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1131 "9077 BLKGRD: Injecting reftag error: "
1132 "write lba x%lx\n", (unsigned long)lba);
1133 break;
1134 }
1135 }
1136 if (phba->lpfc_injerr_rref_cnt) {
1137 switch (op) {
1138 case SCSI_PROT_READ_INSERT:
1139 case SCSI_PROT_READ_STRIP:
1140 case SCSI_PROT_READ_PASS:
1141 /*
1142 * For READ_STRIP and READ_PASS, force the
1143 * error on data being read off the wire. It
1144 * should force an IO error to the driver.
1145 */
1146 *reftag = 0xDEADBEEF;
1147 phba->lpfc_injerr_rref_cnt--;
1148 if (phba->lpfc_injerr_rref_cnt == 0) {
1149 phba->lpfc_injerr_nportid = 0;
1150 phba->lpfc_injerr_lba =
1151 LPFC_INJERR_LBA_OFF;
1152 memset(&phba->lpfc_injerr_wwpn,
1153 0, sizeof(struct lpfc_name));
1154 }
1155 rc = BG_ERR_INIT;
1156
1157 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1158 "9079 BLKGRD: Injecting reftag error: "
1159 "read lba x%lx\n", (unsigned long)lba);
1160 break;
1161 }
1162 }
1163 }
1164
1165 /* Should we change the Application Tag */
1166 if (apptag) {
1167 if (phba->lpfc_injerr_wapp_cnt) {
1168 switch (op) {
1169 case SCSI_PROT_WRITE_PASS:
1170 if (src) {
1171 /*
1172 * For WRITE_PASS, force the error
1173 * to be sent on the wire. It should
1174 * be detected by the Target.
1175 * If blockoff != 0 error will be
1176 * inserted in middle of the IO.
1177 */
1178
1179 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1180 "9080 BLKGRD: Injecting apptag error: "
1181 "write lba x%lx + x%x oldappTag x%x\n",
1182 (unsigned long)lba, blockoff,
1183 be16_to_cpu(src->app_tag));
1184
1185 /*
1186 * Save the old app_tag so we can
1187 * restore it on completion.
1188 */
1189 if (lpfc_cmd) {
1190 lpfc_cmd->prot_data_type =
1191 LPFC_INJERR_APPTAG;
1192 lpfc_cmd->prot_data_segment =
1193 src;
1194 lpfc_cmd->prot_data =
1195 src->app_tag;
1196 }
1197 src->app_tag = cpu_to_be16(0xDEAD);
1198 phba->lpfc_injerr_wapp_cnt--;
1199 if (phba->lpfc_injerr_wapp_cnt == 0) {
1200 phba->lpfc_injerr_nportid = 0;
1201 phba->lpfc_injerr_lba =
1202 LPFC_INJERR_LBA_OFF;
1203 memset(&phba->lpfc_injerr_wwpn,
1204 0, sizeof(struct lpfc_name));
1205 }
1206 rc = BG_ERR_TGT | BG_ERR_CHECK;
1207 break;
1208 }
1209 /* fall through */
1210 case SCSI_PROT_WRITE_INSERT:
1211 /*
1212 * For WRITE_INSERT, force the
1213 * error to be sent on the wire. It should be
1214 * detected by the Target.
1215 */
1216 /* DEAD will be the apptag on the wire */
1217 *apptag = 0xDEAD;
1218 phba->lpfc_injerr_wapp_cnt--;
1219 if (phba->lpfc_injerr_wapp_cnt == 0) {
1220 phba->lpfc_injerr_nportid = 0;
1221 phba->lpfc_injerr_lba =
1222 LPFC_INJERR_LBA_OFF;
1223 memset(&phba->lpfc_injerr_wwpn,
1224 0, sizeof(struct lpfc_name));
1225 }
1226 rc = BG_ERR_TGT | BG_ERR_CHECK;
1227
1228 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1229 "0813 BLKGRD: Injecting apptag error: "
1230 "write lba x%lx\n", (unsigned long)lba);
1231 break;
1232 case SCSI_PROT_WRITE_STRIP:
1233 /*
1234 * For WRITE_STRIP and WRITE_PASS,
1235 * force the error on data
1236 * being copied from SLI-Host to SLI-Port.
1237 */
1238 *apptag = 0xDEAD;
1239 phba->lpfc_injerr_wapp_cnt--;
1240 if (phba->lpfc_injerr_wapp_cnt == 0) {
1241 phba->lpfc_injerr_nportid = 0;
1242 phba->lpfc_injerr_lba =
1243 LPFC_INJERR_LBA_OFF;
1244 memset(&phba->lpfc_injerr_wwpn,
1245 0, sizeof(struct lpfc_name));
1246 }
1247 rc = BG_ERR_INIT;
1248
1249 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1250 "0812 BLKGRD: Injecting apptag error: "
1251 "write lba x%lx\n", (unsigned long)lba);
1252 break;
1253 }
1254 }
1255 if (phba->lpfc_injerr_rapp_cnt) {
1256 switch (op) {
1257 case SCSI_PROT_READ_INSERT:
1258 case SCSI_PROT_READ_STRIP:
1259 case SCSI_PROT_READ_PASS:
1260 /*
1261 * For READ_STRIP and READ_PASS, force the
1262 * error on data being read off the wire. It
1263 * should force an IO error to the driver.
1264 */
1265 *apptag = 0xDEAD;
1266 phba->lpfc_injerr_rapp_cnt--;
1267 if (phba->lpfc_injerr_rapp_cnt == 0) {
1268 phba->lpfc_injerr_nportid = 0;
1269 phba->lpfc_injerr_lba =
1270 LPFC_INJERR_LBA_OFF;
1271 memset(&phba->lpfc_injerr_wwpn,
1272 0, sizeof(struct lpfc_name));
1273 }
1274 rc = BG_ERR_INIT;
1275
1276 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1277 "0814 BLKGRD: Injecting apptag error: "
1278 "read lba x%lx\n", (unsigned long)lba);
1279 break;
1280 }
1281 }
1282 }
1283
1284
1285 /* Should we change the Guard Tag */
1286 if (new_guard) {
1287 if (phba->lpfc_injerr_wgrd_cnt) {
1288 switch (op) {
1289 case SCSI_PROT_WRITE_PASS:
1290 rc = BG_ERR_CHECK;
1291 /* fall through */
1292
1293 case SCSI_PROT_WRITE_INSERT:
1294 /*
1295 * For WRITE_INSERT, force the
1296 * error to be sent on the wire. It should be
1297 * detected by the Target.
1298 */
1299 phba->lpfc_injerr_wgrd_cnt--;
1300 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1301 phba->lpfc_injerr_nportid = 0;
1302 phba->lpfc_injerr_lba =
1303 LPFC_INJERR_LBA_OFF;
1304 memset(&phba->lpfc_injerr_wwpn,
1305 0, sizeof(struct lpfc_name));
1306 }
1307
1308 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1309 /* Signals the caller to swap CRC->CSUM */
1310
1311 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1312 "0817 BLKGRD: Injecting guard error: "
1313 "write lba x%lx\n", (unsigned long)lba);
1314 break;
1315 case SCSI_PROT_WRITE_STRIP:
1316 /*
1317 * For WRITE_STRIP and WRITE_PASS,
1318 * force the error on data
1319 * being copied from SLI-Host to SLI-Port.
1320 */
1321 phba->lpfc_injerr_wgrd_cnt--;
1322 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1323 phba->lpfc_injerr_nportid = 0;
1324 phba->lpfc_injerr_lba =
1325 LPFC_INJERR_LBA_OFF;
1326 memset(&phba->lpfc_injerr_wwpn,
1327 0, sizeof(struct lpfc_name));
1328 }
1329
1330 rc = BG_ERR_INIT | BG_ERR_SWAP;
1331 /* Signals the caller to swap CRC->CSUM */
1332
1333 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1334 "0816 BLKGRD: Injecting guard error: "
1335 "write lba x%lx\n", (unsigned long)lba);
1336 break;
1337 }
1338 }
1339 if (phba->lpfc_injerr_rgrd_cnt) {
1340 switch (op) {
1341 case SCSI_PROT_READ_INSERT:
1342 case SCSI_PROT_READ_STRIP:
1343 case SCSI_PROT_READ_PASS:
1344 /*
1345 * For READ_STRIP and READ_PASS, force the
1346 * error on data being read off the wire. It
1347 * should force an IO error to the driver.
1348 */
1349 phba->lpfc_injerr_rgrd_cnt--;
1350 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1351 phba->lpfc_injerr_nportid = 0;
1352 phba->lpfc_injerr_lba =
1353 LPFC_INJERR_LBA_OFF;
1354 memset(&phba->lpfc_injerr_wwpn,
1355 0, sizeof(struct lpfc_name));
1356 }
1357
1358 rc = BG_ERR_INIT | BG_ERR_SWAP;
1359 /* Signals the caller to swap CRC->CSUM */
1360
1361 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1362 "0818 BLKGRD: Injecting guard error: "
1363 "read lba x%lx\n", (unsigned long)lba);
1364 }
1365 }
1366 }
1367
1368 return rc;
1369}
1370#endif
1371
1372/**
1373 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1374 * the specified SCSI command.
1375 * @phba: The Hba for which this call is being executed.
1376 * @sc: The SCSI command to examine
1377 * @txop: (out) BlockGuard operation for transmitted data
1378 * @rxop: (out) BlockGuard operation for received data
1379 *
1380 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1381 *
1382 **/
1383static int
1384lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1385 uint8_t *txop, uint8_t *rxop)
1386{
1387 uint8_t ret = 0;
1388
1389 if (lpfc_cmd_guard_csum(sc)) {
1390 switch (scsi_get_prot_op(sc)) {
1391 case SCSI_PROT_READ_INSERT:
1392 case SCSI_PROT_WRITE_STRIP:
1393 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1394 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1395 break;
1396
1397 case SCSI_PROT_READ_STRIP:
1398 case SCSI_PROT_WRITE_INSERT:
1399 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1400 *txop = BG_OP_IN_NODIF_OUT_CRC;
1401 break;
1402
1403 case SCSI_PROT_READ_PASS:
1404 case SCSI_PROT_WRITE_PASS:
1405 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1406 *txop = BG_OP_IN_CSUM_OUT_CRC;
1407 break;
1408
1409 case SCSI_PROT_NORMAL:
1410 default:
1411 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1412 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1413 scsi_get_prot_op(sc));
1414 ret = 1;
1415 break;
1416
1417 }
1418 } else {
1419 switch (scsi_get_prot_op(sc)) {
1420 case SCSI_PROT_READ_STRIP:
1421 case SCSI_PROT_WRITE_INSERT:
1422 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1423 *txop = BG_OP_IN_NODIF_OUT_CRC;
1424 break;
1425
1426 case SCSI_PROT_READ_PASS:
1427 case SCSI_PROT_WRITE_PASS:
1428 *rxop = BG_OP_IN_CRC_OUT_CRC;
1429 *txop = BG_OP_IN_CRC_OUT_CRC;
1430 break;
1431
1432 case SCSI_PROT_READ_INSERT:
1433 case SCSI_PROT_WRITE_STRIP:
1434 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1435 *txop = BG_OP_IN_CRC_OUT_NODIF;
1436 break;
1437
1438 case SCSI_PROT_NORMAL:
1439 default:
1440 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1441 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1442 scsi_get_prot_op(sc));
1443 ret = 1;
1444 break;
1445 }
1446 }
1447
1448 return ret;
1449}
1450
1451#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1452/**
1453 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1454 * the specified SCSI command in order to force a guard tag error.
1455 * @phba: The Hba for which this call is being executed.
1456 * @sc: The SCSI command to examine
1457 * @txop: (out) BlockGuard operation for transmitted data
1458 * @rxop: (out) BlockGuard operation for received data
1459 *
1460 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1461 *
1462 **/
1463static int
1464lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1465 uint8_t *txop, uint8_t *rxop)
1466{
1467 uint8_t ret = 0;
1468
1469 if (lpfc_cmd_guard_csum(sc)) {
1470 switch (scsi_get_prot_op(sc)) {
1471 case SCSI_PROT_READ_INSERT:
1472 case SCSI_PROT_WRITE_STRIP:
1473 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1474 *txop = BG_OP_IN_CRC_OUT_NODIF;
1475 break;
1476
1477 case SCSI_PROT_READ_STRIP:
1478 case SCSI_PROT_WRITE_INSERT:
1479 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1480 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1481 break;
1482
1483 case SCSI_PROT_READ_PASS:
1484 case SCSI_PROT_WRITE_PASS:
1485 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1486 *txop = BG_OP_IN_CRC_OUT_CSUM;
1487 break;
1488
1489 case SCSI_PROT_NORMAL:
1490 default:
1491 break;
1492
1493 }
1494 } else {
1495 switch (scsi_get_prot_op(sc)) {
1496 case SCSI_PROT_READ_STRIP:
1497 case SCSI_PROT_WRITE_INSERT:
1498 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1499 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1500 break;
1501
1502 case SCSI_PROT_READ_PASS:
1503 case SCSI_PROT_WRITE_PASS:
1504 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1505 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1506 break;
1507
1508 case SCSI_PROT_READ_INSERT:
1509 case SCSI_PROT_WRITE_STRIP:
1510 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1511 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1512 break;
1513
1514 case SCSI_PROT_NORMAL:
1515 default:
1516 break;
1517 }
1518 }
1519
1520 return ret;
1521}
1522#endif
1523
1524/**
1525 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1526 * @phba: The Hba for which this call is being executed.
1527 * @sc: pointer to scsi command we're working on
1528 * @bpl: pointer to buffer list for protection groups
1529 * @datasegcnt: number of segments of data that have been dma mapped
1530 *
1531 * This function sets up BPL buffer list for protection groups of
1532 * type LPFC_PG_TYPE_NO_DIF
1533 *
1534 * This is usually used when the HBA is instructed to generate
1535 * DIFs and insert them into data stream (or strip DIF from
1536 * incoming data stream)
1537 *
1538 * The buffer list consists of just one protection group described
1539 * below:
1540 * +-------------------------+
1541 * start of prot group --> | PDE_5 |
1542 * +-------------------------+
1543 * | PDE_6 |
1544 * +-------------------------+
1545 * | Data BDE |
1546 * +-------------------------+
1547 * |more Data BDE's ... (opt)|
1548 * +-------------------------+
1549 *
1550 *
1551 * Note: Data s/g buffers have been dma mapped
1552 *
1553 * Returns the number of BDEs added to the BPL.
1554 **/
1555static int
1556lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1557 struct ulp_bde64 *bpl, int datasegcnt)
1558{
1559 struct scatterlist *sgde = NULL; /* s/g data entry */
1560 struct lpfc_pde5 *pde5 = NULL;
1561 struct lpfc_pde6 *pde6 = NULL;
1562 dma_addr_t physaddr;
1563 int i = 0, num_bde = 0, status;
1564 int datadir = sc->sc_data_direction;
1565#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1566 uint32_t rc;
1567#endif
1568 uint32_t checking = 1;
1569 uint32_t reftag;
1570 uint8_t txop, rxop;
1571
1572 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1573 if (status)
1574 goto out;
1575
1576 /* extract some info from the scsi command for pde*/
1577 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1578
1579#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1580 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1581 if (rc) {
1582 if (rc & BG_ERR_SWAP)
1583 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1584 if (rc & BG_ERR_CHECK)
1585 checking = 0;
1586 }
1587#endif
1588
1589 /* setup PDE5 with what we have */
1590 pde5 = (struct lpfc_pde5 *) bpl;
1591 memset(pde5, 0, sizeof(struct lpfc_pde5));
1592 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1593
1594 /* Endianness conversion if necessary for PDE5 */
1595 pde5->word0 = cpu_to_le32(pde5->word0);
1596 pde5->reftag = cpu_to_le32(reftag);
1597
1598 /* advance bpl and increment bde count */
1599 num_bde++;
1600 bpl++;
1601 pde6 = (struct lpfc_pde6 *) bpl;
1602
1603 /* setup PDE6 with the rest of the info */
1604 memset(pde6, 0, sizeof(struct lpfc_pde6));
1605 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1606 bf_set(pde6_optx, pde6, txop);
1607 bf_set(pde6_oprx, pde6, rxop);
1608
1609 /*
1610 * We only need to check the data on READs, for WRITEs
1611 * protection data is automatically generated, not checked.
1612 */
1613 if (datadir == DMA_FROM_DEVICE) {
1614 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1615 bf_set(pde6_ce, pde6, checking);
1616 else
1617 bf_set(pde6_ce, pde6, 0);
1618
1619 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1620 bf_set(pde6_re, pde6, checking);
1621 else
1622 bf_set(pde6_re, pde6, 0);
1623 }
1624 bf_set(pde6_ai, pde6, 1);
1625 bf_set(pde6_ae, pde6, 0);
1626 bf_set(pde6_apptagval, pde6, 0);
1627
1628 /* Endianness conversion if necessary for PDE6 */
1629 pde6->word0 = cpu_to_le32(pde6->word0);
1630 pde6->word1 = cpu_to_le32(pde6->word1);
1631 pde6->word2 = cpu_to_le32(pde6->word2);
1632
1633 /* advance bpl and increment bde count */
1634 num_bde++;
1635 bpl++;
1636
1637 /* assumption: caller has already run dma_map_sg on command data */
1638 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1639 physaddr = sg_dma_address(sgde);
1640 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1641 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1642 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1643 if (datadir == DMA_TO_DEVICE)
1644 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1645 else
1646 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1647 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1648 bpl++;
1649 num_bde++;
1650 }
1651
1652out:
1653 return num_bde;
1654}
1655
1656/**
1657 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1658 * @phba: The Hba for which this call is being executed.
1659 * @sc: pointer to scsi command we're working on
1660 * @bpl: pointer to buffer list for protection groups
1661 * @datacnt: number of segments of data that have been dma mapped
1662 * @protcnt: number of segments of protection data that have been dma mapped
1663 *
1664 * This function sets up BPL buffer list for protection groups of
1665 * type LPFC_PG_TYPE_DIF
1666 *
1667 * This is usually used when DIFs are in their own buffers,
1668 * separate from the data. The HBA can then be instructed
1669 * to place the DIFs in the outgoing stream. For read operations,
1670 * the HBA could extract the DIFs and place them in DIF buffers.
1671 *
1672 * The buffer list for this type consists of one or more of the
1673 * protection groups described below:
1674 * +-------------------------+
1675 * start of first prot group --> | PDE_5 |
1676 * +-------------------------+
1677 * | PDE_6 |
1678 * +-------------------------+
1679 * | PDE_7 (Prot BDE) |
1680 * +-------------------------+
1681 * | Data BDE |
1682 * +-------------------------+
1683 * |more Data BDE's ... (opt)|
1684 * +-------------------------+
1685 * start of new prot group --> | PDE_5 |
1686 * +-------------------------+
1687 * | ... |
1688 * +-------------------------+
1689 *
1690 * Note: It is assumed that both data and protection s/g buffers have been
1691 * mapped for DMA
1692 *
1693 * Returns the number of BDEs added to the BPL.
1694 **/
1695static int
1696lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1697 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1698{
1699 struct scatterlist *sgde = NULL; /* s/g data entry */
1700 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1701 struct lpfc_pde5 *pde5 = NULL;
1702 struct lpfc_pde6 *pde6 = NULL;
1703 struct lpfc_pde7 *pde7 = NULL;
1704 dma_addr_t dataphysaddr, protphysaddr;
1705 unsigned short curr_data = 0, curr_prot = 0;
1706 unsigned int split_offset;
1707 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1708 unsigned int protgrp_blks, protgrp_bytes;
1709 unsigned int remainder, subtotal;
1710 int status;
1711 int datadir = sc->sc_data_direction;
1712 unsigned char pgdone = 0, alldone = 0;
1713 unsigned blksize;
1714#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1715 uint32_t rc;
1716#endif
1717 uint32_t checking = 1;
1718 uint32_t reftag;
1719 uint8_t txop, rxop;
1720 int num_bde = 0;
1721
1722 sgpe = scsi_prot_sglist(sc);
1723 sgde = scsi_sglist(sc);
1724
1725 if (!sgpe || !sgde) {
1726 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1727 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1728 sgpe, sgde);
1729 return 0;
1730 }
1731
1732 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1733 if (status)
1734 goto out;
1735
1736 /* extract some info from the scsi command */
1737 blksize = lpfc_cmd_blksize(sc);
1738 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1739
1740#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1741 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1742 if (rc) {
1743 if (rc & BG_ERR_SWAP)
1744 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1745 if (rc & BG_ERR_CHECK)
1746 checking = 0;
1747 }
1748#endif
1749
1750 split_offset = 0;
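 /* Build one protection group per iteration: PDE5 (reftag), PDE6
 * (tx/rx ops and check bits), PDE7 (protection BDE), followed by the
 * data BDEs that cover protgrp_bytes of the data s/g list.
 */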
1751 do {
1752 /* Check to see if we ran out of space */
1753 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1754 return num_bde + 3;
1755
1756 /* setup PDE5 with what we have */
1757 pde5 = (struct lpfc_pde5 *) bpl;
1758 memset(pde5, 0, sizeof(struct lpfc_pde5));
1759 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1760
1761 /* Endianness conversion if necessary for PDE5 */
1762 pde5->word0 = cpu_to_le32(pde5->word0);
1763 pde5->reftag = cpu_to_le32(reftag);
1764
1765 /* advance bpl and increment bde count */
1766 num_bde++;
1767 bpl++;
1768 pde6 = (struct lpfc_pde6 *) bpl;
1769
1770 /* setup PDE6 with the rest of the info */
1771 memset(pde6, 0, sizeof(struct lpfc_pde6));
1772 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1773 bf_set(pde6_optx, pde6, txop);
1774 bf_set(pde6_oprx, pde6, rxop);
1775
1776 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1777 bf_set(pde6_ce, pde6, checking);
1778 else
1779 bf_set(pde6_ce, pde6, 0);
1780
1781 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1782 bf_set(pde6_re, pde6, checking);
1783 else
1784 bf_set(pde6_re, pde6, 0);
1785
1786 bf_set(pde6_ai, pde6, 1);
1787 bf_set(pde6_ae, pde6, 0);
1788 bf_set(pde6_apptagval, pde6, 0);
1789
1790 /* Endianness conversion if necessary for PDE6 */
1791 pde6->word0 = cpu_to_le32(pde6->word0);
1792 pde6->word1 = cpu_to_le32(pde6->word1);
1793 pde6->word2 = cpu_to_le32(pde6->word2);
1794
1795 /* advance bpl and increment bde count */
1796 num_bde++;
1797 bpl++;
1798
1799 /* setup the first BDE that points to protection buffer */
1800 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1801 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1802
1803 /* must be integer multiple of the DIF block length */
1804 BUG_ON(protgroup_len % 8);
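		/*
		 * Note: 8 is the size of one DIF tuple (struct
		 * scsi_dif_tuple); each tuple protects one logical block,
		 * so this protection segment covers protgroup_len / 8
		 * blocks of blksize bytes each.
		 */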
1805
1806 pde7 = (struct lpfc_pde7 *) bpl;
1807 memset(pde7, 0, sizeof(struct lpfc_pde7));
1808 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1809
1810 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1811 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1812
1813 protgrp_blks = protgroup_len / 8;
1814 protgrp_bytes = protgrp_blks * blksize;
1815
1816 /* check if this pde is crossing the 4K boundary; if so split */
1817 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1818 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1819 protgroup_offset += protgroup_remainder;
1820 protgrp_blks = protgroup_remainder / 8;
1821 protgrp_bytes = protgrp_blks * blksize;
1822 } else {
1823 protgroup_offset = 0;
1824 curr_prot++;
1825 }
1826
1827 num_bde++;
1828
1829 /* setup BDE's for data blocks associated with DIF data */
1830 pgdone = 0;
1831 subtotal = 0; /* total bytes processed for current prot grp */
1832 while (!pgdone) {
1833 /* Check to see if we ran out of space */
1834 if (num_bde >= phba->cfg_total_seg_cnt)
1835 return num_bde + 1;
1836
1837 if (!sgde) {
1838 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1839 "9065 BLKGRD:%s Invalid data segment\n",
1840 __func__);
1841 return 0;
1842 }
1843 bpl++;
1844 dataphysaddr = sg_dma_address(sgde) + split_offset;
1845 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1846 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1847
1848 remainder = sg_dma_len(sgde) - split_offset;
1849
1850 if ((subtotal + remainder) <= protgrp_bytes) {
1851 /* we can use this whole buffer */
1852 bpl->tus.f.bdeSize = remainder;
1853 split_offset = 0;
1854
1855 if ((subtotal + remainder) == protgrp_bytes)
1856 pgdone = 1;
1857 } else {
1858 /* must split this buffer with next prot grp */
1859 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1860 split_offset += bpl->tus.f.bdeSize;
1861 }
1862
1863 subtotal += bpl->tus.f.bdeSize;
1864
1865 if (datadir == DMA_TO_DEVICE)
1866 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1867 else
1868 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1869 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1870
1871 num_bde++;
1872 curr_data++;
1873
1874 if (split_offset)
1875 break;
1876
1877 /* Move to the next s/g segment if possible */
1878 sgde = sg_next(sgde);
1879
1880 }
1881
1882 if (protgroup_offset) {
1883 /* update the reference tag */
1884 reftag += protgrp_blks;
1885 bpl++;
1886 continue;
1887 }
1888
1889 /* are we done ? */
1890 if (curr_prot == protcnt) {
1891 alldone = 1;
1892 } else if (curr_prot < protcnt) {
1893 /* advance to next prot buffer */
1894 sgpe = sg_next(sgpe);
1895 bpl++;
1896
1897 /* update the reference tag */
1898 reftag += protgrp_blks;
1899 } else {
1900 /* if we're here, we have a bug */
1901 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1902 "9054 BLKGRD: bug in %s\n", __func__);
1903 }
1904
1905 } while (!alldone);
1906out:
1907
1908 return num_bde;
1909}
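
/*
 * Illustrative walk-through of lpfc_bg_setup_bpl_prot() (not driver code,
 * assuming 512-byte blocks and a protection buffer that does not cross a
 * 4K boundary): a WRITE_PASS of 8 blocks mapped as one 4096-byte data
 * segment and one 64-byte protection segment emits
 *
 *	PDE5 + PDE6 + PDE7 (prot BDE) + 1 data BDE  =>  num_bde == 4
 *
 * If the same I/O is mapped as two 2048-byte data segments, the inner loop
 * simply emits two data BDEs for the single protection group (num_bde == 5).
 */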
1910
1911/**
1912 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1913 * @phba: The Hba for which this call is being executed.
1914 * @sc: pointer to scsi command we're working on
1915 * @sgl: pointer to buffer list for protection groups
1916 * @datasegcnt: number of segments of data that have been dma mapped
1917 *
1918 * This function sets up SGL buffer list for protection groups of
1919 * type LPFC_PG_TYPE_NO_DIF
1920 *
1921 * This is usually used when the HBA is instructed to generate
1922 * DIFs and insert them into the data stream (or strip DIFs from
1923 * the incoming data stream)
1924 *
1925 * The buffer list consists of just one protection group described
1926 * below:
1927 * +-------------------------+
1928 * start of prot group --> | DI_SEED |
1929 * +-------------------------+
1930 * | Data SGE |
1931 * +-------------------------+
1932 * |more Data SGE's ... (opt)|
1933 * +-------------------------+
1934 *
1935 *
1936 * Note: Data s/g buffers have been dma mapped
1937 *
1938 * Returns the number of SGEs added to the SGL.
1939 **/
1940static uint32_t
1941lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1942 struct sli4_sge *sgl, int datasegcnt,
1943 struct lpfc_io_buf *lpfc_cmd)
1944{
1945 struct scatterlist *sgde = NULL; /* s/g data entry */
1946 struct sli4_sge_diseed *diseed = NULL;
1947 dma_addr_t physaddr;
1948 int i = 0, status;
1949 uint32_t reftag, num_sge = 0;
1950 uint8_t txop, rxop;
1951#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1952 uint32_t rc;
1953#endif
1954 uint32_t checking = 1;
1955 uint32_t dma_len;
1956 uint32_t dma_offset = 0;
1957 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1958 int j;
1959 bool lsp_just_set = false;
1960
1961 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1962 if (status)
1963 goto out;
1964
1965 /* extract some info from the scsi command for pde*/
1966 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1967
1968#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1969 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1970 if (rc) {
1971 if (rc & BG_ERR_SWAP)
1972 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1973 if (rc & BG_ERR_CHECK)
1974 checking = 0;
1975 }
1976#endif
1977
1978 /* setup DISEED with what we have */
1979 diseed = (struct sli4_sge_diseed *) sgl;
1980 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1981 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1982
1983 /* Endianness conversion if necessary */
1984 diseed->ref_tag = cpu_to_le32(reftag);
1985 diseed->ref_tag_tran = diseed->ref_tag;
1986
1987 /*
1988 * We only need to check the data on READs, for WRITEs
1989 * protection data is automatically generated, not checked.
1990 */
1991 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1992 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1993 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
1994 else
1995 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
1996
1997 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1998 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
1999 else
2000 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2001 }
2002
2003 /* setup DISEED with the rest of the info */
2004 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2005 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2006
2007 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2008 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2009
2010 /* Endianness conversion if necessary for DISEED */
2011 diseed->word2 = cpu_to_le32(diseed->word2);
2012 diseed->word3 = cpu_to_le32(diseed->word3);
2013
2014 /* advance bpl and increment sge count */
2015 num_sge++;
2016 sgl++;
2017
2018 /* assumption: caller has already run dma_map_sg on command data */
2019 sgde = scsi_sglist(sc);
2020 j = 3;
2021 for (i = 0; i < datasegcnt; i++) {
2022 /* clear it */
2023 sgl->word2 = 0;
2024
2025 /* do we need to expand the segment */
2026 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2027 ((datasegcnt - 1) != i)) {
2028 /* set LSP type */
2029 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2030
2031 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2032
2033 if (unlikely(!sgl_xtra)) {
2034 lpfc_cmd->seg_cnt = 0;
2035 return 0;
2036 }
2037 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2038 sgl_xtra->dma_phys_sgl));
2039 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2040 sgl_xtra->dma_phys_sgl));
2041
2042 } else {
2043 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2044 }
2045
2046 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2047 if ((datasegcnt - 1) == i)
2048 bf_set(lpfc_sli4_sge_last, sgl, 1);
2049 physaddr = sg_dma_address(sgde);
2050 dma_len = sg_dma_len(sgde);
2051 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2052 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2053
2054 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2055 sgl->word2 = cpu_to_le32(sgl->word2);
2056 sgl->sge_len = cpu_to_le32(dma_len);
2057
2058 dma_offset += dma_len;
2059 sgde = sg_next(sgde);
2060
2061 sgl++;
2062 num_sge++;
2063 lsp_just_set = false;
2064
2065 } else {
2066 sgl->word2 = cpu_to_le32(sgl->word2);
2067 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2068
2069 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2070 i = i - 1;
2071
2072 lsp_just_set = true;
2073 }
2074
2075 j++;
2076
2077 }
2078
2079out:
2080 return num_sge;
2081}
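
/*
 * Illustrative example for lpfc_bg_setup_sgl() (not driver code): when no
 * extended SGL (LSP) chaining is needed, a command mapped to N data
 * segments produces one DISEED SGE followed by N data SGEs, so the
 * function returns N + 1 and the final data SGE carries the "last" bit.
 */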
2082
2083/**
2084 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2085 * @phba: The Hba for which this call is being executed.
2086 * @sc: pointer to scsi command we're working on
2087 * @sgl: pointer to buffer list for protection groups
2088 * @datacnt: number of segments of data that have been dma mapped
2089 * @protcnt: number of segments of protection data that have been dma mapped
2090 *
2091 * This function sets up SGL buffer list for protection groups of
2092 * type LPFC_PG_TYPE_DIF_BUF
2093 *
2094 * This is usually used when DIFs are in their own buffers,
2095 * separate from the data. The HBA can then be instructed
2096 * to place the DIFs in the outgoing stream. For read operations,
2097 * the HBA can extract the DIFs and place them in DIF buffers.
2098 *
2099 * The buffer list for this type consists of one or more of the
2100 * protection groups described below:
2101 * +-------------------------+
2102 * start of first prot group --> | DISEED |
2103 * +-------------------------+
2104 * | DIF (Prot SGE) |
2105 * +-------------------------+
2106 * | Data SGE |
2107 * +-------------------------+
2108 * |more Data SGE's ... (opt)|
2109 * +-------------------------+
2110 * start of new prot group --> | DISEED |
2111 * +-------------------------+
2112 * | ... |
2113 * +-------------------------+
2114 *
2115 * Note: It is assumed that both data and protection s/g buffers have been
2116 * mapped for DMA
2117 *
2118 * Returns the number of SGEs added to the SGL.
2119 **/
2120static uint32_t
2121lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2122 struct sli4_sge *sgl, int datacnt, int protcnt,
2123 struct lpfc_io_buf *lpfc_cmd)
2124{
2125 struct scatterlist *sgde = NULL; /* s/g data entry */
2126 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2127 struct sli4_sge_diseed *diseed = NULL;
2128 dma_addr_t dataphysaddr, protphysaddr;
2129 unsigned short curr_data = 0, curr_prot = 0;
2130 unsigned int split_offset;
2131 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2132 unsigned int protgrp_blks, protgrp_bytes;
2133 unsigned int remainder, subtotal;
2134 int status;
2135 unsigned char pgdone = 0, alldone = 0;
2136 unsigned blksize;
2137 uint32_t reftag;
2138 uint8_t txop, rxop;
2139 uint32_t dma_len;
2140#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2141 uint32_t rc;
2142#endif
2143 uint32_t checking = 1;
2144 uint32_t dma_offset = 0, num_sge = 0;
2145 int j = 2;
2146 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2147
2148 sgpe = scsi_prot_sglist(sc);
2149 sgde = scsi_sglist(sc);
2150
2151 if (!sgpe || !sgde) {
2152 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2153 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2154 sgpe, sgde);
2155 return 0;
2156 }
2157
2158 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2159 if (status)
2160 goto out;
2161
2162 /* extract some info from the scsi command */
2163 blksize = lpfc_cmd_blksize(sc);
2164 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2165
2166#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2167 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2168 if (rc) {
2169 if (rc & BG_ERR_SWAP)
2170 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2171 if (rc & BG_ERR_CHECK)
2172 checking = 0;
2173 }
2174#endif
2175
2176 split_offset = 0;
2177 do {
2178 /* Check to see if we ran out of space */
2179 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2180 !(phba->cfg_xpsgl))
2181 return num_sge + 3;
2182
2183 /* DISEED and DIF have to be together */
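		/*
		 * If the DISEED, its DIF SGE, or the first data SGE would
		 * land on the last entry of the current SGL page
		 * (border_sge_num), chain to an extra per-hdwq SGL (LSP)
		 * now so the DISEED/DIF pair is not split across SGL
		 * segments.
		 */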
2184 if (!((j + 1) % phba->border_sge_num) ||
2185 !((j + 2) % phba->border_sge_num) ||
2186 !((j + 3) % phba->border_sge_num)) {
2187 sgl->word2 = 0;
2188
2189 /* set LSP type */
2190 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2191
2192 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2193
2194 if (unlikely(!sgl_xtra)) {
2195 goto out;
2196 } else {
2197 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2198 sgl_xtra->dma_phys_sgl));
2199 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2200 sgl_xtra->dma_phys_sgl));
2201 }
2202
2203 sgl->word2 = cpu_to_le32(sgl->word2);
2204 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2205
2206 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2207 j = 0;
2208 }
2209
2210 /* setup DISEED with what we have */
2211 diseed = (struct sli4_sge_diseed *) sgl;
2212 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2213 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2214
2215 /* Endianness conversion if necessary */
2216 diseed->ref_tag = cpu_to_le32(reftag);
2217 diseed->ref_tag_tran = diseed->ref_tag;
2218
2219 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2220 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2221
2222 } else {
2223 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2224 /*
2225 * When in this mode, the hardware will replace
2226 * the guard tag from the host with a
2227 * newly generated good CRC for the wire.
2228 * Switch to raw mode here to avoid this
2229 * behavior. What the host sends gets put on the wire.
2230 */
2231 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2232 txop = BG_OP_RAW_MODE;
2233 rxop = BG_OP_RAW_MODE;
2234 }
2235 }
2236
2237
2238 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2239 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2240 else
2241 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2242
2243 /* setup DISEED with the rest of the info */
2244 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2245 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2246
2247 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2248 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2249
2250 /* Endianness conversion if necessary for DISEED */
2251 diseed->word2 = cpu_to_le32(diseed->word2);
2252 diseed->word3 = cpu_to_le32(diseed->word3);
2253
2254 /* advance sgl and increment bde count */
2255 num_sge++;
2256
2257 sgl++;
2258 j++;
2259
2260 /* setup the first BDE that points to protection buffer */
2261 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2262 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2263
2264 /* must be integer multiple of the DIF block length */
2265 BUG_ON(protgroup_len % 8);
2266
2267 /* Now setup DIF SGE */
2268 sgl->word2 = 0;
2269 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2270 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2271 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2272 sgl->word2 = cpu_to_le32(sgl->word2);
2273 sgl->sge_len = 0;
2274
2275 protgrp_blks = protgroup_len / 8;
2276 protgrp_bytes = protgrp_blks * blksize;
2277
2278 /* check if DIF SGE is crossing the 4K boundary; if so split */
2279 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2280 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2281 protgroup_offset += protgroup_remainder;
2282 protgrp_blks = protgroup_remainder / 8;
2283 protgrp_bytes = protgrp_blks * blksize;
2284 } else {
2285 protgroup_offset = 0;
2286 curr_prot++;
2287 }
2288
2289 num_sge++;
2290
2291 /* setup SGE's for data blocks associated with DIF data */
2292 pgdone = 0;
2293 subtotal = 0; /* total bytes processed for current prot grp */
2294
2295 sgl++;
2296 j++;
2297
2298 while (!pgdone) {
2299 /* Check to see if we ran out of space */
2300 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2301 !phba->cfg_xpsgl)
2302 return num_sge + 1;
2303
2304 if (!sgde) {
2305 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2306 "9086 BLKGRD:%s Invalid data segment\n",
2307 __func__);
2308 return 0;
2309 }
2310
2311 if (!((j + 1) % phba->border_sge_num)) {
2312 sgl->word2 = 0;
2313
2314 /* set LSP type */
2315 bf_set(lpfc_sli4_sge_type, sgl,
2316 LPFC_SGE_TYPE_LSP);
2317
2318 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2319 lpfc_cmd);
2320
2321 if (unlikely(!sgl_xtra)) {
2322 goto out;
2323 } else {
2324 sgl->addr_lo = cpu_to_le32(
2325 putPaddrLow(sgl_xtra->dma_phys_sgl));
2326 sgl->addr_hi = cpu_to_le32(
2327 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2328 }
2329
2330 sgl->word2 = cpu_to_le32(sgl->word2);
2331 sgl->sge_len = cpu_to_le32(
2332 phba->cfg_sg_dma_buf_size);
2333
2334 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2335 } else {
2336 dataphysaddr = sg_dma_address(sgde) +
2337 split_offset;
2338
2339 remainder = sg_dma_len(sgde) - split_offset;
2340
2341 if ((subtotal + remainder) <= protgrp_bytes) {
2342 /* we can use this whole buffer */
2343 dma_len = remainder;
2344 split_offset = 0;
2345
2346 if ((subtotal + remainder) ==
2347 protgrp_bytes)
2348 pgdone = 1;
2349 } else {
2350 /* must split this buffer with next
2351 * prot grp
2352 */
2353 dma_len = protgrp_bytes - subtotal;
2354 split_offset += dma_len;
2355 }
2356
2357 subtotal += dma_len;
2358
2359 sgl->word2 = 0;
2360 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2361 dataphysaddr));
2362 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2363 dataphysaddr));
2364 bf_set(lpfc_sli4_sge_last, sgl, 0);
2365 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2366 bf_set(lpfc_sli4_sge_type, sgl,
2367 LPFC_SGE_TYPE_DATA);
2368
2369 sgl->sge_len = cpu_to_le32(dma_len);
2370 dma_offset += dma_len;
2371
2372 num_sge++;
2373 curr_data++;
2374
2375 if (split_offset) {
2376 sgl++;
2377 j++;
2378 break;
2379 }
2380
2381 /* Move to the next s/g segment if possible */
2382 sgde = sg_next(sgde);
2383
2384 sgl++;
2385 }
2386
2387 j++;
2388 }
2389
2390 if (protgroup_offset) {
2391 /* update the reference tag */
2392 reftag += protgrp_blks;
2393 continue;
2394 }
2395
2396 /* are we done ? */
2397 if (curr_prot == protcnt) {
2398 /* mark the last SGL */
2399 sgl--;
2400 bf_set(lpfc_sli4_sge_last, sgl, 1);
2401 alldone = 1;
2402 } else if (curr_prot < protcnt) {
2403 /* advance to next prot buffer */
2404 sgpe = sg_next(sgpe);
2405
2406 /* update the reference tag */
2407 reftag += protgrp_blks;
2408 } else {
2409 /* if we're here, we have a bug */
2410 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2411 "9085 BLKGRD: bug in %s\n", __func__);
2412 }
2413
2414 } while (!alldone);
2415
2416out:
2417
2418 return num_sge;
2419}
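
/*
 * Illustrative example for lpfc_bg_setup_sgl_prot() (not driver code,
 * assuming 512-byte blocks, no LSP chaining and no 4K split): a WRITE_PASS
 * of 8 blocks mapped as one 4096-byte data segment and one 64-byte
 * protection segment produces DISEED + DIF SGE + one data SGE, so the
 * function returns num_sge == 3.
 */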
2420
2421/**
2422 * lpfc_prot_group_type - Get protection group type of SCSI command
2423 * @phba: The Hba for which this call is being executed.
2424 * @sc: pointer to scsi command we're working on
2425 *
2426 * Given a SCSI command that supports DIF, determine composition of protection
2427 * groups involved in setting up buffer lists
2428 *
2429 * Returns: Protection group type (with or without DIF)
2430 *
2431 **/
2432static int
2433lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2434{
2435 int ret = LPFC_PG_TYPE_INVALID;
2436 unsigned char op = scsi_get_prot_op(sc);
2437
2438 switch (op) {
2439 case SCSI_PROT_READ_STRIP:
2440 case SCSI_PROT_WRITE_INSERT:
2441 ret = LPFC_PG_TYPE_NO_DIF;
2442 break;
2443 case SCSI_PROT_READ_INSERT:
2444 case SCSI_PROT_WRITE_STRIP:
2445 case SCSI_PROT_READ_PASS:
2446 case SCSI_PROT_WRITE_PASS:
2447 ret = LPFC_PG_TYPE_DIF_BUF;
2448 break;
2449 default:
2450 if (phba)
2451 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2452 "9021 Unsupported protection op:%d\n",
2453 op);
2454 break;
2455 }
2456 return ret;
2457}
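
/*
 * For example, a WRITE_10 issued with scsi_get_prot_op() ==
 * SCSI_PROT_WRITE_PASS maps to LPFC_PG_TYPE_DIF_BUF: the midlayer hands
 * the driver separate data and protection scatterlists and the HBA checks
 * the DIF and forwards it to the wire.
 */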
2458
2459/**
2460 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2461 * @phba: The Hba for which this call is being executed.
2462 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2463 *
2464 * Adjust the data length to account for how much data
2465 * is actually on the wire.
2466 *
2467 * returns the adjusted data length
2468 **/
2469static int
2470lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2471 struct lpfc_io_buf *lpfc_cmd)
2472{
2473 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2474 int fcpdl;
2475
2476 fcpdl = scsi_bufflen(sc);
2477
2478 /* Check if there is protection data on the wire */
2479 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2480 /* Read check for protection data */
2481 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2482 return fcpdl;
2483
2484 } else {
2485 /* Write check for protection data */
2486 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2487 return fcpdl;
2488 }
2489
2490 /*
2491 * If we are in DIF Type 1 mode every data block has an 8 byte
2492 * DIF (trailer) attached to it. Must adjust FCP data length
2493 * to account for the protection data.
2494 */
2495 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2496
2497 return fcpdl;
2498}
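
/*
 * Worked example (illustrative only): for a 64 KiB WRITE_PASS with
 * 512-byte logical blocks, protection data travels on the wire, so
 *
 *	fcpdl = 65536 + (65536 / 512) * 8 = 65536 + 1024 = 66560 bytes
 *
 * For READ_INSERT and WRITE_STRIP the early returns above leave fcpdl at
 * scsi_bufflen(), since no DIF is transferred on the wire in those modes.
 */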
2499
2500/**
2501 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2502 * @phba: The Hba for which this call is being executed.
2503 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2504 *
2505 * This is the protection/DIF aware version of
2506 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2507 * two functions eventually, but for now, it's here.
2508 * RETURNS 0 - SUCCESS,
2509 * 1 - Failed DMA map, retry.
2510 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2511 **/
2512static int
2513lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2514 struct lpfc_io_buf *lpfc_cmd)
2515{
2516 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2517 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2518 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2519 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2520 uint32_t num_bde = 0;
2521 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2522 int prot_group_type = 0;
2523 int fcpdl;
2524 int ret = 1;
2525 struct lpfc_vport *vport = phba->pport;
2526
2527 /*
2528 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
2529 * and fcp_rsp regions to the first data bde entry
2530 */
2531 bpl += 2;
2532 if (scsi_sg_count(scsi_cmnd)) {
2533 /*
2534 * The driver stores the segment count returned from pci_map_sg
2535 * because this is a count of dma-mappings used to map the use_sg
2536 * pages. They are not guaranteed to be the same for those
2537 * architectures that implement an IOMMU.
2538 */
2539 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2540 scsi_sglist(scsi_cmnd),
2541 scsi_sg_count(scsi_cmnd), datadir);
2542 if (unlikely(!datasegcnt))
2543 return 1;
2544
2545 lpfc_cmd->seg_cnt = datasegcnt;
2546
2547 /* First check if data segment count from SCSI Layer is good */
2548 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2549 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2550 ret = 2;
2551 goto err;
2552 }
2553
2554 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2555
2556 switch (prot_group_type) {
2557 case LPFC_PG_TYPE_NO_DIF:
2558
2559 /* Here we need to add a PDE5 and PDE6 to the count */
2560 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2561 ret = 2;
2562 goto err;
2563 }
2564
2565 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2566 datasegcnt);
2567 /* we should have 2 or more entries in buffer list */
2568 if (num_bde < 2) {
2569 ret = 2;
2570 goto err;
2571 }
2572 break;
2573
2574 case LPFC_PG_TYPE_DIF_BUF:
2575 /*
2576 * This type indicates that protection buffers are
2577 * passed to the driver, so that needs to be prepared
2578 * for DMA
2579 */
2580 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2581 scsi_prot_sglist(scsi_cmnd),
2582 scsi_prot_sg_count(scsi_cmnd), datadir);
2583 if (unlikely(!protsegcnt)) {
2584 scsi_dma_unmap(scsi_cmnd);
2585 return 1;
2586 }
2587
2588 lpfc_cmd->prot_seg_cnt = protsegcnt;
2589
2590 /*
2591 * There is a minimum of 4 BPLs used for every
2592 * protection data segment.
2593 */
2594 if ((lpfc_cmd->prot_seg_cnt * 4) >
2595 (phba->cfg_total_seg_cnt - 2)) {
2596 ret = 2;
2597 goto err;
2598 }
2599
2600 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2601 datasegcnt, protsegcnt);
2602 /* we should have 3 or more entries in buffer list */
2603 if ((num_bde < 3) ||
2604 (num_bde > phba->cfg_total_seg_cnt)) {
2605 ret = 2;
2606 goto err;
2607 }
2608 break;
2609
2610 case LPFC_PG_TYPE_INVALID:
2611 default:
2612 scsi_dma_unmap(scsi_cmnd);
2613 lpfc_cmd->seg_cnt = 0;
2614
2615 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2616 "9022 Unexpected protection group %i\n",
2617 prot_group_type);
2618 return 2;
2619 }
2620 }
2621
2622 /*
2623 * Finish initializing those IOCB fields that are dependent on the
2624 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2625 * reinitialized since all iocb memory resources are used many times
2626 * for transmit, receive, and continuation bpl's.
2627 */
2628 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2629 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2630 iocb_cmd->ulpBdeCount = 1;
2631 iocb_cmd->ulpLe = 1;
2632
2633 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2634 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2635
2636 /*
2637 * Due to difference in data length between DIF/non-DIF paths,
2638 * we need to set word 4 of IOCB here
2639 */
2640 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2641
2642 /*
2643 * For First burst, we may need to adjust the initial transfer
2644 * length for DIF
2645 */
2646 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2647 (fcpdl < vport->cfg_first_burst_size))
2648 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2649
2650 return 0;
2651err:
2652 if (lpfc_cmd->seg_cnt)
2653 scsi_dma_unmap(scsi_cmnd);
2654 if (lpfc_cmd->prot_seg_cnt)
2655 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2656 scsi_prot_sg_count(scsi_cmnd),
2657 scsi_cmnd->sc_data_direction);
2658
2659 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2660 			"9023 Cannot setup S/G List for HBA "
2661 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2662 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2663 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2664 prot_group_type, num_bde);
2665
2666 lpfc_cmd->seg_cnt = 0;
2667 lpfc_cmd->prot_seg_cnt = 0;
2668 return ret;
2669}
2670
2671/*
2672 * This function calculates the T10 DIF guard tag
2673 * on the specified data using the CRC algorithm
2674 * provided by crc_t10dif.
2675 */
2676static uint16_t
2677lpfc_bg_crc(uint8_t *data, int count)
2678{
2679 uint16_t crc = 0;
2680 uint16_t x;
2681
2682 crc = crc_t10dif(data, count);
2683 x = cpu_to_be16(crc);
2684 return x;
2685}
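
/*
 * Note: the CRC is returned already converted to big-endian byte order so
 * that it can be compared directly against the __be16 guard_tag field of
 * struct scsi_dif_tuple in lpfc_calc_bg_err() below.
 */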
2686
2687/*
2688 * This function calculates the T10 DIF guard tag
2689 * on the specified data using the IP checksum algorithm
2690 * provided by ip_compute_csum.
2691 */
2692static uint16_t
2693lpfc_bg_csum(uint8_t *data, int count)
2694{
2695 uint16_t ret;
2696
2697 ret = ip_compute_csum(data, count);
2698 return ret;
2699}
2700
2701/*
2702 * This function examines the protection data to try to determine
2703 * what type of T10-DIF error occurred.
2704 */
2705static void
2706lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2707{
2708 struct scatterlist *sgpe; /* s/g prot entry */
2709 struct scatterlist *sgde; /* s/g data entry */
2710 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2711 struct scsi_dif_tuple *src = NULL;
2712 uint8_t *data_src = NULL;
2713 uint16_t guard_tag;
2714 uint16_t start_app_tag, app_tag;
2715 uint32_t start_ref_tag, ref_tag;
2716 int prot, protsegcnt;
2717 int err_type, len, data_len;
2718 int chk_ref, chk_app, chk_guard;
2719 uint16_t sum;
2720 unsigned blksize;
2721
2722 err_type = BGS_GUARD_ERR_MASK;
2723 sum = 0;
2724 guard_tag = 0;
2725
2726 /* First check to see if there is protection data to examine */
2727 prot = scsi_get_prot_op(cmd);
2728 if ((prot == SCSI_PROT_READ_STRIP) ||
2729 (prot == SCSI_PROT_WRITE_INSERT) ||
2730 (prot == SCSI_PROT_NORMAL))
2731 goto out;
2732
2733 /* Currently the driver just supports ref_tag and guard_tag checking */
2734 chk_ref = 1;
2735 chk_app = 0;
2736 chk_guard = 0;
2737
2738 /* Setup a ptr to the protection data provided by the SCSI host */
2739 sgpe = scsi_prot_sglist(cmd);
2740 protsegcnt = lpfc_cmd->prot_seg_cnt;
2741
2742 if (sgpe && protsegcnt) {
2743
2744 /*
2745 * We will only try to verify guard tag if the segment
2746 * data length is a multiple of the blksize.
2747 */
2748 sgde = scsi_sglist(cmd);
2749 blksize = lpfc_cmd_blksize(cmd);
2750 data_src = (uint8_t *)sg_virt(sgde);
2751 data_len = sgde->length;
2752 if ((data_len & (blksize - 1)) == 0)
2753 chk_guard = 1;
2754
2755 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2756 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2757 start_app_tag = src->app_tag;
2758 len = sgpe->length;
2759 while (src && protsegcnt) {
2760 while (len) {
2761
2762 /*
2763 * First check to see if a protection data
2764 * check is valid
2765 */
2766 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2767 (src->app_tag == T10_PI_APP_ESCAPE)) {
2768 start_ref_tag++;
2769 goto skipit;
2770 }
2771
2772 /* First Guard Tag checking */
2773 if (chk_guard) {
2774 guard_tag = src->guard_tag;
2775 if (lpfc_cmd_guard_csum(cmd))
2776 sum = lpfc_bg_csum(data_src,
2777 blksize);
2778 else
2779 sum = lpfc_bg_crc(data_src,
2780 blksize);
2781 if ((guard_tag != sum)) {
2782 err_type = BGS_GUARD_ERR_MASK;
2783 goto out;
2784 }
2785 }
2786
2787 /* Reference Tag checking */
2788 ref_tag = be32_to_cpu(src->ref_tag);
2789 if (chk_ref && (ref_tag != start_ref_tag)) {
2790 err_type = BGS_REFTAG_ERR_MASK;
2791 goto out;
2792 }
2793 start_ref_tag++;
2794
2795 /* App Tag checking */
2796 app_tag = src->app_tag;
2797 if (chk_app && (app_tag != start_app_tag)) {
2798 err_type = BGS_APPTAG_ERR_MASK;
2799 goto out;
2800 }
2801skipit:
2802 len -= sizeof(struct scsi_dif_tuple);
2803 if (len < 0)
2804 len = 0;
2805 src++;
2806
2807 data_src += blksize;
2808 data_len -= blksize;
2809
2810 /*
2811 * Are we at the end of the Data segment?
2812 * The data segment is only used for Guard
2813 * tag checking.
2814 */
2815 if (chk_guard && (data_len == 0)) {
2816 chk_guard = 0;
2817 sgde = sg_next(sgde);
2818 if (!sgde)
2819 goto out;
2820
2821 data_src = (uint8_t *)sg_virt(sgde);
2822 data_len = sgde->length;
2823 if ((data_len & (blksize - 1)) == 0)
2824 chk_guard = 1;
2825 }
2826 }
2827
2828 /* Goto the next Protection data segment */
2829 sgpe = sg_next(sgpe);
2830 if (sgpe) {
2831 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2832 len = sgpe->length;
2833 } else {
2834 src = NULL;
2835 }
2836 protsegcnt--;
2837 }
2838 }
2839out:
2840 if (err_type == BGS_GUARD_ERR_MASK) {
2841 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2842 0x10, 0x1);
2843 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2844 SAM_STAT_CHECK_CONDITION;
2845 phba->bg_guard_err_cnt++;
2846 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2847 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2848 (unsigned long)scsi_get_lba(cmd),
2849 sum, guard_tag);
2850
2851 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2852 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2853 0x10, 0x3);
2854 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2855 SAM_STAT_CHECK_CONDITION;
2856
2857 phba->bg_reftag_err_cnt++;
2858 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2859 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2860 (unsigned long)scsi_get_lba(cmd),
2861 ref_tag, start_ref_tag);
2862
2863 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2864 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2865 0x10, 0x2);
2866 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2867 SAM_STAT_CHECK_CONDITION;
2868
2869 phba->bg_apptag_err_cnt++;
2870 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2871 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2872 (unsigned long)scsi_get_lba(cmd),
2873 app_tag, start_app_tag);
2874 }
2875}
2876
2877
2878/*
2879 * This function checks for BlockGuard errors detected by
2880 * the HBA. In case of errors, the ASC/ASCQ fields in the
2881 * sense buffer will be set accordingly, paired with
2882 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2883 * detected corruption.
2884 *
2885 * Returns:
2886 * 0 - No error found
2887 * 1 - BlockGuard error found
2888 * -1 - Internal error (bad profile, ...etc)
2889 */
2890static int
2891lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2892 struct lpfc_iocbq *pIocbOut)
2893{
2894 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2895 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2896 int ret = 0;
2897 uint32_t bghm = bgf->bghm;
2898 uint32_t bgstat = bgf->bgstat;
2899 uint64_t failing_sector = 0;
2900
2901 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2902 cmd->result = DID_ERROR << 16;
2903 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2904 "9072 BLKGRD: Invalid BG Profile in cmd"
2905 " 0x%x lba 0x%llx blk cnt 0x%x "
2906 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2907 (unsigned long long)scsi_get_lba(cmd),
2908 blk_rq_sectors(cmd->request), bgstat, bghm);
2909 ret = (-1);
2910 goto out;
2911 }
2912
2913 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2914 cmd->result = DID_ERROR << 16;
2915 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2916 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2917 " 0x%x lba 0x%llx blk cnt 0x%x "
2918 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2919 (unsigned long long)scsi_get_lba(cmd),
2920 blk_rq_sectors(cmd->request), bgstat, bghm);
2921 ret = (-1);
2922 goto out;
2923 }
2924
2925 if (lpfc_bgs_get_guard_err(bgstat)) {
2926 ret = 1;
2927
2928 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2929 0x10, 0x1);
2930 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2931 SAM_STAT_CHECK_CONDITION;
2932 phba->bg_guard_err_cnt++;
2933 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2934 "9055 BLKGRD: Guard Tag error in cmd"
2935 " 0x%x lba 0x%llx blk cnt 0x%x "
2936 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2937 (unsigned long long)scsi_get_lba(cmd),
2938 blk_rq_sectors(cmd->request), bgstat, bghm);
2939 }
2940
2941 if (lpfc_bgs_get_reftag_err(bgstat)) {
2942 ret = 1;
2943
2944 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2945 0x10, 0x3);
2946 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2947 SAM_STAT_CHECK_CONDITION;
2948
2949 phba->bg_reftag_err_cnt++;
2950 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2951 "9056 BLKGRD: Ref Tag error in cmd"
2952 " 0x%x lba 0x%llx blk cnt 0x%x "
2953 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2954 (unsigned long long)scsi_get_lba(cmd),
2955 blk_rq_sectors(cmd->request), bgstat, bghm);
2956 }
2957
2958 if (lpfc_bgs_get_apptag_err(bgstat)) {
2959 ret = 1;
2960
2961 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2962 0x10, 0x2);
2963 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2964 SAM_STAT_CHECK_CONDITION;
2965
2966 phba->bg_apptag_err_cnt++;
2967 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2968 "9061 BLKGRD: App Tag error in cmd"
2969 " 0x%x lba 0x%llx blk cnt 0x%x "
2970 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2971 (unsigned long long)scsi_get_lba(cmd),
2972 blk_rq_sectors(cmd->request), bgstat, bghm);
2973 }
2974
2975 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2976 /*
2977 * setup sense data descriptor 0 per SPC-4 as an information
2978 * field, and put the failing LBA in it.
2979 * This code assumes there was also a guard/app/ref tag error
2980 * indication.
2981 */
2982 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2983 cmd->sense_buffer[8] = 0; /* Information descriptor type */
2984 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
2985 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2986
2987 		/* bghm is an "on the wire" FC frame based count */
2988 switch (scsi_get_prot_op(cmd)) {
2989 case SCSI_PROT_READ_INSERT:
2990 case SCSI_PROT_WRITE_STRIP:
2991 bghm /= cmd->device->sector_size;
2992 break;
2993 case SCSI_PROT_READ_STRIP:
2994 case SCSI_PROT_WRITE_INSERT:
2995 case SCSI_PROT_READ_PASS:
2996 case SCSI_PROT_WRITE_PASS:
2997 bghm /= (cmd->device->sector_size +
2998 sizeof(struct scsi_dif_tuple));
2999 break;
3000 }
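		/*
		 * Example: with 512-byte sectors, bghm is divided by 512
		 * when only data is on the wire (READ_INSERT/WRITE_STRIP)
		 * and by 520 (512 + 8-byte DIF tuple) when protection data
		 * travels with each block, yielding the block offset of the
		 * failure within this I/O.
		 */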
3001
3002 failing_sector = scsi_get_lba(cmd);
3003 failing_sector += bghm;
3004
3005 /* Descriptor Information */
3006 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3007 }
3008
3009 if (!ret) {
3010 /* No error was reported - problem in FW? */
3011 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3012 "9057 BLKGRD: Unknown error in cmd"
3013 " 0x%x lba 0x%llx blk cnt 0x%x "
3014 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3015 (unsigned long long)scsi_get_lba(cmd),
3016 blk_rq_sectors(cmd->request), bgstat, bghm);
3017
3018 		/* Calculate what type of error it was */
3019 lpfc_calc_bg_err(phba, lpfc_cmd);
3020 }
3021out:
3022 return ret;
3023}
3024
3025/**
3026 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3027 * @phba: The Hba for which this call is being executed.
3028 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3029 *
3030 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3031 * field of @lpfc_cmd for device with SLI-4 interface spec.
3032 *
3033 * Return codes:
3034 * 2 - Error - Do not retry
3035 * 1 - Error - Retry
3036 * 0 - Success
3037 **/
3038static int
3039lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3040{
3041 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3042 struct scatterlist *sgel = NULL;
3043 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3044 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3045 struct sli4_sge *first_data_sgl;
3046 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3047 dma_addr_t physaddr;
3048 uint32_t num_bde = 0;
3049 uint32_t dma_len;
3050 uint32_t dma_offset = 0;
3051 int nseg, i, j;
3052 struct ulp_bde64 *bde;
3053 bool lsp_just_set = false;
3054 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3055
3056 /*
3057 * There are three possibilities here - use scatter-gather segment, use
3058 * the single mapping, or neither. Start the lpfc command prep by
3059 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3060 * data bde entry.
3061 */
3062 if (scsi_sg_count(scsi_cmnd)) {
3063 /*
3064 * The driver stores the segment count returned from pci_map_sg
3065 * because this is a count of dma-mappings used to map the use_sg
3066 * pages. They are not guaranteed to be the same for those
3067 * architectures that implement an IOMMU.
3068 */
3069
3070 nseg = scsi_dma_map(scsi_cmnd);
3071 if (unlikely(nseg <= 0))
3072 return 1;
3073 sgl += 1;
3074 /* clear the last flag in the fcp_rsp map entry */
3075 sgl->word2 = le32_to_cpu(sgl->word2);
3076 bf_set(lpfc_sli4_sge_last, sgl, 0);
3077 sgl->word2 = cpu_to_le32(sgl->word2);
3078 sgl += 1;
3079 first_data_sgl = sgl;
3080 lpfc_cmd->seg_cnt = nseg;
3081 if (!phba->cfg_xpsgl &&
3082 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3083 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3084 " %s: Too many sg segments from "
3085 "dma_map_sg. Config %d, seg_cnt %d\n",
3086 __func__, phba->cfg_sg_seg_cnt,
3087 lpfc_cmd->seg_cnt);
3088 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3089 lpfc_cmd->seg_cnt = 0;
3090 scsi_dma_unmap(scsi_cmnd);
3091 return 2;
3092 }
3093
3094 /*
3095 * The driver established a maximum scatter-gather segment count
3096 * during probe that limits the number of sg elements in any
3097 * single scsi command. Just run through the seg_cnt and format
3098 * the sge's.
3099 * When using SLI-3 the driver will try to fit all the BDEs into
3100 * the IOCB. If it can't then the BDEs get added to a BPL as it
3101 * does for SLI-2 mode.
3102 */
3103
3104 /* for tracking segment boundaries */
3105 sgel = scsi_sglist(scsi_cmnd);
3106 j = 2;
3107 for (i = 0; i < nseg; i++) {
3108 sgl->word2 = 0;
3109 if ((num_bde + 1) == nseg) {
3110 bf_set(lpfc_sli4_sge_last, sgl, 1);
3111 bf_set(lpfc_sli4_sge_type, sgl,
3112 LPFC_SGE_TYPE_DATA);
3113 } else {
3114 bf_set(lpfc_sli4_sge_last, sgl, 0);
3115
3116 /* do we need to expand the segment */
3117 if (!lsp_just_set &&
3118 !((j + 1) % phba->border_sge_num) &&
3119 ((nseg - 1) != i)) {
3120 /* set LSP type */
3121 bf_set(lpfc_sli4_sge_type, sgl,
3122 LPFC_SGE_TYPE_LSP);
3123
3124 sgl_xtra = lpfc_get_sgl_per_hdwq(
3125 phba, lpfc_cmd);
3126
3127 if (unlikely(!sgl_xtra)) {
3128 lpfc_cmd->seg_cnt = 0;
3129 scsi_dma_unmap(scsi_cmnd);
3130 return 1;
3131 }
3132 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3133 sgl_xtra->dma_phys_sgl));
3134 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3135 sgl_xtra->dma_phys_sgl));
3136
3137 } else {
3138 bf_set(lpfc_sli4_sge_type, sgl,
3139 LPFC_SGE_TYPE_DATA);
3140 }
3141 }
3142
3143 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3144 LPFC_SGE_TYPE_LSP)) {
3145 if ((nseg - 1) == i)
3146 bf_set(lpfc_sli4_sge_last, sgl, 1);
3147
3148 physaddr = sg_dma_address(sgel);
3149 dma_len = sg_dma_len(sgel);
3150 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3151 physaddr));
3152 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3153 physaddr));
3154
3155 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3156 sgl->word2 = cpu_to_le32(sgl->word2);
3157 sgl->sge_len = cpu_to_le32(dma_len);
3158
3159 dma_offset += dma_len;
3160 sgel = sg_next(sgel);
3161
3162 sgl++;
3163 lsp_just_set = false;
3164
3165 } else {
3166 sgl->word2 = cpu_to_le32(sgl->word2);
3167 sgl->sge_len = cpu_to_le32(
3168 phba->cfg_sg_dma_buf_size);
3169
3170 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3171 i = i - 1;
3172
3173 lsp_just_set = true;
3174 }
3175
3176 j++;
3177 }
3178 /*
3179 * Setup the first Payload BDE. For FCoE we just key off
3180 * Performance Hints, for FC we use lpfc_enable_pbde.
3181 * We populate words 13-15 of IOCB/WQE.
3182 */
3183 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3184 phba->cfg_enable_pbde) {
3185 bde = (struct ulp_bde64 *)
3186 &(iocb_cmd->unsli3.sli3Words[5]);
3187 bde->addrLow = first_data_sgl->addr_lo;
3188 bde->addrHigh = first_data_sgl->addr_hi;
3189 bde->tus.f.bdeSize =
3190 le32_to_cpu(first_data_sgl->sge_len);
3191 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3192 bde->tus.w = cpu_to_le32(bde->tus.w);
3193 }
3194 } else {
3195 sgl += 1;
3196 /* clear the last flag in the fcp_rsp map entry */
3197 sgl->word2 = le32_to_cpu(sgl->word2);
3198 bf_set(lpfc_sli4_sge_last, sgl, 1);
3199 sgl->word2 = cpu_to_le32(sgl->word2);
3200
3201 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3202 phba->cfg_enable_pbde) {
3203 bde = (struct ulp_bde64 *)
3204 &(iocb_cmd->unsli3.sli3Words[5]);
3205 memset(bde, 0, (sizeof(uint32_t) * 3));
3206 }
3207 }
3208
3209 /*
3210 * Finish initializing those IOCB fields that are dependent on the
3211 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3212 * explicitly reinitialized since
3213 * all iocb memory resources are reused.
3214 */
3215 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3216
3217 /*
3218 * Due to difference in data length between DIF/non-DIF paths,
3219 * we need to set word 4 of IOCB here
3220 */
3221 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3222
3223 /*
3224 * If the OAS driver feature is enabled and the lun is enabled for
3225 * OAS, set the oas iocb related flags.
3226 */
3227 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3228 scsi_cmnd->device->hostdata)->oas_enabled) {
3229 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3230 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3231 scsi_cmnd->device->hostdata)->priority;
3232 }
3233
3234 return 0;
3235}
3236
3237/**
3238 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3239 * @phba: The Hba for which this call is being executed.
3240 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3241 *
3242 * This is the protection/DIF aware version of
3243 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3244 * two functions eventually, but for now, it's here
3245 * Return codes:
3246 * 2 - Error - Do not retry
3247 * 1 - Error - Retry
3248 * 0 - Success
3249 **/
3250static int
3251lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3252 struct lpfc_io_buf *lpfc_cmd)
3253{
3254 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3255 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3256 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3257 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3258 uint32_t num_sge = 0;
3259 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3260 int prot_group_type = 0;
3261 int fcpdl;
3262 int ret = 1;
3263 struct lpfc_vport *vport = phba->pport;
3264
3265 /*
3266 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
3267 * and fcp_rsp regions to the first data sge entry
3268 */
3269 if (scsi_sg_count(scsi_cmnd)) {
3270 /*
3271 * The driver stores the segment count returned from pci_map_sg
3272 * because this is a count of dma-mappings used to map the use_sg
3273 * pages. They are not guaranteed to be the same for those
3274 * architectures that implement an IOMMU.
3275 */
3276 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3277 scsi_sglist(scsi_cmnd),
3278 scsi_sg_count(scsi_cmnd), datadir);
3279 if (unlikely(!datasegcnt))
3280 return 1;
3281
3282 sgl += 1;
3283 /* clear the last flag in the fcp_rsp map entry */
3284 sgl->word2 = le32_to_cpu(sgl->word2);
3285 bf_set(lpfc_sli4_sge_last, sgl, 0);
3286 sgl->word2 = cpu_to_le32(sgl->word2);
3287
3288 sgl += 1;
3289 lpfc_cmd->seg_cnt = datasegcnt;
3290
3291 /* First check if data segment count from SCSI Layer is good */
3292 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3293 !phba->cfg_xpsgl) {
3294 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3295 ret = 2;
3296 goto err;
3297 }
3298
3299 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3300
3301 switch (prot_group_type) {
3302 case LPFC_PG_TYPE_NO_DIF:
3303 /* Here we need to add a DISEED to the count */
3304 if (((lpfc_cmd->seg_cnt + 1) >
3305 phba->cfg_total_seg_cnt) &&
3306 !phba->cfg_xpsgl) {
3307 ret = 2;
3308 goto err;
3309 }
3310
3311 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3312 datasegcnt, lpfc_cmd);
3313
3314 /* we should have 2 or more entries in buffer list */
3315 if (num_sge < 2) {
3316 ret = 2;
3317 goto err;
3318 }
3319 break;
3320
3321 case LPFC_PG_TYPE_DIF_BUF:
3322 /*
3323 * This type indicates that protection buffers are
3324 * passed to the driver, so that needs to be prepared
3325 * for DMA
3326 */
3327 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3328 scsi_prot_sglist(scsi_cmnd),
3329 scsi_prot_sg_count(scsi_cmnd), datadir);
3330 if (unlikely(!protsegcnt)) {
3331 scsi_dma_unmap(scsi_cmnd);
3332 return 1;
3333 }
3334
3335 lpfc_cmd->prot_seg_cnt = protsegcnt;
3336 /*
3337 * There is a minimum of 3 SGEs used for every
3338 * protection data segment.
3339 */
3340 if (((lpfc_cmd->prot_seg_cnt * 3) >
3341 (phba->cfg_total_seg_cnt - 2)) &&
3342 !phba->cfg_xpsgl) {
3343 ret = 2;
3344 goto err;
3345 }
3346
3347 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3348 datasegcnt, protsegcnt, lpfc_cmd);
3349
3350 /* we should have 3 or more entries in buffer list */
3351 if (num_sge < 3 ||
3352 (num_sge > phba->cfg_total_seg_cnt &&
3353 !phba->cfg_xpsgl)) {
3354 ret = 2;
3355 goto err;
3356 }
3357 break;
3358
3359 case LPFC_PG_TYPE_INVALID:
3360 default:
3361 scsi_dma_unmap(scsi_cmnd);
3362 lpfc_cmd->seg_cnt = 0;
3363
3364 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3365 "9083 Unexpected protection group %i\n",
3366 prot_group_type);
3367 return 2;
3368 }
3369 }
3370
3371 switch (scsi_get_prot_op(scsi_cmnd)) {
3372 case SCSI_PROT_WRITE_STRIP:
3373 case SCSI_PROT_READ_STRIP:
3374 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3375 break;
3376 case SCSI_PROT_WRITE_INSERT:
3377 case SCSI_PROT_READ_INSERT:
3378 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3379 break;
3380 case SCSI_PROT_WRITE_PASS:
3381 case SCSI_PROT_READ_PASS:
3382 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3383 break;
3384 }
3385
3386 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3387 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3388
3389 /*
3390 * Due to difference in data length between DIF/non-DIF paths,
3391 * we need to set word 4 of IOCB here
3392 */
3393 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3394
3395 /*
3396 * For First burst, we may need to adjust the initial transfer
3397 * length for DIF
3398 */
3399 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3400 (fcpdl < vport->cfg_first_burst_size))
3401 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3402
3403 /*
3404 * If the OAS driver feature is enabled and the lun is enabled for
3405 * OAS, set the oas iocb related flags.
3406 */
3407 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3408 scsi_cmnd->device->hostdata)->oas_enabled)
3409 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3410
3411 return 0;
3412err:
3413 if (lpfc_cmd->seg_cnt)
3414 scsi_dma_unmap(scsi_cmnd);
3415 if (lpfc_cmd->prot_seg_cnt)
3416 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3417 scsi_prot_sg_count(scsi_cmnd),
3418 scsi_cmnd->sc_data_direction);
3419
3420 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3421 			"9084 Cannot setup S/G List for HBA "
3422 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3423 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3424 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3425 prot_group_type, num_sge);
3426
3427 lpfc_cmd->seg_cnt = 0;
3428 lpfc_cmd->prot_seg_cnt = 0;
3429 return ret;
3430}
3431
3432/**
3433 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3434 * @phba: The Hba for which this call is being executed.
3435 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3436 *
3437 * This routine wraps the actual DMA mapping function pointer from the
3438 * lpfc_hba struct.
3439 *
3440 * Return codes:
3441 * 1 - Error
3442 * 0 - Success
3443 **/
3444static inline int
3445lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3446{
3447 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3448}
3449
3450/**
3451 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3452 * using BlockGuard.
3453 * @phba: The Hba for which this call is being executed.
3454 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3455 *
3456 * This routine wraps the actual DMA mapping function pointer from the
3457 * lpfc_hba struct.
3458 *
3459 * Return codes:
3460 * 1 - Error
3461 * 0 - Success
3462 **/
3463static inline int
3464lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3465{
3466 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3467}
3468
3469/**
3470 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3471 * @phba: Pointer to hba context object.
3472 * @vport: Pointer to vport object.
3473 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3474 * @rsp_iocb: Pointer to response iocb object which reported error.
3475 *
3476 * This function posts an event when there is a SCSI command reporting
3477 * error from the scsi device.
3478 **/
3479static void
3480lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3481 struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3482 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3483 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3484 uint32_t resp_info = fcprsp->rspStatus2;
3485 uint32_t scsi_status = fcprsp->rspStatus3;
3486 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3487 struct lpfc_fast_path_event *fast_path_evt = NULL;
3488 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3489 unsigned long flags;
3490
3491 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3492 return;
3493
3494 	/* If there is a queue full or busy condition, send a scsi event */
3495 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3496 (cmnd->result == SAM_STAT_BUSY)) {
3497 fast_path_evt = lpfc_alloc_fast_evt(phba);
3498 if (!fast_path_evt)
3499 return;
3500 fast_path_evt->un.scsi_evt.event_type =
3501 FC_REG_SCSI_EVENT;
3502 fast_path_evt->un.scsi_evt.subcategory =
3503 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3504 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3505 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3506 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3507 &pnode->nlp_portname, sizeof(struct lpfc_name));
3508 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3509 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3510 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3511 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3512 fast_path_evt = lpfc_alloc_fast_evt(phba);
3513 if (!fast_path_evt)
3514 return;
3515 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3516 FC_REG_SCSI_EVENT;
3517 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3518 LPFC_EVENT_CHECK_COND;
3519 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3520 cmnd->device->lun;
3521 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3522 &pnode->nlp_portname, sizeof(struct lpfc_name));
3523 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3524 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3525 fast_path_evt->un.check_cond_evt.sense_key =
3526 cmnd->sense_buffer[2] & 0xf;
3527 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3528 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3529 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3530 fcpi_parm &&
3531 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3532 ((scsi_status == SAM_STAT_GOOD) &&
3533 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3534 /*
3535 * If fcpi_parm is valid and either the residual does not match it, or
3536 * the status is good with no residual reported, there is a read check error
3537 */
3538 fast_path_evt = lpfc_alloc_fast_evt(phba);
3539 if (!fast_path_evt)
3540 return;
3541 fast_path_evt->un.read_check_error.header.event_type =
3542 FC_REG_FABRIC_EVENT;
3543 fast_path_evt->un.read_check_error.header.subcategory =
3544 LPFC_EVENT_FCPRDCHKERR;
3545 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3546 &pnode->nlp_portname, sizeof(struct lpfc_name));
3547 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3548 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3549 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3550 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3551 fast_path_evt->un.read_check_error.fcpiparam =
3552 fcpi_parm;
3553 } else
3554 return;
3555
3556 fast_path_evt->vport = vport;
3557 spin_lock_irqsave(&phba->hbalock, flags);
3558 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3559 spin_unlock_irqrestore(&phba->hbalock, flags);
3560 lpfc_worker_wake_up(phba);
3561 return;
3562}
3563
3564/**
3565 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3566 * @phba: The HBA for which this call is being executed.
3567 * @psb: The scsi buffer which is going to be un-mapped.
3568 *
3569 * This routine does DMA un-mapping of scatter gather list of scsi command
3570 * field of @psb for device with SLI-3 interface spec.
3571 **/
3572static void
3573lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3574{
3575 /*
3576 * There are only two special cases to consider. (1) the scsi command
3577 * requested scatter-gather usage or (2) the scsi command allocated
3578 * a request buffer, but did not request use_sg. There is a third
3579 * case, but it does not require resource deallocation.
3580 */
3581 if (psb->seg_cnt > 0)
3582 scsi_dma_unmap(psb->pCmd);
3583 if (psb->prot_seg_cnt > 0)
3584 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3585 scsi_prot_sg_count(psb->pCmd),
3586 psb->pCmd->sc_data_direction);
3587}
3588
3589/**
3590 * lpfc_handle_fcp_err - FCP response handler
3591 * @vport: The virtual port for which this call is being executed.
3592 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3593 * @rsp_iocb: The response IOCB which contains FCP error.
3594 *
3595 * This routine is called to process response IOCB with status field
3596 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3597 * based upon SCSI and FCP error.
3598 **/
3599static void
3600lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3601 struct lpfc_iocbq *rsp_iocb)
3602{
3603 struct lpfc_hba *phba = vport->phba;
3604 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3605 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3606 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3607 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3608 uint32_t resp_info = fcprsp->rspStatus2;
3609 uint32_t scsi_status = fcprsp->rspStatus3;
3610 uint32_t *lp;
3611 uint32_t host_status = DID_OK;
3612 uint32_t rsplen = 0;
3613 uint32_t fcpDl;
3614 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3615
3616
3617 /*
3618 * If this is a task management command, there is no
3619 * scsi packet associated with this lpfc_cmd. The driver
3620 * consumes it.
3621 */
3622 if (fcpcmd->fcpCntl2) {
3623 scsi_status = 0;
3624 goto out;
3625 }
3626
3627 if (resp_info & RSP_LEN_VALID) {
3628 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3629 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3630 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3631 "2719 Invalid response length: "
3632 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
3633 cmnd->device->id,
3634 cmnd->device->lun, cmnd->cmnd[0],
3635 rsplen);
3636 host_status = DID_ERROR;
3637 goto out;
3638 }
3639 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3640 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3641 "2757 Protocol failure detected during "
3642 "processing of FCP I/O op: "
3643 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3644 cmnd->device->id,
3645 cmnd->device->lun, cmnd->cmnd[0],
3646 fcprsp->rspInfo3);
3647 host_status = DID_ERROR;
3648 goto out;
3649 }
3650 }
3651
3652 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3653 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3654 if (snslen > SCSI_SENSE_BUFFERSIZE)
3655 snslen = SCSI_SENSE_BUFFERSIZE;
3656
3657 if (resp_info & RSP_LEN_VALID)
3658 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3659 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3660 }
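	/* The first words of the sense buffer appear in the log message below. */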
3661 lp = (uint32_t *)cmnd->sense_buffer;
3662
3663	/* special handling for underrun conditions */
3664 if (!scsi_status && (resp_info & RESID_UNDER)) {
3665		/* don't log underruns under normal FCP logging... */
3666 if (vport->cfg_log_verbose & LOG_FCP)
3667 logit = LOG_FCP_ERROR;
3668		/* ...unless the operator explicitly enables underrun logging */
3669 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3670 logit = LOG_FCP_UNDER;
3671 }
3672
3673 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3674 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3675 "Data: x%x x%x x%x x%x x%x\n",
3676 cmnd->cmnd[0], scsi_status,
3677 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3678 be32_to_cpu(fcprsp->rspResId),
3679 be32_to_cpu(fcprsp->rspSnsLen),
3680 be32_to_cpu(fcprsp->rspRspLen),
3681 fcprsp->rspInfo3);
3682
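	/* Assume no residual; it is overridden below if the FCP_RSP reports one. */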
3683 scsi_set_resid(cmnd, 0);
3684 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3685 if (resp_info & RESID_UNDER) {
3686 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3687
3688 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3689 "9025 FCP Underrun, expected %d, "
3690 "residual %d Data: x%x x%x x%x\n",
3691 fcpDl,
3692 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3693 cmnd->underflow);
3694
3695 /*
3696		 * If there is an underrun, check whether the underrun reported by
3697		 * the storage array matches the underrun reported by the HBA.
3698		 * If they do not match, a frame was dropped.
3699 */
3700 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3701 lpfc_printf_vlog(vport, KERN_WARNING,
3702 LOG_FCP | LOG_FCP_ERROR,
3703 "9026 FCP Read Check Error "
3704 "and Underrun Data: x%x x%x x%x x%x\n",
3705 fcpDl,
3706 scsi_get_resid(cmnd), fcpi_parm,
3707 cmnd->cmnd[0]);
3708 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3709 host_status = DID_ERROR;
3710 }
3711 /*
3712 * The cmnd->underflow is the minimum number of bytes that must
3713 * be transferred for this command. Provided a sense condition
3714 * is not present, make sure the actual amount transferred is at
3715 * least the underflow value or fail.
3716 */
3717 if (!(resp_info & SNS_LEN_VALID) &&
3718 (scsi_status == SAM_STAT_GOOD) &&
3719 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3720 < cmnd->underflow)) {
3721 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3722 "9027 FCP command x%x residual "
3723 "underrun converted to error "
3724 "Data: x%x x%x x%x\n",
3725 cmnd->cmnd[0], scsi_bufflen(cmnd),
3726 scsi_get_resid(cmnd), cmnd->underflow);
3727 host_status = DID_ERROR;
3728 }
3729 } else if (resp_info & RESID_OVER) {
3730 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3731 "9028 FCP command x%x residual overrun error. "
3732 "Data: x%x x%x\n", cmnd->cmnd[0],
3733 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3734 host_status = DID_ERROR;
3735
3736 /*
3737 * Check SLI validation that all the transfer was actually done
3738 * (fcpi_parm should be zero). Apply check only to reads.
3739 */
3740 } else if (fcpi_parm) {
3741 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3742 "9029 FCP %s Check Error xri x%x Data: "
3743 "x%x x%x x%x x%x x%x\n",
3744 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3745 "Read" : "Write"),
3746 ((phba->sli_rev == LPFC_SLI_REV4) ?
3747 lpfc_cmd->cur_iocbq.sli4_xritag :
3748 rsp_iocb->iocb.ulpContext),
3749 fcpDl, be32_to_cpu(fcprsp->rspResId),
3750 fcpi_parm, cmnd->cmnd[0], scsi_status);
3751
3752 /* There is some issue with the LPe12000 that causes it
3753 * to miscalculate the fcpi_parm and falsely trip this
3754 * recovery logic. Detect this case and don't error when true.
3755 */
3756 if (fcpi_parm > fcpDl)
3757 goto out;
3758
3759 switch (scsi_status) {
3760 case SAM_STAT_GOOD:
3761 case SAM_STAT_CHECK_CONDITION:
3762 /* Fabric dropped a data frame. Fail any successful
3763 * command in which we detected dropped frames.
3764 * A status of good or some check conditions could
3765 * be considered a successful command.
3766 */
3767 host_status = DID_ERROR;
3768 break;
3769 }
3770 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3771 }
3772
3773 out:
3774 cmnd->result = host_status << 16 | scsi_status;
3775 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3776}
3777
3778/**
3779 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3780 * @phba: The Hba for which this call is being executed.
3781 * @pIocbIn: The command IOCBQ for the scsi cmnd.
3782 * @pIocbOut: The response IOCBQ for the scsi cmnd.
3783 *
3784 * This routine assigns the scsi command result by examining the response
3785 * IOCB status field. It also handles the QUEUE FULL condition by ramping
3786 * down the device queue depth.
3787 **/
3788static void
3789lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3790 struct lpfc_iocbq *pIocbOut)
3791{
3792 struct lpfc_io_buf *lpfc_cmd =
3793 (struct lpfc_io_buf *) pIocbIn->context1;
3794 struct lpfc_vport *vport = pIocbIn->vport;
3795 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3796 struct lpfc_nodelist *pnode = rdata->pnode;
3797 struct scsi_cmnd *cmd;
3798 unsigned long flags;
3799 struct lpfc_fast_path_event *fast_path_evt;
3800 struct Scsi_Host *shost;
3801 int idx;
3802 uint32_t logit = LOG_FCP;
3803#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3804 int cpu;
3805#endif
3806
3807 /* Guard against abort handler being called at same time */
3808 spin_lock(&lpfc_cmd->buf_lock);
3809
3810 /* Sanity check on return of outstanding command */
3811 cmd = lpfc_cmd->pCmd;
3812 if (!cmd) {
3813 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3814 "2621 IO completion: Not an active IO\n");
3815 spin_unlock(&lpfc_cmd->buf_lock);
3816 return;
3817 }
3818
3819 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
3820 if (phba->sli4_hba.hdwq)
3821 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
3822
3823#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3824 if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
3825 cpu = raw_smp_processor_id();
3826 if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
3827 phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
3828 }
3829#endif
3830 shost = cmd->device->host;
3831
3832 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3833 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3834	/* pick up SLI4 exchange busy status from HBA */
3835 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
3836 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
3837 else
3838 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
3839
3840#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3841 if (lpfc_cmd->prot_data_type) {
3842 struct scsi_dif_tuple *src = NULL;
3843
3844 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3845 /*
3846 * Used to restore any changes to protection
3847 * data for error injection.
3848 */
3849 switch (lpfc_cmd->prot_data_type) {
3850 case LPFC_INJERR_REFTAG:
3851 src->ref_tag =
3852 lpfc_cmd->prot_data;
3853 break;
3854 case LPFC_INJERR_APPTAG:
3855 src->app_tag =
3856 (uint16_t)lpfc_cmd->prot_data;
3857 break;
3858 case LPFC_INJERR_GUARD:
3859 src->guard_tag =
3860 (uint16_t)lpfc_cmd->prot_data;
3861 break;
3862 default:
3863 break;
3864 }
3865
3866 lpfc_cmd->prot_data = 0;
3867 lpfc_cmd->prot_data_type = 0;
3868 lpfc_cmd->prot_data_segment = NULL;
3869 }
3870#endif
3871
3872 if (lpfc_cmd->status) {
3873 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3874 (lpfc_cmd->result & IOERR_DRVR_MASK))
3875 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3876 else if (lpfc_cmd->status >= IOSTAT_CNT)
3877 lpfc_cmd->status = IOSTAT_DEFAULT;
3878 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3879 !lpfc_cmd->fcp_rsp->rspStatus3 &&
3880 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3881 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3882 logit = 0;
3883 else
3884 logit = LOG_FCP | LOG_FCP_UNDER;
3885 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3886 "9030 FCP cmd x%x failed <%d/%lld> "
3887 "status: x%x result: x%x "
3888 "sid: x%x did: x%x oxid: x%x "
3889 "Data: x%x x%x\n",
3890 cmd->cmnd[0],
3891 cmd->device ? cmd->device->id : 0xffff,
3892 cmd->device ? cmd->device->lun : 0xffff,
3893 lpfc_cmd->status, lpfc_cmd->result,
3894 vport->fc_myDID,
3895 (pnode) ? pnode->nlp_DID : 0,
3896 phba->sli_rev == LPFC_SLI_REV4 ?
3897 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3898 pIocbOut->iocb.ulpContext,
3899 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3900
3901 switch (lpfc_cmd->status) {
3902 case IOSTAT_FCP_RSP_ERROR:
3903 /* Call FCP RSP handler to determine result */
3904 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3905 break;
3906 case IOSTAT_NPORT_BSY:
3907 case IOSTAT_FABRIC_BSY:
3908 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
3909 fast_path_evt = lpfc_alloc_fast_evt(phba);
3910 if (!fast_path_evt)
3911 break;
3912 fast_path_evt->un.fabric_evt.event_type =
3913 FC_REG_FABRIC_EVENT;
3914 fast_path_evt->un.fabric_evt.subcategory =
3915 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3916 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3917 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3918 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3919 &pnode->nlp_portname,
3920 sizeof(struct lpfc_name));
3921 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3922 &pnode->nlp_nodename,
3923 sizeof(struct lpfc_name));
3924 }
3925 fast_path_evt->vport = vport;
3926 fast_path_evt->work_evt.evt =
3927 LPFC_EVT_FASTPATH_MGMT_EVT;
3928 spin_lock_irqsave(&phba->hbalock, flags);
3929 list_add_tail(&fast_path_evt->work_evt.evt_listp,
3930 &phba->work_list);
3931 spin_unlock_irqrestore(&phba->hbalock, flags);
3932 lpfc_worker_wake_up(phba);
3933 break;
3934 case IOSTAT_LOCAL_REJECT:
3935 case IOSTAT_REMOTE_STOP:
3936 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3937 lpfc_cmd->result ==
3938 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3939 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3940 lpfc_cmd->result ==
3941 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3942 cmd->result = DID_NO_CONNECT << 16;
3943 break;
3944 }
3945 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3946 lpfc_cmd->result == IOERR_NO_RESOURCES ||
3947 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3948 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3949 cmd->result = DID_REQUEUE << 16;
3950 break;
3951 }
3952 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3953 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3954 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3955 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3956 /*
3957 * This is a response for a BG enabled
3958 * cmd. Parse BG error
3959 */
3960 lpfc_parse_bg_err(phba, lpfc_cmd,
3961 pIocbOut);
3962 break;
3963 } else {
3964 lpfc_printf_vlog(vport, KERN_WARNING,
3965 LOG_BG,
3966 "9031 non-zero BGSTAT "
3967 "on unprotected cmd\n");
3968 }
3969 }
3970 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3971 && (phba->sli_rev == LPFC_SLI_REV4)
3972 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3973			/* This IO was aborted by the target; we don't
3974			 * know the rxid, and because we did not send
3975			 * the ABTS we cannot generate an RRQ.
3976 */
3977 lpfc_set_rrq_active(phba, pnode,
3978 lpfc_cmd->cur_iocbq.sli4_lxritag,
3979 0, 0);
3980 }
3981 /* fall through */
3982 default:
3983 cmd->result = DID_ERROR << 16;
3984 break;
3985 }
3986
3987 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3988 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3989 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
3990 SAM_STAT_BUSY;
3991 } else
3992 cmd->result = DID_OK << 16;
3993
3994 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3995 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3996
3997 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3998 "0710 Iodone <%d/%llu> cmd x%px, error "
3999 "x%x SNS x%x x%x Data: x%x x%x\n",
4000 cmd->device->id, cmd->device->lun, cmd,
4001 cmd->result, *lp, *(lp + 3), cmd->retries,
4002 scsi_get_resid(cmd));
4003 }
4004
4005 lpfc_update_stats(phba, lpfc_cmd);
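	/*
	 * If this command's completion exceeded cfg_max_scsicmpl_time, shrink
	 * the target queue depth to the number of commands currently pending
	 * (READ_10/WRITE_10 only, and never below LPFC_MIN_TGT_QDEPTH).
	 */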
4006 if (vport->cfg_max_scsicmpl_time &&
4007 time_after(jiffies, lpfc_cmd->start_time +
4008 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4009 spin_lock_irqsave(shost->host_lock, flags);
4010 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4011 if (pnode->cmd_qdepth >
4012 atomic_read(&pnode->cmd_pending) &&
4013 (atomic_read(&pnode->cmd_pending) >
4014 LPFC_MIN_TGT_QDEPTH) &&
4015 ((cmd->cmnd[0] == READ_10) ||
4016 (cmd->cmnd[0] == WRITE_10)))
4017 pnode->cmd_qdepth =
4018 atomic_read(&pnode->cmd_pending);
4019
4020 pnode->last_change_time = jiffies;
4021 }
4022 spin_unlock_irqrestore(shost->host_lock, flags);
4023 }
4024 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4025
4026 lpfc_cmd->pCmd = NULL;
4027 spin_unlock(&lpfc_cmd->buf_lock);
4028
4029 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4030 cmd->scsi_done(cmd);
4031
4032 /*
4033 * If there is an abort thread waiting for command completion
4034 * wake up the thread.
4035 */
4036 spin_lock(&lpfc_cmd->buf_lock);
4037 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4038 if (lpfc_cmd->waitq)
4039 wake_up(lpfc_cmd->waitq);
4040 spin_unlock(&lpfc_cmd->buf_lock);
4041
4042 lpfc_release_scsi_buf(phba, lpfc_cmd);
4043}
4044
4045/**
4046 * lpfc_fcpcmd_to_iocb - copy the fcp_cmnd data into the IOCB
4047 * @data: A pointer to the immediate command data portion of the IOCB.
4048 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4049 *
4050 * The routine copies the entire FCP command from @fcp_cmnd to @data while
4051 * byte swapping the data to big endian format for transmission on the wire.
4052 **/
4053static void
4054lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4055{
4056 int i, j;
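	/* Copy the FCP command one 32-bit word at a time, converting each
	 * word to big-endian wire format.
	 */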
4057 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4058 i += sizeof(uint32_t), j++) {
4059 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4060 }
4061}
4062
4063/**
4064 * lpfc_scsi_prep_cmnd - Wrapper function to convert a scsi cmnd to an FCP info unit
4065 * @vport: The virtual port for which this call is being executed.
4066 * @lpfc_cmd: The scsi buffer holding the command to be sent.
4067 * @pnode: Pointer to lpfc_nodelist.
4068 *
4069 * This routine initializes the fcp_cmnd and IOCB data structures from the
4070 * scsi command so that the request can be transferred to the device.
4071 **/
4072static void
4073lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4074 struct lpfc_nodelist *pnode)
4075{
4076 struct lpfc_hba *phba = vport->phba;
4077 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4078 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4079 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4080 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4081 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4082 int datadir = scsi_cmnd->sc_data_direction;
4083 int idx;
4084 uint8_t *ptr;
4085 bool sli4;
4086 uint32_t fcpdl;
4087
4088 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4089 return;
4090
4091 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4092 /* clear task management bits */
4093 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4094
4095 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4096 &lpfc_cmd->fcp_cmnd->fcp_lun);
4097
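	/* Copy the CDB into the FCP_CMND IU and zero-pad the remainder of the
	 * fixed-size CDB field.
	 */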
4098 ptr = &fcp_cmnd->fcpCdb[0];
4099 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4100 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4101 ptr += scsi_cmnd->cmd_len;
4102 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4103 }
4104
4105 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4106
4107 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4108 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4109 idx = lpfc_cmd->hdwq_no;
4110 if (phba->sli4_hba.hdwq)
4111 hdwq = &phba->sli4_hba.hdwq[idx];
4112
4113 /*
4114 * There are three possibilities here - use scatter-gather segment, use
4115 * the single mapping, or neither. Start the lpfc command prep by
4116 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4117 * data bde entry.
4118 */
4119 if (scsi_sg_count(scsi_cmnd)) {
4120 if (datadir == DMA_TO_DEVICE) {
4121 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4122 iocb_cmd->ulpPU = PARM_READ_CHECK;
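			/*
			 * If first burst is negotiated for this target, seed
			 * fcpi_XRdy with the lesser of the transfer length and
			 * the configured first burst size.
			 */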
4123 if (vport->cfg_first_burst_size &&
4124 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4125 fcpdl = scsi_bufflen(scsi_cmnd);
4126 if (fcpdl < vport->cfg_first_burst_size)
4127 piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
4128 else
4129 piocbq->iocb.un.fcpi.fcpi_XRdy =
4130 vport->cfg_first_burst_size;
4131 }
4132 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4133 if (hdwq)
4134 hdwq->scsi_cstat.output_requests++;
4135 } else {
4136 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4137 iocb_cmd->ulpPU = PARM_READ_CHECK;
4138 fcp_cmnd->fcpCntl3 = READ_DATA;
4139 if (hdwq)
4140 hdwq->scsi_cstat.input_requests++;
4141 }
4142 } else {
4143 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4144 iocb_cmd->un.fcpi.fcpi_parm = 0;
4145 iocb_cmd->ulpPU = 0;
4146 fcp_cmnd->fcpCntl3 = 0;
4147 if (hdwq)
4148 hdwq->scsi_cstat.control_requests++;
4149 }
4150 if (phba->sli_rev == 3 &&
4151 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4152 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4153 /*
4154 * Finish initializing those IOCB fields that are independent
4155 * of the scsi_cmnd request_buffer
4156 */
4157 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4158 if (sli4)
4159 piocbq->iocb.ulpContext =
4160 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4161 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4162 piocbq->iocb.ulpFCP2Rcvy = 1;
4163 else
4164 piocbq->iocb.ulpFCP2Rcvy = 0;
4165
4166 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4167 piocbq->context1 = lpfc_cmd;
4168 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4169 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4170 piocbq->vport = vport;
4171}
4172
4173/**
4174 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4175 * @vport: The virtual port for which this call is being executed.
4176 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4177 * @lun: Logical unit number.
4178 * @task_mgmt_cmd: SCSI task management command.
4179 *
4180 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4181 * for device with SLI-3 interface spec.
4182 *
4183 * Return codes:
4184 * 0 - Error
4185 * 1 - Success
4186 **/
4187static int
4188lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4189 struct lpfc_io_buf *lpfc_cmd,
4190 uint64_t lun,
4191 uint8_t task_mgmt_cmd)
4192{
4193 struct lpfc_iocbq *piocbq;
4194 IOCB_t *piocb;
4195 struct fcp_cmnd *fcp_cmnd;
4196 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4197 struct lpfc_nodelist *ndlp = rdata->pnode;
4198
4199 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4200 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4201 return 0;
4202
4203 piocbq = &(lpfc_cmd->cur_iocbq);
4204 piocbq->vport = vport;
4205
4206 piocb = &piocbq->iocb;
4207
4208 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4209 /* Clear out any old data in the FCP command area */
4210 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4211 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4212 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4213 if (vport->phba->sli_rev == 3 &&
4214 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4215 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4216 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4217 piocb->ulpContext = ndlp->nlp_rpi;
4218 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4219 piocb->ulpContext =
4220 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4221 }
4222 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4223 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4224 piocb->ulpPU = 0;
4225 piocb->un.fcpi.fcpi_parm = 0;
4226
4227 /* ulpTimeout is only one byte */
4228 if (lpfc_cmd->timeout > 0xff) {
4229 /*
4230 * Do not timeout the command at the firmware level.
4231 * The driver will provide the timeout mechanism.
4232 */
4233 piocb->ulpTimeout = 0;
4234 } else
4235 piocb->ulpTimeout = lpfc_cmd->timeout;
4236
4237 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4238 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4239
4240 return 1;
4241}
4242
4243/**
4244 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4245 * @phba: The hba struct for which this call is being executed.
4246 * @dev_grp: The HBA PCI-Device group number.
4247 *
4248 * This routine sets up the SCSI interface API function jump table in @phba
4249 * struct.
4250 * Returns: 0 - success, -ENODEV - failure.
4251 **/
4252int
4253lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4254{
4255
4256 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4257 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4258
4259 switch (dev_grp) {
4260 case LPFC_PCI_DEV_LP:
4261 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4262 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4263 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4264 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4265 break;
4266 case LPFC_PCI_DEV_OC:
4267 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4268 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4269 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4270 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4271 break;
4272 default:
4273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4274 "1418 Invalid HBA PCI-device group: 0x%x\n",
4275 dev_grp);
4276 return -ENODEV;
4277 break;
4278 }
4279 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4280 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4281 return 0;
4282}
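
/*
 * Illustrative note (not part of the driver logic): once the table above is
 * populated, the SLI-variant handlers are reached through the function
 * pointers in struct lpfc_hba, e.g.:
 *
 *	rc = phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *	phba->lpfc_release_scsi_buf(phba, lpfc_cmd);
 *
 * The exact call sites may use wrapper macros; the calls shown here are only
 * a sketch of how the jump table is consumed.
 */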
4283
4284/**
4285 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4286 * @phba: The Hba for which this call is being executed.
4287 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4288 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4289 *
4290 * This routine is the IOCB completion routine for device reset and target
4291 * reset commands. It releases the scsi buffer associated with lpfc_cmd.
4292 **/
4293static void
4294lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4295 struct lpfc_iocbq *cmdiocbq,
4296 struct lpfc_iocbq *rspiocbq)
4297{
4298 struct lpfc_io_buf *lpfc_cmd =
4299 (struct lpfc_io_buf *) cmdiocbq->context1;
4300 if (lpfc_cmd)
4301 lpfc_release_scsi_buf(phba, lpfc_cmd);
4302 return;
4303}
4304
4305/**
4306 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
4307 * if issuing a pci_bus_reset is possibly unsafe
4308 * @phba: lpfc_hba pointer.
4309 *
4310 * Description:
4311 * Walks the bus_list to ensure only PCI devices with Emulex
4312 * vendor id, device ids that support hot reset, and only one occurrence
4313 * of function 0.
4314 *
4315 * Returns:
4316 * -EBADSLT, detected invalid device
4317 * 0, successful
4318 */
4319int
4320lpfc_check_pci_resettable(const struct lpfc_hba *phba)
4321{
4322 const struct pci_dev *pdev = phba->pcidev;
4323 struct pci_dev *ptr = NULL;
4324 u8 counter = 0;
4325
4326 /* Walk the list of devices on the pci_dev's bus */
4327 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4328 /* Check for Emulex Vendor ID */
4329 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4330 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4331 "8346 Non-Emulex vendor found: "
4332 "0x%04x\n", ptr->vendor);
4333 return -EBADSLT;
4334 }
4335
4336 /* Check for valid Emulex Device ID */
4337 switch (ptr->device) {
4338 case PCI_DEVICE_ID_LANCER_FC:
4339 case PCI_DEVICE_ID_LANCER_G6_FC:
4340 case PCI_DEVICE_ID_LANCER_G7_FC:
4341 break;
4342 default:
4343 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4344 "8347 Invalid device found: "
4345 "0x%04x\n", ptr->device);
4346 return -EBADSLT;
4347 }
4348
4349 /* Check for only one function 0 ID to ensure only one HBA on
4350 * secondary bus
4351 */
4352 if (ptr->devfn == 0) {
4353 if (++counter > 1) {
4354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4355 "8348 More than one device on "
4356 "secondary bus found\n");
4357 return -EBADSLT;
4358 }
4359 }
4360 }
4361
4362 return 0;
4363}
4364
4365/**
4366 * lpfc_info - Info entry point of scsi_host_template data structure
4367 * @host: The scsi host for which this call is being executed.
4368 *
4369 * This routine provides module information about hba.
4370 *
4371 * Return code:
4372 * Pointer to char - Success.
4373 **/
4374const char *
4375lpfc_info(struct Scsi_Host *host)
4376{
4377 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4378 struct lpfc_hba *phba = vport->phba;
4379 int link_speed = 0;
4380 static char lpfcinfobuf[384];
4381 char tmp[384] = {0};
4382
4383 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4384 if (phba && phba->pcidev){
4385 /* Model Description */
4386		scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
4387 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4388 sizeof(lpfcinfobuf))
4389 goto buffer_done;
4390
4391 /* PCI Info */
4392 scnprintf(tmp, sizeof(tmp),
4393 " on PCI bus %02x device %02x irq %d",
4394 phba->pcidev->bus->number, phba->pcidev->devfn,
4395 phba->pcidev->irq);
4396 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4397 sizeof(lpfcinfobuf))
4398 goto buffer_done;
4399
4400 /* Port Number */
4401 if (phba->Port[0]) {
4402 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4403 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4404 sizeof(lpfcinfobuf))
4405 goto buffer_done;
4406 }
4407
4408 /* Link Speed */
4409 link_speed = lpfc_sli_port_speed_get(phba);
4410 if (link_speed != 0) {
4411 scnprintf(tmp, sizeof(tmp),
4412 " Logical Link Speed: %d Mbps", link_speed);
4413 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4414 sizeof(lpfcinfobuf))
4415 goto buffer_done;
4416 }
4417
4418 /* PCI resettable */
4419 if (!lpfc_check_pci_resettable(phba)) {
4420 scnprintf(tmp, sizeof(tmp), " PCI resettable");
4421 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4422 }
4423 }
4424
4425buffer_done:
4426 return lpfcinfobuf;
4427}
4428
4429/**
4430 * lpfc_poll_rearm_timer - Routine to re-arm the fcp_poll timer of the HBA
4431 * @phba: The Hba for which this call is being executed.
4432 *
4433 * This routine re-arms the fcp_poll timer of @phba to expire cfg_poll_tmo
4434 * milliseconds from now. The default value of cfg_poll_tmo is 10 milliseconds.
4435 **/
4436static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4437{
4438 unsigned long poll_tmo_expires =
4439 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4440
4441 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4442 mod_timer(&phba->fcp_poll_timer,
4443 poll_tmo_expires);
4444}
4445
4446/**
4447 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4448 * @phba: The Hba for which this call is being executed.
4449 *
4450 * This routine starts the fcp_poll_timer of @phba.
4451 **/
4452void lpfc_poll_start_timer(struct lpfc_hba * phba)
4453{
4454 lpfc_poll_rearm_timer(phba);
4455}
4456
4457/**
4458 * lpfc_poll_timeout - Restart polling timer
4459 * @t: Pointer to the timer_list embedded in struct lpfc_hba.
4460 *
4461 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4462 * and the FCP ring interrupt is disabled.
4463 **/
4464
4465void lpfc_poll_timeout(struct timer_list *t)
4466{
4467 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4468
4469 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4470 lpfc_sli_handle_fast_ring_event(phba,
4471 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4472
4473 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4474 lpfc_poll_rearm_timer(phba);
4475 }
4476}
4477
4478/**
4479 * lpfc_queuecommand - scsi_host_template queuecommand entry point
4480 * @shost: Pointer to the Scsi_Host to which the command is submitted.
4481 * @cmnd: Pointer to scsi_cmnd data structure.
4482 *
4483 * The driver registers this routine with the scsi midlayer to submit a @cmnd
4484 * for processing. It prepares an IOCB from the scsi command and provides it
4485 * to the firmware; cmnd->scsi_done() is invoked once processing completes.
4486 *
4487 * Return value :
4488 * 0 - Success
4489 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4490 **/
4491static int
4492lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4493{
4494 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4495 struct lpfc_hba *phba = vport->phba;
4496 struct lpfc_rport_data *rdata;
4497 struct lpfc_nodelist *ndlp;
4498 struct lpfc_io_buf *lpfc_cmd;
4499 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4500 int err, idx;
4501#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4502 int cpu;
4503#endif
4504
4505 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4506
4507 /* sanity check on references */
4508 if (unlikely(!rdata) || unlikely(!rport))
4509 goto out_fail_command;
4510
4511 err = fc_remote_port_chkready(rport);
4512 if (err) {
4513 cmnd->result = err;
4514 goto out_fail_command;
4515 }
4516 ndlp = rdata->pnode;
4517
4518 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4519 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4520
4521 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4522 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4523 " op:%02x str=%s without registering for"
4524 " BlockGuard - Rejecting command\n",
4525 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4526 dif_op_str[scsi_get_prot_op(cmnd)]);
4527 goto out_fail_command;
4528 }
4529
4530 /*
4531 * Catch race where our node has transitioned, but the
4532 * transport is still transitioning.
4533 */
4534 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4535 goto out_tgt_busy;
4536 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
4537 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
4538 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4539 "3377 Target Queue Full, scsi Id:%d "
4540 "Qdepth:%d Pending command:%d"
4541 " WWNN:%02x:%02x:%02x:%02x:"
4542 "%02x:%02x:%02x:%02x, "
4543 " WWPN:%02x:%02x:%02x:%02x:"
4544 "%02x:%02x:%02x:%02x",
4545 ndlp->nlp_sid, ndlp->cmd_qdepth,
4546 atomic_read(&ndlp->cmd_pending),
4547 ndlp->nlp_nodename.u.wwn[0],
4548 ndlp->nlp_nodename.u.wwn[1],
4549 ndlp->nlp_nodename.u.wwn[2],
4550 ndlp->nlp_nodename.u.wwn[3],
4551 ndlp->nlp_nodename.u.wwn[4],
4552 ndlp->nlp_nodename.u.wwn[5],
4553 ndlp->nlp_nodename.u.wwn[6],
4554 ndlp->nlp_nodename.u.wwn[7],
4555 ndlp->nlp_portname.u.wwn[0],
4556 ndlp->nlp_portname.u.wwn[1],
4557 ndlp->nlp_portname.u.wwn[2],
4558 ndlp->nlp_portname.u.wwn[3],
4559 ndlp->nlp_portname.u.wwn[4],
4560 ndlp->nlp_portname.u.wwn[5],
4561 ndlp->nlp_portname.u.wwn[6],
4562 ndlp->nlp_portname.u.wwn[7]);
4563 goto out_tgt_busy;
4564 }
4565 }
4566
4567 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
4568 if (lpfc_cmd == NULL) {
4569 lpfc_rampdown_queue_depth(phba);
4570
4571 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4572 "0707 driver's buffer pool is empty, "
4573 "IO busied\n");
4574 goto out_host_busy;
4575 }
4576
4577 /*
4578 * Store the midlayer's command structure for the completion phase
4579 * and complete the command initialization.
4580 */
4581 lpfc_cmd->pCmd = cmnd;
4582 lpfc_cmd->rdata = rdata;
4583 lpfc_cmd->ndlp = ndlp;
4584 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4585
4586 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4587 if (vport->phba->cfg_enable_bg) {
4588 lpfc_printf_vlog(vport,
4589 KERN_INFO, LOG_SCSI_CMD,
4590 "9033 BLKGRD: rcvd %s cmd:x%x "
4591 "sector x%llx cnt %u pt %x\n",
4592 dif_op_str[scsi_get_prot_op(cmnd)],
4593 cmnd->cmnd[0],
4594 (unsigned long long)scsi_get_lba(cmnd),
4595 blk_rq_sectors(cmnd->request),
4596 (cmnd->cmnd[1]>>5));
4597 }
4598 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4599 } else {
4600 if (vport->phba->cfg_enable_bg) {
4601 lpfc_printf_vlog(vport,
4602 KERN_INFO, LOG_SCSI_CMD,
4603 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4604 "x%x sector x%llx cnt %u pt %x\n",
4605 cmnd->cmnd[0],
4606 (unsigned long long)scsi_get_lba(cmnd),
4607 blk_rq_sectors(cmnd->request),
4608 (cmnd->cmnd[1]>>5));
4609 }
4610 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4611 }
4612
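	/*
	 * A return of 2 means the buffer could not be prepared and the command
	 * is failed back to the midlayer with DID_ERROR; any other non-zero
	 * return requeues the command via SCSI_MLQUEUE_HOST_BUSY.
	 */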
4613 if (err == 2) {
4614 cmnd->result = DID_ERROR << 16;
4615 goto out_fail_command_release_buf;
4616 } else if (err) {
4617 goto out_host_busy_free_buf;
4618 }
4619
4620 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4621
4622#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4623 if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
4624 cpu = raw_smp_processor_id();
4625 if (cpu < LPFC_CHECK_CPU_CNT) {
4626 struct lpfc_sli4_hdw_queue *hdwq =
4627 &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
4628 hdwq->cpucheck_xmt_io[cpu]++;
4629 }
4630 }
4631#endif
4632 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4633 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4634 if (err) {
4635 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4636				 "3376 FCP could not issue IOCB err %x "
4637 "FCP cmd x%x <%d/%llu> "
4638 "sid: x%x did: x%x oxid: x%x "
4639 "Data: x%x x%x x%x x%x\n",
4640 err, cmnd->cmnd[0],
4641 cmnd->device ? cmnd->device->id : 0xffff,
4642 cmnd->device ? cmnd->device->lun : (u64) -1,
4643 vport->fc_myDID, ndlp->nlp_DID,
4644 phba->sli_rev == LPFC_SLI_REV4 ?
4645 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4646 lpfc_cmd->cur_iocbq.iocb.ulpContext,
4647 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
4648 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4649 (uint32_t)
4650 (cmnd->request->timeout / 1000));
4651
4652 goto out_host_busy_free_buf;
4653 }
4654 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4655 lpfc_sli_handle_fast_ring_event(phba,
4656 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4657
4658 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4659 lpfc_poll_rearm_timer(phba);
4660 }
4661
4662 if (phba->cfg_xri_rebalancing)
4663 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
4664
4665 return 0;
4666
4667 out_host_busy_free_buf:
4668 idx = lpfc_cmd->hdwq_no;
4669 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4670 if (phba->sli4_hba.hdwq) {
4671 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
4672 case WRITE_DATA:
4673 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
4674 break;
4675 case READ_DATA:
4676 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
4677 break;
4678 default:
4679 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
4680 }
4681 }
4682 lpfc_release_scsi_buf(phba, lpfc_cmd);
4683 out_host_busy:
4684 return SCSI_MLQUEUE_HOST_BUSY;
4685
4686 out_tgt_busy:
4687 return SCSI_MLQUEUE_TARGET_BUSY;
4688
4689 out_fail_command_release_buf:
4690 lpfc_release_scsi_buf(phba, lpfc_cmd);
4691
4692 out_fail_command:
4693 cmnd->scsi_done(cmnd);
4694 return 0;
4695}
4696
4697
4698/**
4699 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4700 * @cmnd: Pointer to scsi_cmnd data structure.
4701 *
4702 * This routine aborts @cmnd pending in base driver.
4703 *
4704 * Return code :
4705 * 0x2003 - Error
4706 * 0x2002 - Success
4707 **/
4708static int
4709lpfc_abort_handler(struct scsi_cmnd *cmnd)
4710{
4711 struct Scsi_Host *shost = cmnd->device->host;
4712 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4713 struct lpfc_hba *phba = vport->phba;
4714 struct lpfc_iocbq *iocb;
4715 struct lpfc_iocbq *abtsiocb;
4716 struct lpfc_io_buf *lpfc_cmd;
4717 IOCB_t *cmd, *icmd;
4718 int ret = SUCCESS, status = 0;
4719 struct lpfc_sli_ring *pring_s4 = NULL;
4720 int ret_val;
4721 unsigned long flags;
4722 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4723
4724 status = fc_block_scsi_eh(cmnd);
4725 if (status != 0 && status != SUCCESS)
4726 return status;
4727
4728 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
4729 if (!lpfc_cmd)
4730 return ret;
4731
4732 spin_lock_irqsave(&phba->hbalock, flags);
4733 /* driver queued commands are in process of being flushed */
4734 if (phba->hba_flag & HBA_IOQ_FLUSH) {
4735 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4736 "3168 SCSI Layer abort requested I/O has been "
4737 "flushed by LLD.\n");
4738 ret = FAILED;
4739 goto out_unlock;
4740 }
4741
4742 /* Guard against IO completion being called at same time */
4743 spin_lock(&lpfc_cmd->buf_lock);
4744
4745 if (!lpfc_cmd->pCmd) {
4746 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4747 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4748 "x%x ID %d LUN %llu\n",
4749 SUCCESS, cmnd->device->id, cmnd->device->lun);
4750 goto out_unlock_buf;
4751 }
4752
4753 iocb = &lpfc_cmd->cur_iocbq;
4754 if (phba->sli_rev == LPFC_SLI_REV4) {
4755 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
4756 if (!pring_s4) {
4757 ret = FAILED;
4758 goto out_unlock_buf;
4759 }
4760 spin_lock(&pring_s4->ring_lock);
4761 }
4762 /* the command is in process of being cancelled */
4763 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4764 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4765 "3169 SCSI Layer abort requested I/O has been "
4766 "cancelled by LLD.\n");
4767 ret = FAILED;
4768 goto out_unlock_ring;
4769 }
4770 /*
4771 * If pCmd field of the corresponding lpfc_io_buf structure
4772 * points to a different SCSI command, then the driver has
4773 * already completed this command, but the midlayer did not
4774 * see the completion before the eh fired. Just return SUCCESS.
4775 */
4776 if (lpfc_cmd->pCmd != cmnd) {
4777 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4778 "3170 SCSI Layer abort requested I/O has been "
4779 "completed by LLD.\n");
4780 goto out_unlock_ring;
4781 }
4782
4783 BUG_ON(iocb->context1 != lpfc_cmd);
4784
4785 /* abort issued in recovery is still in progress */
4786 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4787 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4788 "3389 SCSI Layer I/O Abort Request is pending\n");
4789 if (phba->sli_rev == LPFC_SLI_REV4)
4790 spin_unlock(&pring_s4->ring_lock);
4791 spin_unlock(&lpfc_cmd->buf_lock);
4792 spin_unlock_irqrestore(&phba->hbalock, flags);
4793 goto wait_for_cmpl;
4794 }
4795
4796 abtsiocb = __lpfc_sli_get_iocbq(phba);
4797 if (abtsiocb == NULL) {
4798 ret = FAILED;
4799 goto out_unlock_ring;
4800 }
4801
4802 /* Indicate the IO is being aborted by the driver. */
4803 iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4804
4805 /*
4806	 * The scsi command cannot be in the txq; it is in flight because
4807	 * pCmd is still pointing at the SCSI command we have to abort. There
4808	 * is no need to search the txcmplq. Just send an abort to the FW.
4809 */
4810
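	/* Build the ABTS/CLOSE abort request from the tags of the IOCB being aborted. */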
4811 cmd = &iocb->iocb;
4812 icmd = &abtsiocb->iocb;
4813 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4814 icmd->un.acxri.abortContextTag = cmd->ulpContext;
4815 if (phba->sli_rev == LPFC_SLI_REV4)
4816 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4817 else
4818 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4819
4820 icmd->ulpLe = 1;
4821 icmd->ulpClass = cmd->ulpClass;
4822
4823 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4824 abtsiocb->hba_wqidx = iocb->hba_wqidx;
4825 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4826 if (iocb->iocb_flag & LPFC_IO_FOF)
4827 abtsiocb->iocb_flag |= LPFC_IO_FOF;
4828
4829 if (lpfc_is_link_up(phba))
4830 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4831 else
4832 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4833
4834 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4835 abtsiocb->vport = vport;
4836 lpfc_cmd->waitq = &waitq;
4837 if (phba->sli_rev == LPFC_SLI_REV4) {
4838 /* Note: both hbalock and ring_lock must be set here */
4839		/* Note: both hbalock and ring_lock must be held here */
4840 abtsiocb, 0);
4841 spin_unlock(&pring_s4->ring_lock);
4842 } else {
4843 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4844 abtsiocb, 0);
4845 }
4846
4847 if (ret_val == IOCB_ERROR) {
4848 /* Indicate the IO is not being aborted by the driver. */
4849 iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
4850 lpfc_cmd->waitq = NULL;
4851 spin_unlock(&lpfc_cmd->buf_lock);
4852 spin_unlock_irqrestore(&phba->hbalock, flags);
4853 lpfc_sli_release_iocbq(phba, abtsiocb);
4854 ret = FAILED;
4855 goto out;
4856 }
4857
4858 /* no longer need the lock after this point */
4859 spin_unlock(&lpfc_cmd->buf_lock);
4860 spin_unlock_irqrestore(&phba->hbalock, flags);
4861
4862 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4863 lpfc_sli_handle_fast_ring_event(phba,
4864 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4865
4866wait_for_cmpl:
4867 /* Wait for abort to complete */
4868 wait_event_timeout(waitq,
4869 (lpfc_cmd->pCmd != cmnd),
4870 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4871
4872 spin_lock(&lpfc_cmd->buf_lock);
4873
4874 if (lpfc_cmd->pCmd == cmnd) {
4875 ret = FAILED;
4876 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4877 "0748 abort handler timed out waiting "
4878 "for aborting I/O (xri:x%x) to complete: "
4879 "ret %#x, ID %d, LUN %llu\n",
4880 iocb->sli4_xritag, ret,
4881 cmnd->device->id, cmnd->device->lun);
4882 }
4883
4884 lpfc_cmd->waitq = NULL;
4885
4886 spin_unlock(&lpfc_cmd->buf_lock);
4887 goto out;
4888
4889out_unlock_ring:
4890 if (phba->sli_rev == LPFC_SLI_REV4)
4891 spin_unlock(&pring_s4->ring_lock);
4892out_unlock_buf:
4893 spin_unlock(&lpfc_cmd->buf_lock);
4894out_unlock:
4895 spin_unlock_irqrestore(&phba->hbalock, flags);
4896out:
4897 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4898 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4899 "LUN %llu\n", ret, cmnd->device->id,
4900 cmnd->device->lun);
4901 return ret;
4902}
4903
4904static char *
4905lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4906{
4907 switch (task_mgmt_cmd) {
4908 case FCP_ABORT_TASK_SET:
4909 return "ABORT_TASK_SET";
4910 case FCP_CLEAR_TASK_SET:
4911 return "FCP_CLEAR_TASK_SET";
4912 case FCP_BUS_RESET:
4913 return "FCP_BUS_RESET";
4914 case FCP_LUN_RESET:
4915 return "FCP_LUN_RESET";
4916 case FCP_TARGET_RESET:
4917 return "FCP_TARGET_RESET";
4918 case FCP_CLEAR_ACA:
4919 return "FCP_CLEAR_ACA";
4920 case FCP_TERMINATE_TASK:
4921 return "FCP_TERMINATE_TASK";
4922 default:
4923 return "unknown";
4924 }
4925}
4926
4927
4928/**
4929 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4930 * @vport: The virtual port for which this call is being executed.
4931 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4932 *
4933 * This routine checks the FCP RSP INFO to see if the task management command succeeded
4934 *
4935 * Return code :
4936 * 0x2003 - Error
4937 * 0x2002 - Success
4938 **/
4939static int
4940lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
4941{
4942 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4943 uint32_t rsp_info;
4944 uint32_t rsp_len;
4945 uint8_t rsp_info_code;
4946 int ret = FAILED;
4947
4948
4949 if (fcprsp == NULL)
4950 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4951 "0703 fcp_rsp is missing\n");
4952 else {
4953 rsp_info = fcprsp->rspStatus2;
4954 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4955 rsp_info_code = fcprsp->rspInfo3;
4956
4957
4958 lpfc_printf_vlog(vport, KERN_INFO,
4959 LOG_FCP,
4960 "0706 fcp_rsp valid 0x%x,"
4961 " rsp len=%d code 0x%x\n",
4962 rsp_info,
4963 rsp_len, rsp_info_code);
4964
4965 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
4966 * field specifies the number of valid bytes of FCP_RSP_INFO.
4967 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
4968 */
4969 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
4970 ((rsp_len == 8) || (rsp_len == 4))) {
4971 switch (rsp_info_code) {
4972 case RSP_NO_FAILURE:
4973 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4974 "0715 Task Mgmt No Failure\n");
4975 ret = SUCCESS;
4976 break;
4977 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
4978 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4979 "0716 Task Mgmt Target "
4980 "reject\n");
4981 break;
4982 case RSP_TM_NOT_COMPLETED: /* TM failed */
4983 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4984 "0717 Task Mgmt Target "
4985 "failed TM\n");
4986 break;
4987 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
4988 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4989 "0718 Task Mgmt to invalid "
4990 "LUN\n");
4991 break;
4992 }
4993 }
4994 }
4995 return ret;
4996}
4997
4998
4999/**
5000 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5001 * @vport: The virtual port for which this call is being executed.
5002 * @cmnd: Pointer to the scsi_cmnd associated with this TMF request.
5003 * @tgt_id: Target ID of remote device.
5004 * @lun_id: Lun number for the TMF
5005 * @task_mgmt_cmd: type of TMF to send
5006 *
5007 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5008 * a remote port.
5009 *
5010 * Return Code:
5011 * 0x2003 - Error
5012 * 0x2002 - Success.
5013 **/
5014static int
5015lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5016 unsigned int tgt_id, uint64_t lun_id,
5017 uint8_t task_mgmt_cmd)
5018{
5019 struct lpfc_hba *phba = vport->phba;
5020 struct lpfc_io_buf *lpfc_cmd;
5021 struct lpfc_iocbq *iocbq;
5022 struct lpfc_iocbq *iocbqrsp;
5023 struct lpfc_rport_data *rdata;
5024 struct lpfc_nodelist *pnode;
5025 int ret;
5026 int status;
5027
5028 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5029 if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
5030 return FAILED;
5031 pnode = rdata->pnode;
5032
5033 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
5034 if (lpfc_cmd == NULL)
5035 return FAILED;
5036 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5037 lpfc_cmd->rdata = rdata;
5038 lpfc_cmd->pCmd = cmnd;
5039 lpfc_cmd->ndlp = pnode;
5040
5041 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5042 task_mgmt_cmd);
5043 if (!status) {
5044 lpfc_release_scsi_buf(phba, lpfc_cmd);
5045 return FAILED;
5046 }
5047
5048 iocbq = &lpfc_cmd->cur_iocbq;
5049 iocbqrsp = lpfc_sli_get_iocbq(phba);
5050 if (iocbqrsp == NULL) {
5051 lpfc_release_scsi_buf(phba, lpfc_cmd);
5052 return FAILED;
5053 }
5054 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5055
5056 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5057 "0702 Issue %s to TGT %d LUN %llu "
5058 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5059 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5060 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5061 iocbq->iocb_flag);
5062
5063 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5064 iocbq, iocbqrsp, lpfc_cmd->timeout);
5065 if ((status != IOCB_SUCCESS) ||
5066 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5067 if (status != IOCB_SUCCESS ||
5068 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5069 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5070 "0727 TMF %s to TGT %d LUN %llu "
5071 "failed (%d, %d) iocb_flag x%x\n",
5072 lpfc_taskmgmt_name(task_mgmt_cmd),
5073 tgt_id, lun_id,
5074 iocbqrsp->iocb.ulpStatus,
5075 iocbqrsp->iocb.un.ulpWord[4],
5076 iocbq->iocb_flag);
5077 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5078 if (status == IOCB_SUCCESS) {
5079 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5080 /* Something in the FCP_RSP was invalid.
5081 * Check conditions */
5082 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5083 else
5084 ret = FAILED;
5085 } else if (status == IOCB_TIMEDOUT) {
5086 ret = TIMEOUT_ERROR;
5087 } else {
5088 ret = FAILED;
5089 }
5090 } else
5091 ret = SUCCESS;
5092
5093 lpfc_sli_release_iocbq(phba, iocbqrsp);
5094
5095 if (ret != TIMEOUT_ERROR)
5096 lpfc_release_scsi_buf(phba, lpfc_cmd);
5097
5098 return ret;
5099}
5100
5101/**
5102 * lpfc_chk_tgt_mapped - Wait for the scsi target (rport) to become mapped
5103 * @vport: The virtual port to check on
5104 * @cmnd: Pointer to scsi_cmnd data structure.
5105 *
5106 * This routine delays until the scsi target (aka rport) for the
5107 * command exists (is present and logged in) or we declare it non-existent.
5108 *
5109 * Return code :
5110 * 0x2003 - Error
5111 * 0x2002 - Success
5112 **/
5113static int
5114lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5115{
5116 struct lpfc_rport_data *rdata;
5117 struct lpfc_nodelist *pnode;
5118 unsigned long later;
5119
5120 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5121 if (!rdata) {
5122 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5123 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5124 return FAILED;
5125 }
5126 pnode = rdata->pnode;
5127 /*
5128 * If target is not in a MAPPED state, delay until
5129 * target is rediscovered or devloss timeout expires.
5130 */
5131 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5132 while (time_after(later, jiffies)) {
5133 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5134 return FAILED;
5135 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5136 return SUCCESS;
5137 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5138 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5139 if (!rdata)
5140 return FAILED;
5141 pnode = rdata->pnode;
5142 }
5143 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5144 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5145 return FAILED;
5146 return SUCCESS;
5147}
5148
5149/**
5150 * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset TMF
5151 * @vport: The virtual port (scsi_host) for the flush context
5152 * @tgt_id: If aborting by Target context - specifies the target id
5153 * @lun_id: If aborting by Lun context - specifies the lun id
5154 * @context: specifies the context level to flush at.
5155 *
5156 * After a reset condition via TMF, we need to flush orphaned i/o
5157 * contexts from the adapter. This routine aborts any contexts
5158 * outstanding, then waits for their completions. The wait is
5159 * bounded by devloss_tmo though.
5160 *
5161 * Return code :
5162 * 0x2003 - Error
5163 * 0x2002 - Success
5164 **/
5165static int
5166lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5167 uint64_t lun_id, lpfc_ctx_cmd context)
5168{
5169 struct lpfc_hba *phba = vport->phba;
5170 unsigned long later;
5171 int cnt;
5172
5173 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
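	/*
	 * Abort any I/O still outstanding for this context, then poll until it
	 * drains or twice the devloss timeout elapses.
	 */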
5174 if (cnt)
5175 lpfc_sli_abort_taskmgmt(vport,
5176 &phba->sli.sli3_ring[LPFC_FCP_RING],
5177 tgt_id, lun_id, context);
5178 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5179 while (time_after(later, jiffies) && cnt) {
5180 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5181 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5182 }
5183 if (cnt) {
5184 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5185 "0724 I/O flush failure for context %s : cnt x%x\n",
5186 ((context == LPFC_CTX_LUN) ? "LUN" :
5187 ((context == LPFC_CTX_TGT) ? "TGT" :
5188 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5189 cnt);
5190 return FAILED;
5191 }
5192 return SUCCESS;
5193}
5194
5195/**
5196 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5197 * @cmnd: Pointer to scsi_cmnd data structure.
5198 *
5199 * This routine does a device reset by sending a LUN_RESET task management
5200 * command.
5201 *
5202 * Return code :
5203 * 0x2003 - Error
5204 * 0x2002 - Success
5205 **/
5206static int
5207lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5208{
5209 struct Scsi_Host *shost = cmnd->device->host;
5210 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5211 struct lpfc_rport_data *rdata;
5212 struct lpfc_nodelist *pnode;
5213 unsigned tgt_id = cmnd->device->id;
5214 uint64_t lun_id = cmnd->device->lun;
5215 struct lpfc_scsi_event_header scsi_event;
5216 int status;
5217
5218 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5219 if (!rdata || !rdata->pnode) {
5220 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5221 "0798 Device Reset rdata failure: rdata x%px\n",
5222 rdata);
5223 return FAILED;
5224 }
5225 pnode = rdata->pnode;
5226 status = fc_block_scsi_eh(cmnd);
5227 if (status != 0 && status != SUCCESS)
5228 return status;
5229
5230 status = lpfc_chk_tgt_mapped(vport, cmnd);
5231 if (status == FAILED) {
5232 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5233 "0721 Device Reset rport failure: rdata x%px\n", rdata);
5234 return FAILED;
5235 }
5236
5237 scsi_event.event_type = FC_REG_SCSI_EVENT;
5238 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5239 scsi_event.lun = lun_id;
5240 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5241 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5242
5243 fc_host_post_vendor_event(shost, fc_get_event_number(),
5244 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5245
5246 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5247 FCP_LUN_RESET);
5248
5249 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5250 "0713 SCSI layer issued Device Reset (%d, %llu) "
5251 "return x%x\n", tgt_id, lun_id, status);
5252
5253 /*
5254	 * We have to clean up the i/o in any case: it may have been orphaned by
5255	 * the TMF; or, if the TMF failed, it may be in an indeterminate state.
5256 * So, continue on.
5257 * We will report success if all the i/o aborts successfully.
5258 */
5259 if (status == SUCCESS)
5260 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5261 LPFC_CTX_LUN);
5262
5263 return status;
5264}
5265
5266/**
5267 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5268 * @cmnd: Pointer to scsi_cmnd data structure.
5269 *
5270 * This routine does a target reset by sending a TARGET_RESET task management
5271 * command.
5272 *
5273 * Return code :
5274 * 0x2003 - Error
5275 * 0x2002 - Success
5276 **/
5277static int
5278lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5279{
5280 struct Scsi_Host *shost = cmnd->device->host;
5281 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5282 struct lpfc_rport_data *rdata;
5283 struct lpfc_nodelist *pnode;
5284 unsigned tgt_id = cmnd->device->id;
5285 uint64_t lun_id = cmnd->device->lun;
5286 struct lpfc_scsi_event_header scsi_event;
5287 int status;
5288
5289 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5290 if (!rdata || !rdata->pnode) {
5291 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5292 "0799 Target Reset rdata failure: rdata x%px\n",
5293 rdata);
5294 return FAILED;
5295 }
5296 pnode = rdata->pnode;
5297 status = fc_block_scsi_eh(cmnd);
5298 if (status != 0 && status != SUCCESS)
5299 return status;
5300
5301 status = lpfc_chk_tgt_mapped(vport, cmnd);
5302 if (status == FAILED) {
5303 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5304 "0722 Target Reset rport failure: rdata x%px\n", rdata);
5305 if (pnode) {
5306 spin_lock_irq(shost->host_lock);
5307 pnode->nlp_flag &= ~NLP_NPR_ADISC;
5308 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5309 spin_unlock_irq(shost->host_lock);
5310 }
5311 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5312 LPFC_CTX_TGT);
5313 return FAST_IO_FAIL;
5314 }
5315
5316 scsi_event.event_type = FC_REG_SCSI_EVENT;
5317 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5318 scsi_event.lun = 0;
5319 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5320 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5321
5322 fc_host_post_vendor_event(shost, fc_get_event_number(),
5323 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5324
5325 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5326 FCP_TARGET_RESET);
5327
5328 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5329 "0723 SCSI layer issued Target Reset (%d, %llu) "
5330 "return x%x\n", tgt_id, lun_id, status);
5331
5332 /*
5333	 * We have to clean up the i/o in any case: it may have been orphaned by
5334	 * the TMF; or, if the TMF failed, it may be in an indeterminate state.
5335 * So, continue on.
5336 * We will report success if all the i/o aborts successfully.
5337 */
5338 if (status == SUCCESS)
5339 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5340 LPFC_CTX_TGT);
5341 return status;
5342}
5343
5344/**
5345 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5346 * @cmnd: Pointer to scsi_cmnd data structure.
5347 *
5348 * This routine does target reset to all targets on @cmnd->device->host.
5349 * This emulates Parallel SCSI Bus Reset Semantics.
5350 *
5351 * Return code :
5352 * 0x2003 - Error
5353 * 0x2002 - Success
5354 **/
5355static int
5356lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5357{
5358 struct Scsi_Host *shost = cmnd->device->host;
5359 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5360 struct lpfc_nodelist *ndlp = NULL;
5361 struct lpfc_scsi_event_header scsi_event;
5362 int match;
5363 int ret = SUCCESS, status, i;
5364
5365 scsi_event.event_type = FC_REG_SCSI_EVENT;
5366 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5367 scsi_event.lun = 0;
5368 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5369 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5370
5371 fc_host_post_vendor_event(shost, fc_get_event_number(),
5372 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5373
5374 status = fc_block_scsi_eh(cmnd);
5375 if (status != 0 && status != SUCCESS)
5376 return status;
5377
5378 /*
5379 * Since the driver manages a single bus device, reset all
5380 * targets known to the driver. Should any target reset
5381 * fail, this routine returns failure to the midlayer.
5382 */
5383 for (i = 0; i < LPFC_MAX_TARGET; i++) {
5384 /* Search for mapped node by target ID */
5385 match = 0;
5386 spin_lock_irq(shost->host_lock);
5387 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5388 if (!NLP_CHK_NODE_ACT(ndlp))
5389 continue;
5390 if (vport->phba->cfg_fcp2_no_tgt_reset &&
5391 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5392 continue;
5393 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5394 ndlp->nlp_sid == i &&
5395 ndlp->rport &&
5396 ndlp->nlp_type & NLP_FCP_TARGET) {
5397 match = 1;
5398 break;
5399 }
5400 }
5401 spin_unlock_irq(shost->host_lock);
5402 if (!match)
5403 continue;
5404
5405 status = lpfc_send_taskmgmt(vport, cmnd,
5406 i, 0, FCP_TARGET_RESET);
5407
5408 if (status != SUCCESS) {
5409 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5410 "0700 Bus Reset on target %d failed\n",
5411 i);
5412 ret = FAILED;
5413 }
5414 }
5415	/*
5416	 * We have to clean up the I/O here: commands may have been orphaned by
5417	 * the TMFs above, or, if any of the TMFs failed, they may be in an
5418	 * indeterminate state.
5419	 * We will report success only if all the I/O aborts complete successfully.
5420	 */
5421
5422 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5423 if (status != SUCCESS)
5424 ret = FAILED;
5425
5426 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5427 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5428 return ret;
5429}
5430
5431/**
5432 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5433 * @cmnd: Pointer to scsi_cmnd data structure.
5434 *
5435 * This routine does a host reset of the adapter port. It brings the HBA
5436 * offline, performs a board restart, and then brings the board back online.
5437 * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
5438 * all outstanding SCSI commands on the host, returning errors back to the
5439 * SCSI mid-layer. As this is the SCSI mid-layer's last resort of error
5440 * handling, it only returns an error if resetting the adapter is not
5441 * successful; in all other cases, it returns success.
5442 *
5443 * Return code :
5444 * 0x2003 - Error
5445 * 0x2002 - Success
5446 **/
5447static int
5448lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5449{
5450 struct Scsi_Host *shost = cmnd->device->host;
5451 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5452 struct lpfc_hba *phba = vport->phba;
5453 int rc, ret = SUCCESS;
5454
5455 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5456 "3172 SCSI layer issued Host Reset Data:\n");
5457
5458 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5459 lpfc_offline(phba);
5460 rc = lpfc_sli_brdrestart(phba);
5461 if (rc)
5462 goto error;
5463
5464 rc = lpfc_online(phba);
5465 if (rc)
5466 goto error;
5467
5468 lpfc_unblock_mgmt_io(phba);
5469
5470 return ret;
5471error:
5472 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5473 "3323 Failed host reset\n");
5474 lpfc_unblock_mgmt_io(phba);
5475 return FAILED;
5476}
5477
5478/**
5479 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5480 * @sdev: Pointer to scsi_device.
5481 *
5482 * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
5483 * available list of scsi buffers. It also makes sure that no more scsi
5484 * buffers are allocated than the HBA limit conveyed to the midlayer. This
5485 * list of scsi buffers exists for the lifetime of the driver.
5486 *
5487 * Return codes:
5488 * non-0 - Error
5489 * 0 - Success
5490 **/
5491static int
5492lpfc_slave_alloc(struct scsi_device *sdev)
5493{
5494 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5495 struct lpfc_hba *phba = vport->phba;
5496 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5497 uint32_t total = 0;
5498 uint32_t num_to_alloc = 0;
5499 int num_allocated = 0;
5500 uint32_t sdev_cnt;
5501 struct lpfc_device_data *device_data;
5502 unsigned long flags;
5503 struct lpfc_name target_wwpn;
5504
5505 if (!rport || fc_remote_port_chkready(rport))
5506 return -ENXIO;
5507
5508 if (phba->cfg_fof) {
5509
5510 /*
5511 * Check to see if the device data structure for the lun
5512 * exists. If not, create one.
5513 */
5514
5515 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5516 spin_lock_irqsave(&phba->devicelock, flags);
5517 device_data = __lpfc_get_device_data(phba,
5518 &phba->luns,
5519 &vport->fc_portname,
5520 &target_wwpn,
5521 sdev->lun);
5522 if (!device_data) {
5523 spin_unlock_irqrestore(&phba->devicelock, flags);
5524 device_data = lpfc_create_device_data(phba,
5525 &vport->fc_portname,
5526 &target_wwpn,
5527 sdev->lun,
5528 phba->cfg_XLanePriority,
5529 true);
5530 if (!device_data)
5531 return -ENOMEM;
5532 spin_lock_irqsave(&phba->devicelock, flags);
5533 list_add_tail(&device_data->listentry, &phba->luns);
5534 }
5535 device_data->rport_data = rport->dd_data;
5536 device_data->available = true;
5537 spin_unlock_irqrestore(&phba->devicelock, flags);
5538 sdev->hostdata = device_data;
5539 } else {
5540 sdev->hostdata = rport->dd_data;
5541 }
5542 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5543
5544 /* For SLI4, all IO buffers are pre-allocated */
5545 if (phba->sli_rev == LPFC_SLI_REV4)
5546 return 0;
5547
5548 /* This code path is now ONLY for SLI3 adapters */
5549
5550 /*
5551 * Populate the cmds_per_lun count scsi_bufs into this host's globally
5552 * available list of scsi buffers. Don't allocate more than the
5553 * HBA limit conveyed to the midlayer via the host structure. The
5554 * formula accounts for the lun_queue_depth + error handlers + 1
5555 * extra. This list of scsi bufs exists for the lifetime of the driver.
5556 */
5557 total = phba->total_scsi_bufs;
5558 num_to_alloc = vport->cfg_lun_queue_depth + 2;
5559
5560 /* If allocated buffers are enough do nothing */
5561 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5562 return 0;
5563
5564 /* Allow some exchanges to be available always to complete discovery */
5565	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5566 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5567 "0704 At limitation of %d preallocated "
5568 "command buffers\n", total);
5569 return 0;
5570 /* Allow some exchanges to be available always to complete discovery */
5571 } else if (total + num_to_alloc >
5572		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5573 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5574 "0705 Allocation request of %d "
5575 "command buffers will exceed max of %d. "
5576 "Reducing allocation request to %d.\n",
5577 num_to_alloc, phba->cfg_hba_queue_depth,
5578 (phba->cfg_hba_queue_depth - total));
5579 num_to_alloc = phba->cfg_hba_queue_depth - total;
5580 }
5581 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
5582 if (num_to_alloc != num_allocated) {
5583 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5584 "0708 Allocation request of %d "
5585 "command buffers did not succeed. "
5586 "Allocated %d buffers.\n",
5587 num_to_alloc, num_allocated);
5588 }
5589 if (num_allocated > 0)
5590 phba->total_scsi_bufs += num_allocated;
5591 return 0;
5592}
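
/*
 * Sizing sketch (illustrative only, not part of the driver): with a
 * hypothetical cfg_lun_queue_depth of 30 and cfg_hba_queue_depth of 512,
 * each newly scanned SLI3 device asks for num_to_alloc = 30 + 2 = 32
 * buffers.  If total is already at or above
 * 512 - LPFC_DISC_IOCB_BUFF_COUNT, nothing more is allocated (message
 * 0704).  If total + 32 would cross that threshold, the request is
 * trimmed to 512 - total (message 0705) before lpfc_new_scsi_buf_s3()
 * is called.  The real limits come from the module parameters and the
 * HBA capabilities.
 */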
5593
5594/**
5595 * lpfc_slave_configure - scsi_host_template slave_configure entry point
5596 * @sdev: Pointer to scsi_device.
5597 *
5598 * This routine configures the following items:
5599 * - Tag command queuing support for @sdev if supported.
5600 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5601 *
5602 * Return codes:
5603 * 0 - Success
5604 **/
5605static int
5606lpfc_slave_configure(struct scsi_device *sdev)
5607{
5608 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5609 struct lpfc_hba *phba = vport->phba;
5610
5611 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5612
5613 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5614 lpfc_sli_handle_fast_ring_event(phba,
5615 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5616 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5617 lpfc_poll_rearm_timer(phba);
5618 }
5619
5620 return 0;
5621}
5622
5623/**
5624 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5625 * @sdev: Pointer to scsi_device.
5626 *
5627 * This routine sets the @sdev hostdata field to NULL.
5628 **/
5629static void
5630lpfc_slave_destroy(struct scsi_device *sdev)
5631{
5632 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5633 struct lpfc_hba *phba = vport->phba;
5634 unsigned long flags;
5635 struct lpfc_device_data *device_data = sdev->hostdata;
5636
5637 atomic_dec(&phba->sdev_cnt);
5638 if ((phba->cfg_fof) && (device_data)) {
5639 spin_lock_irqsave(&phba->devicelock, flags);
5640 device_data->available = false;
5641 if (!device_data->oas_enabled)
5642 lpfc_delete_device_data(phba, device_data);
5643 spin_unlock_irqrestore(&phba->devicelock, flags);
5644 }
5645 sdev->hostdata = NULL;
5646 return;
5647}
5648
5649/**
5650 * lpfc_create_device_data - creates and initializes device data structure for OAS
5651 * @phba: Pointer to host bus adapter structure.
5652 * @vport_wwpn: Pointer to vport's wwpn information
5653 * @target_wwpn: Pointer to target's wwpn information
5654 * @lun: Lun on target
 * @pri: Priority for the lun
5655 * @atomic_create: Flag to indicate if memory should be allocated using the
5656 * GFP_ATOMIC flag or not.
5657 *
5658 * This routine creates a device data structure which will contain identifying
5659 * information for the device (host wwpn, target wwpn, lun), state of OAS,
5660 * whether or not the corresponding lun is available to the system,
5661 * and pointer to the rport data.
5662 *
5663 * Return codes:
5664 * NULL - Error
5665 * Pointer to lpfc_device_data - Success
5666 **/
5667struct lpfc_device_data*
5668lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5669 struct lpfc_name *target_wwpn, uint64_t lun,
5670 uint32_t pri, bool atomic_create)
5671{
5672
5673 struct lpfc_device_data *lun_info;
5674 int memory_flags;
5675
5676 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5677 !(phba->cfg_fof))
5678 return NULL;
5679
5680 /* Attempt to create the device data to contain lun info */
5681
5682 if (atomic_create)
5683 memory_flags = GFP_ATOMIC;
5684 else
5685 memory_flags = GFP_KERNEL;
5686 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5687 if (!lun_info)
5688 return NULL;
5689 INIT_LIST_HEAD(&lun_info->listentry);
5690 lun_info->rport_data = NULL;
5691 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5692 sizeof(struct lpfc_name));
5693 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5694 sizeof(struct lpfc_name));
5695 lun_info->device_id.lun = lun;
5696 lun_info->oas_enabled = false;
5697 lun_info->priority = pri;
5698 lun_info->available = false;
5699 return lun_info;
5700}
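
/*
 * Illustrative sketch (not part of the driver): a caller that already
 * holds phba->devicelock creating a lun entry with atomic_create set
 * and queuing it on the phba->luns list, much as lpfc_enable_oas_lun()
 * does later in this file.  The "pri" value here is hypothetical.
 *
 *	struct lpfc_device_data *lun_info;
 *
 *	lun_info = lpfc_create_device_data(phba, &vport->fc_portname,
 *					   &target_wwpn, lun, pri, true);
 *	if (lun_info)
 *		list_add_tail(&lun_info->listentry, &phba->luns);
 */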
5701
5702/**
5703 * lpfc_delete_device_data - frees a device data structure for OAS
5704 * @phba: Pointer to host bus adapter structure.
5705 * @lun_info: Pointer to device data structure to free.
5706 *
5707 * This routine frees the previously allocated device data structure passed.
5708 *
5709 **/
5710void
5711lpfc_delete_device_data(struct lpfc_hba *phba,
5712 struct lpfc_device_data *lun_info)
5713{
5714
5715 if (unlikely(!phba) || !lun_info ||
5716 !(phba->cfg_fof))
5717 return;
5718
5719 if (!list_empty(&lun_info->listentry))
5720 list_del(&lun_info->listentry);
5721 mempool_free(lun_info, phba->device_data_mem_pool);
5722 return;
5723}
5724
5725/**
5726 * __lpfc_get_device_data - returns the device data for the specified lun
5727 * @phba: Pointer to host bus adapter structure.
5728 * @list: Pointer to the list to search.
5729 * @vport_wwpn: Pointer to vport's wwpn information
5730 * @target_wwpn: Pointer to target's wwpn information
5731 * @lun: Lun on target
5732 *
5733 * This routine searches the list passed for the specified lun's device data.
5734 * This function does not hold locks, it is the responsibility of the caller
5735 * to ensure the proper lock is held before calling the function.
5736 *
5737 * Return codes:
5738 * NULL - Error
5739 * Pointer to lpfc_device_data - Success
5740 **/
5741struct lpfc_device_data*
5742__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5743 struct lpfc_name *vport_wwpn,
5744 struct lpfc_name *target_wwpn, uint64_t lun)
5745{
5746
5747 struct lpfc_device_data *lun_info;
5748
5749 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5750 !phba->cfg_fof)
5751 return NULL;
5752
5753 /* Check to see if the lun is already enabled for OAS. */
5754
5755 list_for_each_entry(lun_info, list, listentry) {
5756 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5757 sizeof(struct lpfc_name)) == 0) &&
5758 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5759 sizeof(struct lpfc_name)) == 0) &&
5760 (lun_info->device_id.lun == lun))
5761 return lun_info;
5762 }
5763
5764 return NULL;
5765}
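
/*
 * Illustrative sketch (not part of the driver): looking up a lun's
 * device data with phba->devicelock held, mirroring the lookup done in
 * lpfc_slave_alloc() above.  sdev, vport and target_wwpn are assumed
 * to be in scope.
 *
 *	struct lpfc_device_data *device_data;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	device_data = __lpfc_get_device_data(phba, &phba->luns,
 *					     &vport->fc_portname,
 *					     &target_wwpn, sdev->lun);
 *	if (device_data)
 *		device_data->available = true;
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 */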
5766
5767/**
5768 * lpfc_find_next_oas_lun - searches for the next oas lun
5769 * @phba: Pointer to host bus adapter structure.
5770 * @vport_wwpn: Pointer to vport's wwpn information
5771 * @target_wwpn: Pointer to target's wwpn information
5772 * @starting_lun: Pointer to the lun to start searching for
5773 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5774 * @found_target_wwpn: Pointer to the found lun's target wwpn information
5775 * @found_lun: Pointer to the found lun.
5776 * @found_lun_status: Pointer to status of the found lun.
 * @found_lun_pri: Pointer to priority of the found lun.
5777 *
5778 * This routine searches the luns list for the specified lun
5779 * or the first lun for the vport/target. If the vport wwpn contains
5780 * a zero value then a specific vport is not specified. In this case
5781 * any vport which contains the lun will be considered a match. If the
5782 * target wwpn contains a zero value then a specific target is not specified.
5783 * In this case any target which contains the lun will be considered a
5784 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
5785 * are returned. The function will also return the next lun if available.
5786 * If the next lun is not found, the starting_lun parameter will be set to
5787 * NO_MORE_OAS_LUN.
5788 *
5789 * Return codes:
5790 * true - lun found and its data returned
5791 * false - lun not found
5792 **/
5793bool
5794lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5795 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5796 struct lpfc_name *found_vport_wwpn,
5797 struct lpfc_name *found_target_wwpn,
5798 uint64_t *found_lun,
5799 uint32_t *found_lun_status,
5800 uint32_t *found_lun_pri)
5801{
5802
5803 unsigned long flags;
5804 struct lpfc_device_data *lun_info;
5805 struct lpfc_device_id *device_id;
5806 uint64_t lun;
5807 bool found = false;
5808
5809 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5810 !starting_lun || !found_vport_wwpn ||
5811 !found_target_wwpn || !found_lun || !found_lun_status ||
5812 (*starting_lun == NO_MORE_OAS_LUN) ||
5813 !phba->cfg_fof)
5814 return false;
5815
5816 lun = *starting_lun;
5817 *found_lun = NO_MORE_OAS_LUN;
5818 *starting_lun = NO_MORE_OAS_LUN;
5819
5820	/* Search for the lun or the lun closest in value */
5821
5822 spin_lock_irqsave(&phba->devicelock, flags);
5823 list_for_each_entry(lun_info, &phba->luns, listentry) {
5824 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5825 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5826 sizeof(struct lpfc_name)) == 0)) &&
5827 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5828 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5829 sizeof(struct lpfc_name)) == 0)) &&
5830 (lun_info->oas_enabled)) {
5831 device_id = &lun_info->device_id;
5832 if ((!found) &&
5833 ((lun == FIND_FIRST_OAS_LUN) ||
5834 (device_id->lun == lun))) {
5835 *found_lun = device_id->lun;
5836 memcpy(found_vport_wwpn,
5837 &device_id->vport_wwpn,
5838 sizeof(struct lpfc_name));
5839 memcpy(found_target_wwpn,
5840 &device_id->target_wwpn,
5841 sizeof(struct lpfc_name));
5842 if (lun_info->available)
5843 *found_lun_status =
5844 OAS_LUN_STATUS_EXISTS;
5845 else
5846 *found_lun_status = 0;
5847 *found_lun_pri = lun_info->priority;
5848 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5849 memset(vport_wwpn, 0x0,
5850 sizeof(struct lpfc_name));
5851 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5852 memset(target_wwpn, 0x0,
5853 sizeof(struct lpfc_name));
5854 found = true;
5855 } else if (found) {
5856 *starting_lun = device_id->lun;
5857 memcpy(vport_wwpn, &device_id->vport_wwpn,
5858 sizeof(struct lpfc_name));
5859 memcpy(target_wwpn, &device_id->target_wwpn,
5860 sizeof(struct lpfc_name));
5861 break;
5862 }
5863 }
5864 }
5865 spin_unlock_irqrestore(&phba->devicelock, flags);
5866 return found;
5867}
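
/*
 * Illustrative sketch (not part of the driver): walking every OAS lun
 * with lpfc_find_next_oas_lun().  Zeroed wwpns match any vport/target;
 * the walk starts at FIND_FIRST_OAS_LUN and ends when the routine
 * leaves starting_lun at NO_MORE_OAS_LUN.  All local names below are
 * hypothetical.
 *
 *	struct lpfc_name vwwpn, twwpn, fvwwpn, ftwwpn;
 *	uint64_t lun = FIND_FIRST_OAS_LUN, found_lun;
 *	uint32_t lun_status, lun_pri;
 *
 *	memset(&vwwpn, 0, sizeof(vwwpn));
 *	memset(&twwpn, 0, sizeof(twwpn));
 *	while (lpfc_find_next_oas_lun(phba, &vwwpn, &twwpn, &lun,
 *				      &fvwwpn, &ftwwpn, &found_lun,
 *				      &lun_status, &lun_pri)) {
 *		(process found_lun here)
 *		if (lun == NO_MORE_OAS_LUN)
 *			break;
 *	}
 */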
5868
5869/**
5870 * lpfc_enable_oas_lun - enables a lun for OAS operations
5871 * @phba: Pointer to host bus adapter structure.
5872 * @vport_wwpn: Pointer to vport's wwpn information
5873 * @target_wwpn: Pointer to target's wwpn information
5874 * @lun: Lun
 * @pri: Priority for the lun
5875 *
5876 * This routine enables a lun for OAS operations. The routine does so by
5877 * doing the following:
5878 *
5879 * 1) Checks to see if the device data for the lun has been created.
5880 * 2) If found, sets the OAS enabled flag if not set and returns.
5881 * 3) Otherwise, creates a device data structure.
5882 * 4) If successfully created, indicates the device data is for an OAS lun,
5883 * marks the lun as not available, and adds it to the list of luns.
5884 *
5885 * Return codes:
5886 * false - Error
5887 * true - Success
5888 **/
5889bool
5890lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5891 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5892{
5893
5894 struct lpfc_device_data *lun_info;
5895 unsigned long flags;
5896
5897 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5898 !phba->cfg_fof)
5899 return false;
5900
5901 spin_lock_irqsave(&phba->devicelock, flags);
5902
5903 /* Check to see if the device data for the lun has been created */
5904 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5905 target_wwpn, lun);
5906 if (lun_info) {
5907 if (!lun_info->oas_enabled)
5908 lun_info->oas_enabled = true;
5909 lun_info->priority = pri;
5910 spin_unlock_irqrestore(&phba->devicelock, flags);
5911 return true;
5912 }
5913
5914	/* Create a lun info structure and add it to the list of luns */
5915 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5916 pri, true);
5917 if (lun_info) {
5918 lun_info->oas_enabled = true;
5919 lun_info->priority = pri;
5920 lun_info->available = false;
5921 list_add_tail(&lun_info->listentry, &phba->luns);
5922 spin_unlock_irqrestore(&phba->devicelock, flags);
5923 return true;
5924 }
5925 spin_unlock_irqrestore(&phba->devicelock, flags);
5926 return false;
5927}
5928
5929/**
5930 * lpfc_disable_oas_lun - disables a lun for OAS operations
5931 * @phba: Pointer to host bus adapter structure.
5932 * @vport_wwpn: Pointer to vport's wwpn information
5933 * @target_wwpn: Pointer to target's wwpn information
5934 * @lun: Lun
 * @pri: Priority for the lun
5935 *
5936 * This routine disables a lun for OAS operations. The routine does so by
5937 * doing the following:
5938 *
5939 * 1) Checks to see if the device data for the lun is created.
5940 * 2) If present, clears the flag indicating this lun is for OAS.
5941 * 3) If the lun is not available to the system, the device data is
5942 * freed.
5943 *
5944 * Return codes:
5945 * false - Error
5946 * true - Success
5947 **/
5948bool
5949lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5950 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5951{
5952
5953 struct lpfc_device_data *lun_info;
5954 unsigned long flags;
5955
5956 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5957 !phba->cfg_fof)
5958 return false;
5959
5960 spin_lock_irqsave(&phba->devicelock, flags);
5961
5962 /* Check to see if the lun is available. */
5963 lun_info = __lpfc_get_device_data(phba,
5964 &phba->luns, vport_wwpn,
5965 target_wwpn, lun);
5966 if (lun_info) {
5967 lun_info->oas_enabled = false;
5968 lun_info->priority = pri;
5969 if (!lun_info->available)
5970 lpfc_delete_device_data(phba, lun_info);
5971 spin_unlock_irqrestore(&phba->devicelock, flags);
5972 return true;
5973 }
5974
5975 spin_unlock_irqrestore(&phba->devicelock, flags);
5976 return false;
5977}
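
/*
 * Illustrative sketch (not part of the driver): turning OAS on and then
 * back off for a single lun.  Both helpers take phba->devicelock
 * internally, so the caller must not hold it.  "pri" is a hypothetical
 * priority value.
 *
 *	if (lpfc_enable_oas_lun(phba, &vport->fc_portname, &target_wwpn,
 *				lun, pri))
 *		(the lun is now flagged for OAS)
 *
 *	if (lpfc_disable_oas_lun(phba, &vport->fc_portname, &target_wwpn,
 *				 lun, pri))
 *		(the OAS flag is cleared; the entry is freed if the lun
 *		 is not currently available)
 */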
5978
5979static int
5980lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5981{
5982 return SCSI_MLQUEUE_HOST_BUSY;
5983}
5984
5985static int
5986lpfc_no_handler(struct scsi_cmnd *cmnd)
5987{
5988 return FAILED;
5989}
5990
5991static int
5992lpfc_no_slave(struct scsi_device *sdev)
5993{
5994 return -ENODEV;
5995}
5996
5997struct scsi_host_template lpfc_template_nvme = {
5998 .module = THIS_MODULE,
5999 .name = LPFC_DRIVER_NAME,
6000 .proc_name = LPFC_DRIVER_NAME,
6001 .info = lpfc_info,
6002 .queuecommand = lpfc_no_command,
6003 .eh_abort_handler = lpfc_no_handler,
6004 .eh_device_reset_handler = lpfc_no_handler,
6005 .eh_target_reset_handler = lpfc_no_handler,
6006 .eh_bus_reset_handler = lpfc_no_handler,
6007 .eh_host_reset_handler = lpfc_no_handler,
6008 .slave_alloc = lpfc_no_slave,
6009 .slave_configure = lpfc_no_slave,
6010 .scan_finished = lpfc_scan_finished,
6011 .this_id = -1,
6012 .sg_tablesize = 1,
6013 .cmd_per_lun = 1,
6014 .shost_attrs = lpfc_hba_attrs,
6015 .max_sectors = 0xFFFF,
6016 .vendor_id = LPFC_NL_VENDOR_ID,
6017 .track_queue_depth = 0,
6018};
6019
6020struct scsi_host_template lpfc_template_no_hr = {
6021 .module = THIS_MODULE,
6022 .name = LPFC_DRIVER_NAME,
6023 .proc_name = LPFC_DRIVER_NAME,
6024 .info = lpfc_info,
6025 .queuecommand = lpfc_queuecommand,
6026 .eh_timed_out = fc_eh_timed_out,
6027 .eh_abort_handler = lpfc_abort_handler,
6028 .eh_device_reset_handler = lpfc_device_reset_handler,
6029 .eh_target_reset_handler = lpfc_target_reset_handler,
6030 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6031 .slave_alloc = lpfc_slave_alloc,
6032 .slave_configure = lpfc_slave_configure,
6033 .slave_destroy = lpfc_slave_destroy,
6034 .scan_finished = lpfc_scan_finished,
6035 .this_id = -1,
6036 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6037 .cmd_per_lun = LPFC_CMD_PER_LUN,
6038 .shost_attrs = lpfc_hba_attrs,
6039 .max_sectors = 0xFFFFFFFF,
6040 .vendor_id = LPFC_NL_VENDOR_ID,
6041 .change_queue_depth = scsi_change_queue_depth,
6042 .track_queue_depth = 1,
6043};
6044
6045struct scsi_host_template lpfc_template = {
6046 .module = THIS_MODULE,
6047 .name = LPFC_DRIVER_NAME,
6048 .proc_name = LPFC_DRIVER_NAME,
6049 .info = lpfc_info,
6050 .queuecommand = lpfc_queuecommand,
6051 .eh_timed_out = fc_eh_timed_out,
6052 .eh_abort_handler = lpfc_abort_handler,
6053 .eh_device_reset_handler = lpfc_device_reset_handler,
6054 .eh_target_reset_handler = lpfc_target_reset_handler,
6055 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6056 .eh_host_reset_handler = lpfc_host_reset_handler,
6057 .slave_alloc = lpfc_slave_alloc,
6058 .slave_configure = lpfc_slave_configure,
6059 .slave_destroy = lpfc_slave_destroy,
6060 .scan_finished = lpfc_scan_finished,
6061 .this_id = -1,
6062 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6063 .cmd_per_lun = LPFC_CMD_PER_LUN,
6064 .shost_attrs = lpfc_hba_attrs,
6065 .max_sectors = 0xFFFF,
6066 .vendor_id = LPFC_NL_VENDOR_ID,
6067 .change_queue_depth = scsi_change_queue_depth,
6068 .track_queue_depth = 1,
6069};
6070
6071struct scsi_host_template lpfc_vport_template = {
6072 .module = THIS_MODULE,
6073 .name = LPFC_DRIVER_NAME,
6074 .proc_name = LPFC_DRIVER_NAME,
6075 .info = lpfc_info,
6076 .queuecommand = lpfc_queuecommand,
6077 .eh_timed_out = fc_eh_timed_out,
6078 .eh_abort_handler = lpfc_abort_handler,
6079 .eh_device_reset_handler = lpfc_device_reset_handler,
6080 .eh_target_reset_handler = lpfc_target_reset_handler,
6081 .slave_alloc = lpfc_slave_alloc,
6082 .slave_configure = lpfc_slave_configure,
6083 .slave_destroy = lpfc_slave_destroy,
6084 .scan_finished = lpfc_scan_finished,
6085 .this_id = -1,
6086 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6087 .cmd_per_lun = LPFC_CMD_PER_LUN,
6088 .shost_attrs = lpfc_vport_attrs,
6089 .max_sectors = 0xFFFF,
6090 .change_queue_depth = scsi_change_queue_depth,
6091 .track_queue_depth = 1,
6092};
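
/*
 * Illustrative sketch (not part of the driver): the generic pattern by
 * which a scsi_host_template such as lpfc_template is handed to the SCSI
 * mid-layer.  lpfc's real Scsi_Host creation lives elsewhere in the
 * driver (lpfc_init.c); "pdev" below is a hypothetical PCI device.
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 *	if (!shost)
 *		return -ENOMEM;
 *	if (scsi_add_host(shost, &pdev->dev)) {
 *		scsi_host_put(shost);
 *		return -ENODEV;
 *	}
 */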