blob: 4731e464dfb95aa975f51ef68666eccda1651179 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001/*
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45#include <linux/module.h>
46#include <linux/kernel.h>
47#include <linux/init.h>
48#include <linux/errno.h>
49#include <linux/blkdev.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/delay.h>
53#include <linux/pci.h>
54#include <linux/interrupt.h>
55#include <linux/aer.h>
56#include <linux/raid_class.h>
57#include <asm/unaligned.h>
58
59#include "mpt3sas_base.h"
60
61#define RAID_CHANNEL 1
62
63#define PCIE_CHANNEL 2
64
65/* forward proto's */
66static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 struct _sas_node *sas_expander);
68static void _firmware_event_work(struct work_struct *work);
69
70static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 struct _sas_device *sas_device);
72static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 u8 retry_count, u8 is_pd);
74static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 struct _pcie_device *pcie_device);
77static void
78_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80
81/* global parameters */
82LIST_HEAD(mpt3sas_ioc_list);
83/* global ioc lock for list operations */
84DEFINE_SPINLOCK(gioc_lock);
85
86MODULE_AUTHOR(MPT3SAS_AUTHOR);
87MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
88MODULE_LICENSE("GPL");
89MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
90MODULE_ALIAS("mpt2sas");
91
92/* local parameters */
93static u8 scsi_io_cb_idx = -1;
94static u8 tm_cb_idx = -1;
95static u8 ctl_cb_idx = -1;
96static u8 base_cb_idx = -1;
97static u8 port_enable_cb_idx = -1;
98static u8 transport_cb_idx = -1;
99static u8 scsih_cb_idx = -1;
100static u8 config_cb_idx = -1;
101static int mpt2_ids;
102static int mpt3_ids;
103
104static u8 tm_tr_cb_idx = -1 ;
105static u8 tm_tr_volume_cb_idx = -1 ;
106static u8 tm_sas_control_cb_idx = -1;
107
108/* command line options */
109static u32 logging_level;
110MODULE_PARM_DESC(logging_level,
111 " bits for enabling additional logging info (default=0)");
112
113
114static ushort max_sectors = 0xFFFF;
115module_param(max_sectors, ushort, 0444);
116MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
117
118
119static int missing_delay[2] = {-1, -1};
120module_param_array(missing_delay, int, NULL, 0444);
121MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
122
123/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
124#define MPT3SAS_MAX_LUN (16895)
125static u64 max_lun = MPT3SAS_MAX_LUN;
126module_param(max_lun, ullong, 0444);
127MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
128
129static ushort hbas_to_enumerate;
130module_param(hbas_to_enumerate, ushort, 0444);
131MODULE_PARM_DESC(hbas_to_enumerate,
132 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
133 1 - enumerates only SAS 2.0 generation HBAs\n \
134 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
135
136/* diag_buffer_enable is bitwise
137 * bit 0 set = TRACE
138 * bit 1 set = SNAPSHOT
139 * bit 2 set = EXTENDED
140 *
141 * Either bit can be set, or both
142 */
143static int diag_buffer_enable = -1;
144module_param(diag_buffer_enable, int, 0444);
145MODULE_PARM_DESC(diag_buffer_enable,
146 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
147static int disable_discovery = -1;
148module_param(disable_discovery, int, 0444);
149MODULE_PARM_DESC(disable_discovery, " disable discovery ");
150
151
152/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
153static int prot_mask = -1;
154module_param(prot_mask, int, 0444);
155MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
156
157static bool enable_sdev_max_qd;
158module_param(enable_sdev_max_qd, bool, 0444);
159MODULE_PARM_DESC(enable_sdev_max_qd,
160 "Enable sdev max qd as can_queue, def=disabled(0)");
161
162/* raid transport support */
163static struct raid_template *mpt3sas_raid_template;
164static struct raid_template *mpt2sas_raid_template;
165
166
167/**
168 * struct sense_info - common structure for obtaining sense keys
169 * @skey: sense key
170 * @asc: additional sense code
171 * @ascq: additional sense code qualifier
172 */
173struct sense_info {
174 u8 skey;
175 u8 asc;
176 u8 ascq;
177};
178
179#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
180#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
181#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
182#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
183#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
184/**
185 * struct fw_event_work - firmware event struct
186 * @list: link list framework
187 * @work: work object (ioc->fault_reset_work_q)
188 * @ioc: per adapter object
189 * @device_handle: device handle
190 * @VF_ID: virtual function id
191 * @VP_ID: virtual port id
192 * @ignore: flag meaning this event has been marked to ignore
193 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
194 * @refcount: kref for this event
195 * @event_data: reply event data payload follows
196 *
197 * This object stored on ioc->fw_event_list.
198 */
199struct fw_event_work {
200 struct list_head list;
201 struct work_struct work;
202
203 struct MPT3SAS_ADAPTER *ioc;
204 u16 device_handle;
205 u8 VF_ID;
206 u8 VP_ID;
207 u8 ignore;
208 u16 event;
209 struct kref refcount;
210 char event_data[0] __aligned(4);
211};
212
213static void fw_event_work_free(struct kref *r)
214{
215 kfree(container_of(r, struct fw_event_work, refcount));
216}
217
218static void fw_event_work_get(struct fw_event_work *fw_work)
219{
220 kref_get(&fw_work->refcount);
221}
222
223static void fw_event_work_put(struct fw_event_work *fw_work)
224{
225 kref_put(&fw_work->refcount, fw_event_work_free);
226}
227
228static struct fw_event_work *alloc_fw_event_work(int len)
229{
230 struct fw_event_work *fw_event;
231
232 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
233 if (!fw_event)
234 return NULL;
235
236 kref_init(&fw_event->refcount);
237 return fw_event;
238}
239
240/**
241 * struct _scsi_io_transfer - scsi io transfer
242 * @handle: sas device handle (assigned by firmware)
243 * @is_raid: flag set for hidden raid components
244 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
245 * @data_length: data transfer length
246 * @data_dma: dma pointer to data
247 * @sense: sense data
248 * @lun: lun number
249 * @cdb_length: cdb length
250 * @cdb: cdb contents
251 * @timeout: timeout for this command
252 * @VF_ID: virtual function id
253 * @VP_ID: virtual port id
254 * @valid_reply: flag set for reply message
255 * @sense_length: sense length
256 * @ioc_status: ioc status
257 * @scsi_state: scsi state
258 * @scsi_status: scsi staus
259 * @log_info: log information
260 * @transfer_length: data length transfer when there is a reply message
261 *
262 * Used for sending internal scsi commands to devices within this module.
263 * Refer to _scsi_send_scsi_io().
264 */
265struct _scsi_io_transfer {
266 u16 handle;
267 u8 is_raid;
268 enum dma_data_direction dir;
269 u32 data_length;
270 dma_addr_t data_dma;
271 u8 sense[SCSI_SENSE_BUFFERSIZE];
272 u32 lun;
273 u8 cdb_length;
274 u8 cdb[32];
275 u8 timeout;
276 u8 VF_ID;
277 u8 VP_ID;
278 u8 valid_reply;
279 /* the following bits are only valid when 'valid_reply = 1' */
280 u32 sense_length;
281 u16 ioc_status;
282 u8 scsi_state;
283 u8 scsi_status;
284 u32 log_info;
285 u32 transfer_length;
286};
287
288/**
289 * _scsih_set_debug_level - global setting of ioc->logging_level.
290 * @val: ?
291 * @kp: ?
292 *
293 * Note: The logging levels are defined in mpt3sas_debug.h.
294 */
295static int
296_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
297{
298 int ret = param_set_int(val, kp);
299 struct MPT3SAS_ADAPTER *ioc;
300
301 if (ret)
302 return ret;
303
304 pr_info("setting logging_level(0x%08x)\n", logging_level);
305 spin_lock(&gioc_lock);
306 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
307 ioc->logging_level = logging_level;
308 spin_unlock(&gioc_lock);
309 return 0;
310}
311module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
312 &logging_level, 0644);
313
314/**
315 * _scsih_srch_boot_sas_address - search based on sas_address
316 * @sas_address: sas address
317 * @boot_device: boot device object from bios page 2
318 *
319 * Return: 1 when there's a match, 0 means no match.
320 */
321static inline int
322_scsih_srch_boot_sas_address(u64 sas_address,
323 Mpi2BootDeviceSasWwid_t *boot_device)
324{
325 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
326}
327
328/**
329 * _scsih_srch_boot_device_name - search based on device name
330 * @device_name: device name specified in INDENTIFY fram
331 * @boot_device: boot device object from bios page 2
332 *
333 * Return: 1 when there's a match, 0 means no match.
334 */
335static inline int
336_scsih_srch_boot_device_name(u64 device_name,
337 Mpi2BootDeviceDeviceName_t *boot_device)
338{
339 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
340}
341
342/**
343 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
344 * @enclosure_logical_id: enclosure logical id
345 * @slot_number: slot number
346 * @boot_device: boot device object from bios page 2
347 *
348 * Return: 1 when there's a match, 0 means no match.
349 */
350static inline int
351_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
352 Mpi2BootDeviceEnclosureSlot_t *boot_device)
353{
354 return (enclosure_logical_id == le64_to_cpu(boot_device->
355 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
356 SlotNumber)) ? 1 : 0;
357}
358
359/**
360 * _scsih_is_boot_device - search for matching boot device.
361 * @sas_address: sas address
362 * @device_name: device name specified in INDENTIFY fram
363 * @enclosure_logical_id: enclosure logical id
364 * @slot: slot number
365 * @form: specifies boot device form
366 * @boot_device: boot device object from bios page 2
367 *
368 * Return: 1 when there's a match, 0 means no match.
369 */
370static int
371_scsih_is_boot_device(u64 sas_address, u64 device_name,
372 u64 enclosure_logical_id, u16 slot, u8 form,
373 Mpi2BiosPage2BootDevice_t *boot_device)
374{
375 int rc = 0;
376
377 switch (form) {
378 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
379 if (!sas_address)
380 break;
381 rc = _scsih_srch_boot_sas_address(
382 sas_address, &boot_device->SasWwid);
383 break;
384 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
385 if (!enclosure_logical_id)
386 break;
387 rc = _scsih_srch_boot_encl_slot(
388 enclosure_logical_id,
389 slot, &boot_device->EnclosureSlot);
390 break;
391 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
392 if (!device_name)
393 break;
394 rc = _scsih_srch_boot_device_name(
395 device_name, &boot_device->DeviceName);
396 break;
397 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
398 break;
399 }
400
401 return rc;
402}
403
404/**
405 * _scsih_get_sas_address - set the sas_address for given device handle
406 * @ioc: ?
407 * @handle: device handle
408 * @sas_address: sas address
409 *
410 * Return: 0 success, non-zero when failure
411 */
412static int
413_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
414 u64 *sas_address)
415{
416 Mpi2SasDevicePage0_t sas_device_pg0;
417 Mpi2ConfigReply_t mpi_reply;
418 u32 ioc_status;
419
420 *sas_address = 0;
421
422 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
423 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
424 ioc_err(ioc, "failure at %s:%d/%s()!\n",
425 __FILE__, __LINE__, __func__);
426 return -ENXIO;
427 }
428
429 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
430 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
431 /* For HBA, vSES doesn't return HBA SAS address. Instead return
432 * vSES's sas address.
433 */
434 if ((handle <= ioc->sas_hba.num_phys) &&
435 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
436 MPI2_SAS_DEVICE_INFO_SEP)))
437 *sas_address = ioc->sas_hba.sas_address;
438 else
439 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
440 return 0;
441 }
442
443 /* we hit this because the given parent handle doesn't exist */
444 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
445 return -ENXIO;
446
447 /* else error case */
448 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
449 handle, ioc_status, __FILE__, __LINE__, __func__);
450 return -EIO;
451}
452
453/**
454 * _scsih_determine_boot_device - determine boot device.
455 * @ioc: per adapter object
456 * @device: sas_device or pcie_device object
457 * @channel: SAS or PCIe channel
458 *
459 * Determines whether this device should be first reported device to
460 * to scsi-ml or sas transport, this purpose is for persistent boot device.
461 * There are primary, alternate, and current entries in bios page 2. The order
462 * priority is primary, alternate, then current. This routine saves
463 * the corresponding device object.
464 * The saved data to be used later in _scsih_probe_boot_devices().
465 */
466static void
467_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
468 u32 channel)
469{
470 struct _sas_device *sas_device;
471 struct _pcie_device *pcie_device;
472 struct _raid_device *raid_device;
473 u64 sas_address;
474 u64 device_name;
475 u64 enclosure_logical_id;
476 u16 slot;
477
478 /* only process this function when driver loads */
479 if (!ioc->is_driver_loading)
480 return;
481
482 /* no Bios, return immediately */
483 if (!ioc->bios_pg3.BiosVersion)
484 return;
485
486 if (channel == RAID_CHANNEL) {
487 raid_device = device;
488 sas_address = raid_device->wwid;
489 device_name = 0;
490 enclosure_logical_id = 0;
491 slot = 0;
492 } else if (channel == PCIE_CHANNEL) {
493 pcie_device = device;
494 sas_address = pcie_device->wwid;
495 device_name = 0;
496 enclosure_logical_id = 0;
497 slot = 0;
498 } else {
499 sas_device = device;
500 sas_address = sas_device->sas_address;
501 device_name = sas_device->device_name;
502 enclosure_logical_id = sas_device->enclosure_logical_id;
503 slot = sas_device->slot;
504 }
505
506 if (!ioc->req_boot_device.device) {
507 if (_scsih_is_boot_device(sas_address, device_name,
508 enclosure_logical_id, slot,
509 (ioc->bios_pg2.ReqBootDeviceForm &
510 MPI2_BIOSPAGE2_FORM_MASK),
511 &ioc->bios_pg2.RequestedBootDevice)) {
512 dinitprintk(ioc,
513 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
514 __func__, (u64)sas_address));
515 ioc->req_boot_device.device = device;
516 ioc->req_boot_device.channel = channel;
517 }
518 }
519
520 if (!ioc->req_alt_boot_device.device) {
521 if (_scsih_is_boot_device(sas_address, device_name,
522 enclosure_logical_id, slot,
523 (ioc->bios_pg2.ReqAltBootDeviceForm &
524 MPI2_BIOSPAGE2_FORM_MASK),
525 &ioc->bios_pg2.RequestedAltBootDevice)) {
526 dinitprintk(ioc,
527 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
528 __func__, (u64)sas_address));
529 ioc->req_alt_boot_device.device = device;
530 ioc->req_alt_boot_device.channel = channel;
531 }
532 }
533
534 if (!ioc->current_boot_device.device) {
535 if (_scsih_is_boot_device(sas_address, device_name,
536 enclosure_logical_id, slot,
537 (ioc->bios_pg2.CurrentBootDeviceForm &
538 MPI2_BIOSPAGE2_FORM_MASK),
539 &ioc->bios_pg2.CurrentBootDevice)) {
540 dinitprintk(ioc,
541 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
542 __func__, (u64)sas_address));
543 ioc->current_boot_device.device = device;
544 ioc->current_boot_device.channel = channel;
545 }
546 }
547}
548
549static struct _sas_device *
550__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
551 struct MPT3SAS_TARGET *tgt_priv)
552{
553 struct _sas_device *ret;
554
555 assert_spin_locked(&ioc->sas_device_lock);
556
557 ret = tgt_priv->sas_dev;
558 if (ret)
559 sas_device_get(ret);
560
561 return ret;
562}
563
564static struct _sas_device *
565mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
566 struct MPT3SAS_TARGET *tgt_priv)
567{
568 struct _sas_device *ret;
569 unsigned long flags;
570
571 spin_lock_irqsave(&ioc->sas_device_lock, flags);
572 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
573 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
574
575 return ret;
576}
577
578static struct _pcie_device *
579__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
580 struct MPT3SAS_TARGET *tgt_priv)
581{
582 struct _pcie_device *ret;
583
584 assert_spin_locked(&ioc->pcie_device_lock);
585
586 ret = tgt_priv->pcie_dev;
587 if (ret)
588 pcie_device_get(ret);
589
590 return ret;
591}
592
593/**
594 * mpt3sas_get_pdev_from_target - pcie device search
595 * @ioc: per adapter object
596 * @tgt_priv: starget private object
597 *
598 * Context: This function will acquire ioc->pcie_device_lock and will release
599 * before returning the pcie_device object.
600 *
601 * This searches for pcie_device from target, then return pcie_device object.
602 */
603static struct _pcie_device *
604mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
605 struct MPT3SAS_TARGET *tgt_priv)
606{
607 struct _pcie_device *ret;
608 unsigned long flags;
609
610 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
611 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
612 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
613
614 return ret;
615}
616
617struct _sas_device *
618__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
619 u64 sas_address)
620{
621 struct _sas_device *sas_device;
622
623 assert_spin_locked(&ioc->sas_device_lock);
624
625 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
626 if (sas_device->sas_address == sas_address)
627 goto found_device;
628
629 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
630 if (sas_device->sas_address == sas_address)
631 goto found_device;
632
633 return NULL;
634
635found_device:
636 sas_device_get(sas_device);
637 return sas_device;
638}
639
640/**
641 * mpt3sas_get_sdev_by_addr - sas device search
642 * @ioc: per adapter object
643 * @sas_address: sas address
644 * Context: Calling function should acquire ioc->sas_device_lock
645 *
646 * This searches for sas_device based on sas_address, then return sas_device
647 * object.
648 */
649struct _sas_device *
650mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
651 u64 sas_address)
652{
653 struct _sas_device *sas_device;
654 unsigned long flags;
655
656 spin_lock_irqsave(&ioc->sas_device_lock, flags);
657 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
658 sas_address);
659 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
660
661 return sas_device;
662}
663
664static struct _sas_device *
665__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
666{
667 struct _sas_device *sas_device;
668
669 assert_spin_locked(&ioc->sas_device_lock);
670
671 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
672 if (sas_device->handle == handle)
673 goto found_device;
674
675 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
676 if (sas_device->handle == handle)
677 goto found_device;
678
679 return NULL;
680
681found_device:
682 sas_device_get(sas_device);
683 return sas_device;
684}
685
686/**
687 * mpt3sas_get_sdev_by_handle - sas device search
688 * @ioc: per adapter object
689 * @handle: sas device handle (assigned by firmware)
690 * Context: Calling function should acquire ioc->sas_device_lock
691 *
692 * This searches for sas_device based on sas_address, then return sas_device
693 * object.
694 */
695struct _sas_device *
696mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
697{
698 struct _sas_device *sas_device;
699 unsigned long flags;
700
701 spin_lock_irqsave(&ioc->sas_device_lock, flags);
702 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
703 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
704
705 return sas_device;
706}
707
708/**
709 * _scsih_display_enclosure_chassis_info - display device location info
710 * @ioc: per adapter object
711 * @sas_device: per sas device object
712 * @sdev: scsi device struct
713 * @starget: scsi target struct
714 */
715static void
716_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
717 struct _sas_device *sas_device, struct scsi_device *sdev,
718 struct scsi_target *starget)
719{
720 if (sdev) {
721 if (sas_device->enclosure_handle != 0)
722 sdev_printk(KERN_INFO, sdev,
723 "enclosure logical id (0x%016llx), slot(%d) \n",
724 (unsigned long long)
725 sas_device->enclosure_logical_id,
726 sas_device->slot);
727 if (sas_device->connector_name[0] != '\0')
728 sdev_printk(KERN_INFO, sdev,
729 "enclosure level(0x%04x), connector name( %s)\n",
730 sas_device->enclosure_level,
731 sas_device->connector_name);
732 if (sas_device->is_chassis_slot_valid)
733 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
734 sas_device->chassis_slot);
735 } else if (starget) {
736 if (sas_device->enclosure_handle != 0)
737 starget_printk(KERN_INFO, starget,
738 "enclosure logical id(0x%016llx), slot(%d) \n",
739 (unsigned long long)
740 sas_device->enclosure_logical_id,
741 sas_device->slot);
742 if (sas_device->connector_name[0] != '\0')
743 starget_printk(KERN_INFO, starget,
744 "enclosure level(0x%04x), connector name( %s)\n",
745 sas_device->enclosure_level,
746 sas_device->connector_name);
747 if (sas_device->is_chassis_slot_valid)
748 starget_printk(KERN_INFO, starget,
749 "chassis slot(0x%04x)\n",
750 sas_device->chassis_slot);
751 } else {
752 if (sas_device->enclosure_handle != 0)
753 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
754 (u64)sas_device->enclosure_logical_id,
755 sas_device->slot);
756 if (sas_device->connector_name[0] != '\0')
757 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
758 sas_device->enclosure_level,
759 sas_device->connector_name);
760 if (sas_device->is_chassis_slot_valid)
761 ioc_info(ioc, "chassis slot(0x%04x)\n",
762 sas_device->chassis_slot);
763 }
764}
765
766/**
767 * _scsih_sas_device_remove - remove sas_device from list.
768 * @ioc: per adapter object
769 * @sas_device: the sas_device object
770 * Context: This function will acquire ioc->sas_device_lock.
771 *
772 * If sas_device is on the list, remove it and decrement its reference count.
773 */
774static void
775_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
776 struct _sas_device *sas_device)
777{
778 unsigned long flags;
779
780 if (!sas_device)
781 return;
782 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
783 sas_device->handle, (u64)sas_device->sas_address);
784
785 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
786
787 /*
788 * The lock serializes access to the list, but we still need to verify
789 * that nobody removed the entry while we were waiting on the lock.
790 */
791 spin_lock_irqsave(&ioc->sas_device_lock, flags);
792 if (!list_empty(&sas_device->list)) {
793 list_del_init(&sas_device->list);
794 sas_device_put(sas_device);
795 }
796 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
797}
798
799/**
800 * _scsih_device_remove_by_handle - removing device object by handle
801 * @ioc: per adapter object
802 * @handle: device handle
803 */
804static void
805_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
806{
807 struct _sas_device *sas_device;
808 unsigned long flags;
809
810 if (ioc->shost_recovery)
811 return;
812
813 spin_lock_irqsave(&ioc->sas_device_lock, flags);
814 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
815 if (sas_device) {
816 list_del_init(&sas_device->list);
817 sas_device_put(sas_device);
818 }
819 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
820 if (sas_device) {
821 _scsih_remove_device(ioc, sas_device);
822 sas_device_put(sas_device);
823 }
824}
825
826/**
827 * mpt3sas_device_remove_by_sas_address - removing device object by sas address
828 * @ioc: per adapter object
829 * @sas_address: device sas_address
830 */
831void
832mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
833 u64 sas_address)
834{
835 struct _sas_device *sas_device;
836 unsigned long flags;
837
838 if (ioc->shost_recovery)
839 return;
840
841 spin_lock_irqsave(&ioc->sas_device_lock, flags);
842 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
843 if (sas_device) {
844 list_del_init(&sas_device->list);
845 sas_device_put(sas_device);
846 }
847 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
848 if (sas_device) {
849 _scsih_remove_device(ioc, sas_device);
850 sas_device_put(sas_device);
851 }
852}
853
854/**
855 * _scsih_sas_device_add - insert sas_device to the list.
856 * @ioc: per adapter object
857 * @sas_device: the sas_device object
858 * Context: This function will acquire ioc->sas_device_lock.
859 *
860 * Adding new object to the ioc->sas_device_list.
861 */
862static void
863_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
864 struct _sas_device *sas_device)
865{
866 unsigned long flags;
867
868 dewtprintk(ioc,
869 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
870 __func__, sas_device->handle,
871 (u64)sas_device->sas_address));
872
873 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
874 NULL, NULL));
875
876 spin_lock_irqsave(&ioc->sas_device_lock, flags);
877 sas_device_get(sas_device);
878 list_add_tail(&sas_device->list, &ioc->sas_device_list);
879 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
880
881 if (ioc->hide_drives) {
882 clear_bit(sas_device->handle, ioc->pend_os_device_add);
883 return;
884 }
885
886 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
887 sas_device->sas_address_parent)) {
888 _scsih_sas_device_remove(ioc, sas_device);
889 } else if (!sas_device->starget) {
890 /*
891 * When asyn scanning is enabled, its not possible to remove
892 * devices while scanning is turned on due to an oops in
893 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
894 */
895 if (!ioc->is_driver_loading) {
896 mpt3sas_transport_port_remove(ioc,
897 sas_device->sas_address,
898 sas_device->sas_address_parent);
899 _scsih_sas_device_remove(ioc, sas_device);
900 }
901 } else
902 clear_bit(sas_device->handle, ioc->pend_os_device_add);
903}
904
905/**
906 * _scsih_sas_device_init_add - insert sas_device to the list.
907 * @ioc: per adapter object
908 * @sas_device: the sas_device object
909 * Context: This function will acquire ioc->sas_device_lock.
910 *
911 * Adding new object at driver load time to the ioc->sas_device_init_list.
912 */
913static void
914_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
915 struct _sas_device *sas_device)
916{
917 unsigned long flags;
918
919 dewtprintk(ioc,
920 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
921 __func__, sas_device->handle,
922 (u64)sas_device->sas_address));
923
924 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
925 NULL, NULL));
926
927 spin_lock_irqsave(&ioc->sas_device_lock, flags);
928 sas_device_get(sas_device);
929 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
930 _scsih_determine_boot_device(ioc, sas_device, 0);
931 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
932}
933
934
935static struct _pcie_device *
936__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
937{
938 struct _pcie_device *pcie_device;
939
940 assert_spin_locked(&ioc->pcie_device_lock);
941
942 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
943 if (pcie_device->wwid == wwid)
944 goto found_device;
945
946 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
947 if (pcie_device->wwid == wwid)
948 goto found_device;
949
950 return NULL;
951
952found_device:
953 pcie_device_get(pcie_device);
954 return pcie_device;
955}
956
957
958/**
959 * mpt3sas_get_pdev_by_wwid - pcie device search
960 * @ioc: per adapter object
961 * @wwid: wwid
962 *
963 * Context: This function will acquire ioc->pcie_device_lock and will release
964 * before returning the pcie_device object.
965 *
966 * This searches for pcie_device based on wwid, then return pcie_device object.
967 */
968static struct _pcie_device *
969mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
970{
971 struct _pcie_device *pcie_device;
972 unsigned long flags;
973
974 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
975 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
976 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
977
978 return pcie_device;
979}
980
981
982static struct _pcie_device *
983__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
984 int channel)
985{
986 struct _pcie_device *pcie_device;
987
988 assert_spin_locked(&ioc->pcie_device_lock);
989
990 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
991 if (pcie_device->id == id && pcie_device->channel == channel)
992 goto found_device;
993
994 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
995 if (pcie_device->id == id && pcie_device->channel == channel)
996 goto found_device;
997
998 return NULL;
999
1000found_device:
1001 pcie_device_get(pcie_device);
1002 return pcie_device;
1003}
1004
1005static struct _pcie_device *
1006__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1007{
1008 struct _pcie_device *pcie_device;
1009
1010 assert_spin_locked(&ioc->pcie_device_lock);
1011
1012 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1013 if (pcie_device->handle == handle)
1014 goto found_device;
1015
1016 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1017 if (pcie_device->handle == handle)
1018 goto found_device;
1019
1020 return NULL;
1021
1022found_device:
1023 pcie_device_get(pcie_device);
1024 return pcie_device;
1025}
1026
1027
1028/**
1029 * mpt3sas_get_pdev_by_handle - pcie device search
1030 * @ioc: per adapter object
1031 * @handle: Firmware device handle
1032 *
1033 * Context: This function will acquire ioc->pcie_device_lock and will release
1034 * before returning the pcie_device object.
1035 *
1036 * This searches for pcie_device based on handle, then return pcie_device
1037 * object.
1038 */
1039struct _pcie_device *
1040mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1041{
1042 struct _pcie_device *pcie_device;
1043 unsigned long flags;
1044
1045 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1046 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1047 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1048
1049 return pcie_device;
1050}
1051
1052/**
1053 * _scsih_pcie_device_remove - remove pcie_device from list.
1054 * @ioc: per adapter object
1055 * @pcie_device: the pcie_device object
1056 * Context: This function will acquire ioc->pcie_device_lock.
1057 *
1058 * If pcie_device is on the list, remove it and decrement its reference count.
1059 */
1060static void
1061_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1062 struct _pcie_device *pcie_device)
1063{
1064 unsigned long flags;
1065 int was_on_pcie_device_list = 0;
1066
1067 if (!pcie_device)
1068 return;
1069 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1070 pcie_device->handle, (u64)pcie_device->wwid);
1071 if (pcie_device->enclosure_handle != 0)
1072 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1073 (u64)pcie_device->enclosure_logical_id,
1074 pcie_device->slot);
1075 if (pcie_device->connector_name[0] != '\0')
1076 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1077 pcie_device->enclosure_level,
1078 pcie_device->connector_name);
1079
1080 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1081 if (!list_empty(&pcie_device->list)) {
1082 list_del_init(&pcie_device->list);
1083 was_on_pcie_device_list = 1;
1084 }
1085 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1086 if (was_on_pcie_device_list) {
1087 kfree(pcie_device->serial_number);
1088 pcie_device_put(pcie_device);
1089 }
1090}
1091
1092
1093/**
1094 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1095 * @ioc: per adapter object
1096 * @handle: device handle
1097 */
1098static void
1099_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1100{
1101 struct _pcie_device *pcie_device;
1102 unsigned long flags;
1103 int was_on_pcie_device_list = 0;
1104
1105 if (ioc->shost_recovery)
1106 return;
1107
1108 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1109 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1110 if (pcie_device) {
1111 if (!list_empty(&pcie_device->list)) {
1112 list_del_init(&pcie_device->list);
1113 was_on_pcie_device_list = 1;
1114 pcie_device_put(pcie_device);
1115 }
1116 }
1117 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1118 if (was_on_pcie_device_list) {
1119 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1120 pcie_device_put(pcie_device);
1121 }
1122}
1123
1124/**
1125 * _scsih_pcie_device_add - add pcie_device object
1126 * @ioc: per adapter object
1127 * @pcie_device: pcie_device object
1128 *
1129 * This is added to the pcie_device_list link list.
1130 */
1131static void
1132_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1133 struct _pcie_device *pcie_device)
1134{
1135 unsigned long flags;
1136
1137 dewtprintk(ioc,
1138 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1139 __func__,
1140 pcie_device->handle, (u64)pcie_device->wwid));
1141 if (pcie_device->enclosure_handle != 0)
1142 dewtprintk(ioc,
1143 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1144 __func__,
1145 (u64)pcie_device->enclosure_logical_id,
1146 pcie_device->slot));
1147 if (pcie_device->connector_name[0] != '\0')
1148 dewtprintk(ioc,
1149 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1150 __func__, pcie_device->enclosure_level,
1151 pcie_device->connector_name));
1152
1153 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1154 pcie_device_get(pcie_device);
1155 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1156 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1157
1158 if (pcie_device->access_status ==
1159 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1160 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1161 return;
1162 }
1163 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1164 _scsih_pcie_device_remove(ioc, pcie_device);
1165 } else if (!pcie_device->starget) {
1166 if (!ioc->is_driver_loading) {
1167/*TODO-- Need to find out whether this condition will occur or not*/
1168 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1169 }
1170 } else
1171 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1172}
1173
1174/*
1175 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1176 * @ioc: per adapter object
1177 * @pcie_device: the pcie_device object
1178 * Context: This function will acquire ioc->pcie_device_lock.
1179 *
1180 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1181 */
1182static void
1183_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1184 struct _pcie_device *pcie_device)
1185{
1186 unsigned long flags;
1187
1188 dewtprintk(ioc,
1189 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1190 __func__,
1191 pcie_device->handle, (u64)pcie_device->wwid));
1192 if (pcie_device->enclosure_handle != 0)
1193 dewtprintk(ioc,
1194 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1195 __func__,
1196 (u64)pcie_device->enclosure_logical_id,
1197 pcie_device->slot));
1198 if (pcie_device->connector_name[0] != '\0')
1199 dewtprintk(ioc,
1200 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1201 __func__, pcie_device->enclosure_level,
1202 pcie_device->connector_name));
1203
1204 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205 pcie_device_get(pcie_device);
1206 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1207 if (pcie_device->access_status !=
1208 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1209 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1210 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1211}
1212/**
1213 * _scsih_raid_device_find_by_id - raid device search
1214 * @ioc: per adapter object
1215 * @id: sas device target id
1216 * @channel: sas device channel
1217 * Context: Calling function should acquire ioc->raid_device_lock
1218 *
1219 * This searches for raid_device based on target id, then return raid_device
1220 * object.
1221 */
1222static struct _raid_device *
1223_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1224{
1225 struct _raid_device *raid_device, *r;
1226
1227 r = NULL;
1228 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1229 if (raid_device->id == id && raid_device->channel == channel) {
1230 r = raid_device;
1231 goto out;
1232 }
1233 }
1234
1235 out:
1236 return r;
1237}
1238
1239/**
1240 * mpt3sas_raid_device_find_by_handle - raid device search
1241 * @ioc: per adapter object
1242 * @handle: sas device handle (assigned by firmware)
1243 * Context: Calling function should acquire ioc->raid_device_lock
1244 *
1245 * This searches for raid_device based on handle, then return raid_device
1246 * object.
1247 */
1248struct _raid_device *
1249mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1250{
1251 struct _raid_device *raid_device, *r;
1252
1253 r = NULL;
1254 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1255 if (raid_device->handle != handle)
1256 continue;
1257 r = raid_device;
1258 goto out;
1259 }
1260
1261 out:
1262 return r;
1263}
1264
1265/**
1266 * _scsih_raid_device_find_by_wwid - raid device search
1267 * @ioc: per adapter object
1268 * @wwid: ?
1269 * Context: Calling function should acquire ioc->raid_device_lock
1270 *
1271 * This searches for raid_device based on wwid, then return raid_device
1272 * object.
1273 */
1274static struct _raid_device *
1275_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1276{
1277 struct _raid_device *raid_device, *r;
1278
1279 r = NULL;
1280 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1281 if (raid_device->wwid != wwid)
1282 continue;
1283 r = raid_device;
1284 goto out;
1285 }
1286
1287 out:
1288 return r;
1289}
1290
1291/**
1292 * _scsih_raid_device_add - add raid_device object
1293 * @ioc: per adapter object
1294 * @raid_device: raid_device object
1295 *
1296 * This is added to the raid_device_list link list.
1297 */
1298static void
1299_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1300 struct _raid_device *raid_device)
1301{
1302 unsigned long flags;
1303
1304 dewtprintk(ioc,
1305 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1306 __func__,
1307 raid_device->handle, (u64)raid_device->wwid));
1308
1309 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1310 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1311 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1312}
1313
1314/**
1315 * _scsih_raid_device_remove - delete raid_device object
1316 * @ioc: per adapter object
1317 * @raid_device: raid_device object
1318 *
1319 */
1320static void
1321_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1322 struct _raid_device *raid_device)
1323{
1324 unsigned long flags;
1325
1326 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1327 list_del(&raid_device->list);
1328 kfree(raid_device);
1329 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1330}
1331
1332/**
1333 * mpt3sas_scsih_expander_find_by_handle - expander device search
1334 * @ioc: per adapter object
1335 * @handle: expander handle (assigned by firmware)
1336 * Context: Calling function should acquire ioc->sas_device_lock
1337 *
1338 * This searches for expander device based on handle, then returns the
1339 * sas_node object.
1340 */
1341struct _sas_node *
1342mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1343{
1344 struct _sas_node *sas_expander, *r;
1345
1346 r = NULL;
1347 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1348 if (sas_expander->handle != handle)
1349 continue;
1350 r = sas_expander;
1351 goto out;
1352 }
1353 out:
1354 return r;
1355}
1356
1357/**
1358 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1359 * @ioc: per adapter object
1360 * @handle: enclosure handle (assigned by firmware)
1361 * Context: Calling function should acquire ioc->sas_device_lock
1362 *
1363 * This searches for enclosure device based on handle, then returns the
1364 * enclosure object.
1365 */
1366static struct _enclosure_node *
1367mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1368{
1369 struct _enclosure_node *enclosure_dev, *r;
1370
1371 r = NULL;
1372 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1373 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1374 continue;
1375 r = enclosure_dev;
1376 goto out;
1377 }
1378out:
1379 return r;
1380}
1381/**
1382 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1383 * @ioc: per adapter object
1384 * @sas_address: sas address
1385 * Context: Calling function should acquire ioc->sas_node_lock.
1386 *
1387 * This searches for expander device based on sas_address, then returns the
1388 * sas_node object.
1389 */
1390struct _sas_node *
1391mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1392 u64 sas_address)
1393{
1394 struct _sas_node *sas_expander, *r;
1395
1396 r = NULL;
1397 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1398 if (sas_expander->sas_address != sas_address)
1399 continue;
1400 r = sas_expander;
1401 goto out;
1402 }
1403 out:
1404 return r;
1405}
1406
1407/**
1408 * _scsih_expander_node_add - insert expander device to the list.
1409 * @ioc: per adapter object
1410 * @sas_expander: the sas_device object
1411 * Context: This function will acquire ioc->sas_node_lock.
1412 *
1413 * Adding new object to the ioc->sas_expander_list.
1414 */
1415static void
1416_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1417 struct _sas_node *sas_expander)
1418{
1419 unsigned long flags;
1420
1421 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1422 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1423 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1424}
1425
1426/**
1427 * _scsih_is_end_device - determines if device is an end device
1428 * @device_info: bitfield providing information about the device.
1429 * Context: none
1430 *
1431 * Return: 1 if end device.
1432 */
1433static int
1434_scsih_is_end_device(u32 device_info)
1435{
1436 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1437 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1438 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1439 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1440 return 1;
1441 else
1442 return 0;
1443}
1444
1445/**
1446 * _scsih_is_nvme_pciescsi_device - determines if
1447 * device is an pcie nvme/scsi device
1448 * @device_info: bitfield providing information about the device.
1449 * Context: none
1450 *
1451 * Returns 1 if device is pcie device type nvme/scsi.
1452 */
1453static int
1454_scsih_is_nvme_pciescsi_device(u32 device_info)
1455{
1456 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1457 == MPI26_PCIE_DEVINFO_NVME) ||
1458 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1459 == MPI26_PCIE_DEVINFO_SCSI))
1460 return 1;
1461 else
1462 return 0;
1463}
1464
1465/**
1466 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1467 * @ioc: per adapter object
1468 * @smid: system request message index
1469 *
1470 * Return: the smid stored scmd pointer.
1471 * Then will dereference the stored scmd pointer.
1472 */
1473struct scsi_cmnd *
1474mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1475{
1476 struct scsi_cmnd *scmd = NULL;
1477 struct scsiio_tracker *st;
1478 Mpi25SCSIIORequest_t *mpi_request;
1479
1480 if (smid > 0 &&
1481 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1482 u32 unique_tag = smid - 1;
1483
1484 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1485
1486 /*
1487 * If SCSI IO request is outstanding at driver level then
1488 * DevHandle filed must be non-zero. If DevHandle is zero
1489 * then it means that this smid is free at driver level,
1490 * so return NULL.
1491 */
1492 if (!mpi_request->DevHandle)
1493 return scmd;
1494
1495 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1496 if (scmd) {
1497 st = scsi_cmd_priv(scmd);
1498 if (st->cb_idx == 0xFF || st->smid == 0)
1499 scmd = NULL;
1500 }
1501 }
1502 return scmd;
1503}
1504
1505/**
1506 * scsih_change_queue_depth - setting device queue depth
1507 * @sdev: scsi device struct
1508 * @qdepth: requested queue depth
1509 *
1510 * Return: queue depth.
1511 */
1512static int
1513scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1514{
1515 struct Scsi_Host *shost = sdev->host;
1516 int max_depth;
1517 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1518 struct MPT3SAS_DEVICE *sas_device_priv_data;
1519 struct MPT3SAS_TARGET *sas_target_priv_data;
1520 struct _sas_device *sas_device;
1521 unsigned long flags;
1522
1523 max_depth = shost->can_queue;
1524
1525 /*
1526 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1527 * is disabled.
1528 */
1529 if (ioc->enable_sdev_max_qd)
1530 goto not_sata;
1531
1532 sas_device_priv_data = sdev->hostdata;
1533 if (!sas_device_priv_data)
1534 goto not_sata;
1535 sas_target_priv_data = sas_device_priv_data->sas_target;
1536 if (!sas_target_priv_data)
1537 goto not_sata;
1538 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1539 goto not_sata;
1540
1541 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1542 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1543 if (sas_device) {
1544 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1545 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1546
1547 sas_device_put(sas_device);
1548 }
1549 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1550
1551 not_sata:
1552
1553 if (!sdev->tagged_supported)
1554 max_depth = 1;
1555 if (qdepth > max_depth)
1556 qdepth = max_depth;
1557 return scsi_change_queue_depth(sdev, qdepth);
1558}
1559
1560/**
1561 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1562 * @sdev: scsi device struct
1563 * @qdepth: requested queue depth
1564 *
1565 * Returns nothing.
1566 */
1567void
1568mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1569{
1570 struct Scsi_Host *shost = sdev->host;
1571 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1572
1573 if (ioc->enable_sdev_max_qd)
1574 qdepth = shost->can_queue;
1575
1576 scsih_change_queue_depth(sdev, qdepth);
1577}
1578
1579/**
1580 * scsih_target_alloc - target add routine
1581 * @starget: scsi target struct
1582 *
1583 * Return: 0 if ok. Any other return is assumed to be an error and
1584 * the device is ignored.
1585 */
1586static int
1587scsih_target_alloc(struct scsi_target *starget)
1588{
1589 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1590 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1591 struct MPT3SAS_TARGET *sas_target_priv_data;
1592 struct _sas_device *sas_device;
1593 struct _raid_device *raid_device;
1594 struct _pcie_device *pcie_device;
1595 unsigned long flags;
1596 struct sas_rphy *rphy;
1597
1598 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1599 GFP_KERNEL);
1600 if (!sas_target_priv_data)
1601 return -ENOMEM;
1602
1603 starget->hostdata = sas_target_priv_data;
1604 sas_target_priv_data->starget = starget;
1605 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1606
1607 /* RAID volumes */
1608 if (starget->channel == RAID_CHANNEL) {
1609 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1610 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1611 starget->channel);
1612 if (raid_device) {
1613 sas_target_priv_data->handle = raid_device->handle;
1614 sas_target_priv_data->sas_address = raid_device->wwid;
1615 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1616 if (ioc->is_warpdrive)
1617 sas_target_priv_data->raid_device = raid_device;
1618 raid_device->starget = starget;
1619 }
1620 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1621 return 0;
1622 }
1623
1624 /* PCIe devices */
1625 if (starget->channel == PCIE_CHANNEL) {
1626 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1627 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1628 starget->channel);
1629 if (pcie_device) {
1630 sas_target_priv_data->handle = pcie_device->handle;
1631 sas_target_priv_data->sas_address = pcie_device->wwid;
1632 sas_target_priv_data->pcie_dev = pcie_device;
1633 pcie_device->starget = starget;
1634 pcie_device->id = starget->id;
1635 pcie_device->channel = starget->channel;
1636 sas_target_priv_data->flags |=
1637 MPT_TARGET_FLAGS_PCIE_DEVICE;
1638 if (pcie_device->fast_path)
1639 sas_target_priv_data->flags |=
1640 MPT_TARGET_FASTPATH_IO;
1641 }
1642 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1643 return 0;
1644 }
1645
1646 /* sas/sata devices */
1647 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1648 rphy = dev_to_rphy(starget->dev.parent);
1649 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1650 rphy->identify.sas_address);
1651
1652 if (sas_device) {
1653 sas_target_priv_data->handle = sas_device->handle;
1654 sas_target_priv_data->sas_address = sas_device->sas_address;
1655 sas_target_priv_data->sas_dev = sas_device;
1656 sas_device->starget = starget;
1657 sas_device->id = starget->id;
1658 sas_device->channel = starget->channel;
1659 if (test_bit(sas_device->handle, ioc->pd_handles))
1660 sas_target_priv_data->flags |=
1661 MPT_TARGET_FLAGS_RAID_COMPONENT;
1662 if (sas_device->fast_path)
1663 sas_target_priv_data->flags |=
1664 MPT_TARGET_FASTPATH_IO;
1665 }
1666 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1667
1668 return 0;
1669}
1670
1671/**
1672 * scsih_target_destroy - target destroy routine
1673 * @starget: scsi target struct
1674 */
1675static void
1676scsih_target_destroy(struct scsi_target *starget)
1677{
1678 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1679 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1680 struct MPT3SAS_TARGET *sas_target_priv_data;
1681 struct _sas_device *sas_device;
1682 struct _raid_device *raid_device;
1683 struct _pcie_device *pcie_device;
1684 unsigned long flags;
1685
1686 sas_target_priv_data = starget->hostdata;
1687 if (!sas_target_priv_data)
1688 return;
1689
1690 if (starget->channel == RAID_CHANNEL) {
1691 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1692 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1693 starget->channel);
1694 if (raid_device) {
1695 raid_device->starget = NULL;
1696 raid_device->sdev = NULL;
1697 }
1698 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1699 goto out;
1700 }
1701
1702 if (starget->channel == PCIE_CHANNEL) {
1703 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1704 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1705 sas_target_priv_data);
1706 if (pcie_device && (pcie_device->starget == starget) &&
1707 (pcie_device->id == starget->id) &&
1708 (pcie_device->channel == starget->channel))
1709 pcie_device->starget = NULL;
1710
1711 if (pcie_device) {
1712 /*
1713 * Corresponding get() is in _scsih_target_alloc()
1714 */
1715 sas_target_priv_data->pcie_dev = NULL;
1716 pcie_device_put(pcie_device);
1717 pcie_device_put(pcie_device);
1718 }
1719 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1720 goto out;
1721 }
1722
1723 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1724 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1725 if (sas_device && (sas_device->starget == starget) &&
1726 (sas_device->id == starget->id) &&
1727 (sas_device->channel == starget->channel))
1728 sas_device->starget = NULL;
1729
1730 if (sas_device) {
1731 /*
1732 * Corresponding get() is in _scsih_target_alloc()
1733 */
1734 sas_target_priv_data->sas_dev = NULL;
1735 sas_device_put(sas_device);
1736
1737 sas_device_put(sas_device);
1738 }
1739 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1740
1741 out:
1742 kfree(sas_target_priv_data);
1743 starget->hostdata = NULL;
1744}
1745
1746/**
1747 * scsih_slave_alloc - device add routine
1748 * @sdev: scsi device struct
1749 *
1750 * Return: 0 if ok. Any other return is assumed to be an error and
1751 * the device is ignored.
1752 */
1753static int
1754scsih_slave_alloc(struct scsi_device *sdev)
1755{
1756 struct Scsi_Host *shost;
1757 struct MPT3SAS_ADAPTER *ioc;
1758 struct MPT3SAS_TARGET *sas_target_priv_data;
1759 struct MPT3SAS_DEVICE *sas_device_priv_data;
1760 struct scsi_target *starget;
1761 struct _raid_device *raid_device;
1762 struct _sas_device *sas_device;
1763 struct _pcie_device *pcie_device;
1764 unsigned long flags;
1765
1766 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1767 GFP_KERNEL);
1768 if (!sas_device_priv_data)
1769 return -ENOMEM;
1770
1771 sas_device_priv_data->lun = sdev->lun;
1772 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1773
1774 starget = scsi_target(sdev);
1775 sas_target_priv_data = starget->hostdata;
1776 sas_target_priv_data->num_luns++;
1777 sas_device_priv_data->sas_target = sas_target_priv_data;
1778 sdev->hostdata = sas_device_priv_data;
1779 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1780 sdev->no_uld_attach = 1;
1781
1782 shost = dev_to_shost(&starget->dev);
1783 ioc = shost_priv(shost);
1784 if (starget->channel == RAID_CHANNEL) {
1785 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1786 raid_device = _scsih_raid_device_find_by_id(ioc,
1787 starget->id, starget->channel);
1788 if (raid_device)
1789 raid_device->sdev = sdev; /* raid is single lun */
1790 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1791 }
1792 if (starget->channel == PCIE_CHANNEL) {
1793 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1794 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1795 sas_target_priv_data->sas_address);
1796 if (pcie_device && (pcie_device->starget == NULL)) {
1797 sdev_printk(KERN_INFO, sdev,
1798 "%s : pcie_device->starget set to starget @ %d\n",
1799 __func__, __LINE__);
1800 pcie_device->starget = starget;
1801 }
1802
1803 if (pcie_device)
1804 pcie_device_put(pcie_device);
1805 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1806
1807 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1808 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1809 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1810 sas_target_priv_data->sas_address);
1811 if (sas_device && (sas_device->starget == NULL)) {
1812 sdev_printk(KERN_INFO, sdev,
1813 "%s : sas_device->starget set to starget @ %d\n",
1814 __func__, __LINE__);
1815 sas_device->starget = starget;
1816 }
1817
1818 if (sas_device)
1819 sas_device_put(sas_device);
1820
1821 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1822 }
1823
1824 return 0;
1825}
1826
1827/**
1828 * scsih_slave_destroy - device destroy routine
1829 * @sdev: scsi device struct
1830 */
1831static void
1832scsih_slave_destroy(struct scsi_device *sdev)
1833{
1834 struct MPT3SAS_TARGET *sas_target_priv_data;
1835 struct scsi_target *starget;
1836 struct Scsi_Host *shost;
1837 struct MPT3SAS_ADAPTER *ioc;
1838 struct _sas_device *sas_device;
1839 struct _pcie_device *pcie_device;
1840 unsigned long flags;
1841
1842 if (!sdev->hostdata)
1843 return;
1844
1845 starget = scsi_target(sdev);
1846 sas_target_priv_data = starget->hostdata;
1847 sas_target_priv_data->num_luns--;
1848
1849 shost = dev_to_shost(&starget->dev);
1850 ioc = shost_priv(shost);
1851
1852 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1853 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1854 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1855 sas_target_priv_data);
1856 if (pcie_device && !sas_target_priv_data->num_luns)
1857 pcie_device->starget = NULL;
1858
1859 if (pcie_device)
1860 pcie_device_put(pcie_device);
1861
1862 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1863
1864 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1865 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1866 sas_device = __mpt3sas_get_sdev_from_target(ioc,
1867 sas_target_priv_data);
1868 if (sas_device && !sas_target_priv_data->num_luns)
1869 sas_device->starget = NULL;
1870
1871 if (sas_device)
1872 sas_device_put(sas_device);
1873 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1874 }
1875
1876 kfree(sdev->hostdata);
1877 sdev->hostdata = NULL;
1878}
1879
1880/**
1881 * _scsih_display_sata_capabilities - sata capabilities
1882 * @ioc: per adapter object
1883 * @handle: device handle
1884 * @sdev: scsi device struct
1885 */
1886static void
1887_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1888 u16 handle, struct scsi_device *sdev)
1889{
1890 Mpi2ConfigReply_t mpi_reply;
1891 Mpi2SasDevicePage0_t sas_device_pg0;
1892 u32 ioc_status;
1893 u16 flags;
1894 u32 device_info;
1895
1896 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1897 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1898 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1899 __FILE__, __LINE__, __func__);
1900 return;
1901 }
1902
1903 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1904 MPI2_IOCSTATUS_MASK;
1905 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1906 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1907 __FILE__, __LINE__, __func__);
1908 return;
1909 }
1910
1911 flags = le16_to_cpu(sas_device_pg0.Flags);
1912 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1913
1914 sdev_printk(KERN_INFO, sdev,
1915 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1916 "sw_preserve(%s)\n",
1917 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1918 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1919 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1920 "n",
1921 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1922 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1923 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1924}
1925
1926/*
1927 * raid transport support -
1928 * Enabled for SLES11 and newer, in older kernels the driver will panic when
1929 * unloading the driver followed by a load - I believe that the subroutine
1930 * raid_class_release() is not cleaning up properly.
1931 */
1932
1933/**
1934 * scsih_is_raid - return boolean indicating device is raid volume
1935 * @dev: the device struct object
1936 */
1937static int
1938scsih_is_raid(struct device *dev)
1939{
1940 struct scsi_device *sdev = to_scsi_device(dev);
1941 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1942
1943 if (ioc->is_warpdrive)
1944 return 0;
1945 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1946}
1947
1948static int
1949scsih_is_nvme(struct device *dev)
1950{
1951 struct scsi_device *sdev = to_scsi_device(dev);
1952
1953 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
1954}
1955
1956/**
1957 * scsih_get_resync - get raid volume resync percent complete
1958 * @dev: the device struct object
1959 */
1960static void
1961scsih_get_resync(struct device *dev)
1962{
1963 struct scsi_device *sdev = to_scsi_device(dev);
1964 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1965 static struct _raid_device *raid_device;
1966 unsigned long flags;
1967 Mpi2RaidVolPage0_t vol_pg0;
1968 Mpi2ConfigReply_t mpi_reply;
1969 u32 volume_status_flags;
1970 u8 percent_complete;
1971 u16 handle;
1972
1973 percent_complete = 0;
1974 handle = 0;
1975 if (ioc->is_warpdrive)
1976 goto out;
1977
1978 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1979 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1980 sdev->channel);
1981 if (raid_device) {
1982 handle = raid_device->handle;
1983 percent_complete = raid_device->percent_complete;
1984 }
1985 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1986
1987 if (!handle)
1988 goto out;
1989
1990 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1991 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1992 sizeof(Mpi2RaidVolPage0_t))) {
1993 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1994 __FILE__, __LINE__, __func__);
1995 percent_complete = 0;
1996 goto out;
1997 }
1998
1999 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2000 if (!(volume_status_flags &
2001 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2002 percent_complete = 0;
2003
2004 out:
2005
2006 switch (ioc->hba_mpi_version_belonged) {
2007 case MPI2_VERSION:
2008 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2009 break;
2010 case MPI25_VERSION:
2011 case MPI26_VERSION:
2012 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2013 break;
2014 }
2015}
2016
2017/**
2018 * scsih_get_state - get raid volume level
2019 * @dev: the device struct object
2020 */
2021static void
2022scsih_get_state(struct device *dev)
2023{
2024 struct scsi_device *sdev = to_scsi_device(dev);
2025 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2026 static struct _raid_device *raid_device;
2027 unsigned long flags;
2028 Mpi2RaidVolPage0_t vol_pg0;
2029 Mpi2ConfigReply_t mpi_reply;
2030 u32 volstate;
2031 enum raid_state state = RAID_STATE_UNKNOWN;
2032 u16 handle = 0;
2033
2034 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2035 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2036 sdev->channel);
2037 if (raid_device)
2038 handle = raid_device->handle;
2039 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2040
2041 if (!raid_device)
2042 goto out;
2043
2044 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2045 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2046 sizeof(Mpi2RaidVolPage0_t))) {
2047 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2048 __FILE__, __LINE__, __func__);
2049 goto out;
2050 }
2051
2052 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2053 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2054 state = RAID_STATE_RESYNCING;
2055 goto out;
2056 }
2057
2058 switch (vol_pg0.VolumeState) {
2059 case MPI2_RAID_VOL_STATE_OPTIMAL:
2060 case MPI2_RAID_VOL_STATE_ONLINE:
2061 state = RAID_STATE_ACTIVE;
2062 break;
2063 case MPI2_RAID_VOL_STATE_DEGRADED:
2064 state = RAID_STATE_DEGRADED;
2065 break;
2066 case MPI2_RAID_VOL_STATE_FAILED:
2067 case MPI2_RAID_VOL_STATE_MISSING:
2068 state = RAID_STATE_OFFLINE;
2069 break;
2070 }
2071 out:
2072 switch (ioc->hba_mpi_version_belonged) {
2073 case MPI2_VERSION:
2074 raid_set_state(mpt2sas_raid_template, dev, state);
2075 break;
2076 case MPI25_VERSION:
2077 case MPI26_VERSION:
2078 raid_set_state(mpt3sas_raid_template, dev, state);
2079 break;
2080 }
2081}
2082
2083/**
2084 * _scsih_set_level - set raid level
2085 * @ioc: ?
2086 * @sdev: scsi device struct
2087 * @volume_type: volume type
2088 */
2089static void
2090_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2091 struct scsi_device *sdev, u8 volume_type)
2092{
2093 enum raid_level level = RAID_LEVEL_UNKNOWN;
2094
2095 switch (volume_type) {
2096 case MPI2_RAID_VOL_TYPE_RAID0:
2097 level = RAID_LEVEL_0;
2098 break;
2099 case MPI2_RAID_VOL_TYPE_RAID10:
2100 level = RAID_LEVEL_10;
2101 break;
2102 case MPI2_RAID_VOL_TYPE_RAID1E:
2103 level = RAID_LEVEL_1E;
2104 break;
2105 case MPI2_RAID_VOL_TYPE_RAID1:
2106 level = RAID_LEVEL_1;
2107 break;
2108 }
2109
2110 switch (ioc->hba_mpi_version_belonged) {
2111 case MPI2_VERSION:
2112 raid_set_level(mpt2sas_raid_template,
2113 &sdev->sdev_gendev, level);
2114 break;
2115 case MPI25_VERSION:
2116 case MPI26_VERSION:
2117 raid_set_level(mpt3sas_raid_template,
2118 &sdev->sdev_gendev, level);
2119 break;
2120 }
2121}
2122
2123
2124/**
2125 * _scsih_get_volume_capabilities - volume capabilities
2126 * @ioc: per adapter object
2127 * @raid_device: the raid_device object
2128 *
2129 * Return: 0 for success, else 1
2130 */
2131static int
2132_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2133 struct _raid_device *raid_device)
2134{
2135 Mpi2RaidVolPage0_t *vol_pg0;
2136 Mpi2RaidPhysDiskPage0_t pd_pg0;
2137 Mpi2SasDevicePage0_t sas_device_pg0;
2138 Mpi2ConfigReply_t mpi_reply;
2139 u16 sz;
2140 u8 num_pds;
2141
2142 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2143 &num_pds)) || !num_pds) {
2144 dfailprintk(ioc,
2145 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2146 __FILE__, __LINE__, __func__));
2147 return 1;
2148 }
2149
2150 raid_device->num_pds = num_pds;
2151 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2152 sizeof(Mpi2RaidVol0PhysDisk_t));
2153 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2154 if (!vol_pg0) {
2155 dfailprintk(ioc,
2156 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2157 __FILE__, __LINE__, __func__));
2158 return 1;
2159 }
2160
2161 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2162 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2163 dfailprintk(ioc,
2164 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2165 __FILE__, __LINE__, __func__));
2166 kfree(vol_pg0);
2167 return 1;
2168 }
2169
2170 raid_device->volume_type = vol_pg0->VolumeType;
2171
2172 /* figure out what the underlying devices are by
2173 * obtaining the device_info bits for the 1st device
2174 */
2175 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2176 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2177 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2178 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2179 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2180 le16_to_cpu(pd_pg0.DevHandle)))) {
2181 raid_device->device_info =
2182 le32_to_cpu(sas_device_pg0.DeviceInfo);
2183 }
2184 }
2185
2186 kfree(vol_pg0);
2187 return 0;
2188}
2189
2190/**
2191 * _scsih_enable_tlr - setting TLR flags
2192 * @ioc: per adapter object
2193 * @sdev: scsi device struct
2194 *
2195 * Enabling Transaction Layer Retries for tape devices when
2196 * vpd page 0x90 is present
2197 *
2198 */
2199static void
2200_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2201{
2202
2203 /* only for TAPE */
2204 if (sdev->type != TYPE_TAPE)
2205 return;
2206
2207 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2208 return;
2209
2210 sas_enable_tlr(sdev);
2211 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2212 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2213 return;
2214
2215}
2216
2217/**
2218 * scsih_slave_configure - device configure routine.
2219 * @sdev: scsi device struct
2220 *
2221 * Return: 0 if ok. Any other return is assumed to be an error and
2222 * the device is ignored.
2223 */
2224static int
2225scsih_slave_configure(struct scsi_device *sdev)
2226{
2227 struct Scsi_Host *shost = sdev->host;
2228 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2229 struct MPT3SAS_DEVICE *sas_device_priv_data;
2230 struct MPT3SAS_TARGET *sas_target_priv_data;
2231 struct _sas_device *sas_device;
2232 struct _pcie_device *pcie_device;
2233 struct _raid_device *raid_device;
2234 unsigned long flags;
2235 int qdepth;
2236 u8 ssp_target = 0;
2237 char *ds = "";
2238 char *r_level = "";
2239 u16 handle, volume_handle = 0;
2240 u64 volume_wwid = 0;
2241
2242 qdepth = 1;
2243 sas_device_priv_data = sdev->hostdata;
2244 sas_device_priv_data->configured_lun = 1;
2245 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2246 sas_target_priv_data = sas_device_priv_data->sas_target;
2247 handle = sas_target_priv_data->handle;
2248
2249 /* raid volume handling */
2250 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2251
2252 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2253 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2254 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2255 if (!raid_device) {
2256 dfailprintk(ioc,
2257 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2258 __FILE__, __LINE__, __func__));
2259 return 1;
2260 }
2261
2262 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2263 dfailprintk(ioc,
2264 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2265 __FILE__, __LINE__, __func__));
2266 return 1;
2267 }
2268
2269 /*
2270 * WARPDRIVE: Initialize the required data for Direct IO
2271 */
2272 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2273
2274 /* RAID Queue Depth Support
2275 * IS volume = underlying qdepth of drive type, either
2276 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2277 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2278 */
2279 if (raid_device->device_info &
2280 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2281 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2282 ds = "SSP";
2283 } else {
2284 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2285 if (raid_device->device_info &
2286 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2287 ds = "SATA";
2288 else
2289 ds = "STP";
2290 }
2291
2292 switch (raid_device->volume_type) {
2293 case MPI2_RAID_VOL_TYPE_RAID0:
2294 r_level = "RAID0";
2295 break;
2296 case MPI2_RAID_VOL_TYPE_RAID1E:
2297 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2298 if (ioc->manu_pg10.OEMIdentifier &&
2299 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2300 MFG10_GF0_R10_DISPLAY) &&
2301 !(raid_device->num_pds % 2))
2302 r_level = "RAID10";
2303 else
2304 r_level = "RAID1E";
2305 break;
2306 case MPI2_RAID_VOL_TYPE_RAID1:
2307 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2308 r_level = "RAID1";
2309 break;
2310 case MPI2_RAID_VOL_TYPE_RAID10:
2311 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2312 r_level = "RAID10";
2313 break;
2314 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2315 default:
2316 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2317 r_level = "RAIDX";
2318 break;
2319 }
2320
2321 if (!ioc->hide_ir_msg)
2322 sdev_printk(KERN_INFO, sdev,
2323 "%s: handle(0x%04x), wwid(0x%016llx),"
2324 " pd_count(%d), type(%s)\n",
2325 r_level, raid_device->handle,
2326 (unsigned long long)raid_device->wwid,
2327 raid_device->num_pds, ds);
2328
2329 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2330 blk_queue_max_hw_sectors(sdev->request_queue,
2331 MPT3SAS_RAID_MAX_SECTORS);
2332 sdev_printk(KERN_INFO, sdev,
2333 "Set queue's max_sector to: %u\n",
2334 MPT3SAS_RAID_MAX_SECTORS);
2335 }
2336
2337 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2338
2339 /* raid transport support */
2340 if (!ioc->is_warpdrive)
2341 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2342 return 0;
2343 }
2344
2345 /* non-raid handling */
2346 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2347 if (mpt3sas_config_get_volume_handle(ioc, handle,
2348 &volume_handle)) {
2349 dfailprintk(ioc,
2350 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2351 __FILE__, __LINE__, __func__));
2352 return 1;
2353 }
2354 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2355 volume_handle, &volume_wwid)) {
2356 dfailprintk(ioc,
2357 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2358 __FILE__, __LINE__, __func__));
2359 return 1;
2360 }
2361 }
2362
2363 /* PCIe handling */
2364 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2365 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2366 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2367 sas_device_priv_data->sas_target->sas_address);
2368 if (!pcie_device) {
2369 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2370 dfailprintk(ioc,
2371 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2372 __FILE__, __LINE__, __func__));
2373 return 1;
2374 }
2375
2376 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2377 ds = "NVMe";
2378 sdev_printk(KERN_INFO, sdev,
2379 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2380 ds, handle, (unsigned long long)pcie_device->wwid,
2381 pcie_device->port_num);
2382 if (pcie_device->enclosure_handle != 0)
2383 sdev_printk(KERN_INFO, sdev,
2384 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2385 ds,
2386 (unsigned long long)pcie_device->enclosure_logical_id,
2387 pcie_device->slot);
2388 if (pcie_device->connector_name[0] != '\0')
2389 sdev_printk(KERN_INFO, sdev,
2390 "%s: enclosure level(0x%04x),"
2391 "connector name( %s)\n", ds,
2392 pcie_device->enclosure_level,
2393 pcie_device->connector_name);
2394
2395 if (pcie_device->nvme_mdts)
2396 blk_queue_max_hw_sectors(sdev->request_queue,
2397 pcie_device->nvme_mdts/512);
2398
2399 pcie_device_put(pcie_device);
2400 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2401 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2402 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2403 ** merged and can eliminate holes created during merging
2404 ** operation.
2405 **/
2406 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2407 sdev->request_queue);
2408 blk_queue_virt_boundary(sdev->request_queue,
2409 ioc->page_size - 1);
2410 return 0;
2411 }
2412
2413 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2414 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2415 sas_device_priv_data->sas_target->sas_address);
2416 if (!sas_device) {
2417 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2418 dfailprintk(ioc,
2419 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2420 __FILE__, __LINE__, __func__));
2421 return 1;
2422 }
2423
2424 sas_device->volume_handle = volume_handle;
2425 sas_device->volume_wwid = volume_wwid;
2426 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2427 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2428 ssp_target = 1;
2429 if (sas_device->device_info &
2430 MPI2_SAS_DEVICE_INFO_SEP) {
2431 sdev_printk(KERN_WARNING, sdev,
2432 "set ignore_delay_remove for handle(0x%04x)\n",
2433 sas_device_priv_data->sas_target->handle);
2434 sas_device_priv_data->ignore_delay_remove = 1;
2435 ds = "SES";
2436 } else
2437 ds = "SSP";
2438 } else {
2439 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2440 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2441 ds = "STP";
2442 else if (sas_device->device_info &
2443 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2444 ds = "SATA";
2445 }
2446
2447 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2448 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2449 ds, handle, (unsigned long long)sas_device->sas_address,
2450 sas_device->phy, (unsigned long long)sas_device->device_name);
2451
2452 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2453
2454 sas_device_put(sas_device);
2455 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2456
2457 if (!ssp_target)
2458 _scsih_display_sata_capabilities(ioc, handle, sdev);
2459
2460
2461 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2462
2463 if (ssp_target) {
2464 sas_read_port_mode_page(sdev);
2465 _scsih_enable_tlr(ioc, sdev);
2466 }
2467
2468 return 0;
2469}
2470
2471/**
2472 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2473 * @sdev: scsi device struct
2474 * @bdev: pointer to block device context
2475 * @capacity: device size (in 512 byte sectors)
2476 * @params: three element array to place output:
2477 * params[0] number of heads (max 255)
2478 * params[1] number of sectors (max 63)
2479 * params[2] number of cylinders
2480 */
2481static int
2482scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2483 sector_t capacity, int params[])
2484{
2485 int heads;
2486 int sectors;
2487 sector_t cylinders;
2488 ulong dummy;
2489
2490 heads = 64;
2491 sectors = 32;
2492
2493 dummy = heads * sectors;
2494 cylinders = capacity;
2495 sector_div(cylinders, dummy);
2496
2497 /*
2498 * Handle extended translation size for logical drives
2499 * > 1Gb
2500 */
2501 if ((ulong)capacity >= 0x200000) {
2502 heads = 255;
2503 sectors = 63;
2504 dummy = heads * sectors;
2505 cylinders = capacity;
2506 sector_div(cylinders, dummy);
2507 }
2508
2509 /* return result */
2510 params[0] = heads;
2511 params[1] = sectors;
2512 params[2] = cylinders;
2513
2514 return 0;
2515}
2516
2517/**
2518 * _scsih_response_code - translation of device response code
2519 * @ioc: per adapter object
2520 * @response_code: response code returned by the device
2521 */
2522static void
2523_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2524{
2525 char *desc;
2526
2527 switch (response_code) {
2528 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2529 desc = "task management request completed";
2530 break;
2531 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2532 desc = "invalid frame";
2533 break;
2534 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2535 desc = "task management request not supported";
2536 break;
2537 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2538 desc = "task management request failed";
2539 break;
2540 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2541 desc = "task management request succeeded";
2542 break;
2543 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2544 desc = "invalid lun";
2545 break;
2546 case 0xA:
2547 desc = "overlapped tag attempted";
2548 break;
2549 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2550 desc = "task queued, however not sent to target";
2551 break;
2552 default:
2553 desc = "unknown";
2554 break;
2555 }
2556 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2557}
2558
2559/**
2560 * _scsih_tm_done - tm completion routine
2561 * @ioc: per adapter object
2562 * @smid: system request message index
2563 * @msix_index: MSIX table index supplied by the OS
2564 * @reply: reply message frame(lower 32bit addr)
2565 * Context: none.
2566 *
2567 * The callback handler when using scsih_issue_tm.
2568 *
2569 * Return: 1 meaning mf should be freed from _base_interrupt
2570 * 0 means the mf is freed from this function.
2571 */
2572static u8
2573_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2574{
2575 MPI2DefaultReply_t *mpi_reply;
2576
2577 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2578 return 1;
2579 if (ioc->tm_cmds.smid != smid)
2580 return 1;
2581 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2582 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2583 if (mpi_reply) {
2584 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2585 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2586 }
2587 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2588 complete(&ioc->tm_cmds.done);
2589 return 1;
2590}
2591
2592/**
2593 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2594 * @ioc: per adapter object
2595 * @handle: device handle
2596 *
2597 * During taskmangement request, we need to freeze the device queue.
2598 */
2599void
2600mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2601{
2602 struct MPT3SAS_DEVICE *sas_device_priv_data;
2603 struct scsi_device *sdev;
2604 u8 skip = 0;
2605
2606 shost_for_each_device(sdev, ioc->shost) {
2607 if (skip)
2608 continue;
2609 sas_device_priv_data = sdev->hostdata;
2610 if (!sas_device_priv_data)
2611 continue;
2612 if (sas_device_priv_data->sas_target->handle == handle) {
2613 sas_device_priv_data->sas_target->tm_busy = 1;
2614 skip = 1;
2615 ioc->ignore_loginfos = 1;
2616 }
2617 }
2618}
2619
2620/**
2621 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2622 * @ioc: per adapter object
2623 * @handle: device handle
2624 *
2625 * During taskmangement request, we need to freeze the device queue.
2626 */
2627void
2628mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2629{
2630 struct MPT3SAS_DEVICE *sas_device_priv_data;
2631 struct scsi_device *sdev;
2632 u8 skip = 0;
2633
2634 shost_for_each_device(sdev, ioc->shost) {
2635 if (skip)
2636 continue;
2637 sas_device_priv_data = sdev->hostdata;
2638 if (!sas_device_priv_data)
2639 continue;
2640 if (sas_device_priv_data->sas_target->handle == handle) {
2641 sas_device_priv_data->sas_target->tm_busy = 0;
2642 skip = 1;
2643 ioc->ignore_loginfos = 0;
2644 }
2645 }
2646}
2647
2648/**
2649 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2650 * @ioc: per adapter struct
2651 * @handle: device handle
2652 * @lun: lun number
2653 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2654 * @smid_task: smid assigned to the task
2655 * @msix_task: MSIX table index supplied by the OS
2656 * @timeout: timeout in seconds
2657 * @tr_method: Target Reset Method
2658 * Context: user
2659 *
2660 * A generic API for sending task management requests to firmware.
2661 *
2662 * The callback index is set inside `ioc->tm_cb_idx`.
2663 * The caller is responsible to check for outstanding commands.
2664 *
2665 * Return: SUCCESS or FAILED.
2666 */
2667int
2668mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2669 u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2670{
2671 Mpi2SCSITaskManagementRequest_t *mpi_request;
2672 Mpi2SCSITaskManagementReply_t *mpi_reply;
2673 u16 smid = 0;
2674 u32 ioc_state;
2675 int rc;
2676
2677 lockdep_assert_held(&ioc->tm_cmds.mutex);
2678
2679 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2680 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2681 return FAILED;
2682 }
2683
2684 if (ioc->shost_recovery || ioc->remove_host ||
2685 ioc->pci_error_recovery) {
2686 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2687 return FAILED;
2688 }
2689
2690 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2691 if (ioc_state & MPI2_DOORBELL_USED) {
2692 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2693 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2694 return (!rc) ? SUCCESS : FAILED;
2695 }
2696
2697 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2698 mpt3sas_base_fault_info(ioc, ioc_state &
2699 MPI2_DOORBELL_DATA_MASK);
2700 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2701 return (!rc) ? SUCCESS : FAILED;
2702 }
2703
2704 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2705 if (!smid) {
2706 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2707 return FAILED;
2708 }
2709
2710 dtmprintk(ioc,
2711 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2712 handle, type, smid_task, timeout, tr_method));
2713 ioc->tm_cmds.status = MPT3_CMD_PENDING;
2714 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2715 ioc->tm_cmds.smid = smid;
2716 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2717 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2718 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2719 mpi_request->DevHandle = cpu_to_le16(handle);
2720 mpi_request->TaskType = type;
2721 mpi_request->MsgFlags = tr_method;
2722 mpi_request->TaskMID = cpu_to_le16(smid_task);
2723 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2724 mpt3sas_scsih_set_tm_flag(ioc, handle);
2725 init_completion(&ioc->tm_cmds.done);
2726 ioc->put_smid_hi_priority(ioc, smid, msix_task);
2727 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2728 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2729 if (mpt3sas_base_check_cmd_timeout(ioc,
2730 ioc->tm_cmds.status, mpi_request,
2731 sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
2732 rc = mpt3sas_base_hard_reset_handler(ioc,
2733 FORCE_BIG_HAMMER);
2734 rc = (!rc) ? SUCCESS : FAILED;
2735 goto out;
2736 }
2737 }
2738
2739 /* sync IRQs in case those were busy during flush. */
2740 mpt3sas_base_sync_reply_irqs(ioc);
2741
2742 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2743 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2744 mpi_reply = ioc->tm_cmds.reply;
2745 dtmprintk(ioc,
2746 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2747 le16_to_cpu(mpi_reply->IOCStatus),
2748 le32_to_cpu(mpi_reply->IOCLogInfo),
2749 le32_to_cpu(mpi_reply->TerminationCount)));
2750 if (ioc->logging_level & MPT_DEBUG_TM) {
2751 _scsih_response_code(ioc, mpi_reply->ResponseCode);
2752 if (mpi_reply->IOCStatus)
2753 _debug_dump_mf(mpi_request,
2754 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2755 }
2756 }
2757 rc = SUCCESS;
2758
2759out:
2760 mpt3sas_scsih_clear_tm_flag(ioc, handle);
2761 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2762 return rc;
2763}
2764
2765int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2766 u64 lun, u8 type, u16 smid_task, u16 msix_task,
2767 u8 timeout, u8 tr_method)
2768{
2769 int ret;
2770
2771 mutex_lock(&ioc->tm_cmds.mutex);
2772 ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2773 msix_task, timeout, tr_method);
2774 mutex_unlock(&ioc->tm_cmds.mutex);
2775
2776 return ret;
2777}
2778
2779/**
2780 * _scsih_tm_display_info - displays info about the device
2781 * @ioc: per adapter struct
2782 * @scmd: pointer to scsi command object
2783 *
2784 * Called by task management callback handlers.
2785 */
2786static void
2787_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2788{
2789 struct scsi_target *starget = scmd->device->sdev_target;
2790 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2791 struct _sas_device *sas_device = NULL;
2792 struct _pcie_device *pcie_device = NULL;
2793 unsigned long flags;
2794 char *device_str = NULL;
2795
2796 if (!priv_target)
2797 return;
2798 if (ioc->hide_ir_msg)
2799 device_str = "WarpDrive";
2800 else
2801 device_str = "volume";
2802
2803 scsi_print_command(scmd);
2804 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2805 starget_printk(KERN_INFO, starget,
2806 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
2807 device_str, priv_target->handle,
2808 device_str, (unsigned long long)priv_target->sas_address);
2809
2810 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2811 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2812 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2813 if (pcie_device) {
2814 starget_printk(KERN_INFO, starget,
2815 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2816 pcie_device->handle,
2817 (unsigned long long)pcie_device->wwid,
2818 pcie_device->port_num);
2819 if (pcie_device->enclosure_handle != 0)
2820 starget_printk(KERN_INFO, starget,
2821 "enclosure logical id(0x%016llx), slot(%d)\n",
2822 (unsigned long long)
2823 pcie_device->enclosure_logical_id,
2824 pcie_device->slot);
2825 if (pcie_device->connector_name[0] != '\0')
2826 starget_printk(KERN_INFO, starget,
2827 "enclosure level(0x%04x), connector name( %s)\n",
2828 pcie_device->enclosure_level,
2829 pcie_device->connector_name);
2830 pcie_device_put(pcie_device);
2831 }
2832 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2833
2834 } else {
2835 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2836 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2837 if (sas_device) {
2838 if (priv_target->flags &
2839 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2840 starget_printk(KERN_INFO, starget,
2841 "volume handle(0x%04x), "
2842 "volume wwid(0x%016llx)\n",
2843 sas_device->volume_handle,
2844 (unsigned long long)sas_device->volume_wwid);
2845 }
2846 starget_printk(KERN_INFO, starget,
2847 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2848 sas_device->handle,
2849 (unsigned long long)sas_device->sas_address,
2850 sas_device->phy);
2851
2852 _scsih_display_enclosure_chassis_info(NULL, sas_device,
2853 NULL, starget);
2854
2855 sas_device_put(sas_device);
2856 }
2857 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2858 }
2859}
2860
2861/**
2862 * scsih_abort - eh threads main abort routine
2863 * @scmd: pointer to scsi command object
2864 *
2865 * Return: SUCCESS if command aborted else FAILED
2866 */
2867static int
2868scsih_abort(struct scsi_cmnd *scmd)
2869{
2870 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2871 struct MPT3SAS_DEVICE *sas_device_priv_data;
2872 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2873 u16 handle;
2874 int r;
2875
2876 u8 timeout = 30;
2877 struct _pcie_device *pcie_device = NULL;
2878 sdev_printk(KERN_INFO, scmd->device,
2879 "attempting task abort! scmd(%p)\n", scmd);
2880 _scsih_tm_display_info(ioc, scmd);
2881
2882 sas_device_priv_data = scmd->device->hostdata;
2883 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2884 ioc->remove_host) {
2885 sdev_printk(KERN_INFO, scmd->device,
2886 "device been deleted! scmd(%p)\n", scmd);
2887 scmd->result = DID_NO_CONNECT << 16;
2888 scmd->scsi_done(scmd);
2889 r = SUCCESS;
2890 goto out;
2891 }
2892
2893 /* check for completed command */
2894 if (st == NULL || st->cb_idx == 0xFF) {
2895 scmd->result = DID_RESET << 16;
2896 r = SUCCESS;
2897 goto out;
2898 }
2899
2900 /* for hidden raid components and volumes this is not supported */
2901 if (sas_device_priv_data->sas_target->flags &
2902 MPT_TARGET_FLAGS_RAID_COMPONENT ||
2903 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2904 scmd->result = DID_RESET << 16;
2905 r = FAILED;
2906 goto out;
2907 }
2908
2909 mpt3sas_halt_firmware(ioc);
2910
2911 handle = sas_device_priv_data->sas_target->handle;
2912 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2913 if (pcie_device && (!ioc->tm_custom_handling) &&
2914 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
2915 timeout = ioc->nvme_abort_timeout;
2916 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2917 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2918 st->smid, st->msix_io, timeout, 0);
2919 /* Command must be cleared after abort */
2920 if (r == SUCCESS && st->cb_idx != 0xFF)
2921 r = FAILED;
2922 out:
2923 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2924 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2925 if (pcie_device)
2926 pcie_device_put(pcie_device);
2927 return r;
2928}
2929
2930/**
2931 * scsih_dev_reset - eh threads main device reset routine
2932 * @scmd: pointer to scsi command object
2933 *
2934 * Return: SUCCESS if command aborted else FAILED
2935 */
2936static int
2937scsih_dev_reset(struct scsi_cmnd *scmd)
2938{
2939 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2940 struct MPT3SAS_DEVICE *sas_device_priv_data;
2941 struct _sas_device *sas_device = NULL;
2942 struct _pcie_device *pcie_device = NULL;
2943 u16 handle;
2944 u8 tr_method = 0;
2945 u8 tr_timeout = 30;
2946 int r;
2947
2948 struct scsi_target *starget = scmd->device->sdev_target;
2949 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2950
2951 sdev_printk(KERN_INFO, scmd->device,
2952 "attempting device reset! scmd(%p)\n", scmd);
2953 _scsih_tm_display_info(ioc, scmd);
2954
2955 sas_device_priv_data = scmd->device->hostdata;
2956 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2957 ioc->remove_host) {
2958 sdev_printk(KERN_INFO, scmd->device,
2959 "device been deleted! scmd(%p)\n", scmd);
2960 scmd->result = DID_NO_CONNECT << 16;
2961 scmd->scsi_done(scmd);
2962 r = SUCCESS;
2963 goto out;
2964 }
2965
2966 /* for hidden raid components obtain the volume_handle */
2967 handle = 0;
2968 if (sas_device_priv_data->sas_target->flags &
2969 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2970 sas_device = mpt3sas_get_sdev_from_target(ioc,
2971 target_priv_data);
2972 if (sas_device)
2973 handle = sas_device->volume_handle;
2974 } else
2975 handle = sas_device_priv_data->sas_target->handle;
2976
2977 if (!handle) {
2978 scmd->result = DID_RESET << 16;
2979 r = FAILED;
2980 goto out;
2981 }
2982
2983 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2984
2985 if (pcie_device && (!ioc->tm_custom_handling) &&
2986 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
2987 tr_timeout = pcie_device->reset_timeout;
2988 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2989 } else
2990 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2991
2992 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2993 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2994 tr_timeout, tr_method);
2995 /* Check for busy commands after reset */
2996 if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2997 r = FAILED;
2998 out:
2999 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
3000 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3001
3002 if (sas_device)
3003 sas_device_put(sas_device);
3004 if (pcie_device)
3005 pcie_device_put(pcie_device);
3006
3007 return r;
3008}
3009
3010/**
3011 * scsih_target_reset - eh threads main target reset routine
3012 * @scmd: pointer to scsi command object
3013 *
3014 * Return: SUCCESS if command aborted else FAILED
3015 */
3016static int
3017scsih_target_reset(struct scsi_cmnd *scmd)
3018{
3019 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3020 struct MPT3SAS_DEVICE *sas_device_priv_data;
3021 struct _sas_device *sas_device = NULL;
3022 struct _pcie_device *pcie_device = NULL;
3023 u16 handle;
3024 u8 tr_method = 0;
3025 u8 tr_timeout = 30;
3026 int r;
3027 struct scsi_target *starget = scmd->device->sdev_target;
3028 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3029
3030 starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
3031 scmd);
3032 _scsih_tm_display_info(ioc, scmd);
3033
3034 sas_device_priv_data = scmd->device->hostdata;
3035 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3036 ioc->remove_host) {
3037 starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
3038 scmd);
3039 scmd->result = DID_NO_CONNECT << 16;
3040 scmd->scsi_done(scmd);
3041 r = SUCCESS;
3042 goto out;
3043 }
3044
3045 /* for hidden raid components obtain the volume_handle */
3046 handle = 0;
3047 if (sas_device_priv_data->sas_target->flags &
3048 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3049 sas_device = mpt3sas_get_sdev_from_target(ioc,
3050 target_priv_data);
3051 if (sas_device)
3052 handle = sas_device->volume_handle;
3053 } else
3054 handle = sas_device_priv_data->sas_target->handle;
3055
3056 if (!handle) {
3057 scmd->result = DID_RESET << 16;
3058 r = FAILED;
3059 goto out;
3060 }
3061
3062 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3063
3064 if (pcie_device && (!ioc->tm_custom_handling) &&
3065 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3066 tr_timeout = pcie_device->reset_timeout;
3067 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3068 } else
3069 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3070 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3071 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3072 tr_timeout, tr_method);
3073 /* Check for busy commands after reset */
3074 if (r == SUCCESS && atomic_read(&starget->target_busy))
3075 r = FAILED;
3076 out:
3077 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3078 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3079
3080 if (sas_device)
3081 sas_device_put(sas_device);
3082 if (pcie_device)
3083 pcie_device_put(pcie_device);
3084 return r;
3085}
3086
3087
3088/**
3089 * scsih_host_reset - eh threads main host reset routine
3090 * @scmd: pointer to scsi command object
3091 *
3092 * Return: SUCCESS if command aborted else FAILED
3093 */
3094static int
3095scsih_host_reset(struct scsi_cmnd *scmd)
3096{
3097 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3098 int r, retval;
3099
3100 ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
3101 scsi_print_command(scmd);
3102
3103 if (ioc->is_driver_loading || ioc->remove_host) {
3104 ioc_info(ioc, "Blocking the host reset\n");
3105 r = FAILED;
3106 goto out;
3107 }
3108
3109 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3110 r = (retval < 0) ? FAILED : SUCCESS;
3111out:
3112 ioc_info(ioc, "host reset: %s scmd(%p)\n",
3113 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3114
3115 return r;
3116}
3117
3118/**
3119 * _scsih_fw_event_add - insert and queue up fw_event
3120 * @ioc: per adapter object
3121 * @fw_event: object describing the event
3122 * Context: This function will acquire ioc->fw_event_lock.
3123 *
3124 * This adds the firmware event object into link list, then queues it up to
3125 * be processed from user context.
3126 */
3127static void
3128_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3129{
3130 unsigned long flags;
3131
3132 if (ioc->firmware_event_thread == NULL)
3133 return;
3134
3135 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3136 fw_event_work_get(fw_event);
3137 INIT_LIST_HEAD(&fw_event->list);
3138 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3139 INIT_WORK(&fw_event->work, _firmware_event_work);
3140 fw_event_work_get(fw_event);
3141 queue_work(ioc->firmware_event_thread, &fw_event->work);
3142 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3143}
3144
3145/**
3146 * _scsih_fw_event_del_from_list - delete fw_event from the list
3147 * @ioc: per adapter object
3148 * @fw_event: object describing the event
3149 * Context: This function will acquire ioc->fw_event_lock.
3150 *
3151 * If the fw_event is on the fw_event_list, remove it and do a put.
3152 */
3153static void
3154_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3155 *fw_event)
3156{
3157 unsigned long flags;
3158
3159 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3160 if (!list_empty(&fw_event->list)) {
3161 list_del_init(&fw_event->list);
3162 fw_event_work_put(fw_event);
3163 }
3164 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3165}
3166
3167
3168 /**
3169 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3170 * @ioc: per adapter object
3171 * @event_data: trigger event data
3172 */
3173void
3174mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3175 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3176{
3177 struct fw_event_work *fw_event;
3178 u16 sz;
3179
3180 if (ioc->is_driver_loading)
3181 return;
3182 sz = sizeof(*event_data);
3183 fw_event = alloc_fw_event_work(sz);
3184 if (!fw_event)
3185 return;
3186 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3187 fw_event->ioc = ioc;
3188 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3189 _scsih_fw_event_add(ioc, fw_event);
3190 fw_event_work_put(fw_event);
3191}
3192
3193/**
3194 * _scsih_error_recovery_delete_devices - remove devices not responding
3195 * @ioc: per adapter object
3196 */
3197static void
3198_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3199{
3200 struct fw_event_work *fw_event;
3201
3202 if (ioc->is_driver_loading)
3203 return;
3204 fw_event = alloc_fw_event_work(0);
3205 if (!fw_event)
3206 return;
3207 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3208 fw_event->ioc = ioc;
3209 _scsih_fw_event_add(ioc, fw_event);
3210 fw_event_work_put(fw_event);
3211}
3212
3213/**
3214 * mpt3sas_port_enable_complete - port enable completed (fake event)
3215 * @ioc: per adapter object
3216 */
3217void
3218mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3219{
3220 struct fw_event_work *fw_event;
3221
3222 fw_event = alloc_fw_event_work(0);
3223 if (!fw_event)
3224 return;
3225 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3226 fw_event->ioc = ioc;
3227 _scsih_fw_event_add(ioc, fw_event);
3228 fw_event_work_put(fw_event);
3229}
3230
3231static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3232{
3233 unsigned long flags;
3234 struct fw_event_work *fw_event = NULL;
3235
3236 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3237 if (!list_empty(&ioc->fw_event_list)) {
3238 fw_event = list_first_entry(&ioc->fw_event_list,
3239 struct fw_event_work, list);
3240 list_del_init(&fw_event->list);
3241 fw_event_work_put(fw_event);
3242 }
3243 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3244
3245 return fw_event;
3246}
3247
3248/**
3249 * _scsih_fw_event_cleanup_queue - cleanup event queue
3250 * @ioc: per adapter object
3251 *
3252 * Walk the firmware event queue, either killing timers, or waiting
3253 * for outstanding events to complete
3254 */
3255static void
3256_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3257{
3258 struct fw_event_work *fw_event;
3259
3260 if (list_empty(&ioc->fw_event_list) ||
3261 !ioc->firmware_event_thread || in_interrupt())
3262 return;
3263
3264 while ((fw_event = dequeue_next_fw_event(ioc))) {
3265 /*
3266 * Wait on the fw_event to complete. If this returns 1, then
3267 * the event was never executed, and we need a put for the
3268 * reference the work had on the fw_event.
3269 *
3270 * If it did execute, we wait for it to finish, and the put will
3271 * happen from _firmware_event_work()
3272 */
3273 if (cancel_work_sync(&fw_event->work))
3274 fw_event_work_put(fw_event);
3275
3276 }
3277}
3278
3279/**
3280 * _scsih_internal_device_block - block the sdev device
3281 * @sdev: per device object
3282 * @sas_device_priv_data : per device driver private data
3283 *
3284 * make sure device is blocked without error, if not
3285 * print an error
3286 */
3287static void
3288_scsih_internal_device_block(struct scsi_device *sdev,
3289 struct MPT3SAS_DEVICE *sas_device_priv_data)
3290{
3291 int r = 0;
3292
3293 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3294 sas_device_priv_data->sas_target->handle);
3295 sas_device_priv_data->block = 1;
3296
3297 r = scsi_internal_device_block_nowait(sdev);
3298 if (r == -EINVAL)
3299 sdev_printk(KERN_WARNING, sdev,
3300 "device_block failed with return(%d) for handle(0x%04x)\n",
3301 r, sas_device_priv_data->sas_target->handle);
3302}
3303
3304/**
3305 * _scsih_internal_device_unblock - unblock the sdev device
3306 * @sdev: per device object
3307 * @sas_device_priv_data : per device driver private data
3308 * make sure device is unblocked without error, if not retry
3309 * by blocking and then unblocking
3310 */
3311
3312static void
3313_scsih_internal_device_unblock(struct scsi_device *sdev,
3314 struct MPT3SAS_DEVICE *sas_device_priv_data)
3315{
3316 int r = 0;
3317
3318 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3319 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3320 sas_device_priv_data->block = 0;
3321 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3322 if (r == -EINVAL) {
3323 /* The device has been set to SDEV_RUNNING by SD layer during
3324 * device addition but the request queue is still stopped by
3325 * our earlier block call. We need to perform a block again
3326 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3327
3328 sdev_printk(KERN_WARNING, sdev,
3329 "device_unblock failed with return(%d) for handle(0x%04x) "
3330 "performing a block followed by an unblock\n",
3331 r, sas_device_priv_data->sas_target->handle);
3332 sas_device_priv_data->block = 1;
3333 r = scsi_internal_device_block_nowait(sdev);
3334 if (r)
3335 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3336 "failed with return(%d) for handle(0x%04x)\n",
3337 r, sas_device_priv_data->sas_target->handle);
3338
3339 sas_device_priv_data->block = 0;
3340 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3341 if (r)
3342 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3343 " failed with return(%d) for handle(0x%04x)\n",
3344 r, sas_device_priv_data->sas_target->handle);
3345 }
3346}
3347
3348/**
3349 * _scsih_ublock_io_all_device - unblock every device
3350 * @ioc: per adapter object
3351 *
3352 * change the device state from block to running
3353 */
3354static void
3355_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3356{
3357 struct MPT3SAS_DEVICE *sas_device_priv_data;
3358 struct scsi_device *sdev;
3359
3360 shost_for_each_device(sdev, ioc->shost) {
3361 sas_device_priv_data = sdev->hostdata;
3362 if (!sas_device_priv_data)
3363 continue;
3364 if (!sas_device_priv_data->block)
3365 continue;
3366
3367 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3368 "device_running, handle(0x%04x)\n",
3369 sas_device_priv_data->sas_target->handle));
3370 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3371 }
3372}
3373
3374
3375/**
3376 * _scsih_ublock_io_device - prepare device to be deleted
3377 * @ioc: per adapter object
3378 * @sas_address: sas address
3379 *
3380 * unblock then put device in offline state
3381 */
3382static void
3383_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3384{
3385 struct MPT3SAS_DEVICE *sas_device_priv_data;
3386 struct scsi_device *sdev;
3387
3388 shost_for_each_device(sdev, ioc->shost) {
3389 sas_device_priv_data = sdev->hostdata;
3390 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3391 continue;
3392 if (sas_device_priv_data->sas_target->sas_address
3393 != sas_address)
3394 continue;
3395 if (sas_device_priv_data->block)
3396 _scsih_internal_device_unblock(sdev,
3397 sas_device_priv_data);
3398 }
3399}
3400
3401/**
3402 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3403 * @ioc: per adapter object
3404 *
3405 * During device pull we need to appropriately set the sdev state.
3406 */
3407static void
3408_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3409{
3410 struct MPT3SAS_DEVICE *sas_device_priv_data;
3411 struct scsi_device *sdev;
3412
3413 shost_for_each_device(sdev, ioc->shost) {
3414 sas_device_priv_data = sdev->hostdata;
3415 if (!sas_device_priv_data)
3416 continue;
3417 if (sas_device_priv_data->block)
3418 continue;
3419 if (sas_device_priv_data->ignore_delay_remove) {
3420 sdev_printk(KERN_INFO, sdev,
3421 "%s skip device_block for SES handle(0x%04x)\n",
3422 __func__, sas_device_priv_data->sas_target->handle);
3423 continue;
3424 }
3425 _scsih_internal_device_block(sdev, sas_device_priv_data);
3426 }
3427}
3428
3429/**
3430 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3431 * @ioc: per adapter object
3432 * @handle: device handle
3433 *
3434 * During device pull we need to appropriately set the sdev state.
3435 */
3436static void
3437_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3438{
3439 struct MPT3SAS_DEVICE *sas_device_priv_data;
3440 struct scsi_device *sdev;
3441 struct _sas_device *sas_device;
3442
3443 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3444
3445 shost_for_each_device(sdev, ioc->shost) {
3446 sas_device_priv_data = sdev->hostdata;
3447 if (!sas_device_priv_data)
3448 continue;
3449 if (sas_device_priv_data->sas_target->handle != handle)
3450 continue;
3451 if (sas_device_priv_data->block)
3452 continue;
3453 if (sas_device && sas_device->pend_sas_rphy_add)
3454 continue;
3455 if (sas_device_priv_data->ignore_delay_remove) {
3456 sdev_printk(KERN_INFO, sdev,
3457 "%s skip device_block for SES handle(0x%04x)\n",
3458 __func__, sas_device_priv_data->sas_target->handle);
3459 continue;
3460 }
3461 _scsih_internal_device_block(sdev, sas_device_priv_data);
3462 }
3463
3464 if (sas_device)
3465 sas_device_put(sas_device);
3466}
3467
3468/**
3469 * _scsih_block_io_to_children_attached_to_ex
3470 * @ioc: per adapter object
3471 * @sas_expander: the sas_device object
3472 *
3473 * This routine set sdev state to SDEV_BLOCK for all devices
3474 * attached to this expander. This function called when expander is
3475 * pulled.
3476 */
3477static void
3478_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3479 struct _sas_node *sas_expander)
3480{
3481 struct _sas_port *mpt3sas_port;
3482 struct _sas_device *sas_device;
3483 struct _sas_node *expander_sibling;
3484 unsigned long flags;
3485
3486 if (!sas_expander)
3487 return;
3488
3489 list_for_each_entry(mpt3sas_port,
3490 &sas_expander->sas_port_list, port_list) {
3491 if (mpt3sas_port->remote_identify.device_type ==
3492 SAS_END_DEVICE) {
3493 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3494 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3495 mpt3sas_port->remote_identify.sas_address);
3496 if (sas_device) {
3497 set_bit(sas_device->handle,
3498 ioc->blocking_handles);
3499 sas_device_put(sas_device);
3500 }
3501 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3502 }
3503 }
3504
3505 list_for_each_entry(mpt3sas_port,
3506 &sas_expander->sas_port_list, port_list) {
3507
3508 if (mpt3sas_port->remote_identify.device_type ==
3509 SAS_EDGE_EXPANDER_DEVICE ||
3510 mpt3sas_port->remote_identify.device_type ==
3511 SAS_FANOUT_EXPANDER_DEVICE) {
3512 expander_sibling =
3513 mpt3sas_scsih_expander_find_by_sas_address(
3514 ioc, mpt3sas_port->remote_identify.sas_address);
3515 _scsih_block_io_to_children_attached_to_ex(ioc,
3516 expander_sibling);
3517 }
3518 }
3519}
3520
3521/**
3522 * _scsih_block_io_to_children_attached_directly
3523 * @ioc: per adapter object
3524 * @event_data: topology change event data
3525 *
3526 * This routine set sdev state to SDEV_BLOCK for all devices
3527 * direct attached during device pull.
3528 */
3529static void
3530_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3531 Mpi2EventDataSasTopologyChangeList_t *event_data)
3532{
3533 int i;
3534 u16 handle;
3535 u16 reason_code;
3536
3537 for (i = 0; i < event_data->NumEntries; i++) {
3538 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3539 if (!handle)
3540 continue;
3541 reason_code = event_data->PHY[i].PhyStatus &
3542 MPI2_EVENT_SAS_TOPO_RC_MASK;
3543 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3544 _scsih_block_io_device(ioc, handle);
3545 }
3546}
3547
3548/**
3549 * _scsih_block_io_to_pcie_children_attached_directly
3550 * @ioc: per adapter object
3551 * @event_data: topology change event data
3552 *
3553 * This routine set sdev state to SDEV_BLOCK for all devices
3554 * direct attached during device pull/reconnect.
3555 */
3556static void
3557_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3558 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3559{
3560 int i;
3561 u16 handle;
3562 u16 reason_code;
3563
3564 for (i = 0; i < event_data->NumEntries; i++) {
3565 handle =
3566 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3567 if (!handle)
3568 continue;
3569 reason_code = event_data->PortEntry[i].PortStatus;
3570 if (reason_code ==
3571 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3572 _scsih_block_io_device(ioc, handle);
3573 }
3574}
3575/**
3576 * _scsih_tm_tr_send - send task management request
3577 * @ioc: per adapter object
3578 * @handle: device handle
3579 * Context: interrupt time.
3580 *
3581 * This code is to initiate the device removal handshake protocol
3582 * with controller firmware. This function will issue target reset
3583 * using high priority request queue. It will send a sas iounit
3584 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
3585 *
3586 * This is designed to send muliple task management request at the same
3587 * time to the fifo. If the fifo is full, we will append the request,
3588 * and process it in a future completion.
3589 */
3590static void
3591_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3592{
3593 Mpi2SCSITaskManagementRequest_t *mpi_request;
3594 u16 smid;
3595 struct _sas_device *sas_device = NULL;
3596 struct _pcie_device *pcie_device = NULL;
3597 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3598 u64 sas_address = 0;
3599 unsigned long flags;
3600 struct _tr_list *delayed_tr;
3601 u32 ioc_state;
3602 u8 tr_method = 0;
3603
3604 if (ioc->pci_error_recovery) {
3605 dewtprintk(ioc,
3606 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3607 __func__, handle));
3608 return;
3609 }
3610 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3611 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3612 dewtprintk(ioc,
3613 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3614 __func__, handle));
3615 return;
3616 }
3617
3618 /* if PD, then return */
3619 if (test_bit(handle, ioc->pd_handles))
3620 return;
3621
3622 clear_bit(handle, ioc->pend_os_device_add);
3623
3624 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3625 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3626 if (sas_device && sas_device->starget &&
3627 sas_device->starget->hostdata) {
3628 sas_target_priv_data = sas_device->starget->hostdata;
3629 sas_target_priv_data->deleted = 1;
3630 sas_address = sas_device->sas_address;
3631 }
3632 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3633 if (!sas_device) {
3634 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3635 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3636 if (pcie_device && pcie_device->starget &&
3637 pcie_device->starget->hostdata) {
3638 sas_target_priv_data = pcie_device->starget->hostdata;
3639 sas_target_priv_data->deleted = 1;
3640 sas_address = pcie_device->wwid;
3641 }
3642 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3643 if (pcie_device && (!ioc->tm_custom_handling) &&
3644 (!(mpt3sas_scsih_is_pcie_scsi_device(
3645 pcie_device->device_info))))
3646 tr_method =
3647 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3648 else
3649 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3650 }
3651 if (sas_target_priv_data) {
3652 dewtprintk(ioc,
3653 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3654 handle, (u64)sas_address));
3655 if (sas_device) {
3656 if (sas_device->enclosure_handle != 0)
3657 dewtprintk(ioc,
3658 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3659 (u64)sas_device->enclosure_logical_id,
3660 sas_device->slot));
3661 if (sas_device->connector_name[0] != '\0')
3662 dewtprintk(ioc,
3663 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3664 sas_device->enclosure_level,
3665 sas_device->connector_name));
3666 } else if (pcie_device) {
3667 if (pcie_device->enclosure_handle != 0)
3668 dewtprintk(ioc,
3669 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3670 (u64)pcie_device->enclosure_logical_id,
3671 pcie_device->slot));
3672 if (pcie_device->connector_name[0] != '\0')
3673 dewtprintk(ioc,
3674 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3675 pcie_device->enclosure_level,
3676 pcie_device->connector_name));
3677 }
3678 _scsih_ublock_io_device(ioc, sas_address);
3679 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3680 }
3681
3682 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3683 if (!smid) {
3684 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3685 if (!delayed_tr)
3686 goto out;
3687 INIT_LIST_HEAD(&delayed_tr->list);
3688 delayed_tr->handle = handle;
3689 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3690 dewtprintk(ioc,
3691 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3692 handle));
3693 goto out;
3694 }
3695
3696 dewtprintk(ioc,
3697 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3698 handle, smid, ioc->tm_tr_cb_idx));
3699 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3700 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3701 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3702 mpi_request->DevHandle = cpu_to_le16(handle);
3703 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3704 mpi_request->MsgFlags = tr_method;
3705 set_bit(handle, ioc->device_remove_in_progress);
3706 ioc->put_smid_hi_priority(ioc, smid, 0);
3707 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3708
3709out:
3710 if (sas_device)
3711 sas_device_put(sas_device);
3712 if (pcie_device)
3713 pcie_device_put(pcie_device);
3714}
3715
3716/**
3717 * _scsih_tm_tr_complete -
3718 * @ioc: per adapter object
3719 * @smid: system request message index
3720 * @msix_index: MSIX table index supplied by the OS
3721 * @reply: reply message frame(lower 32bit addr)
3722 * Context: interrupt time.
3723 *
3724 * This is the target reset completion routine.
3725 * This code is part of the code to initiate the device removal
3726 * handshake protocol with controller firmware.
3727 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3728 *
3729 * Return: 1 meaning mf should be freed from _base_interrupt
3730 * 0 means the mf is freed from this function.
3731 */
3732static u8
3733_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3734 u32 reply)
3735{
3736 u16 handle;
3737 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3738 Mpi2SCSITaskManagementReply_t *mpi_reply =
3739 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3740 Mpi2SasIoUnitControlRequest_t *mpi_request;
3741 u16 smid_sas_ctrl;
3742 u32 ioc_state;
3743 struct _sc_list *delayed_sc;
3744
3745 if (ioc->pci_error_recovery) {
3746 dewtprintk(ioc,
3747 ioc_info(ioc, "%s: host in pci error recovery\n",
3748 __func__));
3749 return 1;
3750 }
3751 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3752 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3753 dewtprintk(ioc,
3754 ioc_info(ioc, "%s: host is not operational\n",
3755 __func__));
3756 return 1;
3757 }
3758 if (unlikely(!mpi_reply)) {
3759 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3760 __FILE__, __LINE__, __func__);
3761 return 1;
3762 }
3763 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3764 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3765 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3766 dewtprintk(ioc,
3767 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3768 handle,
3769 le16_to_cpu(mpi_reply->DevHandle), smid));
3770 return 0;
3771 }
3772
3773 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3774 dewtprintk(ioc,
3775 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3776 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3777 le32_to_cpu(mpi_reply->IOCLogInfo),
3778 le32_to_cpu(mpi_reply->TerminationCount)));
3779
3780 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3781 if (!smid_sas_ctrl) {
3782 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3783 if (!delayed_sc)
3784 return _scsih_check_for_pending_tm(ioc, smid);
3785 INIT_LIST_HEAD(&delayed_sc->list);
3786 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3787 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3788 dewtprintk(ioc,
3789 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3790 handle));
3791 return _scsih_check_for_pending_tm(ioc, smid);
3792 }
3793
3794 dewtprintk(ioc,
3795 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3796 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3797 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3798 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3799 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3800 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3801 mpi_request->DevHandle = mpi_request_tm->DevHandle;
3802 ioc->put_smid_default(ioc, smid_sas_ctrl);
3803
3804 return _scsih_check_for_pending_tm(ioc, smid);
3805}
3806
3807/** _scsih_allow_scmd_to_device - check whether scmd needs to
3808 * issue to IOC or not.
3809 * @ioc: per adapter object
3810 * @scmd: pointer to scsi command object
3811 *
3812 * Returns true if scmd can be issued to IOC otherwise returns false.
3813 */
3814inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3815 struct scsi_cmnd *scmd)
3816{
3817
3818 if (ioc->pci_error_recovery)
3819 return false;
3820
3821 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3822 if (ioc->remove_host)
3823 return false;
3824
3825 return true;
3826 }
3827
3828 if (ioc->remove_host) {
3829
3830 switch (scmd->cmnd[0]) {
3831 case SYNCHRONIZE_CACHE:
3832 case START_STOP:
3833 return true;
3834 default:
3835 return false;
3836 }
3837 }
3838
3839 return true;
3840}
3841
3842/**
3843 * _scsih_sas_control_complete - completion routine
3844 * @ioc: per adapter object
3845 * @smid: system request message index
3846 * @msix_index: MSIX table index supplied by the OS
3847 * @reply: reply message frame(lower 32bit addr)
3848 * Context: interrupt time.
3849 *
3850 * This is the sas iounit control completion routine.
3851 * This code is part of the code to initiate the device removal
3852 * handshake protocol with controller firmware.
3853 *
3854 * Return: 1 meaning mf should be freed from _base_interrupt
3855 * 0 means the mf is freed from this function.
3856 */
3857static u8
3858_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3859 u8 msix_index, u32 reply)
3860{
3861 Mpi2SasIoUnitControlReply_t *mpi_reply =
3862 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3863
3864 if (likely(mpi_reply)) {
3865 dewtprintk(ioc,
3866 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3867 le16_to_cpu(mpi_reply->DevHandle), smid,
3868 le16_to_cpu(mpi_reply->IOCStatus),
3869 le32_to_cpu(mpi_reply->IOCLogInfo)));
3870 if (le16_to_cpu(mpi_reply->IOCStatus) ==
3871 MPI2_IOCSTATUS_SUCCESS) {
3872 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3873 ioc->device_remove_in_progress);
3874 }
3875 } else {
3876 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3877 __FILE__, __LINE__, __func__);
3878 }
3879 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3880}
3881
3882/**
3883 * _scsih_tm_tr_volume_send - send target reset request for volumes
3884 * @ioc: per adapter object
3885 * @handle: device handle
3886 * Context: interrupt time.
3887 *
3888 * This is designed to send muliple task management request at the same
3889 * time to the fifo. If the fifo is full, we will append the request,
3890 * and process it in a future completion.
3891 */
3892static void
3893_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3894{
3895 Mpi2SCSITaskManagementRequest_t *mpi_request;
3896 u16 smid;
3897 struct _tr_list *delayed_tr;
3898
3899 if (ioc->pci_error_recovery) {
3900 dewtprintk(ioc,
3901 ioc_info(ioc, "%s: host reset in progress!\n",
3902 __func__));
3903 return;
3904 }
3905
3906 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3907 if (!smid) {
3908 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3909 if (!delayed_tr)
3910 return;
3911 INIT_LIST_HEAD(&delayed_tr->list);
3912 delayed_tr->handle = handle;
3913 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3914 dewtprintk(ioc,
3915 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3916 handle));
3917 return;
3918 }
3919
3920 dewtprintk(ioc,
3921 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3922 handle, smid, ioc->tm_tr_volume_cb_idx));
3923 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3924 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3925 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3926 mpi_request->DevHandle = cpu_to_le16(handle);
3927 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3928 ioc->put_smid_hi_priority(ioc, smid, 0);
3929}
3930
3931/**
3932 * _scsih_tm_volume_tr_complete - target reset completion
3933 * @ioc: per adapter object
3934 * @smid: system request message index
3935 * @msix_index: MSIX table index supplied by the OS
3936 * @reply: reply message frame(lower 32bit addr)
3937 * Context: interrupt time.
3938 *
3939 * Return: 1 meaning mf should be freed from _base_interrupt
3940 * 0 means the mf is freed from this function.
3941 */
3942static u8
3943_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3944 u8 msix_index, u32 reply)
3945{
3946 u16 handle;
3947 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3948 Mpi2SCSITaskManagementReply_t *mpi_reply =
3949 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3950
3951 if (ioc->shost_recovery || ioc->pci_error_recovery) {
3952 dewtprintk(ioc,
3953 ioc_info(ioc, "%s: host reset in progress!\n",
3954 __func__));
3955 return 1;
3956 }
3957 if (unlikely(!mpi_reply)) {
3958 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3959 __FILE__, __LINE__, __func__);
3960 return 1;
3961 }
3962
3963 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3964 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3965 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3966 dewtprintk(ioc,
3967 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3968 handle, le16_to_cpu(mpi_reply->DevHandle),
3969 smid));
3970 return 0;
3971 }
3972
3973 dewtprintk(ioc,
3974 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3975 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3976 le32_to_cpu(mpi_reply->IOCLogInfo),
3977 le32_to_cpu(mpi_reply->TerminationCount)));
3978
3979 return _scsih_check_for_pending_tm(ioc, smid);
3980}
3981
3982/**
3983 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
3984 * @ioc: per adapter object
3985 * @smid: system request message index
3986 * @event: Event ID
3987 * @event_context: used to track events uniquely
3988 *
3989 * Context - processed in interrupt context.
3990 */
3991static void
3992_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3993 U32 event_context)
3994{
3995 Mpi2EventAckRequest_t *ack_request;
3996 int i = smid - ioc->internal_smid;
3997 unsigned long flags;
3998
3999 /* Without releasing the smid just update the
4000 * call back index and reuse the same smid for
4001 * processing this delayed request
4002 */
4003 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4004 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4005 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4006
4007 dewtprintk(ioc,
4008 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4009 le16_to_cpu(event), smid, ioc->base_cb_idx));
4010 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4011 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4012 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4013 ack_request->Event = event;
4014 ack_request->EventContext = event_context;
4015 ack_request->VF_ID = 0; /* TODO */
4016 ack_request->VP_ID = 0;
4017 ioc->put_smid_default(ioc, smid);
4018}
4019
4020/**
4021 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4022 * sas_io_unit_ctrl messages
4023 * @ioc: per adapter object
4024 * @smid: system request message index
4025 * @handle: device handle
4026 *
4027 * Context - processed in interrupt context.
4028 */
4029static void
4030_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4031 u16 smid, u16 handle)
4032{
4033 Mpi2SasIoUnitControlRequest_t *mpi_request;
4034 u32 ioc_state;
4035 int i = smid - ioc->internal_smid;
4036 unsigned long flags;
4037
4038 if (ioc->remove_host) {
4039 dewtprintk(ioc,
4040 ioc_info(ioc, "%s: host has been removed\n",
4041 __func__));
4042 return;
4043 } else if (ioc->pci_error_recovery) {
4044 dewtprintk(ioc,
4045 ioc_info(ioc, "%s: host in pci error recovery\n",
4046 __func__));
4047 return;
4048 }
4049 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4050 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4051 dewtprintk(ioc,
4052 ioc_info(ioc, "%s: host is not operational\n",
4053 __func__));
4054 return;
4055 }
4056
4057 /* Without releasing the smid just update the
4058 * call back index and reuse the same smid for
4059 * processing this delayed request
4060 */
4061 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4062 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4063 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4064
4065 dewtprintk(ioc,
4066 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4067 handle, smid, ioc->tm_sas_control_cb_idx));
4068 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4069 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4070 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4071 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4072 mpi_request->DevHandle = cpu_to_le16(handle);
4073 ioc->put_smid_default(ioc, smid);
4074}
4075
4076/**
4077 * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4078 * @ioc: per adapter object
4079 * @smid: system request message index
4080 *
4081 * Context: Executed in interrupt context
4082 *
4083 * This will check delayed internal messages list, and process the
4084 * next request.
4085 *
4086 * Return: 1 meaning mf should be freed from _base_interrupt
4087 * 0 means the mf is freed from this function.
4088 */
4089u8
4090mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4091{
4092 struct _sc_list *delayed_sc;
4093 struct _event_ack_list *delayed_event_ack;
4094
4095 if (!list_empty(&ioc->delayed_event_ack_list)) {
4096 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4097 struct _event_ack_list, list);
4098 _scsih_issue_delayed_event_ack(ioc, smid,
4099 delayed_event_ack->Event, delayed_event_ack->EventContext);
4100 list_del(&delayed_event_ack->list);
4101 kfree(delayed_event_ack);
4102 return 0;
4103 }
4104
4105 if (!list_empty(&ioc->delayed_sc_list)) {
4106 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4107 struct _sc_list, list);
4108 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4109 delayed_sc->handle);
4110 list_del(&delayed_sc->list);
4111 kfree(delayed_sc);
4112 return 0;
4113 }
4114 return 1;
4115}
4116
4117/**
4118 * _scsih_check_for_pending_tm - check for pending task management
4119 * @ioc: per adapter object
4120 * @smid: system request message index
4121 *
4122 * This will check delayed target reset list, and feed the
4123 * next reqeust.
4124 *
4125 * Return: 1 meaning mf should be freed from _base_interrupt
4126 * 0 means the mf is freed from this function.
4127 */
4128static u8
4129_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4130{
4131 struct _tr_list *delayed_tr;
4132
4133 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4134 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4135 struct _tr_list, list);
4136 mpt3sas_base_free_smid(ioc, smid);
4137 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4138 list_del(&delayed_tr->list);
4139 kfree(delayed_tr);
4140 return 0;
4141 }
4142
4143 if (!list_empty(&ioc->delayed_tr_list)) {
4144 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4145 struct _tr_list, list);
4146 mpt3sas_base_free_smid(ioc, smid);
4147 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4148 list_del(&delayed_tr->list);
4149 kfree(delayed_tr);
4150 return 0;
4151 }
4152
4153 return 1;
4154}
4155
4156/**
4157 * _scsih_check_topo_delete_events - sanity check on topo events
4158 * @ioc: per adapter object
4159 * @event_data: the event data payload
4160 *
4161 * This routine added to better handle cable breaker.
4162 *
4163 * This handles the case where driver receives multiple expander
4164 * add and delete events in a single shot. When there is a delete event
4165 * the routine will void any pending add events waiting in the event queue.
4166 */
4167static void
4168_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4169 Mpi2EventDataSasTopologyChangeList_t *event_data)
4170{
4171 struct fw_event_work *fw_event;
4172 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4173 u16 expander_handle;
4174 struct _sas_node *sas_expander;
4175 unsigned long flags;
4176 int i, reason_code;
4177 u16 handle;
4178
4179 for (i = 0 ; i < event_data->NumEntries; i++) {
4180 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4181 if (!handle)
4182 continue;
4183 reason_code = event_data->PHY[i].PhyStatus &
4184 MPI2_EVENT_SAS_TOPO_RC_MASK;
4185 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4186 _scsih_tm_tr_send(ioc, handle);
4187 }
4188
4189 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4190 if (expander_handle < ioc->sas_hba.num_phys) {
4191 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4192 return;
4193 }
4194 if (event_data->ExpStatus ==
4195 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4196 /* put expander attached devices into blocking state */
4197 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4198 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4199 expander_handle);
4200 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4201 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4202 do {
4203 handle = find_first_bit(ioc->blocking_handles,
4204 ioc->facts.MaxDevHandle);
4205 if (handle < ioc->facts.MaxDevHandle)
4206 _scsih_block_io_device(ioc, handle);
4207 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4208 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4209 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4210
4211 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4212 return;
4213
4214 /* mark ignore flag for pending events */
4215 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4216 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4217 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4218 fw_event->ignore)
4219 continue;
4220 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4221 fw_event->event_data;
4222 if (local_event_data->ExpStatus ==
4223 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4224 local_event_data->ExpStatus ==
4225 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4226 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4227 expander_handle) {
4228 dewtprintk(ioc,
4229 ioc_info(ioc, "setting ignoring flag\n"));
4230 fw_event->ignore = 1;
4231 }
4232 }
4233 }
4234 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4235}
4236
4237/**
4238 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4239 * events
4240 * @ioc: per adapter object
4241 * @event_data: the event data payload
4242 *
4243 * This handles the case where driver receives multiple switch
4244 * or device add and delete events in a single shot. When there
4245 * is a delete event the routine will void any pending add
4246 * events waiting in the event queue.
4247 */
4248static void
4249_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4250 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4251{
4252 struct fw_event_work *fw_event;
4253 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4254 unsigned long flags;
4255 int i, reason_code;
4256 u16 handle, switch_handle;
4257
4258 for (i = 0; i < event_data->NumEntries; i++) {
4259 handle =
4260 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4261 if (!handle)
4262 continue;
4263 reason_code = event_data->PortEntry[i].PortStatus;
4264 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4265 _scsih_tm_tr_send(ioc, handle);
4266 }
4267
4268 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4269 if (!switch_handle) {
4270 _scsih_block_io_to_pcie_children_attached_directly(
4271 ioc, event_data);
4272 return;
4273 }
4274 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4275 if ((event_data->SwitchStatus
4276 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4277 (event_data->SwitchStatus ==
4278 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4279 _scsih_block_io_to_pcie_children_attached_directly(
4280 ioc, event_data);
4281
4282 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4283 return;
4284
4285 /* mark ignore flag for pending events */
4286 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4287 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4288 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4289 fw_event->ignore)
4290 continue;
4291 local_event_data =
4292 (Mpi26EventDataPCIeTopologyChangeList_t *)
4293 fw_event->event_data;
4294 if (local_event_data->SwitchStatus ==
4295 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4296 local_event_data->SwitchStatus ==
4297 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4298 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4299 switch_handle) {
4300 dewtprintk(ioc,
4301 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4302 fw_event->ignore = 1;
4303 }
4304 }
4305 }
4306 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4307}
4308
4309/**
4310 * _scsih_set_volume_delete_flag - setting volume delete flag
4311 * @ioc: per adapter object
4312 * @handle: device handle
4313 *
4314 * This returns nothing.
4315 */
4316static void
4317_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4318{
4319 struct _raid_device *raid_device;
4320 struct MPT3SAS_TARGET *sas_target_priv_data;
4321 unsigned long flags;
4322
4323 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4324 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4325 if (raid_device && raid_device->starget &&
4326 raid_device->starget->hostdata) {
4327 sas_target_priv_data =
4328 raid_device->starget->hostdata;
4329 sas_target_priv_data->deleted = 1;
4330 dewtprintk(ioc,
4331 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4332 handle, (u64)raid_device->wwid));
4333 }
4334 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4335}
4336
4337/**
4338 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4339 * @handle: input handle
4340 * @a: handle for volume a
4341 * @b: handle for volume b
4342 *
4343 * IR firmware only supports two raid volumes. The purpose of this
4344 * routine is to set the volume handle in either a or b. When the given
4345 * input handle is non-zero, or when a and b have not been set before.
4346 */
4347static void
4348_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4349{
4350 if (!handle || handle == *a || handle == *b)
4351 return;
4352 if (!*a)
4353 *a = handle;
4354 else if (!*b)
4355 *b = handle;
4356}
4357
4358/**
4359 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4360 * @ioc: per adapter object
4361 * @event_data: the event data payload
4362 * Context: interrupt time.
4363 *
4364 * This routine will send target reset to volume, followed by target
4365 * resets to the PDs. This is called when a PD has been removed, or
4366 * volume has been deleted or removed. When the target reset is sent
4367 * to volume, the PD target resets need to be queued to start upon
4368 * completion of the volume target reset.
4369 */
4370static void
4371_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4372 Mpi2EventDataIrConfigChangeList_t *event_data)
4373{
4374 Mpi2EventIrConfigElement_t *element;
4375 int i;
4376 u16 handle, volume_handle, a, b;
4377 struct _tr_list *delayed_tr;
4378
4379 a = 0;
4380 b = 0;
4381
4382 if (ioc->is_warpdrive)
4383 return;
4384
4385 /* Volume Resets for Deleted or Removed */
4386 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4387 for (i = 0; i < event_data->NumElements; i++, element++) {
4388 if (le32_to_cpu(event_data->Flags) &
4389 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4390 continue;
4391 if (element->ReasonCode ==
4392 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4393 element->ReasonCode ==
4394 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4395 volume_handle = le16_to_cpu(element->VolDevHandle);
4396 _scsih_set_volume_delete_flag(ioc, volume_handle);
4397 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4398 }
4399 }
4400
4401 /* Volume Resets for UNHIDE events */
4402 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4403 for (i = 0; i < event_data->NumElements; i++, element++) {
4404 if (le32_to_cpu(event_data->Flags) &
4405 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4406 continue;
4407 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4408 volume_handle = le16_to_cpu(element->VolDevHandle);
4409 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4410 }
4411 }
4412
4413 if (a)
4414 _scsih_tm_tr_volume_send(ioc, a);
4415 if (b)
4416 _scsih_tm_tr_volume_send(ioc, b);
4417
4418 /* PD target resets */
4419 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4420 for (i = 0; i < event_data->NumElements; i++, element++) {
4421 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4422 continue;
4423 handle = le16_to_cpu(element->PhysDiskDevHandle);
4424 volume_handle = le16_to_cpu(element->VolDevHandle);
4425 clear_bit(handle, ioc->pd_handles);
4426 if (!volume_handle)
4427 _scsih_tm_tr_send(ioc, handle);
4428 else if (volume_handle == a || volume_handle == b) {
4429 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4430 BUG_ON(!delayed_tr);
4431 INIT_LIST_HEAD(&delayed_tr->list);
4432 delayed_tr->handle = handle;
4433 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4434 dewtprintk(ioc,
4435 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4436 handle));
4437 } else
4438 _scsih_tm_tr_send(ioc, handle);
4439 }
4440}
4441
4442
4443/**
4444 * _scsih_check_volume_delete_events - set delete flag for volumes
4445 * @ioc: per adapter object
4446 * @event_data: the event data payload
4447 * Context: interrupt time.
4448 *
4449 * This will handle the case when the cable connected to entire volume is
4450 * pulled. We will take care of setting the deleted flag so normal IO will
4451 * not be sent.
4452 */
4453static void
4454_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4455 Mpi2EventDataIrVolume_t *event_data)
4456{
4457 u32 state;
4458
4459 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4460 return;
4461 state = le32_to_cpu(event_data->NewValue);
4462 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4463 MPI2_RAID_VOL_STATE_FAILED)
4464 _scsih_set_volume_delete_flag(ioc,
4465 le16_to_cpu(event_data->VolDevHandle));
4466}
4467
4468/**
4469 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4470 * @ioc: per adapter object
4471 * @event_data: the temp threshold event data
4472 * Context: interrupt time.
4473 */
4474static void
4475_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4476 Mpi2EventDataTemperature_t *event_data)
4477{
4478 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4479 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4480 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4481 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4482 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4483 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4484 event_data->SensorNum);
4485 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4486 event_data->CurrentTemperature);
4487 }
4488}
4489
4490static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4491{
4492 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4493
4494 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4495 return 0;
4496
4497 if (pending)
4498 return test_and_set_bit(0, &priv->ata_command_pending);
4499
4500 clear_bit(0, &priv->ata_command_pending);
4501 return 0;
4502}
4503
4504/**
4505 * _scsih_flush_running_cmds - completing outstanding commands.
4506 * @ioc: per adapter object
4507 *
4508 * The flushing out of all pending scmd commands following host reset,
4509 * where all IO is dropped to the floor.
4510 */
4511static void
4512_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4513{
4514 struct scsi_cmnd *scmd;
4515 struct scsiio_tracker *st;
4516 u16 smid;
4517 int count = 0;
4518
4519 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4520 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4521 if (!scmd)
4522 continue;
4523 count++;
4524 _scsih_set_satl_pending(scmd, false);
4525 st = scsi_cmd_priv(scmd);
4526 mpt3sas_base_clear_st(ioc, st);
4527 scsi_dma_unmap(scmd);
4528 if (ioc->pci_error_recovery || ioc->remove_host)
4529 scmd->result = DID_NO_CONNECT << 16;
4530 else
4531 scmd->result = DID_RESET << 16;
4532 scmd->scsi_done(scmd);
4533 }
4534 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4535}
4536
4537/**
4538 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4539 * @ioc: per adapter object
4540 * @scmd: pointer to scsi command object
4541 * @mpi_request: pointer to the SCSI_IO request message frame
4542 *
4543 * Supporting protection 1 and 3.
4544 */
4545static void
4546_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4547 Mpi25SCSIIORequest_t *mpi_request)
4548{
4549 u16 eedp_flags;
4550 unsigned char prot_op = scsi_get_prot_op(scmd);
4551 unsigned char prot_type = scsi_get_prot_type(scmd);
4552 Mpi25SCSIIORequest_t *mpi_request_3v =
4553 (Mpi25SCSIIORequest_t *)mpi_request;
4554
4555 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4556 return;
4557
4558 if (prot_op == SCSI_PROT_READ_STRIP)
4559 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4560 else if (prot_op == SCSI_PROT_WRITE_INSERT)
4561 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4562 else
4563 return;
4564
4565 switch (prot_type) {
4566 case SCSI_PROT_DIF_TYPE1:
4567 case SCSI_PROT_DIF_TYPE2:
4568
4569 /*
4570 * enable ref/guard checking
4571 * auto increment ref tag
4572 */
4573 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4574 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4575 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4576 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4577 cpu_to_be32(t10_pi_ref_tag(scmd->request));
4578 break;
4579
4580 case SCSI_PROT_DIF_TYPE3:
4581
4582 /*
4583 * enable guard checking
4584 */
4585 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4586
4587 break;
4588 }
4589
4590 mpi_request_3v->EEDPBlockSize =
4591 cpu_to_le16(scmd->device->sector_size);
4592
4593 if (ioc->is_gen35_ioc)
4594 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4595 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4596}
4597
4598/**
4599 * _scsih_eedp_error_handling - return sense code for EEDP errors
4600 * @scmd: pointer to scsi command object
4601 * @ioc_status: ioc status
4602 */
4603static void
4604_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4605{
4606 u8 ascq;
4607
4608 switch (ioc_status) {
4609 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4610 ascq = 0x01;
4611 break;
4612 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4613 ascq = 0x02;
4614 break;
4615 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4616 ascq = 0x03;
4617 break;
4618 default:
4619 ascq = 0x00;
4620 break;
4621 }
4622 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4623 ascq);
4624 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4625 SAM_STAT_CHECK_CONDITION;
4626}
4627
4628/**
4629 * scsih_qcmd - main scsi request entry point
4630 * @shost: SCSI host pointer
4631 * @scmd: pointer to scsi command object
4632 *
4633 * The callback index is set inside `ioc->scsi_io_cb_idx`.
4634 *
4635 * Return: 0 on success. If there's a failure, return either:
4636 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4637 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4638 */
4639static int
4640scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4641{
4642 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4643 struct MPT3SAS_DEVICE *sas_device_priv_data;
4644 struct MPT3SAS_TARGET *sas_target_priv_data;
4645 struct _raid_device *raid_device;
4646 struct request *rq = scmd->request;
4647 int class;
4648 Mpi25SCSIIORequest_t *mpi_request;
4649 struct _pcie_device *pcie_device = NULL;
4650 u32 mpi_control;
4651 u16 smid;
4652 u16 handle;
4653
4654 if (ioc->logging_level & MPT_DEBUG_SCSI)
4655 scsi_print_command(scmd);
4656
4657 sas_device_priv_data = scmd->device->hostdata;
4658 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4659 scmd->result = DID_NO_CONNECT << 16;
4660 scmd->scsi_done(scmd);
4661 return 0;
4662 }
4663
4664 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4665 scmd->result = DID_NO_CONNECT << 16;
4666 scmd->scsi_done(scmd);
4667 return 0;
4668 }
4669
4670 sas_target_priv_data = sas_device_priv_data->sas_target;
4671
4672 /* invalid device handle */
4673 handle = sas_target_priv_data->handle;
4674 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4675 scmd->result = DID_NO_CONNECT << 16;
4676 scmd->scsi_done(scmd);
4677 return 0;
4678 }
4679
4680
4681 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4682 /* host recovery or link resets sent via IOCTLs */
4683 return SCSI_MLQUEUE_HOST_BUSY;
4684 } else if (sas_target_priv_data->deleted) {
4685 /* device has been deleted */
4686 scmd->result = DID_NO_CONNECT << 16;
4687 scmd->scsi_done(scmd);
4688 return 0;
4689 } else if (sas_target_priv_data->tm_busy ||
4690 sas_device_priv_data->block) {
4691 /* device busy with task management */
4692 return SCSI_MLQUEUE_DEVICE_BUSY;
4693 }
4694
4695 /*
4696 * Bug work around for firmware SATL handling. The loop
4697 * is based on atomic operations and ensures consistency
4698 * since we're lockless at this point
4699 */
4700 do {
4701 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
4702 return SCSI_MLQUEUE_DEVICE_BUSY;
4703 } while (_scsih_set_satl_pending(scmd, true));
4704
4705 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4706 mpi_control = MPI2_SCSIIO_CONTROL_READ;
4707 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4708 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4709 else
4710 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4711
4712 /* set tags */
4713 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4714 /* NCQ Prio supported, make sure control indicated high priority */
4715 if (sas_device_priv_data->ncq_prio_enable) {
4716 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4717 if (class == IOPRIO_CLASS_RT)
4718 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4719 }
4720 /* Make sure Device is not raid volume.
4721 * We do not expose raid functionality to upper layer for warpdrive.
4722 */
4723 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4724 && !scsih_is_nvme(&scmd->device->sdev_gendev))
4725 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4726 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4727
4728 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4729 if (!smid) {
4730 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4731 _scsih_set_satl_pending(scmd, false);
4732 goto out;
4733 }
4734 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4735 memset(mpi_request, 0, ioc->request_sz);
4736 _scsih_setup_eedp(ioc, scmd, mpi_request);
4737
4738 if (scmd->cmd_len == 32)
4739 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4740 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4741 if (sas_device_priv_data->sas_target->flags &
4742 MPT_TARGET_FLAGS_RAID_COMPONENT)
4743 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4744 else
4745 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4746 mpi_request->DevHandle = cpu_to_le16(handle);
4747 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4748 mpi_request->Control = cpu_to_le32(mpi_control);
4749 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4750 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4751 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4752 mpi_request->SenseBufferLowAddress =
4753 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4754 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4755 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4756 mpi_request->LUN);
4757 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4758
4759 if (mpi_request->DataLength) {
4760 pcie_device = sas_target_priv_data->pcie_dev;
4761 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4762 mpt3sas_base_free_smid(ioc, smid);
4763 _scsih_set_satl_pending(scmd, false);
4764 goto out;
4765 }
4766 } else
4767 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4768
4769 raid_device = sas_target_priv_data->raid_device;
4770 if (raid_device && raid_device->direct_io_enabled)
4771 mpt3sas_setup_direct_io(ioc, scmd,
4772 raid_device, mpi_request);
4773
4774 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4775 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4776 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4777 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4778 ioc->put_smid_fast_path(ioc, smid, handle);
4779 } else
4780 ioc->put_smid_scsi_io(ioc, smid,
4781 le16_to_cpu(mpi_request->DevHandle));
4782 } else
4783 ioc->put_smid_default(ioc, smid);
4784 return 0;
4785
4786 out:
4787 return SCSI_MLQUEUE_HOST_BUSY;
4788}
4789
4790/**
4791 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4792 * @sense_buffer: sense data returned by target
4793 * @data: normalized skey/asc/ascq
4794 */
4795static void
4796_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4797{
4798 if ((sense_buffer[0] & 0x7F) >= 0x72) {
4799 /* descriptor format */
4800 data->skey = sense_buffer[1] & 0x0F;
4801 data->asc = sense_buffer[2];
4802 data->ascq = sense_buffer[3];
4803 } else {
4804 /* fixed format */
4805 data->skey = sense_buffer[2] & 0x0F;
4806 data->asc = sense_buffer[12];
4807 data->ascq = sense_buffer[13];
4808 }
4809}
4810
4811/**
4812 * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
4813 * @ioc: per adapter object
4814 * @scmd: pointer to scsi command object
4815 * @mpi_reply: reply mf payload returned from firmware
4816 * @smid: ?
4817 *
4818 * scsi_status - SCSI Status code returned from target device
4819 * scsi_state - state info associated with SCSI_IO determined by ioc
4820 * ioc_status - ioc supplied status info
4821 */
4822static void
4823_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4824 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4825{
4826 u32 response_info;
4827 u8 *response_bytes;
4828 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4829 MPI2_IOCSTATUS_MASK;
4830 u8 scsi_state = mpi_reply->SCSIState;
4831 u8 scsi_status = mpi_reply->SCSIStatus;
4832 char *desc_ioc_state = NULL;
4833 char *desc_scsi_status = NULL;
4834 char *desc_scsi_state = ioc->tmp_string;
4835 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4836 struct _sas_device *sas_device = NULL;
4837 struct _pcie_device *pcie_device = NULL;
4838 struct scsi_target *starget = scmd->device->sdev_target;
4839 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4840 char *device_str = NULL;
4841
4842 if (!priv_target)
4843 return;
4844 if (ioc->hide_ir_msg)
4845 device_str = "WarpDrive";
4846 else
4847 device_str = "volume";
4848
4849 if (log_info == 0x31170000)
4850 return;
4851
4852 switch (ioc_status) {
4853 case MPI2_IOCSTATUS_SUCCESS:
4854 desc_ioc_state = "success";
4855 break;
4856 case MPI2_IOCSTATUS_INVALID_FUNCTION:
4857 desc_ioc_state = "invalid function";
4858 break;
4859 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4860 desc_ioc_state = "scsi recovered error";
4861 break;
4862 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4863 desc_ioc_state = "scsi invalid dev handle";
4864 break;
4865 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4866 desc_ioc_state = "scsi device not there";
4867 break;
4868 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4869 desc_ioc_state = "scsi data overrun";
4870 break;
4871 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4872 desc_ioc_state = "scsi data underrun";
4873 break;
4874 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4875 desc_ioc_state = "scsi io data error";
4876 break;
4877 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4878 desc_ioc_state = "scsi protocol error";
4879 break;
4880 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4881 desc_ioc_state = "scsi task terminated";
4882 break;
4883 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4884 desc_ioc_state = "scsi residual mismatch";
4885 break;
4886 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4887 desc_ioc_state = "scsi task mgmt failed";
4888 break;
4889 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4890 desc_ioc_state = "scsi ioc terminated";
4891 break;
4892 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4893 desc_ioc_state = "scsi ext terminated";
4894 break;
4895 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4896 desc_ioc_state = "eedp guard error";
4897 break;
4898 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4899 desc_ioc_state = "eedp ref tag error";
4900 break;
4901 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4902 desc_ioc_state = "eedp app tag error";
4903 break;
4904 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4905 desc_ioc_state = "insufficient power";
4906 break;
4907 default:
4908 desc_ioc_state = "unknown";
4909 break;
4910 }
4911
4912 switch (scsi_status) {
4913 case MPI2_SCSI_STATUS_GOOD:
4914 desc_scsi_status = "good";
4915 break;
4916 case MPI2_SCSI_STATUS_CHECK_CONDITION:
4917 desc_scsi_status = "check condition";
4918 break;
4919 case MPI2_SCSI_STATUS_CONDITION_MET:
4920 desc_scsi_status = "condition met";
4921 break;
4922 case MPI2_SCSI_STATUS_BUSY:
4923 desc_scsi_status = "busy";
4924 break;
4925 case MPI2_SCSI_STATUS_INTERMEDIATE:
4926 desc_scsi_status = "intermediate";
4927 break;
4928 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
4929 desc_scsi_status = "intermediate condmet";
4930 break;
4931 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
4932 desc_scsi_status = "reservation conflict";
4933 break;
4934 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
4935 desc_scsi_status = "command terminated";
4936 break;
4937 case MPI2_SCSI_STATUS_TASK_SET_FULL:
4938 desc_scsi_status = "task set full";
4939 break;
4940 case MPI2_SCSI_STATUS_ACA_ACTIVE:
4941 desc_scsi_status = "aca active";
4942 break;
4943 case MPI2_SCSI_STATUS_TASK_ABORTED:
4944 desc_scsi_status = "task aborted";
4945 break;
4946 default:
4947 desc_scsi_status = "unknown";
4948 break;
4949 }
4950
4951 desc_scsi_state[0] = '\0';
4952 if (!scsi_state)
4953 desc_scsi_state = " ";
4954 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
4955 strcat(desc_scsi_state, "response info ");
4956 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
4957 strcat(desc_scsi_state, "state terminated ");
4958 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
4959 strcat(desc_scsi_state, "no status ");
4960 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
4961 strcat(desc_scsi_state, "autosense failed ");
4962 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
4963 strcat(desc_scsi_state, "autosense valid ");
4964
4965 scsi_print_command(scmd);
4966
4967 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4968 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
4969 device_str, (u64)priv_target->sas_address);
4970 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4971 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4972 if (pcie_device) {
4973 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
4974 (u64)pcie_device->wwid, pcie_device->port_num);
4975 if (pcie_device->enclosure_handle != 0)
4976 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
4977 (u64)pcie_device->enclosure_logical_id,
4978 pcie_device->slot);
4979 if (pcie_device->connector_name[0])
4980 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
4981 pcie_device->enclosure_level,
4982 pcie_device->connector_name);
4983 pcie_device_put(pcie_device);
4984 }
4985 } else {
4986 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4987 if (sas_device) {
4988 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
4989 (u64)sas_device->sas_address, sas_device->phy);
4990
4991 _scsih_display_enclosure_chassis_info(ioc, sas_device,
4992 NULL, NULL);
4993
4994 sas_device_put(sas_device);
4995 }
4996 }
4997
4998 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
4999 le16_to_cpu(mpi_reply->DevHandle),
5000 desc_ioc_state, ioc_status, smid);
5001 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5002 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5003 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5004 le16_to_cpu(mpi_reply->TaskTag),
5005 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5006 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5007 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5008
5009 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5010 struct sense_info data;
5011 _scsih_normalize_sense(scmd->sense_buffer, &data);
5012 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5013 data.skey, data.asc, data.ascq,
5014 le32_to_cpu(mpi_reply->SenseCount));
5015 }
5016 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5017 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5018 response_bytes = (u8 *)&response_info;
5019 _scsih_response_code(ioc, response_bytes[0]);
5020 }
5021}
5022
5023/**
5024 * _scsih_turn_on_pfa_led - illuminate PFA LED
5025 * @ioc: per adapter object
5026 * @handle: device handle
5027 * Context: process
5028 */
5029static void
5030_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5031{
5032 Mpi2SepReply_t mpi_reply;
5033 Mpi2SepRequest_t mpi_request;
5034 struct _sas_device *sas_device;
5035
5036 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5037 if (!sas_device)
5038 return;
5039
5040 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5041 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5042 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5043 mpi_request.SlotStatus =
5044 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5045 mpi_request.DevHandle = cpu_to_le16(handle);
5046 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5047 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5048 &mpi_request)) != 0) {
5049 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5050 __FILE__, __LINE__, __func__);
5051 goto out;
5052 }
5053 sas_device->pfa_led_on = 1;
5054
5055 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5056 dewtprintk(ioc,
5057 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5058 le16_to_cpu(mpi_reply.IOCStatus),
5059 le32_to_cpu(mpi_reply.IOCLogInfo)));
5060 goto out;
5061 }
5062out:
5063 sas_device_put(sas_device);
5064}
5065
5066/**
5067 * _scsih_turn_off_pfa_led - turn off Fault LED
5068 * @ioc: per adapter object
5069 * @sas_device: sas device whose PFA LED has to turned off
5070 * Context: process
5071 */
5072static void
5073_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5074 struct _sas_device *sas_device)
5075{
5076 Mpi2SepReply_t mpi_reply;
5077 Mpi2SepRequest_t mpi_request;
5078
5079 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5080 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5081 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5082 mpi_request.SlotStatus = 0;
5083 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5084 mpi_request.DevHandle = 0;
5085 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5086 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5087 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5088 &mpi_request)) != 0) {
5089 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5090 __FILE__, __LINE__, __func__);
5091 return;
5092 }
5093
5094 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5095 dewtprintk(ioc,
5096 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5097 le16_to_cpu(mpi_reply.IOCStatus),
5098 le32_to_cpu(mpi_reply.IOCLogInfo)));
5099 return;
5100 }
5101}
5102
5103/**
5104 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5105 * @ioc: per adapter object
5106 * @handle: device handle
5107 * Context: interrupt.
5108 */
5109static void
5110_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5111{
5112 struct fw_event_work *fw_event;
5113
5114 fw_event = alloc_fw_event_work(0);
5115 if (!fw_event)
5116 return;
5117 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5118 fw_event->device_handle = handle;
5119 fw_event->ioc = ioc;
5120 _scsih_fw_event_add(ioc, fw_event);
5121 fw_event_work_put(fw_event);
5122}
5123
5124/**
5125 * _scsih_smart_predicted_fault - process smart errors
5126 * @ioc: per adapter object
5127 * @handle: device handle
5128 * Context: interrupt.
5129 */
5130static void
5131_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5132{
5133 struct scsi_target *starget;
5134 struct MPT3SAS_TARGET *sas_target_priv_data;
5135 Mpi2EventNotificationReply_t *event_reply;
5136 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5137 struct _sas_device *sas_device;
5138 ssize_t sz;
5139 unsigned long flags;
5140
5141 /* only handle non-raid devices */
5142 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5143 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5144 if (!sas_device)
5145 goto out_unlock;
5146
5147 starget = sas_device->starget;
5148 sas_target_priv_data = starget->hostdata;
5149
5150 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5151 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5152 goto out_unlock;
5153
5154 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5155
5156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5157
5158 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5159 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5160
5161 /* insert into event log */
5162 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5163 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5164 event_reply = kzalloc(sz, GFP_KERNEL);
5165 if (!event_reply) {
5166 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5167 __FILE__, __LINE__, __func__);
5168 goto out;
5169 }
5170
5171 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5172 event_reply->Event =
5173 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5174 event_reply->MsgLength = sz/4;
5175 event_reply->EventDataLength =
5176 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5177 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5178 event_reply->EventData;
5179 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5180 event_data->ASC = 0x5D;
5181 event_data->DevHandle = cpu_to_le16(handle);
5182 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5183 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5184 kfree(event_reply);
5185out:
5186 if (sas_device)
5187 sas_device_put(sas_device);
5188 return;
5189
5190out_unlock:
5191 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5192 goto out;
5193}
5194
5195/**
5196 * _scsih_io_done - scsi request callback
5197 * @ioc: per adapter object
5198 * @smid: system request message index
5199 * @msix_index: MSIX table index supplied by the OS
5200 * @reply: reply message frame(lower 32bit addr)
5201 *
5202 * Callback handler when using _scsih_qcmd.
5203 *
5204 * Return: 1 meaning mf should be freed from _base_interrupt
5205 * 0 means the mf is freed from this function.
5206 */
5207static u8
5208_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5209{
5210 Mpi25SCSIIORequest_t *mpi_request;
5211 Mpi2SCSIIOReply_t *mpi_reply;
5212 struct scsi_cmnd *scmd;
5213 struct scsiio_tracker *st;
5214 u16 ioc_status;
5215 u32 xfer_cnt;
5216 u8 scsi_state;
5217 u8 scsi_status;
5218 u32 log_info;
5219 struct MPT3SAS_DEVICE *sas_device_priv_data;
5220 u32 response_code = 0;
5221
5222 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5223
5224 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5225 if (scmd == NULL)
5226 return 1;
5227
5228 _scsih_set_satl_pending(scmd, false);
5229
5230 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5231
5232 if (mpi_reply == NULL) {
5233 scmd->result = DID_OK << 16;
5234 goto out;
5235 }
5236
5237 sas_device_priv_data = scmd->device->hostdata;
5238 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5239 sas_device_priv_data->sas_target->deleted) {
5240 scmd->result = DID_NO_CONNECT << 16;
5241 goto out;
5242 }
5243 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5244
5245 /*
5246 * WARPDRIVE: If direct_io is set then it is directIO,
5247 * the failed direct I/O should be redirected to volume
5248 */
5249 st = scsi_cmd_priv(scmd);
5250 if (st->direct_io &&
5251 ((ioc_status & MPI2_IOCSTATUS_MASK)
5252 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5253 st->direct_io = 0;
5254 st->scmd = scmd;
5255 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5256 mpi_request->DevHandle =
5257 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5258 ioc->put_smid_scsi_io(ioc, smid,
5259 sas_device_priv_data->sas_target->handle);
5260 return 0;
5261 }
5262 /* turning off TLR */
5263 scsi_state = mpi_reply->SCSIState;
5264 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5265 response_code =
5266 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5267 if (!sas_device_priv_data->tlr_snoop_check) {
5268 sas_device_priv_data->tlr_snoop_check++;
5269 if ((!ioc->is_warpdrive &&
5270 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5271 !scsih_is_nvme(&scmd->device->sdev_gendev))
5272 && sas_is_tlr_enabled(scmd->device) &&
5273 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5274 sas_disable_tlr(scmd->device);
5275 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5276 }
5277 }
5278
5279 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5280 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5281 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5282 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5283 else
5284 log_info = 0;
5285 ioc_status &= MPI2_IOCSTATUS_MASK;
5286 scsi_status = mpi_reply->SCSIStatus;
5287
5288 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5289 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5290 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5291 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5292 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5293 }
5294
5295 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5296 struct sense_info data;
5297 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5298 smid);
5299 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5300 le32_to_cpu(mpi_reply->SenseCount));
5301 memcpy(scmd->sense_buffer, sense_data, sz);
5302 _scsih_normalize_sense(scmd->sense_buffer, &data);
5303 /* failure prediction threshold exceeded */
5304 if (data.asc == 0x5D)
5305 _scsih_smart_predicted_fault(ioc,
5306 le16_to_cpu(mpi_reply->DevHandle));
5307 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5308
5309 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5310 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5311 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5312 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5313 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5314 }
5315 switch (ioc_status) {
5316 case MPI2_IOCSTATUS_BUSY:
5317 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5318 scmd->result = SAM_STAT_BUSY;
5319 break;
5320
5321 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5322 scmd->result = DID_NO_CONNECT << 16;
5323 break;
5324
5325 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5326 if (sas_device_priv_data->block) {
5327 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5328 goto out;
5329 }
5330 if (log_info == 0x31110630) {
5331 if (scmd->retries > 2) {
5332 scmd->result = DID_NO_CONNECT << 16;
5333 scsi_device_set_state(scmd->device,
5334 SDEV_OFFLINE);
5335 } else {
5336 scmd->result = DID_SOFT_ERROR << 16;
5337 scmd->device->expecting_cc_ua = 1;
5338 }
5339 break;
5340 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5341 scmd->result = DID_RESET << 16;
5342 break;
5343 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5344 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5345 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5346 scmd->result = DID_RESET << 16;
5347 break;
5348 }
5349 scmd->result = DID_SOFT_ERROR << 16;
5350 break;
5351 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5352 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5353 scmd->result = DID_RESET << 16;
5354 break;
5355
5356 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5357 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5358 scmd->result = DID_SOFT_ERROR << 16;
5359 else
5360 scmd->result = (DID_OK << 16) | scsi_status;
5361 break;
5362
5363 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5364 scmd->result = (DID_OK << 16) | scsi_status;
5365
5366 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5367 break;
5368
5369 if (xfer_cnt < scmd->underflow) {
5370 if (scsi_status == SAM_STAT_BUSY)
5371 scmd->result = SAM_STAT_BUSY;
5372 else
5373 scmd->result = DID_SOFT_ERROR << 16;
5374 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5375 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5376 scmd->result = DID_SOFT_ERROR << 16;
5377 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5378 scmd->result = DID_RESET << 16;
5379 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5380 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5381 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5382 scmd->result = (DRIVER_SENSE << 24) |
5383 SAM_STAT_CHECK_CONDITION;
5384 scmd->sense_buffer[0] = 0x70;
5385 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5386 scmd->sense_buffer[12] = 0x20;
5387 scmd->sense_buffer[13] = 0;
5388 }
5389 break;
5390
5391 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5392 scsi_set_resid(scmd, 0);
5393 /* fall through */
5394 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5395 case MPI2_IOCSTATUS_SUCCESS:
5396 scmd->result = (DID_OK << 16) | scsi_status;
5397 if (response_code ==
5398 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5399 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5400 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5401 scmd->result = DID_SOFT_ERROR << 16;
5402 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5403 scmd->result = DID_RESET << 16;
5404 break;
5405
5406 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5407 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5408 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5409 _scsih_eedp_error_handling(scmd, ioc_status);
5410 break;
5411
5412 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5413 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5414 case MPI2_IOCSTATUS_INVALID_SGL:
5415 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5416 case MPI2_IOCSTATUS_INVALID_FIELD:
5417 case MPI2_IOCSTATUS_INVALID_STATE:
5418 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5419 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5420 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5421 default:
5422 scmd->result = DID_SOFT_ERROR << 16;
5423 break;
5424
5425 }
5426
5427 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5428 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5429
5430 out:
5431
5432 scsi_dma_unmap(scmd);
5433 mpt3sas_base_free_smid(ioc, smid);
5434 scmd->scsi_done(scmd);
5435 return 0;
5436}
5437
5438/**
5439 * _scsih_sas_host_refresh - refreshing sas host object contents
5440 * @ioc: per adapter object
5441 * Context: user
5442 *
5443 * During port enable, fw will send topology events for every device. Its
5444 * possible that the handles may change from the previous setting, so this
5445 * code keeping handles updating if changed.
5446 */
5447static void
5448_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5449{
5450 u16 sz;
5451 u16 ioc_status;
5452 int i;
5453 Mpi2ConfigReply_t mpi_reply;
5454 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5455 u16 attached_handle;
5456 u8 link_rate;
5457
5458 dtmprintk(ioc,
5459 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5460 (u64)ioc->sas_hba.sas_address));
5461
5462 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5463 * sizeof(Mpi2SasIOUnit0PhyData_t));
5464 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5465 if (!sas_iounit_pg0) {
5466 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5467 __FILE__, __LINE__, __func__);
5468 return;
5469 }
5470
5471 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5472 sas_iounit_pg0, sz)) != 0)
5473 goto out;
5474 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5475 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5476 goto out;
5477 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5478 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5479 if (i == 0)
5480 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5481 PhyData[0].ControllerDevHandle);
5482 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5483 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5484 AttachedDevHandle);
5485 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5486 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5487 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5488 attached_handle, i, link_rate);
5489 }
5490 out:
5491 kfree(sas_iounit_pg0);
5492}
5493
5494/**
5495 * _scsih_sas_host_add - create sas host object
5496 * @ioc: per adapter object
5497 *
5498 * Creating host side data object, stored in ioc->sas_hba
5499 */
5500static void
5501_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5502{
5503 int i;
5504 Mpi2ConfigReply_t mpi_reply;
5505 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5506 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5507 Mpi2SasPhyPage0_t phy_pg0;
5508 Mpi2SasDevicePage0_t sas_device_pg0;
5509 Mpi2SasEnclosurePage0_t enclosure_pg0;
5510 u16 ioc_status;
5511 u16 sz;
5512 u8 device_missing_delay;
5513 u8 num_phys;
5514
5515 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5516 if (!num_phys) {
5517 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5518 __FILE__, __LINE__, __func__);
5519 return;
5520 }
5521 ioc->sas_hba.phy = kcalloc(num_phys,
5522 sizeof(struct _sas_phy), GFP_KERNEL);
5523 if (!ioc->sas_hba.phy) {
5524 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5525 __FILE__, __LINE__, __func__);
5526 goto out;
5527 }
5528 ioc->sas_hba.num_phys = num_phys;
5529
5530 /* sas_iounit page 0 */
5531 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5532 sizeof(Mpi2SasIOUnit0PhyData_t));
5533 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5534 if (!sas_iounit_pg0) {
5535 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5536 __FILE__, __LINE__, __func__);
5537 return;
5538 }
5539 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5540 sas_iounit_pg0, sz))) {
5541 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5542 __FILE__, __LINE__, __func__);
5543 goto out;
5544 }
5545 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5546 MPI2_IOCSTATUS_MASK;
5547 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5548 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5549 __FILE__, __LINE__, __func__);
5550 goto out;
5551 }
5552
5553 /* sas_iounit page 1 */
5554 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5555 sizeof(Mpi2SasIOUnit1PhyData_t));
5556 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5557 if (!sas_iounit_pg1) {
5558 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5559 __FILE__, __LINE__, __func__);
5560 goto out;
5561 }
5562 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5563 sas_iounit_pg1, sz))) {
5564 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5565 __FILE__, __LINE__, __func__);
5566 goto out;
5567 }
5568 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5569 MPI2_IOCSTATUS_MASK;
5570 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5571 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5572 __FILE__, __LINE__, __func__);
5573 goto out;
5574 }
5575
5576 ioc->io_missing_delay =
5577 sas_iounit_pg1->IODeviceMissingDelay;
5578 device_missing_delay =
5579 sas_iounit_pg1->ReportDeviceMissingDelay;
5580 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5581 ioc->device_missing_delay = (device_missing_delay &
5582 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5583 else
5584 ioc->device_missing_delay = device_missing_delay &
5585 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5586
5587 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5588 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5589 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5590 i))) {
5591 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5592 __FILE__, __LINE__, __func__);
5593 goto out;
5594 }
5595 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5596 MPI2_IOCSTATUS_MASK;
5597 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5598 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5599 __FILE__, __LINE__, __func__);
5600 goto out;
5601 }
5602
5603 if (i == 0)
5604 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5605 PhyData[0].ControllerDevHandle);
5606 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5607 ioc->sas_hba.phy[i].phy_id = i;
5608 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5609 phy_pg0, ioc->sas_hba.parent_dev);
5610 }
5611 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5612 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5613 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5614 __FILE__, __LINE__, __func__);
5615 goto out;
5616 }
5617 ioc->sas_hba.enclosure_handle =
5618 le16_to_cpu(sas_device_pg0.EnclosureHandle);
5619 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5620 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5621 ioc->sas_hba.handle,
5622 (u64)ioc->sas_hba.sas_address,
5623 ioc->sas_hba.num_phys);
5624
5625 if (ioc->sas_hba.enclosure_handle) {
5626 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5627 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5628 ioc->sas_hba.enclosure_handle)))
5629 ioc->sas_hba.enclosure_logical_id =
5630 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5631 }
5632
5633 out:
5634 kfree(sas_iounit_pg1);
5635 kfree(sas_iounit_pg0);
5636}
5637
5638/**
5639 * _scsih_expander_add - creating expander object
5640 * @ioc: per adapter object
5641 * @handle: expander handle
5642 *
5643 * Creating expander object, stored in ioc->sas_expander_list.
5644 *
5645 * Return: 0 for success, else error.
5646 */
5647static int
5648_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5649{
5650 struct _sas_node *sas_expander;
5651 struct _enclosure_node *enclosure_dev;
5652 Mpi2ConfigReply_t mpi_reply;
5653 Mpi2ExpanderPage0_t expander_pg0;
5654 Mpi2ExpanderPage1_t expander_pg1;
5655 u32 ioc_status;
5656 u16 parent_handle;
5657 u64 sas_address, sas_address_parent = 0;
5658 int i;
5659 unsigned long flags;
5660 struct _sas_port *mpt3sas_port = NULL;
5661
5662 int rc = 0;
5663
5664 if (!handle)
5665 return -1;
5666
5667 if (ioc->shost_recovery || ioc->pci_error_recovery)
5668 return -1;
5669
5670 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5671 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5672 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5673 __FILE__, __LINE__, __func__);
5674 return -1;
5675 }
5676
5677 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5678 MPI2_IOCSTATUS_MASK;
5679 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5680 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5681 __FILE__, __LINE__, __func__);
5682 return -1;
5683 }
5684
5685 /* handle out of order topology events */
5686 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5687 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5688 != 0) {
5689 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5690 __FILE__, __LINE__, __func__);
5691 return -1;
5692 }
5693 if (sas_address_parent != ioc->sas_hba.sas_address) {
5694 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5695 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5696 sas_address_parent);
5697 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5698 if (!sas_expander) {
5699 rc = _scsih_expander_add(ioc, parent_handle);
5700 if (rc != 0)
5701 return rc;
5702 }
5703 }
5704
5705 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5706 sas_address = le64_to_cpu(expander_pg0.SASAddress);
5707 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5708 sas_address);
5709 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5710
5711 if (sas_expander)
5712 return 0;
5713
5714 sas_expander = kzalloc(sizeof(struct _sas_node),
5715 GFP_KERNEL);
5716 if (!sas_expander) {
5717 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5718 __FILE__, __LINE__, __func__);
5719 return -1;
5720 }
5721
5722 sas_expander->handle = handle;
5723 sas_expander->num_phys = expander_pg0.NumPhys;
5724 sas_expander->sas_address_parent = sas_address_parent;
5725 sas_expander->sas_address = sas_address;
5726
5727 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5728 handle, parent_handle,
5729 (u64)sas_expander->sas_address, sas_expander->num_phys);
5730
5731 if (!sas_expander->num_phys) {
5732 rc = -1;
5733 goto out_fail;
5734 }
5735 sas_expander->phy = kcalloc(sas_expander->num_phys,
5736 sizeof(struct _sas_phy), GFP_KERNEL);
5737 if (!sas_expander->phy) {
5738 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5739 __FILE__, __LINE__, __func__);
5740 rc = -1;
5741 goto out_fail;
5742 }
5743
5744 INIT_LIST_HEAD(&sas_expander->sas_port_list);
5745 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5746 sas_address_parent);
5747 if (!mpt3sas_port) {
5748 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5749 __FILE__, __LINE__, __func__);
5750 rc = -1;
5751 goto out_fail;
5752 }
5753 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5754
5755 for (i = 0 ; i < sas_expander->num_phys ; i++) {
5756 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5757 &expander_pg1, i, handle))) {
5758 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5759 __FILE__, __LINE__, __func__);
5760 rc = -1;
5761 goto out_fail;
5762 }
5763 sas_expander->phy[i].handle = handle;
5764 sas_expander->phy[i].phy_id = i;
5765
5766 if ((mpt3sas_transport_add_expander_phy(ioc,
5767 &sas_expander->phy[i], expander_pg1,
5768 sas_expander->parent_dev))) {
5769 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5770 __FILE__, __LINE__, __func__);
5771 rc = -1;
5772 goto out_fail;
5773 }
5774 }
5775
5776 if (sas_expander->enclosure_handle) {
5777 enclosure_dev =
5778 mpt3sas_scsih_enclosure_find_by_handle(ioc,
5779 sas_expander->enclosure_handle);
5780 if (enclosure_dev)
5781 sas_expander->enclosure_logical_id =
5782 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5783 }
5784
5785 _scsih_expander_node_add(ioc, sas_expander);
5786 return 0;
5787
5788 out_fail:
5789
5790 if (mpt3sas_port)
5791 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5792 sas_address_parent);
5793 kfree(sas_expander);
5794 return rc;
5795}
5796
5797/**
5798 * mpt3sas_expander_remove - removing expander object
5799 * @ioc: per adapter object
5800 * @sas_address: expander sas_address
5801 */
5802void
5803mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5804{
5805 struct _sas_node *sas_expander;
5806 unsigned long flags;
5807
5808 if (ioc->shost_recovery)
5809 return;
5810
5811 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5812 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5813 sas_address);
5814 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5815 if (sas_expander)
5816 _scsih_expander_node_remove(ioc, sas_expander);
5817}
5818
5819/**
5820 * _scsih_done - internal SCSI_IO callback handler.
5821 * @ioc: per adapter object
5822 * @smid: system request message index
5823 * @msix_index: MSIX table index supplied by the OS
5824 * @reply: reply message frame(lower 32bit addr)
5825 *
5826 * Callback handler when sending internal generated SCSI_IO.
5827 * The callback index passed is `ioc->scsih_cb_idx`
5828 *
5829 * Return: 1 meaning mf should be freed from _base_interrupt
5830 * 0 means the mf is freed from this function.
5831 */
5832static u8
5833_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5834{
5835 MPI2DefaultReply_t *mpi_reply;
5836
5837 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5838 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5839 return 1;
5840 if (ioc->scsih_cmds.smid != smid)
5841 return 1;
5842 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5843 if (mpi_reply) {
5844 memcpy(ioc->scsih_cmds.reply, mpi_reply,
5845 mpi_reply->MsgLength*4);
5846 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5847 }
5848 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5849 complete(&ioc->scsih_cmds.done);
5850 return 1;
5851}
5852
5853
5854
5855
5856#define MPT3_MAX_LUNS (255)
5857
5858
5859/**
5860 * _scsih_check_access_status - check access flags
5861 * @ioc: per adapter object
5862 * @sas_address: sas address
5863 * @handle: sas device handle
5864 * @access_status: errors returned during discovery of the device
5865 *
5866 * Return: 0 for success, else failure
5867 */
5868static u8
5869_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5870 u16 handle, u8 access_status)
5871{
5872 u8 rc = 1;
5873 char *desc = NULL;
5874
5875 switch (access_status) {
5876 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5877 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5878 rc = 0;
5879 break;
5880 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5881 desc = "sata capability failed";
5882 break;
5883 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5884 desc = "sata affiliation conflict";
5885 break;
5886 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5887 desc = "route not addressable";
5888 break;
5889 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5890 desc = "smp error not addressable";
5891 break;
5892 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5893 desc = "device blocked";
5894 break;
5895 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5896 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5897 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5898 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5899 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5900 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5901 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5902 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5903 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5904 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5905 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5906 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5907 desc = "sata initialization failed";
5908 break;
5909 default:
5910 desc = "unknown";
5911 break;
5912 }
5913
5914 if (!rc)
5915 return 0;
5916
5917 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5918 desc, (u64)sas_address, handle);
5919 return rc;
5920}
5921
5922/**
5923 * _scsih_check_device - checking device responsiveness
5924 * @ioc: per adapter object
5925 * @parent_sas_address: sas address of parent expander or sas host
5926 * @handle: attached device handle
5927 * @phy_number: phy number
5928 * @link_rate: new link rate
5929 */
5930static void
5931_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5932 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
5933{
5934 Mpi2ConfigReply_t mpi_reply;
5935 Mpi2SasDevicePage0_t sas_device_pg0;
5936 struct _sas_device *sas_device;
5937 struct _enclosure_node *enclosure_dev = NULL;
5938 u32 ioc_status;
5939 unsigned long flags;
5940 u64 sas_address;
5941 struct scsi_target *starget;
5942 struct MPT3SAS_TARGET *sas_target_priv_data;
5943 u32 device_info;
5944
5945 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5946 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5947 return;
5948
5949 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5950 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5951 return;
5952
5953 /* wide port handling ~ we need only handle device once for the phy that
5954 * is matched in sas device page zero
5955 */
5956 if (phy_number != sas_device_pg0.PhyNum)
5957 return;
5958
5959 /* check if this is end device */
5960 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5961 if (!(_scsih_is_end_device(device_info)))
5962 return;
5963
5964 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5965 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5966 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
5967 sas_address);
5968
5969 if (!sas_device)
5970 goto out_unlock;
5971
5972 if (unlikely(sas_device->handle != handle)) {
5973 starget = sas_device->starget;
5974 sas_target_priv_data = starget->hostdata;
5975 starget_printk(KERN_INFO, starget,
5976 "handle changed from(0x%04x) to (0x%04x)!!!\n",
5977 sas_device->handle, handle);
5978 sas_target_priv_data->handle = handle;
5979 sas_device->handle = handle;
5980 if (le16_to_cpu(sas_device_pg0.Flags) &
5981 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5982 sas_device->enclosure_level =
5983 sas_device_pg0.EnclosureLevel;
5984 memcpy(sas_device->connector_name,
5985 sas_device_pg0.ConnectorName, 4);
5986 sas_device->connector_name[4] = '\0';
5987 } else {
5988 sas_device->enclosure_level = 0;
5989 sas_device->connector_name[0] = '\0';
5990 }
5991
5992 sas_device->enclosure_handle =
5993 le16_to_cpu(sas_device_pg0.EnclosureHandle);
5994 sas_device->is_chassis_slot_valid = 0;
5995 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
5996 sas_device->enclosure_handle);
5997 if (enclosure_dev) {
5998 sas_device->enclosure_logical_id =
5999 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6000 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6001 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6002 sas_device->is_chassis_slot_valid = 1;
6003 sas_device->chassis_slot =
6004 enclosure_dev->pg0.ChassisSlot;
6005 }
6006 }
6007 }
6008
6009 /* check if device is present */
6010 if (!(le16_to_cpu(sas_device_pg0.Flags) &
6011 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6012 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
6013 handle);
6014 goto out_unlock;
6015 }
6016
6017 /* check if there were any issues with discovery */
6018 if (_scsih_check_access_status(ioc, sas_address, handle,
6019 sas_device_pg0.AccessStatus))
6020 goto out_unlock;
6021
6022 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6023 _scsih_ublock_io_device(ioc, sas_address);
6024
6025 if (sas_device)
6026 sas_device_put(sas_device);
6027 return;
6028
6029out_unlock:
6030 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6031 if (sas_device)
6032 sas_device_put(sas_device);
6033}
6034
6035/**
6036 * _scsih_add_device - creating sas device object
6037 * @ioc: per adapter object
6038 * @handle: sas device handle
6039 * @phy_num: phy number end device attached to
6040 * @is_pd: is this hidden raid component
6041 *
6042 * Creating end device object, stored in ioc->sas_device_list.
6043 *
6044 * Return: 0 for success, non-zero for failure.
6045 */
6046static int
6047_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6048 u8 is_pd)
6049{
6050 Mpi2ConfigReply_t mpi_reply;
6051 Mpi2SasDevicePage0_t sas_device_pg0;
6052 struct _sas_device *sas_device;
6053 struct _enclosure_node *enclosure_dev = NULL;
6054 u32 ioc_status;
6055 u64 sas_address;
6056 u32 device_info;
6057
6058 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6059 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6060 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6061 __FILE__, __LINE__, __func__);
6062 return -1;
6063 }
6064
6065 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6066 MPI2_IOCSTATUS_MASK;
6067 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6068 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6069 __FILE__, __LINE__, __func__);
6070 return -1;
6071 }
6072
6073 /* check if this is end device */
6074 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6075 if (!(_scsih_is_end_device(device_info)))
6076 return -1;
6077 set_bit(handle, ioc->pend_os_device_add);
6078 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6079
6080 /* check if device is present */
6081 if (!(le16_to_cpu(sas_device_pg0.Flags) &
6082 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6083 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
6084 handle);
6085 return -1;
6086 }
6087
6088 /* check if there were any issues with discovery */
6089 if (_scsih_check_access_status(ioc, sas_address, handle,
6090 sas_device_pg0.AccessStatus))
6091 return -1;
6092
6093 sas_device = mpt3sas_get_sdev_by_addr(ioc,
6094 sas_address);
6095 if (sas_device) {
6096 clear_bit(handle, ioc->pend_os_device_add);
6097 sas_device_put(sas_device);
6098 return -1;
6099 }
6100
6101 if (sas_device_pg0.EnclosureHandle) {
6102 enclosure_dev =
6103 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6104 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6105 if (enclosure_dev == NULL)
6106 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6107 sas_device_pg0.EnclosureHandle);
6108 }
6109
6110 sas_device = kzalloc(sizeof(struct _sas_device),
6111 GFP_KERNEL);
6112 if (!sas_device) {
6113 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6114 __FILE__, __LINE__, __func__);
6115 return 0;
6116 }
6117
6118 kref_init(&sas_device->refcount);
6119 sas_device->handle = handle;
6120 if (_scsih_get_sas_address(ioc,
6121 le16_to_cpu(sas_device_pg0.ParentDevHandle),
6122 &sas_device->sas_address_parent) != 0)
6123 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6124 __FILE__, __LINE__, __func__);
6125 sas_device->enclosure_handle =
6126 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6127 if (sas_device->enclosure_handle != 0)
6128 sas_device->slot =
6129 le16_to_cpu(sas_device_pg0.Slot);
6130 sas_device->device_info = device_info;
6131 sas_device->sas_address = sas_address;
6132 sas_device->phy = sas_device_pg0.PhyNum;
6133 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6134 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6135
6136 if (le16_to_cpu(sas_device_pg0.Flags)
6137 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6138 sas_device->enclosure_level =
6139 sas_device_pg0.EnclosureLevel;
6140 memcpy(sas_device->connector_name,
6141 sas_device_pg0.ConnectorName, 4);
6142 sas_device->connector_name[4] = '\0';
6143 } else {
6144 sas_device->enclosure_level = 0;
6145 sas_device->connector_name[0] = '\0';
6146 }
6147 /* get enclosure_logical_id & chassis_slot*/
6148 sas_device->is_chassis_slot_valid = 0;
6149 if (enclosure_dev) {
6150 sas_device->enclosure_logical_id =
6151 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6152 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6153 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6154 sas_device->is_chassis_slot_valid = 1;
6155 sas_device->chassis_slot =
6156 enclosure_dev->pg0.ChassisSlot;
6157 }
6158 }
6159
6160 /* get device name */
6161 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6162
6163 if (ioc->wait_for_discovery_to_complete)
6164 _scsih_sas_device_init_add(ioc, sas_device);
6165 else
6166 _scsih_sas_device_add(ioc, sas_device);
6167
6168 sas_device_put(sas_device);
6169 return 0;
6170}
6171
6172/**
6173 * _scsih_remove_device - removing sas device object
6174 * @ioc: per adapter object
6175 * @sas_device: the sas_device object
6176 */
6177static void
6178_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6179 struct _sas_device *sas_device)
6180{
6181 struct MPT3SAS_TARGET *sas_target_priv_data;
6182
6183 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6184 (sas_device->pfa_led_on)) {
6185 _scsih_turn_off_pfa_led(ioc, sas_device);
6186 sas_device->pfa_led_on = 0;
6187 }
6188
6189 dewtprintk(ioc,
6190 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6191 __func__,
6192 sas_device->handle, (u64)sas_device->sas_address));
6193
6194 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6195 NULL, NULL));
6196
6197 if (sas_device->starget && sas_device->starget->hostdata) {
6198 sas_target_priv_data = sas_device->starget->hostdata;
6199 sas_target_priv_data->deleted = 1;
6200 _scsih_ublock_io_device(ioc, sas_device->sas_address);
6201 sas_target_priv_data->handle =
6202 MPT3SAS_INVALID_DEVICE_HANDLE;
6203 }
6204
6205 if (!ioc->hide_drives)
6206 mpt3sas_transport_port_remove(ioc,
6207 sas_device->sas_address,
6208 sas_device->sas_address_parent);
6209
6210 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6211 sas_device->handle, (u64)sas_device->sas_address);
6212
6213 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6214
6215 dewtprintk(ioc,
6216 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6217 __func__,
6218 sas_device->handle, (u64)sas_device->sas_address));
6219 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6220 NULL, NULL));
6221}
6222
6223/**
6224 * _scsih_sas_topology_change_event_debug - debug for topology event
6225 * @ioc: per adapter object
6226 * @event_data: event data payload
6227 * Context: user.
6228 */
6229static void
6230_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6231 Mpi2EventDataSasTopologyChangeList_t *event_data)
6232{
6233 int i;
6234 u16 handle;
6235 u16 reason_code;
6236 u8 phy_number;
6237 char *status_str = NULL;
6238 u8 link_rate, prev_link_rate;
6239
6240 switch (event_data->ExpStatus) {
6241 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6242 status_str = "add";
6243 break;
6244 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6245 status_str = "remove";
6246 break;
6247 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6248 case 0:
6249 status_str = "responding";
6250 break;
6251 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6252 status_str = "remove delay";
6253 break;
6254 default:
6255 status_str = "unknown status";
6256 break;
6257 }
6258 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6259 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6260 "start_phy(%02d), count(%d)\n",
6261 le16_to_cpu(event_data->ExpanderDevHandle),
6262 le16_to_cpu(event_data->EnclosureHandle),
6263 event_data->StartPhyNum, event_data->NumEntries);
6264 for (i = 0; i < event_data->NumEntries; i++) {
6265 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6266 if (!handle)
6267 continue;
6268 phy_number = event_data->StartPhyNum + i;
6269 reason_code = event_data->PHY[i].PhyStatus &
6270 MPI2_EVENT_SAS_TOPO_RC_MASK;
6271 switch (reason_code) {
6272 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6273 status_str = "target add";
6274 break;
6275 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6276 status_str = "target remove";
6277 break;
6278 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6279 status_str = "delay target remove";
6280 break;
6281 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6282 status_str = "link rate change";
6283 break;
6284 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6285 status_str = "target responding";
6286 break;
6287 default:
6288 status_str = "unknown";
6289 break;
6290 }
6291 link_rate = event_data->PHY[i].LinkRate >> 4;
6292 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6293 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6294 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6295 handle, status_str, link_rate, prev_link_rate);
6296
6297 }
6298}
6299
6300/**
6301 * _scsih_sas_topology_change_event - handle topology changes
6302 * @ioc: per adapter object
6303 * @fw_event: The fw_event_work object
6304 * Context: user.
6305 *
6306 */
6307static int
6308_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6309 struct fw_event_work *fw_event)
6310{
6311 int i;
6312 u16 parent_handle, handle;
6313 u16 reason_code;
6314 u8 phy_number, max_phys;
6315 struct _sas_node *sas_expander;
6316 u64 sas_address;
6317 unsigned long flags;
6318 u8 link_rate, prev_link_rate;
6319 Mpi2EventDataSasTopologyChangeList_t *event_data =
6320 (Mpi2EventDataSasTopologyChangeList_t *)
6321 fw_event->event_data;
6322
6323 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6324 _scsih_sas_topology_change_event_debug(ioc, event_data);
6325
6326 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6327 return 0;
6328
6329 if (!ioc->sas_hba.num_phys)
6330 _scsih_sas_host_add(ioc);
6331 else
6332 _scsih_sas_host_refresh(ioc);
6333
6334 if (fw_event->ignore) {
6335 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6336 return 0;
6337 }
6338
6339 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6340
6341 /* handle expander add */
6342 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6343 if (_scsih_expander_add(ioc, parent_handle) != 0)
6344 return 0;
6345
6346 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6347 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6348 parent_handle);
6349 if (sas_expander) {
6350 sas_address = sas_expander->sas_address;
6351 max_phys = sas_expander->num_phys;
6352 } else if (parent_handle < ioc->sas_hba.num_phys) {
6353 sas_address = ioc->sas_hba.sas_address;
6354 max_phys = ioc->sas_hba.num_phys;
6355 } else {
6356 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6357 return 0;
6358 }
6359 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6360
6361 /* handle siblings events */
6362 for (i = 0; i < event_data->NumEntries; i++) {
6363 if (fw_event->ignore) {
6364 dewtprintk(ioc,
6365 ioc_info(ioc, "ignoring expander event\n"));
6366 return 0;
6367 }
6368 if (ioc->remove_host || ioc->pci_error_recovery)
6369 return 0;
6370 phy_number = event_data->StartPhyNum + i;
6371 if (phy_number >= max_phys)
6372 continue;
6373 reason_code = event_data->PHY[i].PhyStatus &
6374 MPI2_EVENT_SAS_TOPO_RC_MASK;
6375 if ((event_data->PHY[i].PhyStatus &
6376 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6377 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6378 continue;
6379 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6380 if (!handle)
6381 continue;
6382 link_rate = event_data->PHY[i].LinkRate >> 4;
6383 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6384 switch (reason_code) {
6385 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6386
6387 if (ioc->shost_recovery)
6388 break;
6389
6390 if (link_rate == prev_link_rate)
6391 break;
6392
6393 mpt3sas_transport_update_links(ioc, sas_address,
6394 handle, phy_number, link_rate);
6395
6396 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6397 break;
6398
6399 _scsih_check_device(ioc, sas_address, handle,
6400 phy_number, link_rate);
6401
6402 if (!test_bit(handle, ioc->pend_os_device_add))
6403 break;
6404
6405 /* fall through */
6406
6407 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6408
6409 if (ioc->shost_recovery)
6410 break;
6411
6412 mpt3sas_transport_update_links(ioc, sas_address,
6413 handle, phy_number, link_rate);
6414
6415 _scsih_add_device(ioc, handle, phy_number, 0);
6416
6417 break;
6418 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6419
6420 _scsih_device_remove_by_handle(ioc, handle);
6421 break;
6422 }
6423 }
6424
6425 /* handle expander removal */
6426 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6427 sas_expander)
6428 mpt3sas_expander_remove(ioc, sas_address);
6429
6430 return 0;
6431}
6432
6433/**
6434 * _scsih_sas_device_status_change_event_debug - debug for device event
6435 * @ioc: ?
6436 * @event_data: event data payload
6437 * Context: user.
6438 */
6439static void
6440_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6441 Mpi2EventDataSasDeviceStatusChange_t *event_data)
6442{
6443 char *reason_str = NULL;
6444
6445 switch (event_data->ReasonCode) {
6446 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6447 reason_str = "smart data";
6448 break;
6449 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6450 reason_str = "unsupported device discovered";
6451 break;
6452 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6453 reason_str = "internal device reset";
6454 break;
6455 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6456 reason_str = "internal task abort";
6457 break;
6458 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6459 reason_str = "internal task abort set";
6460 break;
6461 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6462 reason_str = "internal clear task set";
6463 break;
6464 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6465 reason_str = "internal query task";
6466 break;
6467 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6468 reason_str = "sata init failure";
6469 break;
6470 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6471 reason_str = "internal device reset complete";
6472 break;
6473 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6474 reason_str = "internal task abort complete";
6475 break;
6476 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6477 reason_str = "internal async notification";
6478 break;
6479 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6480 reason_str = "expander reduced functionality";
6481 break;
6482 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6483 reason_str = "expander reduced functionality complete";
6484 break;
6485 default:
6486 reason_str = "unknown reason";
6487 break;
6488 }
6489 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6490 reason_str, le16_to_cpu(event_data->DevHandle),
6491 (u64)le64_to_cpu(event_data->SASAddress),
6492 le16_to_cpu(event_data->TaskTag));
6493 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6494 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
6495 event_data->ASC, event_data->ASCQ);
6496 pr_cont("\n");
6497}
6498
6499/**
6500 * _scsih_sas_device_status_change_event - handle device status change
6501 * @ioc: per adapter object
6502 * @event_data: The fw event
6503 * Context: user.
6504 */
6505static void
6506_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6507 Mpi2EventDataSasDeviceStatusChange_t *event_data)
6508{
6509 struct MPT3SAS_TARGET *target_priv_data;
6510 struct _sas_device *sas_device;
6511 u64 sas_address;
6512 unsigned long flags;
6513
6514 /* In MPI Revision K (0xC), the internal device reset complete was
6515 * implemented, so avoid setting tm_busy flag for older firmware.
6516 */
6517 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6518 return;
6519
6520 if (event_data->ReasonCode !=
6521 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6522 event_data->ReasonCode !=
6523 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6524 return;
6525
6526 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6527 sas_address = le64_to_cpu(event_data->SASAddress);
6528 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6529 sas_address);
6530
6531 if (!sas_device || !sas_device->starget)
6532 goto out;
6533
6534 target_priv_data = sas_device->starget->hostdata;
6535 if (!target_priv_data)
6536 goto out;
6537
6538 if (event_data->ReasonCode ==
6539 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6540 target_priv_data->tm_busy = 1;
6541 else
6542 target_priv_data->tm_busy = 0;
6543
6544 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6545 ioc_info(ioc,
6546 "%s tm_busy flag for handle(0x%04x)\n",
6547 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
6548 target_priv_data->handle);
6549
6550out:
6551 if (sas_device)
6552 sas_device_put(sas_device);
6553
6554 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6555}
6556
6557
6558/**
6559 * _scsih_check_pcie_access_status - check access flags
6560 * @ioc: per adapter object
6561 * @wwid: wwid
6562 * @handle: sas device handle
6563 * @access_status: errors returned during discovery of the device
6564 *
6565 * Return: 0 for success, else failure
6566 */
6567static u8
6568_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6569 u16 handle, u8 access_status)
6570{
6571 u8 rc = 1;
6572 char *desc = NULL;
6573
6574 switch (access_status) {
6575 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6576 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6577 rc = 0;
6578 break;
6579 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6580 desc = "PCIe device capability failed";
6581 break;
6582 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6583 desc = "PCIe device blocked";
6584 ioc_info(ioc,
6585 "Device with Access Status (%s): wwid(0x%016llx), "
6586 "handle(0x%04x)\n ll only be added to the internal list",
6587 desc, (u64)wwid, handle);
6588 rc = 0;
6589 break;
6590 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6591 desc = "PCIe device mem space access failed";
6592 break;
6593 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6594 desc = "PCIe device unsupported";
6595 break;
6596 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6597 desc = "PCIe device MSIx Required";
6598 break;
6599 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6600 desc = "PCIe device init fail max";
6601 break;
6602 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6603 desc = "PCIe device status unknown";
6604 break;
6605 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6606 desc = "nvme ready timeout";
6607 break;
6608 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6609 desc = "nvme device configuration unsupported";
6610 break;
6611 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6612 desc = "nvme identify failed";
6613 break;
6614 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6615 desc = "nvme qconfig failed";
6616 break;
6617 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6618 desc = "nvme qcreation failed";
6619 break;
6620 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6621 desc = "nvme eventcfg failed";
6622 break;
6623 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6624 desc = "nvme get feature stat failed";
6625 break;
6626 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6627 desc = "nvme idle timeout";
6628 break;
6629 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6630 desc = "nvme failure status";
6631 break;
6632 default:
6633 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6634 access_status, (u64)wwid, handle);
6635 return rc;
6636 }
6637
6638 if (!rc)
6639 return rc;
6640
6641 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6642 desc, (u64)wwid, handle);
6643 return rc;
6644}
6645
6646/**
6647 * _scsih_pcie_device_remove_from_sml - removing pcie device
6648 * from SML and free up associated memory
6649 * @ioc: per adapter object
6650 * @pcie_device: the pcie_device object
6651 */
6652static void
6653_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6654 struct _pcie_device *pcie_device)
6655{
6656 struct MPT3SAS_TARGET *sas_target_priv_data;
6657
6658 dewtprintk(ioc,
6659 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6660 __func__,
6661 pcie_device->handle, (u64)pcie_device->wwid));
6662 if (pcie_device->enclosure_handle != 0)
6663 dewtprintk(ioc,
6664 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6665 __func__,
6666 (u64)pcie_device->enclosure_logical_id,
6667 pcie_device->slot));
6668 if (pcie_device->connector_name[0] != '\0')
6669 dewtprintk(ioc,
6670 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6671 __func__,
6672 pcie_device->enclosure_level,
6673 pcie_device->connector_name));
6674
6675 if (pcie_device->starget && pcie_device->starget->hostdata) {
6676 sas_target_priv_data = pcie_device->starget->hostdata;
6677 sas_target_priv_data->deleted = 1;
6678 _scsih_ublock_io_device(ioc, pcie_device->wwid);
6679 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6680 }
6681
6682 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6683 pcie_device->handle, (u64)pcie_device->wwid);
6684 if (pcie_device->enclosure_handle != 0)
6685 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6686 (u64)pcie_device->enclosure_logical_id,
6687 pcie_device->slot);
6688 if (pcie_device->connector_name[0] != '\0')
6689 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
6690 pcie_device->enclosure_level,
6691 pcie_device->connector_name);
6692
6693 if (pcie_device->starget && (pcie_device->access_status !=
6694 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
6695 scsi_remove_target(&pcie_device->starget->dev);
6696 dewtprintk(ioc,
6697 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6698 __func__,
6699 pcie_device->handle, (u64)pcie_device->wwid));
6700 if (pcie_device->enclosure_handle != 0)
6701 dewtprintk(ioc,
6702 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6703 __func__,
6704 (u64)pcie_device->enclosure_logical_id,
6705 pcie_device->slot));
6706 if (pcie_device->connector_name[0] != '\0')
6707 dewtprintk(ioc,
6708 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
6709 __func__,
6710 pcie_device->enclosure_level,
6711 pcie_device->connector_name));
6712
6713 kfree(pcie_device->serial_number);
6714}
6715
6716
6717/**
6718 * _scsih_pcie_check_device - checking device responsiveness
6719 * @ioc: per adapter object
6720 * @handle: attached device handle
6721 */
6722static void
6723_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6724{
6725 Mpi2ConfigReply_t mpi_reply;
6726 Mpi26PCIeDevicePage0_t pcie_device_pg0;
6727 u32 ioc_status;
6728 struct _pcie_device *pcie_device;
6729 u64 wwid;
6730 unsigned long flags;
6731 struct scsi_target *starget;
6732 struct MPT3SAS_TARGET *sas_target_priv_data;
6733 u32 device_info;
6734
6735 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6736 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6737 return;
6738
6739 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6740 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6741 return;
6742
6743 /* check if this is end device */
6744 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6745 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
6746 return;
6747
6748 wwid = le64_to_cpu(pcie_device_pg0.WWID);
6749 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6750 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6751
6752 if (!pcie_device) {
6753 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6754 return;
6755 }
6756
6757 if (unlikely(pcie_device->handle != handle)) {
6758 starget = pcie_device->starget;
6759 sas_target_priv_data = starget->hostdata;
6760 pcie_device->access_status = pcie_device_pg0.AccessStatus;
6761 starget_printk(KERN_INFO, starget,
6762 "handle changed from(0x%04x) to (0x%04x)!!!\n",
6763 pcie_device->handle, handle);
6764 sas_target_priv_data->handle = handle;
6765 pcie_device->handle = handle;
6766
6767 if (le32_to_cpu(pcie_device_pg0.Flags) &
6768 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6769 pcie_device->enclosure_level =
6770 pcie_device_pg0.EnclosureLevel;
6771 memcpy(&pcie_device->connector_name[0],
6772 &pcie_device_pg0.ConnectorName[0], 4);
6773 } else {
6774 pcie_device->enclosure_level = 0;
6775 pcie_device->connector_name[0] = '\0';
6776 }
6777 }
6778
6779 /* check if device is present */
6780 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6781 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6782 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
6783 handle);
6784 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6785 pcie_device_put(pcie_device);
6786 return;
6787 }
6788
6789 /* check if there were any issues with discovery */
6790 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6791 pcie_device_pg0.AccessStatus)) {
6792 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6793 pcie_device_put(pcie_device);
6794 return;
6795 }
6796
6797 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6798 pcie_device_put(pcie_device);
6799
6800 _scsih_ublock_io_device(ioc, wwid);
6801
6802 return;
6803}
6804
6805/**
6806 * _scsih_pcie_add_device - creating pcie device object
6807 * @ioc: per adapter object
6808 * @handle: pcie device handle
6809 *
6810 * Creating end device object, stored in ioc->pcie_device_list.
6811 *
6812 * Return: 1 means queue the event later, 0 means complete the event
6813 */
6814static int
6815_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6816{
6817 Mpi26PCIeDevicePage0_t pcie_device_pg0;
6818 Mpi26PCIeDevicePage2_t pcie_device_pg2;
6819 Mpi2ConfigReply_t mpi_reply;
6820 struct _pcie_device *pcie_device;
6821 struct _enclosure_node *enclosure_dev;
6822 u32 ioc_status;
6823 u64 wwid;
6824
6825 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6826 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6827 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6828 __FILE__, __LINE__, __func__);
6829 return 0;
6830 }
6831 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6832 MPI2_IOCSTATUS_MASK;
6833 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6834 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6835 __FILE__, __LINE__, __func__);
6836 return 0;
6837 }
6838
6839 set_bit(handle, ioc->pend_os_device_add);
6840 wwid = le64_to_cpu(pcie_device_pg0.WWID);
6841
6842 /* check if device is present */
6843 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6844 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6845 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
6846 handle);
6847 return 0;
6848 }
6849
6850 /* check if there were any issues with discovery */
6851 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6852 pcie_device_pg0.AccessStatus))
6853 return 0;
6854
6855 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
6856 (pcie_device_pg0.DeviceInfo))))
6857 return 0;
6858
6859 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6860 if (pcie_device) {
6861 clear_bit(handle, ioc->pend_os_device_add);
6862 pcie_device_put(pcie_device);
6863 return 0;
6864 }
6865
6866 /* PCIe Device Page 2 contains read-only information about a
6867 * specific NVMe device; therefore, this page is only
6868 * valid for NVMe devices and skip for pcie devices of type scsi.
6869 */
6870 if (!(mpt3sas_scsih_is_pcie_scsi_device(
6871 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6872 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6873 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
6874 handle)) {
6875 ioc_err(ioc,
6876 "failure at %s:%d/%s()!\n", __FILE__,
6877 __LINE__, __func__);
6878 return 0;
6879 }
6880
6881 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6882 MPI2_IOCSTATUS_MASK;
6883 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6884 ioc_err(ioc,
6885 "failure at %s:%d/%s()!\n", __FILE__,
6886 __LINE__, __func__);
6887 return 0;
6888 }
6889 }
6890
6891 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6892 if (!pcie_device) {
6893 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6894 __FILE__, __LINE__, __func__);
6895 return 0;
6896 }
6897
6898 kref_init(&pcie_device->refcount);
6899 pcie_device->id = ioc->pcie_target_id++;
6900 pcie_device->channel = PCIE_CHANNEL;
6901 pcie_device->handle = handle;
6902 pcie_device->access_status = pcie_device_pg0.AccessStatus;
6903 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6904 pcie_device->wwid = wwid;
6905 pcie_device->port_num = pcie_device_pg0.PortNum;
6906 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6907 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6908
6909 pcie_device->enclosure_handle =
6910 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6911 if (pcie_device->enclosure_handle != 0)
6912 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6913
6914 if (le32_to_cpu(pcie_device_pg0.Flags) &
6915 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6916 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6917 memcpy(&pcie_device->connector_name[0],
6918 &pcie_device_pg0.ConnectorName[0], 4);
6919 } else {
6920 pcie_device->enclosure_level = 0;
6921 pcie_device->connector_name[0] = '\0';
6922 }
6923
6924 /* get enclosure_logical_id */
6925 if (pcie_device->enclosure_handle) {
6926 enclosure_dev =
6927 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6928 pcie_device->enclosure_handle);
6929 if (enclosure_dev)
6930 pcie_device->enclosure_logical_id =
6931 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6932 }
6933 /* TODO -- Add device name once FW supports it */
6934 if (!(mpt3sas_scsih_is_pcie_scsi_device(
6935 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6936 pcie_device->nvme_mdts =
6937 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6938 if (pcie_device_pg2.ControllerResetTO)
6939 pcie_device->reset_timeout =
6940 pcie_device_pg2.ControllerResetTO;
6941 else
6942 pcie_device->reset_timeout = 30;
6943 } else
6944 pcie_device->reset_timeout = 30;
6945
6946 if (ioc->wait_for_discovery_to_complete)
6947 _scsih_pcie_device_init_add(ioc, pcie_device);
6948 else
6949 _scsih_pcie_device_add(ioc, pcie_device);
6950
6951 pcie_device_put(pcie_device);
6952 return 0;
6953}
6954
6955/**
6956 * _scsih_pcie_topology_change_event_debug - debug for topology
6957 * event
6958 * @ioc: per adapter object
6959 * @event_data: event data payload
6960 * Context: user.
6961 */
6962static void
6963_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6964 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
6965{
6966 int i;
6967 u16 handle;
6968 u16 reason_code;
6969 u8 port_number;
6970 char *status_str = NULL;
6971 u8 link_rate, prev_link_rate;
6972
6973 switch (event_data->SwitchStatus) {
6974 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
6975 status_str = "add";
6976 break;
6977 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
6978 status_str = "remove";
6979 break;
6980 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
6981 case 0:
6982 status_str = "responding";
6983 break;
6984 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
6985 status_str = "remove delay";
6986 break;
6987 default:
6988 status_str = "unknown status";
6989 break;
6990 }
6991 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
6992 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
6993 "start_port(%02d), count(%d)\n",
6994 le16_to_cpu(event_data->SwitchDevHandle),
6995 le16_to_cpu(event_data->EnclosureHandle),
6996 event_data->StartPortNum, event_data->NumEntries);
6997 for (i = 0; i < event_data->NumEntries; i++) {
6998 handle =
6999 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7000 if (!handle)
7001 continue;
7002 port_number = event_data->StartPortNum + i;
7003 reason_code = event_data->PortEntry[i].PortStatus;
7004 switch (reason_code) {
7005 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7006 status_str = "target add";
7007 break;
7008 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7009 status_str = "target remove";
7010 break;
7011 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7012 status_str = "delay target remove";
7013 break;
7014 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7015 status_str = "link rate change";
7016 break;
7017 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7018 status_str = "target responding";
7019 break;
7020 default:
7021 status_str = "unknown";
7022 break;
7023 }
7024 link_rate = event_data->PortEntry[i].CurrentPortInfo &
7025 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7026 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7027 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7028 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7029 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
7030 handle, status_str, link_rate, prev_link_rate);
7031 }
7032}
7033
7034/**
7035 * _scsih_pcie_topology_change_event - handle PCIe topology
7036 * changes
7037 * @ioc: per adapter object
7038 * @fw_event: The fw_event_work object
7039 * Context: user.
7040 *
7041 */
7042static void
7043_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7044 struct fw_event_work *fw_event)
7045{
7046 int i;
7047 u16 handle;
7048 u16 reason_code;
7049 u8 link_rate, prev_link_rate;
7050 unsigned long flags;
7051 int rc;
7052 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7053 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7054 struct _pcie_device *pcie_device;
7055
7056 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7057 _scsih_pcie_topology_change_event_debug(ioc, event_data);
7058
7059 if (ioc->shost_recovery || ioc->remove_host ||
7060 ioc->pci_error_recovery)
7061 return;
7062
7063 if (fw_event->ignore) {
7064 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
7065 return;
7066 }
7067
7068 /* handle siblings events */
7069 for (i = 0; i < event_data->NumEntries; i++) {
7070 if (fw_event->ignore) {
7071 dewtprintk(ioc,
7072 ioc_info(ioc, "ignoring switch event\n"));
7073 return;
7074 }
7075 if (ioc->remove_host || ioc->pci_error_recovery)
7076 return;
7077 reason_code = event_data->PortEntry[i].PortStatus;
7078 handle =
7079 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7080 if (!handle)
7081 continue;
7082
7083 link_rate = event_data->PortEntry[i].CurrentPortInfo
7084 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7085 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7086 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7087
7088 switch (reason_code) {
7089 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7090 if (ioc->shost_recovery)
7091 break;
7092 if (link_rate == prev_link_rate)
7093 break;
7094 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7095 break;
7096
7097 _scsih_pcie_check_device(ioc, handle);
7098
7099 /* This code after this point handles the test case
7100 * where a device has been added, however its returning
7101 * BUSY for sometime. Then before the Device Missing
7102 * Delay expires and the device becomes READY, the
7103 * device is removed and added back.
7104 */
7105 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7106 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7107 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7108
7109 if (pcie_device) {
7110 pcie_device_put(pcie_device);
7111 break;
7112 }
7113
7114 if (!test_bit(handle, ioc->pend_os_device_add))
7115 break;
7116
7117 dewtprintk(ioc,
7118 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7119 handle));
7120 event_data->PortEntry[i].PortStatus &= 0xF0;
7121 event_data->PortEntry[i].PortStatus |=
7122 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7123 /* fall through */
7124 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7125 if (ioc->shost_recovery)
7126 break;
7127 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7128 break;
7129
7130 rc = _scsih_pcie_add_device(ioc, handle);
7131 if (!rc) {
7132 /* mark entry vacant */
7133 /* TODO This needs to be reviewed and fixed,
7134 * we dont have an entry
7135 * to make an event void like vacant
7136 */
7137 event_data->PortEntry[i].PortStatus |=
7138 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7139 }
7140 break;
7141 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7142 _scsih_pcie_device_remove_by_handle(ioc, handle);
7143 break;
7144 }
7145 }
7146}
7147
7148/**
7149 * _scsih_pcie_device_status_change_event_debug - debug for device event
7150 * @ioc: ?
7151 * @event_data: event data payload
7152 * Context: user.
7153 */
7154static void
7155_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7156 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7157{
7158 char *reason_str = NULL;
7159
7160 switch (event_data->ReasonCode) {
7161 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7162 reason_str = "smart data";
7163 break;
7164 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7165 reason_str = "unsupported device discovered";
7166 break;
7167 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7168 reason_str = "internal device reset";
7169 break;
7170 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7171 reason_str = "internal task abort";
7172 break;
7173 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7174 reason_str = "internal task abort set";
7175 break;
7176 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7177 reason_str = "internal clear task set";
7178 break;
7179 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7180 reason_str = "internal query task";
7181 break;
7182 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7183 reason_str = "device init failure";
7184 break;
7185 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7186 reason_str = "internal device reset complete";
7187 break;
7188 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7189 reason_str = "internal task abort complete";
7190 break;
7191 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7192 reason_str = "internal async notification";
7193 break;
7194 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7195 reason_str = "pcie hot reset failed";
7196 break;
7197 default:
7198 reason_str = "unknown reason";
7199 break;
7200 }
7201
7202 ioc_info(ioc, "PCIE device status change: (%s)\n"
7203 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7204 reason_str, le16_to_cpu(event_data->DevHandle),
7205 (u64)le64_to_cpu(event_data->WWID),
7206 le16_to_cpu(event_data->TaskTag));
7207 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7208 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7209 event_data->ASC, event_data->ASCQ);
7210 pr_cont("\n");
7211}
7212
7213/**
7214 * _scsih_pcie_device_status_change_event - handle device status
7215 * change
7216 * @ioc: per adapter object
7217 * @fw_event: The fw_event_work object
7218 * Context: user.
7219 */
7220static void
7221_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7222 struct fw_event_work *fw_event)
7223{
7224 struct MPT3SAS_TARGET *target_priv_data;
7225 struct _pcie_device *pcie_device;
7226 u64 wwid;
7227 unsigned long flags;
7228 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7229 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7230 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7231 _scsih_pcie_device_status_change_event_debug(ioc,
7232 event_data);
7233
7234 if (event_data->ReasonCode !=
7235 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7236 event_data->ReasonCode !=
7237 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7238 return;
7239
7240 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7241 wwid = le64_to_cpu(event_data->WWID);
7242 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7243
7244 if (!pcie_device || !pcie_device->starget)
7245 goto out;
7246
7247 target_priv_data = pcie_device->starget->hostdata;
7248 if (!target_priv_data)
7249 goto out;
7250
7251 if (event_data->ReasonCode ==
7252 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7253 target_priv_data->tm_busy = 1;
7254 else
7255 target_priv_data->tm_busy = 0;
7256out:
7257 if (pcie_device)
7258 pcie_device_put(pcie_device);
7259
7260 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7261}
7262
7263/**
7264 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7265 * event
7266 * @ioc: per adapter object
7267 * @event_data: event data payload
7268 * Context: user.
7269 */
7270static void
7271_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7272 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7273{
7274 char *reason_str = NULL;
7275
7276 switch (event_data->ReasonCode) {
7277 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7278 reason_str = "enclosure add";
7279 break;
7280 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7281 reason_str = "enclosure remove";
7282 break;
7283 default:
7284 reason_str = "unknown reason";
7285 break;
7286 }
7287
7288 ioc_info(ioc, "enclosure status change: (%s)\n"
7289 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7290 reason_str,
7291 le16_to_cpu(event_data->EnclosureHandle),
7292 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7293 le16_to_cpu(event_data->StartSlot));
7294}
7295
7296/**
7297 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7298 * @ioc: per adapter object
7299 * @fw_event: The fw_event_work object
7300 * Context: user.
7301 */
7302static void
7303_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7304 struct fw_event_work *fw_event)
7305{
7306 Mpi2ConfigReply_t mpi_reply;
7307 struct _enclosure_node *enclosure_dev = NULL;
7308 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7309 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7310 int rc;
7311 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7312
7313 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7314 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7315 (Mpi2EventDataSasEnclDevStatusChange_t *)
7316 fw_event->event_data);
7317 if (ioc->shost_recovery)
7318 return;
7319
7320 if (enclosure_handle)
7321 enclosure_dev =
7322 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7323 enclosure_handle);
7324 switch (event_data->ReasonCode) {
7325 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7326 if (!enclosure_dev) {
7327 enclosure_dev =
7328 kzalloc(sizeof(struct _enclosure_node),
7329 GFP_KERNEL);
7330 if (!enclosure_dev) {
7331 ioc_info(ioc, "failure at %s:%d/%s()!\n",
7332 __FILE__, __LINE__, __func__);
7333 return;
7334 }
7335 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7336 &enclosure_dev->pg0,
7337 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7338 enclosure_handle);
7339
7340 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7341 MPI2_IOCSTATUS_MASK)) {
7342 kfree(enclosure_dev);
7343 return;
7344 }
7345
7346 list_add_tail(&enclosure_dev->list,
7347 &ioc->enclosure_list);
7348 }
7349 break;
7350 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7351 if (enclosure_dev) {
7352 list_del(&enclosure_dev->list);
7353 kfree(enclosure_dev);
7354 }
7355 break;
7356 default:
7357 break;
7358 }
7359}
7360
7361/**
7362 * _scsih_sas_broadcast_primitive_event - handle broadcast events
7363 * @ioc: per adapter object
7364 * @fw_event: The fw_event_work object
7365 * Context: user.
7366 */
7367static void
7368_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7369 struct fw_event_work *fw_event)
7370{
7371 struct scsi_cmnd *scmd;
7372 struct scsi_device *sdev;
7373 struct scsiio_tracker *st;
7374 u16 smid, handle;
7375 u32 lun;
7376 struct MPT3SAS_DEVICE *sas_device_priv_data;
7377 u32 termination_count;
7378 u32 query_count;
7379 Mpi2SCSITaskManagementReply_t *mpi_reply;
7380 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7381 (Mpi2EventDataSasBroadcastPrimitive_t *)
7382 fw_event->event_data;
7383 u16 ioc_status;
7384 unsigned long flags;
7385 int r;
7386 u8 max_retries = 0;
7387 u8 task_abort_retries;
7388
7389 mutex_lock(&ioc->tm_cmds.mutex);
7390 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7391 __func__, event_data->PhyNum, event_data->PortWidth);
7392
7393 _scsih_block_io_all_device(ioc);
7394
7395 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7396 mpi_reply = ioc->tm_cmds.reply;
7397 broadcast_aen_retry:
7398
7399 /* sanity checks for retrying this loop */
7400 if (max_retries++ == 5) {
7401 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7402 goto out;
7403 } else if (max_retries > 1)
7404 dewtprintk(ioc,
7405 ioc_info(ioc, "%s: %d retry\n",
7406 __func__, max_retries - 1));
7407
7408 termination_count = 0;
7409 query_count = 0;
7410 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7411 if (ioc->shost_recovery)
7412 goto out;
7413 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7414 if (!scmd)
7415 continue;
7416 st = scsi_cmd_priv(scmd);
7417 sdev = scmd->device;
7418 sas_device_priv_data = sdev->hostdata;
7419 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7420 continue;
7421 /* skip hidden raid components */
7422 if (sas_device_priv_data->sas_target->flags &
7423 MPT_TARGET_FLAGS_RAID_COMPONENT)
7424 continue;
7425 /* skip volumes */
7426 if (sas_device_priv_data->sas_target->flags &
7427 MPT_TARGET_FLAGS_VOLUME)
7428 continue;
7429 /* skip PCIe devices */
7430 if (sas_device_priv_data->sas_target->flags &
7431 MPT_TARGET_FLAGS_PCIE_DEVICE)
7432 continue;
7433
7434 handle = sas_device_priv_data->sas_target->handle;
7435 lun = sas_device_priv_data->lun;
7436 query_count++;
7437
7438 if (ioc->shost_recovery)
7439 goto out;
7440
7441 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7442 r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7443 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7444 st->msix_io, 30, 0);
7445 if (r == FAILED) {
7446 sdev_printk(KERN_WARNING, sdev,
7447 "mpt3sas_scsih_issue_tm: FAILED when sending "
7448 "QUERY_TASK: scmd(%p)\n", scmd);
7449 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7450 goto broadcast_aen_retry;
7451 }
7452 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7453 & MPI2_IOCSTATUS_MASK;
7454 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7455 sdev_printk(KERN_WARNING, sdev,
7456 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7457 ioc_status, scmd);
7458 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7459 goto broadcast_aen_retry;
7460 }
7461
7462 /* see if IO is still owned by IOC and target */
7463 if (mpi_reply->ResponseCode ==
7464 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7465 mpi_reply->ResponseCode ==
7466 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7467 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7468 continue;
7469 }
7470 task_abort_retries = 0;
7471 tm_retry:
7472 if (task_abort_retries++ == 60) {
7473 dewtprintk(ioc,
7474 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7475 __func__));
7476 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7477 goto broadcast_aen_retry;
7478 }
7479
7480 if (ioc->shost_recovery)
7481 goto out_no_lock;
7482
7483 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7484 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7485 st->msix_io, 30, 0);
7486 if (r == FAILED || st->cb_idx != 0xFF) {
7487 sdev_printk(KERN_WARNING, sdev,
7488 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7489 "scmd(%p)\n", scmd);
7490 goto tm_retry;
7491 }
7492
7493 if (task_abort_retries > 1)
7494 sdev_printk(KERN_WARNING, sdev,
7495 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7496 " scmd(%p)\n",
7497 task_abort_retries - 1, scmd);
7498
7499 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7500 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7501 }
7502
7503 if (ioc->broadcast_aen_pending) {
7504 dewtprintk(ioc,
7505 ioc_info(ioc,
7506 "%s: loop back due to pending AEN\n",
7507 __func__));
7508 ioc->broadcast_aen_pending = 0;
7509 goto broadcast_aen_retry;
7510 }
7511
7512 out:
7513 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7514 out_no_lock:
7515
7516 dewtprintk(ioc,
7517 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7518 __func__, query_count, termination_count));
7519
7520 ioc->broadcast_aen_busy = 0;
7521 if (!ioc->shost_recovery)
7522 _scsih_ublock_io_all_device(ioc);
7523 mutex_unlock(&ioc->tm_cmds.mutex);
7524}
7525
7526/**
7527 * _scsih_sas_discovery_event - handle discovery events
7528 * @ioc: per adapter object
7529 * @fw_event: The fw_event_work object
7530 * Context: user.
7531 */
7532static void
7533_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7534 struct fw_event_work *fw_event)
7535{
7536 Mpi2EventDataSasDiscovery_t *event_data =
7537 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7538
7539 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7540 ioc_info(ioc, "discovery event: (%s)",
7541 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7542 "start" : "stop");
7543 if (event_data->DiscoveryStatus)
7544 pr_cont("discovery_status(0x%08x)",
7545 le32_to_cpu(event_data->DiscoveryStatus));
7546 pr_cont("\n");
7547 }
7548
7549 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7550 !ioc->sas_hba.num_phys) {
7551 if (disable_discovery > 0 && ioc->shost_recovery) {
7552 /* Wait for the reset to complete */
7553 while (ioc->shost_recovery)
7554 ssleep(1);
7555 }
7556 _scsih_sas_host_add(ioc);
7557 }
7558}
7559
7560/**
7561 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7562 * events
7563 * @ioc: per adapter object
7564 * @fw_event: The fw_event_work object
7565 * Context: user.
7566 */
7567static void
7568_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7569 struct fw_event_work *fw_event)
7570{
7571 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7572 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7573
7574 switch (event_data->ReasonCode) {
7575 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7576 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7577 le16_to_cpu(event_data->DevHandle),
7578 (u64)le64_to_cpu(event_data->SASAddress),
7579 event_data->PhysicalPort);
7580 break;
7581 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7582 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7583 le16_to_cpu(event_data->DevHandle),
7584 (u64)le64_to_cpu(event_data->SASAddress),
7585 event_data->PhysicalPort);
7586 break;
7587 default:
7588 break;
7589 }
7590}
7591
7592/**
7593 * _scsih_pcie_enumeration_event - handle enumeration events
7594 * @ioc: per adapter object
7595 * @fw_event: The fw_event_work object
7596 * Context: user.
7597 */
7598static void
7599_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7600 struct fw_event_work *fw_event)
7601{
7602 Mpi26EventDataPCIeEnumeration_t *event_data =
7603 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7604
7605 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7606 return;
7607
7608 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7609 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7610 "started" : "completed",
7611 event_data->Flags);
7612 if (event_data->EnumerationStatus)
7613 pr_cont("enumeration_status(0x%08x)",
7614 le32_to_cpu(event_data->EnumerationStatus));
7615 pr_cont("\n");
7616}
7617
7618/**
7619 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7620 * @ioc: per adapter object
7621 * @handle: device handle for physical disk
7622 * @phys_disk_num: physical disk number
7623 *
7624 * Return: 0 for success, else failure.
7625 */
7626static int
7627_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7628{
7629 Mpi2RaidActionRequest_t *mpi_request;
7630 Mpi2RaidActionReply_t *mpi_reply;
7631 u16 smid;
7632 u8 issue_reset = 0;
7633 int rc = 0;
7634 u16 ioc_status;
7635 u32 log_info;
7636
7637 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7638 return rc;
7639
7640 mutex_lock(&ioc->scsih_cmds.mutex);
7641
7642 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7643 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7644 rc = -EAGAIN;
7645 goto out;
7646 }
7647 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7648
7649 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7650 if (!smid) {
7651 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7652 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7653 rc = -EAGAIN;
7654 goto out;
7655 }
7656
7657 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7658 ioc->scsih_cmds.smid = smid;
7659 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7660
7661 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7662 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7663 mpi_request->PhysDiskNum = phys_disk_num;
7664
7665 dewtprintk(ioc,
7666 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7667 handle, phys_disk_num));
7668
7669 init_completion(&ioc->scsih_cmds.done);
7670 ioc->put_smid_default(ioc, smid);
7671 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7672
7673 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7674 issue_reset =
7675 mpt3sas_base_check_cmd_timeout(ioc,
7676 ioc->scsih_cmds.status, mpi_request,
7677 sizeof(Mpi2RaidActionRequest_t)/4);
7678 rc = -EFAULT;
7679 goto out;
7680 }
7681
7682 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7683
7684 mpi_reply = ioc->scsih_cmds.reply;
7685 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7686 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7687 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
7688 else
7689 log_info = 0;
7690 ioc_status &= MPI2_IOCSTATUS_MASK;
7691 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7692 dewtprintk(ioc,
7693 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7694 ioc_status, log_info));
7695 rc = -EFAULT;
7696 } else
7697 dewtprintk(ioc,
7698 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7699 }
7700
7701 out:
7702 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7703 mutex_unlock(&ioc->scsih_cmds.mutex);
7704
7705 if (issue_reset)
7706 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7707 return rc;
7708}
7709
7710/**
7711 * _scsih_reprobe_lun - reprobing lun
7712 * @sdev: scsi device struct
7713 * @no_uld_attach: sdev->no_uld_attach flag setting
7714 *
7715 **/
7716static void
7717_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7718{
7719 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7720 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7721 sdev->no_uld_attach ? "hiding" : "exposing");
7722 WARN_ON(scsi_device_reprobe(sdev));
7723}
7724
7725/**
7726 * _scsih_sas_volume_add - add new volume
7727 * @ioc: per adapter object
7728 * @element: IR config element data
7729 * Context: user.
7730 */
7731static void
7732_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7733 Mpi2EventIrConfigElement_t *element)
7734{
7735 struct _raid_device *raid_device;
7736 unsigned long flags;
7737 u64 wwid;
7738 u16 handle = le16_to_cpu(element->VolDevHandle);
7739 int rc;
7740
7741 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7742 if (!wwid) {
7743 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7744 __FILE__, __LINE__, __func__);
7745 return;
7746 }
7747
7748 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7749 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7750 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7751
7752 if (raid_device)
7753 return;
7754
7755 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7756 if (!raid_device) {
7757 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7758 __FILE__, __LINE__, __func__);
7759 return;
7760 }
7761
7762 raid_device->id = ioc->sas_id++;
7763 raid_device->channel = RAID_CHANNEL;
7764 raid_device->handle = handle;
7765 raid_device->wwid = wwid;
7766 _scsih_raid_device_add(ioc, raid_device);
7767 if (!ioc->wait_for_discovery_to_complete) {
7768 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7769 raid_device->id, 0);
7770 if (rc)
7771 _scsih_raid_device_remove(ioc, raid_device);
7772 } else {
7773 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7774 _scsih_determine_boot_device(ioc, raid_device, 1);
7775 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7776 }
7777}
7778
7779/**
7780 * _scsih_sas_volume_delete - delete volume
7781 * @ioc: per adapter object
7782 * @handle: volume device handle
7783 * Context: user.
7784 */
7785static void
7786_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7787{
7788 struct _raid_device *raid_device;
7789 unsigned long flags;
7790 struct MPT3SAS_TARGET *sas_target_priv_data;
7791 struct scsi_target *starget = NULL;
7792
7793 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7794 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7795 if (raid_device) {
7796 if (raid_device->starget) {
7797 starget = raid_device->starget;
7798 sas_target_priv_data = starget->hostdata;
7799 sas_target_priv_data->deleted = 1;
7800 }
7801 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7802 raid_device->handle, (u64)raid_device->wwid);
7803 list_del(&raid_device->list);
7804 kfree(raid_device);
7805 }
7806 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7807 if (starget)
7808 scsi_remove_target(&starget->dev);
7809}
7810
7811/**
7812 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7813 * @ioc: per adapter object
7814 * @element: IR config element data
7815 * Context: user.
7816 */
7817static void
7818_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7819 Mpi2EventIrConfigElement_t *element)
7820{
7821 struct _sas_device *sas_device;
7822 struct scsi_target *starget = NULL;
7823 struct MPT3SAS_TARGET *sas_target_priv_data;
7824 unsigned long flags;
7825 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7826
7827 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7828 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7829 if (sas_device) {
7830 sas_device->volume_handle = 0;
7831 sas_device->volume_wwid = 0;
7832 clear_bit(handle, ioc->pd_handles);
7833 if (sas_device->starget && sas_device->starget->hostdata) {
7834 starget = sas_device->starget;
7835 sas_target_priv_data = starget->hostdata;
7836 sas_target_priv_data->flags &=
7837 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7838 }
7839 }
7840 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7841 if (!sas_device)
7842 return;
7843
7844 /* exposing raid component */
7845 if (starget)
7846 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7847
7848 sas_device_put(sas_device);
7849}
7850
7851/**
7852 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7853 * @ioc: per adapter object
7854 * @element: IR config element data
7855 * Context: user.
7856 */
7857static void
7858_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7859 Mpi2EventIrConfigElement_t *element)
7860{
7861 struct _sas_device *sas_device;
7862 struct scsi_target *starget = NULL;
7863 struct MPT3SAS_TARGET *sas_target_priv_data;
7864 unsigned long flags;
7865 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7866 u16 volume_handle = 0;
7867 u64 volume_wwid = 0;
7868
7869 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7870 if (volume_handle)
7871 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7872 &volume_wwid);
7873
7874 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7875 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7876 if (sas_device) {
7877 set_bit(handle, ioc->pd_handles);
7878 if (sas_device->starget && sas_device->starget->hostdata) {
7879 starget = sas_device->starget;
7880 sas_target_priv_data = starget->hostdata;
7881 sas_target_priv_data->flags |=
7882 MPT_TARGET_FLAGS_RAID_COMPONENT;
7883 sas_device->volume_handle = volume_handle;
7884 sas_device->volume_wwid = volume_wwid;
7885 }
7886 }
7887 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7888 if (!sas_device)
7889 return;
7890
7891 /* hiding raid component */
7892 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7893
7894 if (starget)
7895 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7896
7897 sas_device_put(sas_device);
7898}
7899
7900/**
7901 * _scsih_sas_pd_delete - delete pd component
7902 * @ioc: per adapter object
7903 * @element: IR config element data
7904 * Context: user.
7905 */
7906static void
7907_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7908 Mpi2EventIrConfigElement_t *element)
7909{
7910 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7911
7912 _scsih_device_remove_by_handle(ioc, handle);
7913}
7914
7915/**
7916 * _scsih_sas_pd_add - remove pd component
7917 * @ioc: per adapter object
7918 * @element: IR config element data
7919 * Context: user.
7920 */
7921static void
7922_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
7923 Mpi2EventIrConfigElement_t *element)
7924{
7925 struct _sas_device *sas_device;
7926 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7927 Mpi2ConfigReply_t mpi_reply;
7928 Mpi2SasDevicePage0_t sas_device_pg0;
7929 u32 ioc_status;
7930 u64 sas_address;
7931 u16 parent_handle;
7932
7933 set_bit(handle, ioc->pd_handles);
7934
7935 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
7936 if (sas_device) {
7937 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7938 sas_device_put(sas_device);
7939 return;
7940 }
7941
7942 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7943 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7944 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7945 __FILE__, __LINE__, __func__);
7946 return;
7947 }
7948
7949 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7950 MPI2_IOCSTATUS_MASK;
7951 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7952 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7953 __FILE__, __LINE__, __func__);
7954 return;
7955 }
7956
7957 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7958 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
7959 mpt3sas_transport_update_links(ioc, sas_address, handle,
7960 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
7961
7962 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7963 _scsih_add_device(ioc, handle, 0, 1);
7964}
7965
7966/**
7967 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
7968 * @ioc: per adapter object
7969 * @event_data: event data payload
7970 * Context: user.
7971 */
7972static void
7973_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7974 Mpi2EventDataIrConfigChangeList_t *event_data)
7975{
7976 Mpi2EventIrConfigElement_t *element;
7977 u8 element_type;
7978 int i;
7979 char *reason_str = NULL, *element_str = NULL;
7980
7981 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7982
7983 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
7984 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
7985 "foreign" : "native",
7986 event_data->NumElements);
7987 for (i = 0; i < event_data->NumElements; i++, element++) {
7988 switch (element->ReasonCode) {
7989 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7990 reason_str = "add";
7991 break;
7992 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7993 reason_str = "remove";
7994 break;
7995 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
7996 reason_str = "no change";
7997 break;
7998 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7999 reason_str = "hide";
8000 break;
8001 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8002 reason_str = "unhide";
8003 break;
8004 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8005 reason_str = "volume_created";
8006 break;
8007 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8008 reason_str = "volume_deleted";
8009 break;
8010 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8011 reason_str = "pd_created";
8012 break;
8013 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8014 reason_str = "pd_deleted";
8015 break;
8016 default:
8017 reason_str = "unknown reason";
8018 break;
8019 }
8020 element_type = le16_to_cpu(element->ElementFlags) &
8021 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8022 switch (element_type) {
8023 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8024 element_str = "volume";
8025 break;
8026 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8027 element_str = "phys disk";
8028 break;
8029 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8030 element_str = "hot spare";
8031 break;
8032 default:
8033 element_str = "unknown element";
8034 break;
8035 }
8036 pr_info("\t(%s:%s), vol handle(0x%04x), " \
8037 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8038 reason_str, le16_to_cpu(element->VolDevHandle),
8039 le16_to_cpu(element->PhysDiskDevHandle),
8040 element->PhysDiskNum);
8041 }
8042}
8043
8044/**
8045 * _scsih_sas_ir_config_change_event - handle ir configuration change events
8046 * @ioc: per adapter object
8047 * @fw_event: The fw_event_work object
8048 * Context: user.
8049 */
8050static void
8051_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8052 struct fw_event_work *fw_event)
8053{
8054 Mpi2EventIrConfigElement_t *element;
8055 int i;
8056 u8 foreign_config;
8057 Mpi2EventDataIrConfigChangeList_t *event_data =
8058 (Mpi2EventDataIrConfigChangeList_t *)
8059 fw_event->event_data;
8060
8061 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8062 (!ioc->hide_ir_msg))
8063 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
8064
8065 foreign_config = (le32_to_cpu(event_data->Flags) &
8066 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8067
8068 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8069 if (ioc->shost_recovery &&
8070 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8071 for (i = 0; i < event_data->NumElements; i++, element++) {
8072 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8073 _scsih_ir_fastpath(ioc,
8074 le16_to_cpu(element->PhysDiskDevHandle),
8075 element->PhysDiskNum);
8076 }
8077 return;
8078 }
8079
8080 for (i = 0; i < event_data->NumElements; i++, element++) {
8081
8082 switch (element->ReasonCode) {
8083 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8084 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8085 if (!foreign_config)
8086 _scsih_sas_volume_add(ioc, element);
8087 break;
8088 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8089 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8090 if (!foreign_config)
8091 _scsih_sas_volume_delete(ioc,
8092 le16_to_cpu(element->VolDevHandle));
8093 break;
8094 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8095 if (!ioc->is_warpdrive)
8096 _scsih_sas_pd_hide(ioc, element);
8097 break;
8098 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8099 if (!ioc->is_warpdrive)
8100 _scsih_sas_pd_expose(ioc, element);
8101 break;
8102 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8103 if (!ioc->is_warpdrive)
8104 _scsih_sas_pd_add(ioc, element);
8105 break;
8106 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8107 if (!ioc->is_warpdrive)
8108 _scsih_sas_pd_delete(ioc, element);
8109 break;
8110 }
8111 }
8112}
8113
8114/**
8115 * _scsih_sas_ir_volume_event - IR volume event
8116 * @ioc: per adapter object
8117 * @fw_event: The fw_event_work object
8118 * Context: user.
8119 */
8120static void
8121_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8122 struct fw_event_work *fw_event)
8123{
8124 u64 wwid;
8125 unsigned long flags;
8126 struct _raid_device *raid_device;
8127 u16 handle;
8128 u32 state;
8129 int rc;
8130 Mpi2EventDataIrVolume_t *event_data =
8131 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
8132
8133 if (ioc->shost_recovery)
8134 return;
8135
8136 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8137 return;
8138
8139 handle = le16_to_cpu(event_data->VolDevHandle);
8140 state = le32_to_cpu(event_data->NewValue);
8141 if (!ioc->hide_ir_msg)
8142 dewtprintk(ioc,
8143 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8144 __func__, handle,
8145 le32_to_cpu(event_data->PreviousValue),
8146 state));
8147 switch (state) {
8148 case MPI2_RAID_VOL_STATE_MISSING:
8149 case MPI2_RAID_VOL_STATE_FAILED:
8150 _scsih_sas_volume_delete(ioc, handle);
8151 break;
8152
8153 case MPI2_RAID_VOL_STATE_ONLINE:
8154 case MPI2_RAID_VOL_STATE_DEGRADED:
8155 case MPI2_RAID_VOL_STATE_OPTIMAL:
8156
8157 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8158 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8159 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8160
8161 if (raid_device)
8162 break;
8163
8164 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8165 if (!wwid) {
8166 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8167 __FILE__, __LINE__, __func__);
8168 break;
8169 }
8170
8171 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8172 if (!raid_device) {
8173 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8174 __FILE__, __LINE__, __func__);
8175 break;
8176 }
8177
8178 raid_device->id = ioc->sas_id++;
8179 raid_device->channel = RAID_CHANNEL;
8180 raid_device->handle = handle;
8181 raid_device->wwid = wwid;
8182 _scsih_raid_device_add(ioc, raid_device);
8183 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8184 raid_device->id, 0);
8185 if (rc)
8186 _scsih_raid_device_remove(ioc, raid_device);
8187 break;
8188
8189 case MPI2_RAID_VOL_STATE_INITIALIZING:
8190 default:
8191 break;
8192 }
8193}
8194
8195/**
8196 * _scsih_sas_ir_physical_disk_event - PD event
8197 * @ioc: per adapter object
8198 * @fw_event: The fw_event_work object
8199 * Context: user.
8200 */
8201static void
8202_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8203 struct fw_event_work *fw_event)
8204{
8205 u16 handle, parent_handle;
8206 u32 state;
8207 struct _sas_device *sas_device;
8208 Mpi2ConfigReply_t mpi_reply;
8209 Mpi2SasDevicePage0_t sas_device_pg0;
8210 u32 ioc_status;
8211 Mpi2EventDataIrPhysicalDisk_t *event_data =
8212 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8213 u64 sas_address;
8214
8215 if (ioc->shost_recovery)
8216 return;
8217
8218 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8219 return;
8220
8221 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8222 state = le32_to_cpu(event_data->NewValue);
8223
8224 if (!ioc->hide_ir_msg)
8225 dewtprintk(ioc,
8226 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8227 __func__, handle,
8228 le32_to_cpu(event_data->PreviousValue),
8229 state));
8230
8231 switch (state) {
8232 case MPI2_RAID_PD_STATE_ONLINE:
8233 case MPI2_RAID_PD_STATE_DEGRADED:
8234 case MPI2_RAID_PD_STATE_REBUILDING:
8235 case MPI2_RAID_PD_STATE_OPTIMAL:
8236 case MPI2_RAID_PD_STATE_HOT_SPARE:
8237
8238 if (!ioc->is_warpdrive)
8239 set_bit(handle, ioc->pd_handles);
8240
8241 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8242 if (sas_device) {
8243 sas_device_put(sas_device);
8244 return;
8245 }
8246
8247 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8248 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8249 handle))) {
8250 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8251 __FILE__, __LINE__, __func__);
8252 return;
8253 }
8254
8255 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8256 MPI2_IOCSTATUS_MASK;
8257 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8258 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8259 __FILE__, __LINE__, __func__);
8260 return;
8261 }
8262
8263 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8264 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8265 mpt3sas_transport_update_links(ioc, sas_address, handle,
8266 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8267
8268 _scsih_add_device(ioc, handle, 0, 1);
8269
8270 break;
8271
8272 case MPI2_RAID_PD_STATE_OFFLINE:
8273 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8274 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8275 default:
8276 break;
8277 }
8278}
8279
8280/**
8281 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8282 * @ioc: per adapter object
8283 * @event_data: event data payload
8284 * Context: user.
8285 */
8286static void
8287_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8288 Mpi2EventDataIrOperationStatus_t *event_data)
8289{
8290 char *reason_str = NULL;
8291
8292 switch (event_data->RAIDOperation) {
8293 case MPI2_EVENT_IR_RAIDOP_RESYNC:
8294 reason_str = "resync";
8295 break;
8296 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8297 reason_str = "online capacity expansion";
8298 break;
8299 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8300 reason_str = "consistency check";
8301 break;
8302 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8303 reason_str = "background init";
8304 break;
8305 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8306 reason_str = "make data consistent";
8307 break;
8308 }
8309
8310 if (!reason_str)
8311 return;
8312
8313 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8314 reason_str,
8315 le16_to_cpu(event_data->VolDevHandle),
8316 event_data->PercentComplete);
8317}
8318
8319/**
8320 * _scsih_sas_ir_operation_status_event - handle RAID operation events
8321 * @ioc: per adapter object
8322 * @fw_event: The fw_event_work object
8323 * Context: user.
8324 */
8325static void
8326_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8327 struct fw_event_work *fw_event)
8328{
8329 Mpi2EventDataIrOperationStatus_t *event_data =
8330 (Mpi2EventDataIrOperationStatus_t *)
8331 fw_event->event_data;
8332 static struct _raid_device *raid_device;
8333 unsigned long flags;
8334 u16 handle;
8335
8336 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8337 (!ioc->hide_ir_msg))
8338 _scsih_sas_ir_operation_status_event_debug(ioc,
8339 event_data);
8340
8341 /* code added for raid transport support */
8342 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8343
8344 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8345 handle = le16_to_cpu(event_data->VolDevHandle);
8346 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8347 if (raid_device)
8348 raid_device->percent_complete =
8349 event_data->PercentComplete;
8350 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8351 }
8352}
8353
8354/**
8355 * _scsih_prep_device_scan - initialize parameters prior to device scan
8356 * @ioc: per adapter object
8357 *
8358 * Set the deleted flag prior to device scan. If the device is found during
8359 * the scan, then we clear the deleted flag.
8360 */
8361static void
8362_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8363{
8364 struct MPT3SAS_DEVICE *sas_device_priv_data;
8365 struct scsi_device *sdev;
8366
8367 shost_for_each_device(sdev, ioc->shost) {
8368 sas_device_priv_data = sdev->hostdata;
8369 if (sas_device_priv_data && sas_device_priv_data->sas_target)
8370 sas_device_priv_data->sas_target->deleted = 1;
8371 }
8372}
8373
8374/**
8375 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
8376 * @ioc: per adapter object
8377 * @sas_device_pg0: SAS Device page 0
8378 *
8379 * After host reset, find out whether devices are still responding.
8380 * Used in _scsih_remove_unresponsive_sas_devices.
8381 */
8382static void
8383_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8384Mpi2SasDevicePage0_t *sas_device_pg0)
8385{
8386 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8387 struct scsi_target *starget;
8388 struct _sas_device *sas_device = NULL;
8389 struct _enclosure_node *enclosure_dev = NULL;
8390 unsigned long flags;
8391
8392 if (sas_device_pg0->EnclosureHandle) {
8393 enclosure_dev =
8394 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8395 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8396 if (enclosure_dev == NULL)
8397 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8398 sas_device_pg0->EnclosureHandle);
8399 }
8400 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8401 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8402 if ((sas_device->sas_address == le64_to_cpu(
8403 sas_device_pg0->SASAddress)) && (sas_device->slot ==
8404 le16_to_cpu(sas_device_pg0->Slot))) {
8405 sas_device->responding = 1;
8406 starget = sas_device->starget;
8407 if (starget && starget->hostdata) {
8408 sas_target_priv_data = starget->hostdata;
8409 sas_target_priv_data->tm_busy = 0;
8410 sas_target_priv_data->deleted = 0;
8411 } else
8412 sas_target_priv_data = NULL;
8413 if (starget) {
8414 starget_printk(KERN_INFO, starget,
8415 "handle(0x%04x), sas_addr(0x%016llx)\n",
8416 le16_to_cpu(sas_device_pg0->DevHandle),
8417 (unsigned long long)
8418 sas_device->sas_address);
8419
8420 if (sas_device->enclosure_handle != 0)
8421 starget_printk(KERN_INFO, starget,
8422 "enclosure logical id(0x%016llx),"
8423 " slot(%d)\n",
8424 (unsigned long long)
8425 sas_device->enclosure_logical_id,
8426 sas_device->slot);
8427 }
8428 if (le16_to_cpu(sas_device_pg0->Flags) &
8429 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8430 sas_device->enclosure_level =
8431 sas_device_pg0->EnclosureLevel;
8432 memcpy(&sas_device->connector_name[0],
8433 &sas_device_pg0->ConnectorName[0], 4);
8434 } else {
8435 sas_device->enclosure_level = 0;
8436 sas_device->connector_name[0] = '\0';
8437 }
8438
8439 sas_device->enclosure_handle =
8440 le16_to_cpu(sas_device_pg0->EnclosureHandle);
8441 sas_device->is_chassis_slot_valid = 0;
8442 if (enclosure_dev) {
8443 sas_device->enclosure_logical_id = le64_to_cpu(
8444 enclosure_dev->pg0.EnclosureLogicalID);
8445 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8446 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8447 sas_device->is_chassis_slot_valid = 1;
8448 sas_device->chassis_slot =
8449 enclosure_dev->pg0.ChassisSlot;
8450 }
8451 }
8452
8453 if (sas_device->handle == le16_to_cpu(
8454 sas_device_pg0->DevHandle))
8455 goto out;
8456 pr_info("\thandle changed from(0x%04x)!!!\n",
8457 sas_device->handle);
8458 sas_device->handle = le16_to_cpu(
8459 sas_device_pg0->DevHandle);
8460 if (sas_target_priv_data)
8461 sas_target_priv_data->handle =
8462 le16_to_cpu(sas_device_pg0->DevHandle);
8463 goto out;
8464 }
8465 }
8466 out:
8467 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8468}
8469
8470/**
8471 * _scsih_create_enclosure_list_after_reset - Free Existing list,
8472 * And create enclosure list by scanning all Enclosure Page(0)s
8473 * @ioc: per adapter object
8474 */
8475static void
8476_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8477{
8478 struct _enclosure_node *enclosure_dev;
8479 Mpi2ConfigReply_t mpi_reply;
8480 u16 enclosure_handle;
8481 int rc;
8482
8483 /* Free existing enclosure list */
8484 mpt3sas_free_enclosure_list(ioc);
8485
8486 /* Re constructing enclosure list after reset*/
8487 enclosure_handle = 0xFFFF;
8488 do {
8489 enclosure_dev =
8490 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8491 if (!enclosure_dev) {
8492 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8493 __FILE__, __LINE__, __func__);
8494 return;
8495 }
8496 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8497 &enclosure_dev->pg0,
8498 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8499 enclosure_handle);
8500
8501 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8502 MPI2_IOCSTATUS_MASK)) {
8503 kfree(enclosure_dev);
8504 return;
8505 }
8506 list_add_tail(&enclosure_dev->list,
8507 &ioc->enclosure_list);
8508 enclosure_handle =
8509 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8510 } while (1);
8511}
8512
8513/**
8514 * _scsih_search_responding_sas_devices -
8515 * @ioc: per adapter object
8516 *
8517 * After host reset, find out whether devices are still responding.
8518 * If not remove.
8519 */
8520static void
8521_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8522{
8523 Mpi2SasDevicePage0_t sas_device_pg0;
8524 Mpi2ConfigReply_t mpi_reply;
8525 u16 ioc_status;
8526 u16 handle;
8527 u32 device_info;
8528
8529 ioc_info(ioc, "search for end-devices: start\n");
8530
8531 if (list_empty(&ioc->sas_device_list))
8532 goto out;
8533
8534 handle = 0xFFFF;
8535 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8536 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8537 handle))) {
8538 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8539 MPI2_IOCSTATUS_MASK;
8540 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8541 break;
8542 handle = le16_to_cpu(sas_device_pg0.DevHandle);
8543 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8544 if (!(_scsih_is_end_device(device_info)))
8545 continue;
8546 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8547 }
8548
8549 out:
8550 ioc_info(ioc, "search for end-devices: complete\n");
8551}
8552
8553/**
8554 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8555 * @ioc: per adapter object
8556 * @pcie_device_pg0: PCIe Device page 0
8557 *
8558 * After host reset, find out whether devices are still responding.
8559 * Used in _scsih_remove_unresponding_devices.
8560 */
8561static void
8562_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8563 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8564{
8565 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8566 struct scsi_target *starget;
8567 struct _pcie_device *pcie_device;
8568 unsigned long flags;
8569
8570 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8571 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8572 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8573 && (pcie_device->slot == le16_to_cpu(
8574 pcie_device_pg0->Slot))) {
8575 pcie_device->access_status =
8576 pcie_device_pg0->AccessStatus;
8577 pcie_device->responding = 1;
8578 starget = pcie_device->starget;
8579 if (starget && starget->hostdata) {
8580 sas_target_priv_data = starget->hostdata;
8581 sas_target_priv_data->tm_busy = 0;
8582 sas_target_priv_data->deleted = 0;
8583 } else
8584 sas_target_priv_data = NULL;
8585 if (starget) {
8586 starget_printk(KERN_INFO, starget,
8587 "handle(0x%04x), wwid(0x%016llx) ",
8588 pcie_device->handle,
8589 (unsigned long long)pcie_device->wwid);
8590 if (pcie_device->enclosure_handle != 0)
8591 starget_printk(KERN_INFO, starget,
8592 "enclosure logical id(0x%016llx), "
8593 "slot(%d)\n",
8594 (unsigned long long)
8595 pcie_device->enclosure_logical_id,
8596 pcie_device->slot);
8597 }
8598
8599 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8600 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8601 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8602 pcie_device->enclosure_level =
8603 pcie_device_pg0->EnclosureLevel;
8604 memcpy(&pcie_device->connector_name[0],
8605 &pcie_device_pg0->ConnectorName[0], 4);
8606 } else {
8607 pcie_device->enclosure_level = 0;
8608 pcie_device->connector_name[0] = '\0';
8609 }
8610
8611 if (pcie_device->handle == le16_to_cpu(
8612 pcie_device_pg0->DevHandle))
8613 goto out;
8614 pr_info("\thandle changed from(0x%04x)!!!\n",
8615 pcie_device->handle);
8616 pcie_device->handle = le16_to_cpu(
8617 pcie_device_pg0->DevHandle);
8618 if (sas_target_priv_data)
8619 sas_target_priv_data->handle =
8620 le16_to_cpu(pcie_device_pg0->DevHandle);
8621 goto out;
8622 }
8623 }
8624
8625 out:
8626 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8627}
8628
8629/**
8630 * _scsih_search_responding_pcie_devices -
8631 * @ioc: per adapter object
8632 *
8633 * After host reset, find out whether devices are still responding.
8634 * If not remove.
8635 */
8636static void
8637_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8638{
8639 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8640 Mpi2ConfigReply_t mpi_reply;
8641 u16 ioc_status;
8642 u16 handle;
8643 u32 device_info;
8644
8645 ioc_info(ioc, "search for end-devices: start\n");
8646
8647 if (list_empty(&ioc->pcie_device_list))
8648 goto out;
8649
8650 handle = 0xFFFF;
8651 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8652 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8653 handle))) {
8654 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8655 MPI2_IOCSTATUS_MASK;
8656 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8657 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8658 __func__, ioc_status,
8659 le32_to_cpu(mpi_reply.IOCLogInfo));
8660 break;
8661 }
8662 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8663 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8664 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8665 continue;
8666 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8667 }
8668out:
8669 ioc_info(ioc, "search for PCIe end-devices: complete\n");
8670}
8671
8672/**
8673 * _scsih_mark_responding_raid_device - mark a raid_device as responding
8674 * @ioc: per adapter object
8675 * @wwid: world wide identifier for raid volume
8676 * @handle: device handle
8677 *
8678 * After host reset, find out whether devices are still responding.
8679 * Used in _scsih_remove_unresponsive_raid_devices.
8680 */
8681static void
8682_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8683 u16 handle)
8684{
8685 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8686 struct scsi_target *starget;
8687 struct _raid_device *raid_device;
8688 unsigned long flags;
8689
8690 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8691 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8692 if (raid_device->wwid == wwid && raid_device->starget) {
8693 starget = raid_device->starget;
8694 if (starget && starget->hostdata) {
8695 sas_target_priv_data = starget->hostdata;
8696 sas_target_priv_data->deleted = 0;
8697 } else
8698 sas_target_priv_data = NULL;
8699 raid_device->responding = 1;
8700 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8701 starget_printk(KERN_INFO, raid_device->starget,
8702 "handle(0x%04x), wwid(0x%016llx)\n", handle,
8703 (unsigned long long)raid_device->wwid);
8704
8705 /*
8706 * WARPDRIVE: The handles of the PDs might have changed
8707 * across the host reset so re-initialize the
8708 * required data for Direct IO
8709 */
8710 mpt3sas_init_warpdrive_properties(ioc, raid_device);
8711 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8712 if (raid_device->handle == handle) {
8713 spin_unlock_irqrestore(&ioc->raid_device_lock,
8714 flags);
8715 return;
8716 }
8717 pr_info("\thandle changed from(0x%04x)!!!\n",
8718 raid_device->handle);
8719 raid_device->handle = handle;
8720 if (sas_target_priv_data)
8721 sas_target_priv_data->handle = handle;
8722 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8723 return;
8724 }
8725 }
8726 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8727}
8728
8729/**
8730 * _scsih_search_responding_raid_devices -
8731 * @ioc: per adapter object
8732 *
8733 * After host reset, find out whether devices are still responding.
8734 * If not remove.
8735 */
8736static void
8737_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8738{
8739 Mpi2RaidVolPage1_t volume_pg1;
8740 Mpi2RaidVolPage0_t volume_pg0;
8741 Mpi2RaidPhysDiskPage0_t pd_pg0;
8742 Mpi2ConfigReply_t mpi_reply;
8743 u16 ioc_status;
8744 u16 handle;
8745 u8 phys_disk_num;
8746
8747 if (!ioc->ir_firmware)
8748 return;
8749
8750 ioc_info(ioc, "search for raid volumes: start\n");
8751
8752 if (list_empty(&ioc->raid_device_list))
8753 goto out;
8754
8755 handle = 0xFFFF;
8756 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8757 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8758 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8759 MPI2_IOCSTATUS_MASK;
8760 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8761 break;
8762 handle = le16_to_cpu(volume_pg1.DevHandle);
8763
8764 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8765 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8766 sizeof(Mpi2RaidVolPage0_t)))
8767 continue;
8768
8769 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8770 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8771 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8772 _scsih_mark_responding_raid_device(ioc,
8773 le64_to_cpu(volume_pg1.WWID), handle);
8774 }
8775
8776 /* refresh the pd_handles */
8777 if (!ioc->is_warpdrive) {
8778 phys_disk_num = 0xFF;
8779 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
8780 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8781 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8782 phys_disk_num))) {
8783 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8784 MPI2_IOCSTATUS_MASK;
8785 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8786 break;
8787 phys_disk_num = pd_pg0.PhysDiskNum;
8788 handle = le16_to_cpu(pd_pg0.DevHandle);
8789 set_bit(handle, ioc->pd_handles);
8790 }
8791 }
8792 out:
8793 ioc_info(ioc, "search for responding raid volumes: complete\n");
8794}
8795
8796/**
8797 * _scsih_mark_responding_expander - mark a expander as responding
8798 * @ioc: per adapter object
8799 * @expander_pg0:SAS Expander Config Page0
8800 *
8801 * After host reset, find out whether devices are still responding.
8802 * Used in _scsih_remove_unresponsive_expanders.
8803 */
8804static void
8805_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8806 Mpi2ExpanderPage0_t *expander_pg0)
8807{
8808 struct _sas_node *sas_expander = NULL;
8809 unsigned long flags;
8810 int i;
8811 struct _enclosure_node *enclosure_dev = NULL;
8812 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8813 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8814 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8815
8816 if (enclosure_handle)
8817 enclosure_dev =
8818 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8819 enclosure_handle);
8820
8821 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8822 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8823 if (sas_expander->sas_address != sas_address)
8824 continue;
8825 sas_expander->responding = 1;
8826
8827 if (enclosure_dev) {
8828 sas_expander->enclosure_logical_id =
8829 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8830 sas_expander->enclosure_handle =
8831 le16_to_cpu(expander_pg0->EnclosureHandle);
8832 }
8833
8834 if (sas_expander->handle == handle)
8835 goto out;
8836 pr_info("\texpander(0x%016llx): handle changed" \
8837 " from(0x%04x) to (0x%04x)!!!\n",
8838 (unsigned long long)sas_expander->sas_address,
8839 sas_expander->handle, handle);
8840 sas_expander->handle = handle;
8841 for (i = 0 ; i < sas_expander->num_phys ; i++)
8842 sas_expander->phy[i].handle = handle;
8843 goto out;
8844 }
8845 out:
8846 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8847}
8848
8849/**
8850 * _scsih_search_responding_expanders -
8851 * @ioc: per adapter object
8852 *
8853 * After host reset, find out whether devices are still responding.
8854 * If not remove.
8855 */
8856static void
8857_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8858{
8859 Mpi2ExpanderPage0_t expander_pg0;
8860 Mpi2ConfigReply_t mpi_reply;
8861 u16 ioc_status;
8862 u64 sas_address;
8863 u16 handle;
8864
8865 ioc_info(ioc, "search for expanders: start\n");
8866
8867 if (list_empty(&ioc->sas_expander_list))
8868 goto out;
8869
8870 handle = 0xFFFF;
8871 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8872 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8873
8874 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8875 MPI2_IOCSTATUS_MASK;
8876 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8877 break;
8878
8879 handle = le16_to_cpu(expander_pg0.DevHandle);
8880 sas_address = le64_to_cpu(expander_pg0.SASAddress);
8881 pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8882 handle,
8883 (unsigned long long)sas_address);
8884 _scsih_mark_responding_expander(ioc, &expander_pg0);
8885 }
8886
8887 out:
8888 ioc_info(ioc, "search for expanders: complete\n");
8889}
8890
8891/**
8892 * _scsih_remove_unresponding_devices - removing unresponding devices
8893 * @ioc: per adapter object
8894 */
8895static void
8896_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8897{
8898 struct _sas_device *sas_device, *sas_device_next;
8899 struct _sas_node *sas_expander, *sas_expander_next;
8900 struct _raid_device *raid_device, *raid_device_next;
8901 struct _pcie_device *pcie_device, *pcie_device_next;
8902 struct list_head tmp_list;
8903 unsigned long flags;
8904 LIST_HEAD(head);
8905
8906 ioc_info(ioc, "removing unresponding devices: start\n");
8907
8908 /* removing unresponding end devices */
8909 ioc_info(ioc, "removing unresponding devices: end-devices\n");
8910 /*
8911 * Iterate, pulling off devices marked as non-responding. We become the
8912 * owner for the reference the list had on any object we prune.
8913 */
8914 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8915 list_for_each_entry_safe(sas_device, sas_device_next,
8916 &ioc->sas_device_list, list) {
8917 if (!sas_device->responding)
8918 list_move_tail(&sas_device->list, &head);
8919 else
8920 sas_device->responding = 0;
8921 }
8922 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8923
8924 /*
8925 * Now, uninitialize and remove the unresponding devices we pruned.
8926 */
8927 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
8928 _scsih_remove_device(ioc, sas_device);
8929 list_del_init(&sas_device->list);
8930 sas_device_put(sas_device);
8931 }
8932
8933 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
8934 INIT_LIST_HEAD(&head);
8935 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8936 list_for_each_entry_safe(pcie_device, pcie_device_next,
8937 &ioc->pcie_device_list, list) {
8938 if (!pcie_device->responding)
8939 list_move_tail(&pcie_device->list, &head);
8940 else
8941 pcie_device->responding = 0;
8942 }
8943 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8944
8945 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
8946 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
8947 list_del_init(&pcie_device->list);
8948 pcie_device_put(pcie_device);
8949 }
8950
8951 /* removing unresponding volumes */
8952 if (ioc->ir_firmware) {
8953 ioc_info(ioc, "removing unresponding devices: volumes\n");
8954 list_for_each_entry_safe(raid_device, raid_device_next,
8955 &ioc->raid_device_list, list) {
8956 if (!raid_device->responding)
8957 _scsih_sas_volume_delete(ioc,
8958 raid_device->handle);
8959 else
8960 raid_device->responding = 0;
8961 }
8962 }
8963
8964 /* removing unresponding expanders */
8965 ioc_info(ioc, "removing unresponding devices: expanders\n");
8966 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8967 INIT_LIST_HEAD(&tmp_list);
8968 list_for_each_entry_safe(sas_expander, sas_expander_next,
8969 &ioc->sas_expander_list, list) {
8970 if (!sas_expander->responding)
8971 list_move_tail(&sas_expander->list, &tmp_list);
8972 else
8973 sas_expander->responding = 0;
8974 }
8975 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8976 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
8977 list) {
8978 _scsih_expander_node_remove(ioc, sas_expander);
8979 }
8980
8981 ioc_info(ioc, "removing unresponding devices: complete\n");
8982
8983 /* unblock devices */
8984 _scsih_ublock_io_all_device(ioc);
8985}
8986
8987static void
8988_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8989 struct _sas_node *sas_expander, u16 handle)
8990{
8991 Mpi2ExpanderPage1_t expander_pg1;
8992 Mpi2ConfigReply_t mpi_reply;
8993 int i;
8994
8995 for (i = 0 ; i < sas_expander->num_phys ; i++) {
8996 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8997 &expander_pg1, i, handle))) {
8998 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8999 __FILE__, __LINE__, __func__);
9000 return;
9001 }
9002
9003 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9004 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9005 expander_pg1.NegotiatedLinkRate >> 4);
9006 }
9007}
9008
9009/**
9010 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9011 * @ioc: per adapter object
9012 */
9013static void
9014_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9015{
9016 Mpi2ExpanderPage0_t expander_pg0;
9017 Mpi2SasDevicePage0_t sas_device_pg0;
9018 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9019 Mpi2RaidVolPage1_t volume_pg1;
9020 Mpi2RaidVolPage0_t volume_pg0;
9021 Mpi2RaidPhysDiskPage0_t pd_pg0;
9022 Mpi2EventIrConfigElement_t element;
9023 Mpi2ConfigReply_t mpi_reply;
9024 u8 phys_disk_num;
9025 u16 ioc_status;
9026 u16 handle, parent_handle;
9027 u64 sas_address;
9028 struct _sas_device *sas_device;
9029 struct _pcie_device *pcie_device;
9030 struct _sas_node *expander_device;
9031 static struct _raid_device *raid_device;
9032 u8 retry_count;
9033 unsigned long flags;
9034
9035 ioc_info(ioc, "scan devices: start\n");
9036
9037 _scsih_sas_host_refresh(ioc);
9038
9039 ioc_info(ioc, "\tscan devices: expanders start\n");
9040
9041 /* expanders */
9042 handle = 0xFFFF;
9043 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9044 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9045 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9046 MPI2_IOCSTATUS_MASK;
9047 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9048 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9049 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9050 break;
9051 }
9052 handle = le16_to_cpu(expander_pg0.DevHandle);
9053 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9054 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
9055 ioc, le64_to_cpu(expander_pg0.SASAddress));
9056 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9057 if (expander_device)
9058 _scsih_refresh_expander_links(ioc, expander_device,
9059 handle);
9060 else {
9061 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9062 handle,
9063 (u64)le64_to_cpu(expander_pg0.SASAddress));
9064 _scsih_expander_add(ioc, handle);
9065 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9066 handle,
9067 (u64)le64_to_cpu(expander_pg0.SASAddress));
9068 }
9069 }
9070
9071 ioc_info(ioc, "\tscan devices: expanders complete\n");
9072
9073 if (!ioc->ir_firmware)
9074 goto skip_to_sas;
9075
9076 ioc_info(ioc, "\tscan devices: phys disk start\n");
9077
9078 /* phys disk */
9079 phys_disk_num = 0xFF;
9080 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9081 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9082 phys_disk_num))) {
9083 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9084 MPI2_IOCSTATUS_MASK;
9085 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9086 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9087 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9088 break;
9089 }
9090 phys_disk_num = pd_pg0.PhysDiskNum;
9091 handle = le16_to_cpu(pd_pg0.DevHandle);
9092 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9093 if (sas_device) {
9094 sas_device_put(sas_device);
9095 continue;
9096 }
9097 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9098 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9099 handle) != 0)
9100 continue;
9101 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9102 MPI2_IOCSTATUS_MASK;
9103 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9104 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9105 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9106 break;
9107 }
9108 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9109 if (!_scsih_get_sas_address(ioc, parent_handle,
9110 &sas_address)) {
9111 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9112 handle,
9113 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9114 mpt3sas_transport_update_links(ioc, sas_address,
9115 handle, sas_device_pg0.PhyNum,
9116 MPI2_SAS_NEG_LINK_RATE_1_5);
9117 set_bit(handle, ioc->pd_handles);
9118 retry_count = 0;
9119 /* This will retry adding the end device.
9120 * _scsih_add_device() will decide on retries and
9121 * return "1" when it should be retried
9122 */
9123 while (_scsih_add_device(ioc, handle, retry_count++,
9124 1)) {
9125 ssleep(1);
9126 }
9127 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9128 handle,
9129 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9130 }
9131 }
9132
9133 ioc_info(ioc, "\tscan devices: phys disk complete\n");
9134
9135 ioc_info(ioc, "\tscan devices: volumes start\n");
9136
9137 /* volumes */
9138 handle = 0xFFFF;
9139 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9140 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9141 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9142 MPI2_IOCSTATUS_MASK;
9143 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9144 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9145 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9146 break;
9147 }
9148 handle = le16_to_cpu(volume_pg1.DevHandle);
9149 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9150 raid_device = _scsih_raid_device_find_by_wwid(ioc,
9151 le64_to_cpu(volume_pg1.WWID));
9152 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9153 if (raid_device)
9154 continue;
9155 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9156 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9157 sizeof(Mpi2RaidVolPage0_t)))
9158 continue;
9159 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9160 MPI2_IOCSTATUS_MASK;
9161 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9162 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9163 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9164 break;
9165 }
9166 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9167 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9168 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9169 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9170 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9171 element.VolDevHandle = volume_pg1.DevHandle;
9172 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9173 volume_pg1.DevHandle);
9174 _scsih_sas_volume_add(ioc, &element);
9175 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9176 volume_pg1.DevHandle);
9177 }
9178 }
9179
9180 ioc_info(ioc, "\tscan devices: volumes complete\n");
9181
9182 skip_to_sas:
9183
9184 ioc_info(ioc, "\tscan devices: end devices start\n");
9185
9186 /* sas devices */
9187 handle = 0xFFFF;
9188 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9189 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9190 handle))) {
9191 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9192 MPI2_IOCSTATUS_MASK;
9193 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9194 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9195 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9196 break;
9197 }
9198 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9199 if (!(_scsih_is_end_device(
9200 le32_to_cpu(sas_device_pg0.DeviceInfo))))
9201 continue;
9202 sas_device = mpt3sas_get_sdev_by_addr(ioc,
9203 le64_to_cpu(sas_device_pg0.SASAddress));
9204 if (sas_device) {
9205 sas_device_put(sas_device);
9206 continue;
9207 }
9208 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9209 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9210 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9211 handle,
9212 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9213 mpt3sas_transport_update_links(ioc, sas_address, handle,
9214 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9215 retry_count = 0;
9216 /* This will retry adding the end device.
9217 * _scsih_add_device() will decide on retries and
9218 * return "1" when it should be retried
9219 */
9220 while (_scsih_add_device(ioc, handle, retry_count++,
9221 0)) {
9222 ssleep(1);
9223 }
9224 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9225 handle,
9226 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9227 }
9228 }
9229 ioc_info(ioc, "\tscan devices: end devices complete\n");
9230 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9231
9232 /* pcie devices */
9233 handle = 0xFFFF;
9234 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9235 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9236 handle))) {
9237 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9238 & MPI2_IOCSTATUS_MASK;
9239 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9240 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9241 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9242 break;
9243 }
9244 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9245 if (!(_scsih_is_nvme_pciescsi_device(
9246 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9247 continue;
9248 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9249 le64_to_cpu(pcie_device_pg0.WWID));
9250 if (pcie_device) {
9251 pcie_device_put(pcie_device);
9252 continue;
9253 }
9254 retry_count = 0;
9255 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9256 _scsih_pcie_add_device(ioc, handle);
9257
9258 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9259 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9260 }
9261 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
9262 ioc_info(ioc, "scan devices: complete\n");
9263}
9264
9265/**
9266 * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
9267 * @ioc: per adapter object
9268 *
9269 * The handler for doing any required cleanup or initialization.
9270 */
9271void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9272{
9273 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9274}
9275
9276/**
9277 * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
9278 * @ioc: per adapter object
9279 *
9280 * The handler for doing any required cleanup or initialization.
9281 */
9282void
9283mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9284{
9285 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
9286 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9287 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9288 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9289 complete(&ioc->scsih_cmds.done);
9290 }
9291 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9292 ioc->tm_cmds.status |= MPT3_CMD_RESET;
9293 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9294 complete(&ioc->tm_cmds.done);
9295 }
9296
9297 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9298 memset(ioc->device_remove_in_progress, 0,
9299 ioc->device_remove_in_progress_sz);
9300 _scsih_fw_event_cleanup_queue(ioc);
9301 _scsih_flush_running_cmds(ioc);
9302}
9303
9304/**
9305 * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
9306 * @ioc: per adapter object
9307 *
9308 * The handler for doing any required cleanup or initialization.
9309 */
9310void
9311mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9312{
9313 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9314 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9315 !ioc->sas_hba.num_phys)) {
9316 _scsih_prep_device_scan(ioc);
9317 _scsih_create_enclosure_list_after_reset(ioc);
9318 _scsih_search_responding_sas_devices(ioc);
9319 _scsih_search_responding_pcie_devices(ioc);
9320 _scsih_search_responding_raid_devices(ioc);
9321 _scsih_search_responding_expanders(ioc);
9322 _scsih_error_recovery_delete_devices(ioc);
9323 }
9324}
9325
9326/**
9327 * _mpt3sas_fw_work - delayed task for processing firmware events
9328 * @ioc: per adapter object
9329 * @fw_event: The fw_event_work object
9330 * Context: user.
9331 */
9332static void
9333_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9334{
9335 _scsih_fw_event_del_from_list(ioc, fw_event);
9336
9337 /* the queue is being flushed so ignore this event */
9338 if (ioc->remove_host || ioc->pci_error_recovery) {
9339 fw_event_work_put(fw_event);
9340 return;
9341 }
9342
9343 switch (fw_event->event) {
9344 case MPT3SAS_PROCESS_TRIGGER_DIAG:
9345 mpt3sas_process_trigger_data(ioc,
9346 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9347 fw_event->event_data);
9348 break;
9349 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9350 while (scsi_host_in_recovery(ioc->shost) ||
9351 ioc->shost_recovery) {
9352 /*
9353 * If we're unloading, bail. Otherwise, this can become
9354 * an infinite loop.
9355 */
9356 if (ioc->remove_host)
9357 goto out;
9358 ssleep(1);
9359 }
9360 _scsih_remove_unresponding_devices(ioc);
9361 _scsih_scan_for_devices_after_reset(ioc);
9362 break;
9363 case MPT3SAS_PORT_ENABLE_COMPLETE:
9364 ioc->start_scan = 0;
9365 if (missing_delay[0] != -1 && missing_delay[1] != -1)
9366 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9367 missing_delay[1]);
9368 dewtprintk(ioc,
9369 ioc_info(ioc, "port enable: complete from worker thread\n"));
9370 break;
9371 case MPT3SAS_TURN_ON_PFA_LED:
9372 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9373 break;
9374 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9375 _scsih_sas_topology_change_event(ioc, fw_event);
9376 break;
9377 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9378 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9379 _scsih_sas_device_status_change_event_debug(ioc,
9380 (Mpi2EventDataSasDeviceStatusChange_t *)
9381 fw_event->event_data);
9382 break;
9383 case MPI2_EVENT_SAS_DISCOVERY:
9384 _scsih_sas_discovery_event(ioc, fw_event);
9385 break;
9386 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9387 _scsih_sas_device_discovery_error_event(ioc, fw_event);
9388 break;
9389 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9390 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
9391 break;
9392 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9393 _scsih_sas_enclosure_dev_status_change_event(ioc,
9394 fw_event);
9395 break;
9396 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9397 _scsih_sas_ir_config_change_event(ioc, fw_event);
9398 break;
9399 case MPI2_EVENT_IR_VOLUME:
9400 _scsih_sas_ir_volume_event(ioc, fw_event);
9401 break;
9402 case MPI2_EVENT_IR_PHYSICAL_DISK:
9403 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
9404 break;
9405 case MPI2_EVENT_IR_OPERATION_STATUS:
9406 _scsih_sas_ir_operation_status_event(ioc, fw_event);
9407 break;
9408 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9409 _scsih_pcie_device_status_change_event(ioc, fw_event);
9410 break;
9411 case MPI2_EVENT_PCIE_ENUMERATION:
9412 _scsih_pcie_enumeration_event(ioc, fw_event);
9413 break;
9414 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9415 _scsih_pcie_topology_change_event(ioc, fw_event);
9416 return;
9417 break;
9418 }
9419out:
9420 fw_event_work_put(fw_event);
9421}
9422
9423/**
9424 * _firmware_event_work
9425 * @work: The fw_event_work object
9426 * Context: user.
9427 *
9428 * wrappers for the work thread handling firmware events
9429 */
9430
9431static void
9432_firmware_event_work(struct work_struct *work)
9433{
9434 struct fw_event_work *fw_event = container_of(work,
9435 struct fw_event_work, work);
9436
9437 _mpt3sas_fw_work(fw_event->ioc, fw_event);
9438}
9439
9440/**
9441 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9442 * @ioc: per adapter object
9443 * @msix_index: MSIX table index supplied by the OS
9444 * @reply: reply message frame(lower 32bit addr)
9445 * Context: interrupt.
9446 *
9447 * This function merely adds a new work task into ioc->firmware_event_thread.
9448 * The tasks are worked from _firmware_event_work in user context.
9449 *
9450 * Return: 1 meaning mf should be freed from _base_interrupt
9451 * 0 means the mf is freed from this function.
9452 */
9453u8
9454mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9455 u32 reply)
9456{
9457 struct fw_event_work *fw_event;
9458 Mpi2EventNotificationReply_t *mpi_reply;
9459 u16 event;
9460 u16 sz;
9461 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9462
9463 /* events turned off due to host reset */
9464 if (ioc->pci_error_recovery)
9465 return 1;
9466
9467 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9468
9469 if (unlikely(!mpi_reply)) {
9470 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9471 __FILE__, __LINE__, __func__);
9472 return 1;
9473 }
9474
9475 event = le16_to_cpu(mpi_reply->Event);
9476
9477 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9478 mpt3sas_trigger_event(ioc, event, 0);
9479
9480 switch (event) {
9481 /* handle these */
9482 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9483 {
9484 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9485 (Mpi2EventDataSasBroadcastPrimitive_t *)
9486 mpi_reply->EventData;
9487
9488 if (baen_data->Primitive !=
9489 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9490 return 1;
9491
9492 if (ioc->broadcast_aen_busy) {
9493 ioc->broadcast_aen_pending++;
9494 return 1;
9495 } else
9496 ioc->broadcast_aen_busy = 1;
9497 break;
9498 }
9499
9500 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9501 _scsih_check_topo_delete_events(ioc,
9502 (Mpi2EventDataSasTopologyChangeList_t *)
9503 mpi_reply->EventData);
9504 break;
9505 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9506 _scsih_check_pcie_topo_remove_events(ioc,
9507 (Mpi26EventDataPCIeTopologyChangeList_t *)
9508 mpi_reply->EventData);
9509 break;
9510 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9511 _scsih_check_ir_config_unhide_events(ioc,
9512 (Mpi2EventDataIrConfigChangeList_t *)
9513 mpi_reply->EventData);
9514 break;
9515 case MPI2_EVENT_IR_VOLUME:
9516 _scsih_check_volume_delete_events(ioc,
9517 (Mpi2EventDataIrVolume_t *)
9518 mpi_reply->EventData);
9519 break;
9520 case MPI2_EVENT_LOG_ENTRY_ADDED:
9521 {
9522 Mpi2EventDataLogEntryAdded_t *log_entry;
9523 u32 *log_code;
9524
9525 if (!ioc->is_warpdrive)
9526 break;
9527
9528 log_entry = (Mpi2EventDataLogEntryAdded_t *)
9529 mpi_reply->EventData;
9530 log_code = (u32 *)log_entry->LogData;
9531
9532 if (le16_to_cpu(log_entry->LogEntryQualifier)
9533 != MPT2_WARPDRIVE_LOGENTRY)
9534 break;
9535
9536 switch (le32_to_cpu(*log_code)) {
9537 case MPT2_WARPDRIVE_LC_SSDT:
9538 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9539 break;
9540 case MPT2_WARPDRIVE_LC_SSDLW:
9541 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9542 break;
9543 case MPT2_WARPDRIVE_LC_SSDLF:
9544 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9545 break;
9546 case MPT2_WARPDRIVE_LC_BRMF:
9547 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9548 break;
9549 }
9550
9551 break;
9552 }
9553 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9554 _scsih_sas_device_status_change_event(ioc,
9555 (Mpi2EventDataSasDeviceStatusChange_t *)
9556 mpi_reply->EventData);
9557 break;
9558 case MPI2_EVENT_IR_OPERATION_STATUS:
9559 case MPI2_EVENT_SAS_DISCOVERY:
9560 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9561 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9562 case MPI2_EVENT_IR_PHYSICAL_DISK:
9563 case MPI2_EVENT_PCIE_ENUMERATION:
9564 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9565 break;
9566
9567 case MPI2_EVENT_TEMP_THRESHOLD:
9568 _scsih_temp_threshold_events(ioc,
9569 (Mpi2EventDataTemperature_t *)
9570 mpi_reply->EventData);
9571 break;
9572 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9573 ActiveCableEventData =
9574 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9575 switch (ActiveCableEventData->ReasonCode) {
9576 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9577 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9578 ActiveCableEventData->ReceptacleID);
9579 pr_notice("cannot be powered and devices connected\n");
9580 pr_notice("to this active cable will not be seen\n");
9581 pr_notice("This active cable requires %d mW of power\n",
9582 ActiveCableEventData->ActiveCablePowerRequirement);
9583 break;
9584
9585 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9586 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9587 ActiveCableEventData->ReceptacleID);
9588 pr_notice(
9589 "is not running at optimal speed(12 Gb/s rate)\n");
9590 break;
9591 }
9592
9593 break;
9594
9595 default: /* ignore the rest */
9596 return 1;
9597 }
9598
9599 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9600 fw_event = alloc_fw_event_work(sz);
9601 if (!fw_event) {
9602 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9603 __FILE__, __LINE__, __func__);
9604 return 1;
9605 }
9606
9607 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9608 fw_event->ioc = ioc;
9609 fw_event->VF_ID = mpi_reply->VF_ID;
9610 fw_event->VP_ID = mpi_reply->VP_ID;
9611 fw_event->event = event;
9612 _scsih_fw_event_add(ioc, fw_event);
9613 fw_event_work_put(fw_event);
9614 return 1;
9615}
9616
9617/**
9618 * _scsih_expander_node_remove - removing expander device from list.
9619 * @ioc: per adapter object
9620 * @sas_expander: the sas_device object
9621 *
9622 * Removing object and freeing associated memory from the
9623 * ioc->sas_expander_list.
9624 */
9625static void
9626_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9627 struct _sas_node *sas_expander)
9628{
9629 struct _sas_port *mpt3sas_port, *next;
9630 unsigned long flags;
9631
9632 /* remove sibling ports attached to this expander */
9633 list_for_each_entry_safe(mpt3sas_port, next,
9634 &sas_expander->sas_port_list, port_list) {
9635 if (ioc->shost_recovery)
9636 return;
9637 if (mpt3sas_port->remote_identify.device_type ==
9638 SAS_END_DEVICE)
9639 mpt3sas_device_remove_by_sas_address(ioc,
9640 mpt3sas_port->remote_identify.sas_address);
9641 else if (mpt3sas_port->remote_identify.device_type ==
9642 SAS_EDGE_EXPANDER_DEVICE ||
9643 mpt3sas_port->remote_identify.device_type ==
9644 SAS_FANOUT_EXPANDER_DEVICE)
9645 mpt3sas_expander_remove(ioc,
9646 mpt3sas_port->remote_identify.sas_address);
9647 }
9648
9649 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9650 sas_expander->sas_address_parent);
9651
9652 ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9653 sas_expander->handle, (unsigned long long)
9654 sas_expander->sas_address);
9655
9656 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9657 list_del(&sas_expander->list);
9658 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9659
9660 kfree(sas_expander->phy);
9661 kfree(sas_expander);
9662}
9663
9664/**
9665 * _scsih_ir_shutdown - IR shutdown notification
9666 * @ioc: per adapter object
9667 *
9668 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
9669 * the host system is shutting down.
9670 */
9671static void
9672_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9673{
9674 Mpi2RaidActionRequest_t *mpi_request;
9675 Mpi2RaidActionReply_t *mpi_reply;
9676 u16 smid;
9677
9678 /* is IR firmware build loaded ? */
9679 if (!ioc->ir_firmware)
9680 return;
9681
9682 /* are there any volumes ? */
9683 if (list_empty(&ioc->raid_device_list))
9684 return;
9685
9686 mutex_lock(&ioc->scsih_cmds.mutex);
9687
9688 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9689 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9690 goto out;
9691 }
9692 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9693
9694 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9695 if (!smid) {
9696 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9697 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9698 goto out;
9699 }
9700
9701 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9702 ioc->scsih_cmds.smid = smid;
9703 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9704
9705 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9706 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9707
9708 if (!ioc->hide_ir_msg)
9709 ioc_info(ioc, "IR shutdown (sending)\n");
9710 init_completion(&ioc->scsih_cmds.done);
9711 ioc->put_smid_default(ioc, smid);
9712 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9713
9714 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9715 ioc_err(ioc, "%s: timeout\n", __func__);
9716 goto out;
9717 }
9718
9719 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9720 mpi_reply = ioc->scsih_cmds.reply;
9721 if (!ioc->hide_ir_msg)
9722 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9723 le16_to_cpu(mpi_reply->IOCStatus),
9724 le32_to_cpu(mpi_reply->IOCLogInfo));
9725 }
9726
9727 out:
9728 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9729 mutex_unlock(&ioc->scsih_cmds.mutex);
9730}
9731
9732/**
9733 * scsih_remove - detach and remove add host
9734 * @pdev: PCI device struct
9735 *
9736 * Routine called when unloading the driver.
9737 */
9738static void scsih_remove(struct pci_dev *pdev)
9739{
9740 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9741 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9742 struct _sas_port *mpt3sas_port, *next_port;
9743 struct _raid_device *raid_device, *next;
9744 struct MPT3SAS_TARGET *sas_target_priv_data;
9745 struct _pcie_device *pcie_device, *pcienext;
9746 struct workqueue_struct *wq;
9747 unsigned long flags;
9748 Mpi2ConfigReply_t mpi_reply;
9749
9750 ioc->remove_host = 1;
9751
9752 if (!pci_device_is_present(pdev))
9753 _scsih_flush_running_cmds(ioc);
9754
9755 _scsih_fw_event_cleanup_queue(ioc);
9756
9757 spin_lock_irqsave(&ioc->fw_event_lock, flags);
9758 wq = ioc->firmware_event_thread;
9759 ioc->firmware_event_thread = NULL;
9760 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9761 if (wq)
9762 destroy_workqueue(wq);
9763 /*
9764 * Copy back the unmodified ioc page1. so that on next driver load,
9765 * current modified changes on ioc page1 won't take effect.
9766 */
9767 if (ioc->is_aero_ioc)
9768 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9769 &ioc->ioc_pg1_copy);
9770 /* release all the volumes */
9771 _scsih_ir_shutdown(ioc);
9772 sas_remove_host(shost);
9773 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9774 list) {
9775 if (raid_device->starget) {
9776 sas_target_priv_data =
9777 raid_device->starget->hostdata;
9778 sas_target_priv_data->deleted = 1;
9779 scsi_remove_target(&raid_device->starget->dev);
9780 }
9781 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9782 raid_device->handle, (u64)raid_device->wwid);
9783 _scsih_raid_device_remove(ioc, raid_device);
9784 }
9785 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9786 list) {
9787 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9788 list_del_init(&pcie_device->list);
9789 pcie_device_put(pcie_device);
9790 }
9791
9792 /* free ports attached to the sas_host */
9793 list_for_each_entry_safe(mpt3sas_port, next_port,
9794 &ioc->sas_hba.sas_port_list, port_list) {
9795 if (mpt3sas_port->remote_identify.device_type ==
9796 SAS_END_DEVICE)
9797 mpt3sas_device_remove_by_sas_address(ioc,
9798 mpt3sas_port->remote_identify.sas_address);
9799 else if (mpt3sas_port->remote_identify.device_type ==
9800 SAS_EDGE_EXPANDER_DEVICE ||
9801 mpt3sas_port->remote_identify.device_type ==
9802 SAS_FANOUT_EXPANDER_DEVICE)
9803 mpt3sas_expander_remove(ioc,
9804 mpt3sas_port->remote_identify.sas_address);
9805 }
9806
9807 /* free phys attached to the sas_host */
9808 if (ioc->sas_hba.num_phys) {
9809 kfree(ioc->sas_hba.phy);
9810 ioc->sas_hba.phy = NULL;
9811 ioc->sas_hba.num_phys = 0;
9812 }
9813
9814 mpt3sas_base_detach(ioc);
9815 spin_lock(&gioc_lock);
9816 list_del(&ioc->list);
9817 spin_unlock(&gioc_lock);
9818 scsi_host_put(shost);
9819}
9820
9821/**
9822 * scsih_shutdown - routine call during system shutdown
9823 * @pdev: PCI device struct
9824 */
9825static void
9826scsih_shutdown(struct pci_dev *pdev)
9827{
9828 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9829 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9830 struct workqueue_struct *wq;
9831 unsigned long flags;
9832 Mpi2ConfigReply_t mpi_reply;
9833
9834 ioc->remove_host = 1;
9835
9836 if (!pci_device_is_present(pdev))
9837 _scsih_flush_running_cmds(ioc);
9838
9839 _scsih_fw_event_cleanup_queue(ioc);
9840
9841 spin_lock_irqsave(&ioc->fw_event_lock, flags);
9842 wq = ioc->firmware_event_thread;
9843 ioc->firmware_event_thread = NULL;
9844 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9845 if (wq)
9846 destroy_workqueue(wq);
9847 /*
9848 * Copy back the unmodified ioc page1 so that on next driver load,
9849 * current modified changes on ioc page1 won't take effect.
9850 */
9851 if (ioc->is_aero_ioc)
9852 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9853 &ioc->ioc_pg1_copy);
9854
9855 _scsih_ir_shutdown(ioc);
9856 mpt3sas_base_detach(ioc);
9857}
9858
9859
9860/**
9861 * _scsih_probe_boot_devices - reports 1st device
9862 * @ioc: per adapter object
9863 *
9864 * If specified in bios page 2, this routine reports the 1st
9865 * device scsi-ml or sas transport for persistent boot device
9866 * purposes. Please refer to function _scsih_determine_boot_device()
9867 */
9868static void
9869_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
9870{
9871 u32 channel;
9872 void *device;
9873 struct _sas_device *sas_device;
9874 struct _raid_device *raid_device;
9875 struct _pcie_device *pcie_device;
9876 u16 handle;
9877 u64 sas_address_parent;
9878 u64 sas_address;
9879 unsigned long flags;
9880 int rc;
9881 int tid;
9882
9883 /* no Bios, return immediately */
9884 if (!ioc->bios_pg3.BiosVersion)
9885 return;
9886
9887 device = NULL;
9888 if (ioc->req_boot_device.device) {
9889 device = ioc->req_boot_device.device;
9890 channel = ioc->req_boot_device.channel;
9891 } else if (ioc->req_alt_boot_device.device) {
9892 device = ioc->req_alt_boot_device.device;
9893 channel = ioc->req_alt_boot_device.channel;
9894 } else if (ioc->current_boot_device.device) {
9895 device = ioc->current_boot_device.device;
9896 channel = ioc->current_boot_device.channel;
9897 }
9898
9899 if (!device)
9900 return;
9901
9902 if (channel == RAID_CHANNEL) {
9903 raid_device = device;
9904 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9905 raid_device->id, 0);
9906 if (rc)
9907 _scsih_raid_device_remove(ioc, raid_device);
9908 } else if (channel == PCIE_CHANNEL) {
9909 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9910 pcie_device = device;
9911 tid = pcie_device->id;
9912 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
9913 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9914 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
9915 if (rc)
9916 _scsih_pcie_device_remove(ioc, pcie_device);
9917 } else {
9918 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9919 sas_device = device;
9920 handle = sas_device->handle;
9921 sas_address_parent = sas_device->sas_address_parent;
9922 sas_address = sas_device->sas_address;
9923 list_move_tail(&sas_device->list, &ioc->sas_device_list);
9924 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9925
9926 if (ioc->hide_drives)
9927 return;
9928 if (!mpt3sas_transport_port_add(ioc, handle,
9929 sas_address_parent)) {
9930 _scsih_sas_device_remove(ioc, sas_device);
9931 } else if (!sas_device->starget) {
9932 if (!ioc->is_driver_loading) {
9933 mpt3sas_transport_port_remove(ioc,
9934 sas_address,
9935 sas_address_parent);
9936 _scsih_sas_device_remove(ioc, sas_device);
9937 }
9938 }
9939 }
9940}
9941
9942/**
9943 * _scsih_probe_raid - reporting raid volumes to scsi-ml
9944 * @ioc: per adapter object
9945 *
9946 * Called during initial loading of the driver.
9947 */
9948static void
9949_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
9950{
9951 struct _raid_device *raid_device, *raid_next;
9952 int rc;
9953
9954 list_for_each_entry_safe(raid_device, raid_next,
9955 &ioc->raid_device_list, list) {
9956 if (raid_device->starget)
9957 continue;
9958 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9959 raid_device->id, 0);
9960 if (rc)
9961 _scsih_raid_device_remove(ioc, raid_device);
9962 }
9963}
9964
9965static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
9966{
9967 struct _sas_device *sas_device = NULL;
9968 unsigned long flags;
9969
9970 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9971 if (!list_empty(&ioc->sas_device_init_list)) {
9972 sas_device = list_first_entry(&ioc->sas_device_init_list,
9973 struct _sas_device, list);
9974 sas_device_get(sas_device);
9975 }
9976 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9977
9978 return sas_device;
9979}
9980
9981static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9982 struct _sas_device *sas_device)
9983{
9984 unsigned long flags;
9985
9986 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9987
9988 /*
9989 * Since we dropped the lock during the call to port_add(), we need to
9990 * be careful here that somebody else didn't move or delete this item
9991 * while we were busy with other things.
9992 *
9993 * If it was on the list, we need a put() for the reference the list
9994 * had. Either way, we need a get() for the destination list.
9995 */
9996 if (!list_empty(&sas_device->list)) {
9997 list_del_init(&sas_device->list);
9998 sas_device_put(sas_device);
9999 }
10000
10001 sas_device_get(sas_device);
10002 list_add_tail(&sas_device->list, &ioc->sas_device_list);
10003
10004 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10005}
10006
10007/**
10008 * _scsih_probe_sas - reporting sas devices to sas transport
10009 * @ioc: per adapter object
10010 *
10011 * Called during initial loading of the driver.
10012 */
10013static void
10014_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
10015{
10016 struct _sas_device *sas_device;
10017
10018 if (ioc->hide_drives)
10019 return;
10020
10021 while ((sas_device = get_next_sas_device(ioc))) {
10022 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
10023 sas_device->sas_address_parent)) {
10024 _scsih_sas_device_remove(ioc, sas_device);
10025 sas_device_put(sas_device);
10026 continue;
10027 } else if (!sas_device->starget) {
10028 /*
10029 * When asyn scanning is enabled, its not possible to
10030 * remove devices while scanning is turned on due to an
10031 * oops in scsi_sysfs_add_sdev()->add_device()->
10032 * sysfs_addrm_start()
10033 */
10034 if (!ioc->is_driver_loading) {
10035 mpt3sas_transport_port_remove(ioc,
10036 sas_device->sas_address,
10037 sas_device->sas_address_parent);
10038 _scsih_sas_device_remove(ioc, sas_device);
10039 sas_device_put(sas_device);
10040 continue;
10041 }
10042 }
10043 sas_device_make_active(ioc, sas_device);
10044 sas_device_put(sas_device);
10045 }
10046}
10047
10048/**
10049 * get_next_pcie_device - Get the next pcie device
10050 * @ioc: per adapter object
10051 *
10052 * Get the next pcie device from pcie_device_init_list list.
10053 *
10054 * Return: pcie device structure if pcie_device_init_list list is not empty
10055 * otherwise returns NULL
10056 */
10057static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10058{
10059 struct _pcie_device *pcie_device = NULL;
10060 unsigned long flags;
10061
10062 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10063 if (!list_empty(&ioc->pcie_device_init_list)) {
10064 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10065 struct _pcie_device, list);
10066 pcie_device_get(pcie_device);
10067 }
10068 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10069
10070 return pcie_device;
10071}
10072
10073/**
10074 * pcie_device_make_active - Add pcie device to pcie_device_list list
10075 * @ioc: per adapter object
10076 * @pcie_device: pcie device object
10077 *
10078 * Add the pcie device which has registered with SCSI Transport Later to
10079 * pcie_device_list list
10080 */
10081static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10082 struct _pcie_device *pcie_device)
10083{
10084 unsigned long flags;
10085
10086 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10087
10088 if (!list_empty(&pcie_device->list)) {
10089 list_del_init(&pcie_device->list);
10090 pcie_device_put(pcie_device);
10091 }
10092 pcie_device_get(pcie_device);
10093 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10094
10095 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10096}
10097
10098/**
10099 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10100 * @ioc: per adapter object
10101 *
10102 * Called during initial loading of the driver.
10103 */
10104static void
10105_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10106{
10107 struct _pcie_device *pcie_device;
10108 int rc;
10109
10110 /* PCIe Device List */
10111 while ((pcie_device = get_next_pcie_device(ioc))) {
10112 if (pcie_device->starget) {
10113 pcie_device_put(pcie_device);
10114 continue;
10115 }
10116 if (pcie_device->access_status ==
10117 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
10118 pcie_device_make_active(ioc, pcie_device);
10119 pcie_device_put(pcie_device);
10120 continue;
10121 }
10122 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10123 pcie_device->id, 0);
10124 if (rc) {
10125 _scsih_pcie_device_remove(ioc, pcie_device);
10126 pcie_device_put(pcie_device);
10127 continue;
10128 } else if (!pcie_device->starget) {
10129 /*
10130 * When async scanning is enabled, its not possible to
10131 * remove devices while scanning is turned on due to an
10132 * oops in scsi_sysfs_add_sdev()->add_device()->
10133 * sysfs_addrm_start()
10134 */
10135 if (!ioc->is_driver_loading) {
10136 /* TODO-- Need to find out whether this condition will
10137 * occur or not
10138 */
10139 _scsih_pcie_device_remove(ioc, pcie_device);
10140 pcie_device_put(pcie_device);
10141 continue;
10142 }
10143 }
10144 pcie_device_make_active(ioc, pcie_device);
10145 pcie_device_put(pcie_device);
10146 }
10147}
10148
10149/**
10150 * _scsih_probe_devices - probing for devices
10151 * @ioc: per adapter object
10152 *
10153 * Called during initial loading of the driver.
10154 */
10155static void
10156_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10157{
10158 u16 volume_mapping_flags;
10159
10160 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10161 return; /* return when IOC doesn't support initiator mode */
10162
10163 _scsih_probe_boot_devices(ioc);
10164
10165 if (ioc->ir_firmware) {
10166 volume_mapping_flags =
10167 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10168 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10169 if (volume_mapping_flags ==
10170 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10171 _scsih_probe_raid(ioc);
10172 _scsih_probe_sas(ioc);
10173 } else {
10174 _scsih_probe_sas(ioc);
10175 _scsih_probe_raid(ioc);
10176 }
10177 } else {
10178 _scsih_probe_sas(ioc);
10179 _scsih_probe_pcie(ioc);
10180 }
10181}
10182
10183/**
10184 * scsih_scan_start - scsi lld callback for .scan_start
10185 * @shost: SCSI host pointer
10186 *
10187 * The shost has the ability to discover targets on its own instead
10188 * of scanning the entire bus. In our implemention, we will kick off
10189 * firmware discovery.
10190 */
10191static void
10192scsih_scan_start(struct Scsi_Host *shost)
10193{
10194 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10195 int rc;
10196 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10197 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10198
10199 if (disable_discovery > 0)
10200 return;
10201
10202 ioc->start_scan = 1;
10203 rc = mpt3sas_port_enable(ioc);
10204
10205 if (rc != 0)
10206 ioc_info(ioc, "port enable: FAILED\n");
10207}
10208
10209/**
10210 * scsih_scan_finished - scsi lld callback for .scan_finished
10211 * @shost: SCSI host pointer
10212 * @time: elapsed time of the scan in jiffies
10213 *
10214 * This function will be called periodicallyn until it returns 1 with the
10215 * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
10216 * we wait for firmware discovery to complete, then return 1.
10217 */
10218static int
10219scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10220{
10221 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10222
10223 if (disable_discovery > 0) {
10224 ioc->is_driver_loading = 0;
10225 ioc->wait_for_discovery_to_complete = 0;
10226 return 1;
10227 }
10228
10229 if (time >= (300 * HZ)) {
10230 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10231 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10232 ioc->is_driver_loading = 0;
10233 return 1;
10234 }
10235
10236 if (ioc->start_scan)
10237 return 0;
10238
10239 if (ioc->start_scan_failed) {
10240 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10241 ioc->start_scan_failed);
10242 ioc->is_driver_loading = 0;
10243 ioc->wait_for_discovery_to_complete = 0;
10244 ioc->remove_host = 1;
10245 return 1;
10246 }
10247
10248 ioc_info(ioc, "port enable: SUCCESS\n");
10249 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10250
10251 if (ioc->wait_for_discovery_to_complete) {
10252 ioc->wait_for_discovery_to_complete = 0;
10253 _scsih_probe_devices(ioc);
10254 }
10255 mpt3sas_base_start_watchdog(ioc);
10256 ioc->is_driver_loading = 0;
10257 return 1;
10258}
10259
10260/* shost template for SAS 2.0 HBA devices */
10261static struct scsi_host_template mpt2sas_driver_template = {
10262 .module = THIS_MODULE,
10263 .name = "Fusion MPT SAS Host",
10264 .proc_name = MPT2SAS_DRIVER_NAME,
10265 .queuecommand = scsih_qcmd,
10266 .target_alloc = scsih_target_alloc,
10267 .slave_alloc = scsih_slave_alloc,
10268 .slave_configure = scsih_slave_configure,
10269 .target_destroy = scsih_target_destroy,
10270 .slave_destroy = scsih_slave_destroy,
10271 .scan_finished = scsih_scan_finished,
10272 .scan_start = scsih_scan_start,
10273 .change_queue_depth = scsih_change_queue_depth,
10274 .eh_abort_handler = scsih_abort,
10275 .eh_device_reset_handler = scsih_dev_reset,
10276 .eh_target_reset_handler = scsih_target_reset,
10277 .eh_host_reset_handler = scsih_host_reset,
10278 .bios_param = scsih_bios_param,
10279 .can_queue = 1,
10280 .this_id = -1,
10281 .sg_tablesize = MPT2SAS_SG_DEPTH,
10282 .max_sectors = 32767,
10283 .cmd_per_lun = 7,
10284 .shost_attrs = mpt3sas_host_attrs,
10285 .sdev_attrs = mpt3sas_dev_attrs,
10286 .track_queue_depth = 1,
10287 .cmd_size = sizeof(struct scsiio_tracker),
10288};
10289
10290/* raid transport support for SAS 2.0 HBA devices */
10291static struct raid_function_template mpt2sas_raid_functions = {
10292 .cookie = &mpt2sas_driver_template,
10293 .is_raid = scsih_is_raid,
10294 .get_resync = scsih_get_resync,
10295 .get_state = scsih_get_state,
10296};
10297
10298/* shost template for SAS 3.0 HBA devices */
10299static struct scsi_host_template mpt3sas_driver_template = {
10300 .module = THIS_MODULE,
10301 .name = "Fusion MPT SAS Host",
10302 .proc_name = MPT3SAS_DRIVER_NAME,
10303 .queuecommand = scsih_qcmd,
10304 .target_alloc = scsih_target_alloc,
10305 .slave_alloc = scsih_slave_alloc,
10306 .slave_configure = scsih_slave_configure,
10307 .target_destroy = scsih_target_destroy,
10308 .slave_destroy = scsih_slave_destroy,
10309 .scan_finished = scsih_scan_finished,
10310 .scan_start = scsih_scan_start,
10311 .change_queue_depth = scsih_change_queue_depth,
10312 .eh_abort_handler = scsih_abort,
10313 .eh_device_reset_handler = scsih_dev_reset,
10314 .eh_target_reset_handler = scsih_target_reset,
10315 .eh_host_reset_handler = scsih_host_reset,
10316 .bios_param = scsih_bios_param,
10317 .can_queue = 1,
10318 .this_id = -1,
10319 .sg_tablesize = MPT3SAS_SG_DEPTH,
10320 .max_sectors = 32767,
10321 .max_segment_size = 0xffffffff,
10322 .cmd_per_lun = 7,
10323 .shost_attrs = mpt3sas_host_attrs,
10324 .sdev_attrs = mpt3sas_dev_attrs,
10325 .track_queue_depth = 1,
10326 .cmd_size = sizeof(struct scsiio_tracker),
10327};
10328
10329/* raid transport support for SAS 3.0 HBA devices */
10330static struct raid_function_template mpt3sas_raid_functions = {
10331 .cookie = &mpt3sas_driver_template,
10332 .is_raid = scsih_is_raid,
10333 .get_resync = scsih_get_resync,
10334 .get_state = scsih_get_state,
10335};
10336
10337/**
10338 * _scsih_determine_hba_mpi_version - determine in which MPI version class
10339 * this device belongs to.
10340 * @pdev: PCI device struct
10341 *
10342 * return MPI2_VERSION for SAS 2.0 HBA devices,
10343 * MPI25_VERSION for SAS 3.0 HBA devices, and
10344 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
10345 */
10346static u16
10347_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10348{
10349
10350 switch (pdev->device) {
10351 case MPI2_MFGPAGE_DEVID_SSS6200:
10352 case MPI2_MFGPAGE_DEVID_SAS2004:
10353 case MPI2_MFGPAGE_DEVID_SAS2008:
10354 case MPI2_MFGPAGE_DEVID_SAS2108_1:
10355 case MPI2_MFGPAGE_DEVID_SAS2108_2:
10356 case MPI2_MFGPAGE_DEVID_SAS2108_3:
10357 case MPI2_MFGPAGE_DEVID_SAS2116_1:
10358 case MPI2_MFGPAGE_DEVID_SAS2116_2:
10359 case MPI2_MFGPAGE_DEVID_SAS2208_1:
10360 case MPI2_MFGPAGE_DEVID_SAS2208_2:
10361 case MPI2_MFGPAGE_DEVID_SAS2208_3:
10362 case MPI2_MFGPAGE_DEVID_SAS2208_4:
10363 case MPI2_MFGPAGE_DEVID_SAS2208_5:
10364 case MPI2_MFGPAGE_DEVID_SAS2208_6:
10365 case MPI2_MFGPAGE_DEVID_SAS2308_1:
10366 case MPI2_MFGPAGE_DEVID_SAS2308_2:
10367 case MPI2_MFGPAGE_DEVID_SAS2308_3:
10368 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10369 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10370 return MPI2_VERSION;
10371 case MPI25_MFGPAGE_DEVID_SAS3004:
10372 case MPI25_MFGPAGE_DEVID_SAS3008:
10373 case MPI25_MFGPAGE_DEVID_SAS3108_1:
10374 case MPI25_MFGPAGE_DEVID_SAS3108_2:
10375 case MPI25_MFGPAGE_DEVID_SAS3108_5:
10376 case MPI25_MFGPAGE_DEVID_SAS3108_6:
10377 return MPI25_VERSION;
10378 case MPI26_MFGPAGE_DEVID_SAS3216:
10379 case MPI26_MFGPAGE_DEVID_SAS3224:
10380 case MPI26_MFGPAGE_DEVID_SAS3316_1:
10381 case MPI26_MFGPAGE_DEVID_SAS3316_2:
10382 case MPI26_MFGPAGE_DEVID_SAS3316_3:
10383 case MPI26_MFGPAGE_DEVID_SAS3316_4:
10384 case MPI26_MFGPAGE_DEVID_SAS3324_1:
10385 case MPI26_MFGPAGE_DEVID_SAS3324_2:
10386 case MPI26_MFGPAGE_DEVID_SAS3324_3:
10387 case MPI26_MFGPAGE_DEVID_SAS3324_4:
10388 case MPI26_MFGPAGE_DEVID_SAS3508:
10389 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10390 case MPI26_MFGPAGE_DEVID_SAS3408:
10391 case MPI26_MFGPAGE_DEVID_SAS3516:
10392 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10393 case MPI26_MFGPAGE_DEVID_SAS3416:
10394 case MPI26_MFGPAGE_DEVID_SAS3616:
10395 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10396 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10397 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10398 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10399 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10400 return MPI26_VERSION;
10401 }
10402 return 0;
10403}
10404
10405/**
10406 * _scsih_probe - attach and add scsi host
10407 * @pdev: PCI device struct
10408 * @id: pci device id
10409 *
10410 * Return: 0 success, anything else error.
10411 */
10412static int
10413_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10414{
10415 struct MPT3SAS_ADAPTER *ioc;
10416 struct Scsi_Host *shost = NULL;
10417 int rv;
10418 u16 hba_mpi_version;
10419
10420 /* Determine in which MPI version class this pci device belongs */
10421 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10422 if (hba_mpi_version == 0)
10423 return -ENODEV;
10424
10425 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
10426 * for other generation HBA's return with -ENODEV
10427 */
10428 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
10429 return -ENODEV;
10430
10431 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
10432 * for other generation HBA's return with -ENODEV
10433 */
10434 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
10435 || hba_mpi_version == MPI26_VERSION)))
10436 return -ENODEV;
10437
10438 switch (hba_mpi_version) {
10439 case MPI2_VERSION:
10440 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10441 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10442 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
10443 shost = scsi_host_alloc(&mpt2sas_driver_template,
10444 sizeof(struct MPT3SAS_ADAPTER));
10445 if (!shost)
10446 return -ENODEV;
10447 ioc = shost_priv(shost);
10448 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10449 ioc->hba_mpi_version_belonged = hba_mpi_version;
10450 ioc->id = mpt2_ids++;
10451 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10452 switch (pdev->device) {
10453 case MPI2_MFGPAGE_DEVID_SSS6200:
10454 ioc->is_warpdrive = 1;
10455 ioc->hide_ir_msg = 1;
10456 break;
10457 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10458 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10459 ioc->is_mcpu_endpoint = 1;
10460 break;
10461 default:
10462 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10463 break;
10464 }
10465 break;
10466 case MPI25_VERSION:
10467 case MPI26_VERSION:
10468 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
10469 shost = scsi_host_alloc(&mpt3sas_driver_template,
10470 sizeof(struct MPT3SAS_ADAPTER));
10471 if (!shost)
10472 return -ENODEV;
10473 ioc = shost_priv(shost);
10474 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10475 ioc->hba_mpi_version_belonged = hba_mpi_version;
10476 ioc->id = mpt3_ids++;
10477 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10478 switch (pdev->device) {
10479 case MPI26_MFGPAGE_DEVID_SAS3508:
10480 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10481 case MPI26_MFGPAGE_DEVID_SAS3408:
10482 case MPI26_MFGPAGE_DEVID_SAS3516:
10483 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10484 case MPI26_MFGPAGE_DEVID_SAS3416:
10485 case MPI26_MFGPAGE_DEVID_SAS3616:
10486 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10487 ioc->is_gen35_ioc = 1;
10488 break;
10489 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10490 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10491 dev_info(&pdev->dev,
10492 "HBA is in Configurable Secure mode\n");
10493 /* fall through */
10494 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10495 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10496 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10497 break;
10498 default:
10499 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10500 }
10501 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10502 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10503 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10504 ioc->combined_reply_queue = 1;
10505 if (ioc->is_gen35_ioc)
10506 ioc->combined_reply_index_count =
10507 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10508 else
10509 ioc->combined_reply_index_count =
10510 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10511 }
10512 break;
10513 default:
10514 return -ENODEV;
10515 }
10516
10517 INIT_LIST_HEAD(&ioc->list);
10518 spin_lock(&gioc_lock);
10519 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10520 spin_unlock(&gioc_lock);
10521 ioc->shost = shost;
10522 ioc->pdev = pdev;
10523 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10524 ioc->tm_cb_idx = tm_cb_idx;
10525 ioc->ctl_cb_idx = ctl_cb_idx;
10526 ioc->base_cb_idx = base_cb_idx;
10527 ioc->port_enable_cb_idx = port_enable_cb_idx;
10528 ioc->transport_cb_idx = transport_cb_idx;
10529 ioc->scsih_cb_idx = scsih_cb_idx;
10530 ioc->config_cb_idx = config_cb_idx;
10531 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10532 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10533 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10534 ioc->logging_level = logging_level;
10535 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10536 /*
10537 * Enable MEMORY MOVE support flag.
10538 */
10539 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10540
10541 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10542
10543 /* misc semaphores and spin locks */
10544 mutex_init(&ioc->reset_in_progress_mutex);
10545 /* initializing pci_access_mutex lock */
10546 mutex_init(&ioc->pci_access_mutex);
10547 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10548 spin_lock_init(&ioc->scsi_lookup_lock);
10549 spin_lock_init(&ioc->sas_device_lock);
10550 spin_lock_init(&ioc->sas_node_lock);
10551 spin_lock_init(&ioc->fw_event_lock);
10552 spin_lock_init(&ioc->raid_device_lock);
10553 spin_lock_init(&ioc->pcie_device_lock);
10554 spin_lock_init(&ioc->diag_trigger_lock);
10555
10556 INIT_LIST_HEAD(&ioc->sas_device_list);
10557 INIT_LIST_HEAD(&ioc->sas_device_init_list);
10558 INIT_LIST_HEAD(&ioc->sas_expander_list);
10559 INIT_LIST_HEAD(&ioc->enclosure_list);
10560 INIT_LIST_HEAD(&ioc->pcie_device_list);
10561 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10562 INIT_LIST_HEAD(&ioc->fw_event_list);
10563 INIT_LIST_HEAD(&ioc->raid_device_list);
10564 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10565 INIT_LIST_HEAD(&ioc->delayed_tr_list);
10566 INIT_LIST_HEAD(&ioc->delayed_sc_list);
10567 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10568 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10569 INIT_LIST_HEAD(&ioc->reply_queue_list);
10570
10571 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10572
10573 /* init shost parameters */
10574 shost->max_cmd_len = 32;
10575 shost->max_lun = max_lun;
10576 shost->transportt = mpt3sas_transport_template;
10577 shost->unique_id = ioc->id;
10578
10579 if (ioc->is_mcpu_endpoint) {
10580 /* mCPU MPI support 64K max IO */
10581 shost->max_sectors = 128;
10582 ioc_info(ioc, "The max_sectors value is set to %d\n",
10583 shost->max_sectors);
10584 } else {
10585 if (max_sectors != 0xFFFF) {
10586 if (max_sectors < 64) {
10587 shost->max_sectors = 64;
10588 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10589 max_sectors);
10590 } else if (max_sectors > 32767) {
10591 shost->max_sectors = 32767;
10592 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
10593 max_sectors);
10594 } else {
10595 shost->max_sectors = max_sectors & 0xFFFE;
10596 ioc_info(ioc, "The max_sectors value is set to %d\n",
10597 shost->max_sectors);
10598 }
10599 }
10600 }
10601 /* register EEDP capabilities with SCSI layer */
10602 if (prot_mask > 0)
10603 scsi_host_set_prot(shost, prot_mask);
10604 else
10605 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10606 | SHOST_DIF_TYPE2_PROTECTION
10607 | SHOST_DIF_TYPE3_PROTECTION);
10608
10609 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10610
10611 /* event thread */
10612 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10613 "fw_event_%s%d", ioc->driver_name, ioc->id);
10614 ioc->firmware_event_thread = alloc_ordered_workqueue(
10615 ioc->firmware_event_name, 0);
10616 if (!ioc->firmware_event_thread) {
10617 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10618 __FILE__, __LINE__, __func__);
10619 rv = -ENODEV;
10620 goto out_thread_fail;
10621 }
10622
10623 ioc->is_driver_loading = 1;
10624 if ((mpt3sas_base_attach(ioc))) {
10625 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10626 __FILE__, __LINE__, __func__);
10627 rv = -ENODEV;
10628 goto out_attach_fail;
10629 }
10630
10631 if (ioc->is_warpdrive) {
10632 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
10633 ioc->hide_drives = 0;
10634 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
10635 ioc->hide_drives = 1;
10636 else {
10637 if (mpt3sas_get_num_volumes(ioc))
10638 ioc->hide_drives = 1;
10639 else
10640 ioc->hide_drives = 0;
10641 }
10642 } else
10643 ioc->hide_drives = 0;
10644
10645 rv = scsi_add_host(shost, &pdev->dev);
10646 if (rv) {
10647 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10648 __FILE__, __LINE__, __func__);
10649 goto out_add_shost_fail;
10650 }
10651
10652 scsi_scan_host(shost);
10653 return 0;
10654out_add_shost_fail:
10655 mpt3sas_base_detach(ioc);
10656 out_attach_fail:
10657 destroy_workqueue(ioc->firmware_event_thread);
10658 out_thread_fail:
10659 spin_lock(&gioc_lock);
10660 list_del(&ioc->list);
10661 spin_unlock(&gioc_lock);
10662 scsi_host_put(shost);
10663 return rv;
10664}
10665
10666#ifdef CONFIG_PM
10667/**
10668 * scsih_suspend - power management suspend main entry point
10669 * @pdev: PCI device struct
10670 * @state: PM state change to (usually PCI_D3)
10671 *
10672 * Return: 0 success, anything else error.
10673 */
10674static int
10675scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10676{
10677 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10678 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10679 pci_power_t device_state;
10680
10681 mpt3sas_base_stop_watchdog(ioc);
10682 flush_scheduled_work();
10683 scsi_block_requests(shost);
10684 device_state = pci_choose_state(pdev, state);
10685 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10686 pdev, pci_name(pdev), device_state);
10687
10688 pci_save_state(pdev);
10689 mpt3sas_base_free_resources(ioc);
10690 pci_set_power_state(pdev, device_state);
10691 return 0;
10692}
10693
10694/**
10695 * scsih_resume - power management resume main entry point
10696 * @pdev: PCI device struct
10697 *
10698 * Return: 0 success, anything else error.
10699 */
10700static int
10701scsih_resume(struct pci_dev *pdev)
10702{
10703 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10704 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10705 pci_power_t device_state = pdev->current_state;
10706 int r;
10707
10708 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10709 pdev, pci_name(pdev), device_state);
10710
10711 pci_set_power_state(pdev, PCI_D0);
10712 pci_enable_wake(pdev, PCI_D0, 0);
10713 pci_restore_state(pdev);
10714 ioc->pdev = pdev;
10715 r = mpt3sas_base_map_resources(ioc);
10716 if (r)
10717 return r;
10718
10719 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10720 scsi_unblock_requests(shost);
10721 mpt3sas_base_start_watchdog(ioc);
10722 return 0;
10723}
10724#endif /* CONFIG_PM */
10725
10726/**
10727 * scsih_pci_error_detected - Called when a PCI error is detected.
10728 * @pdev: PCI device struct
10729 * @state: PCI channel state
10730 *
10731 * Description: Called when a PCI error is detected.
10732 *
10733 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
10734 */
10735static pci_ers_result_t
10736scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10737{
10738 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10739 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10740
10741 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10742
10743 switch (state) {
10744 case pci_channel_io_normal:
10745 return PCI_ERS_RESULT_CAN_RECOVER;
10746 case pci_channel_io_frozen:
10747 /* Fatal error, prepare for slot reset */
10748 ioc->pci_error_recovery = 1;
10749 scsi_block_requests(ioc->shost);
10750 mpt3sas_base_stop_watchdog(ioc);
10751 mpt3sas_base_free_resources(ioc);
10752 return PCI_ERS_RESULT_NEED_RESET;
10753 case pci_channel_io_perm_failure:
10754 /* Permanent error, prepare for device removal */
10755 ioc->pci_error_recovery = 1;
10756 mpt3sas_base_stop_watchdog(ioc);
10757 _scsih_flush_running_cmds(ioc);
10758 return PCI_ERS_RESULT_DISCONNECT;
10759 }
10760 return PCI_ERS_RESULT_NEED_RESET;
10761}
10762
10763/**
10764 * scsih_pci_slot_reset - Called when PCI slot has been reset.
10765 * @pdev: PCI device struct
10766 *
10767 * Description: This routine is called by the pci error recovery
10768 * code after the PCI slot has been reset, just before we
10769 * should resume normal operations.
10770 */
10771static pci_ers_result_t
10772scsih_pci_slot_reset(struct pci_dev *pdev)
10773{
10774 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10775 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10776 int rc;
10777
10778 ioc_info(ioc, "PCI error: slot reset callback!!\n");
10779
10780 ioc->pci_error_recovery = 0;
10781 ioc->pdev = pdev;
10782 pci_restore_state(pdev);
10783 rc = mpt3sas_base_map_resources(ioc);
10784 if (rc)
10785 return PCI_ERS_RESULT_DISCONNECT;
10786
10787 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10788
10789 ioc_warn(ioc, "hard reset: %s\n",
10790 (rc == 0) ? "success" : "failed");
10791
10792 if (!rc)
10793 return PCI_ERS_RESULT_RECOVERED;
10794 else
10795 return PCI_ERS_RESULT_DISCONNECT;
10796}
10797
10798/**
10799 * scsih_pci_resume() - resume normal ops after PCI reset
10800 * @pdev: pointer to PCI device
10801 *
10802 * Called when the error recovery driver tells us that its
10803 * OK to resume normal operation. Use completion to allow
10804 * halted scsi ops to resume.
10805 */
10806static void
10807scsih_pci_resume(struct pci_dev *pdev)
10808{
10809 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10810 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10811
10812 ioc_info(ioc, "PCI error: resume callback!!\n");
10813
10814 mpt3sas_base_start_watchdog(ioc);
10815 scsi_unblock_requests(ioc->shost);
10816}
10817
10818/**
10819 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10820 * @pdev: pointer to PCI device
10821 */
10822static pci_ers_result_t
10823scsih_pci_mmio_enabled(struct pci_dev *pdev)
10824{
10825 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10826 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10827
10828 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10829
10830 /* TODO - dump whatever for debugging purposes */
10831
10832 /* This called only if scsih_pci_error_detected returns
10833 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
10834 * works, no need to reset slot.
10835 */
10836 return PCI_ERS_RESULT_RECOVERED;
10837}
10838
10839/**
10840 * scsih__ncq_prio_supp - Check for NCQ command priority support
10841 * @sdev: scsi device struct
10842 *
10843 * This is called when a user indicates they would like to enable
10844 * ncq command priorities. This works only on SATA devices.
10845 */
10846bool scsih_ncq_prio_supp(struct scsi_device *sdev)
10847{
10848 unsigned char *buf;
10849 bool ncq_prio_supp = false;
10850
10851 if (!scsi_device_supports_vpd(sdev))
10852 return ncq_prio_supp;
10853
10854 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
10855 if (!buf)
10856 return ncq_prio_supp;
10857
10858 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
10859 ncq_prio_supp = (buf[213] >> 4) & 1;
10860
10861 kfree(buf);
10862 return ncq_prio_supp;
10863}
10864/*
10865 * The pci device ids are defined in mpi/mpi2_cnfg.h.
10866 */
10867static const struct pci_device_id mpt3sas_pci_table[] = {
10868 /* Spitfire ~ 2004 */
10869 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
10870 PCI_ANY_ID, PCI_ANY_ID },
10871 /* Falcon ~ 2008 */
10872 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
10873 PCI_ANY_ID, PCI_ANY_ID },
10874 /* Liberator ~ 2108 */
10875 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
10876 PCI_ANY_ID, PCI_ANY_ID },
10877 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
10878 PCI_ANY_ID, PCI_ANY_ID },
10879 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
10880 PCI_ANY_ID, PCI_ANY_ID },
10881 /* Meteor ~ 2116 */
10882 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
10883 PCI_ANY_ID, PCI_ANY_ID },
10884 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
10885 PCI_ANY_ID, PCI_ANY_ID },
10886 /* Thunderbolt ~ 2208 */
10887 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
10888 PCI_ANY_ID, PCI_ANY_ID },
10889 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
10890 PCI_ANY_ID, PCI_ANY_ID },
10891 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
10892 PCI_ANY_ID, PCI_ANY_ID },
10893 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
10894 PCI_ANY_ID, PCI_ANY_ID },
10895 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
10896 PCI_ANY_ID, PCI_ANY_ID },
10897 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
10898 PCI_ANY_ID, PCI_ANY_ID },
10899 /* Mustang ~ 2308 */
10900 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
10901 PCI_ANY_ID, PCI_ANY_ID },
10902 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
10903 PCI_ANY_ID, PCI_ANY_ID },
10904 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
10905 PCI_ANY_ID, PCI_ANY_ID },
10906 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
10907 PCI_ANY_ID, PCI_ANY_ID },
10908 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
10909 PCI_ANY_ID, PCI_ANY_ID },
10910 /* SSS6200 */
10911 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
10912 PCI_ANY_ID, PCI_ANY_ID },
10913 /* Fury ~ 3004 and 3008 */
10914 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
10915 PCI_ANY_ID, PCI_ANY_ID },
10916 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
10917 PCI_ANY_ID, PCI_ANY_ID },
10918 /* Invader ~ 3108 */
10919 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
10920 PCI_ANY_ID, PCI_ANY_ID },
10921 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
10922 PCI_ANY_ID, PCI_ANY_ID },
10923 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
10924 PCI_ANY_ID, PCI_ANY_ID },
10925 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
10926 PCI_ANY_ID, PCI_ANY_ID },
10927 /* Cutlass ~ 3216 and 3224 */
10928 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
10929 PCI_ANY_ID, PCI_ANY_ID },
10930 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
10931 PCI_ANY_ID, PCI_ANY_ID },
10932 /* Intruder ~ 3316 and 3324 */
10933 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
10934 PCI_ANY_ID, PCI_ANY_ID },
10935 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
10936 PCI_ANY_ID, PCI_ANY_ID },
10937 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
10938 PCI_ANY_ID, PCI_ANY_ID },
10939 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
10940 PCI_ANY_ID, PCI_ANY_ID },
10941 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
10942 PCI_ANY_ID, PCI_ANY_ID },
10943 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
10944 PCI_ANY_ID, PCI_ANY_ID },
10945 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
10946 PCI_ANY_ID, PCI_ANY_ID },
10947 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
10948 PCI_ANY_ID, PCI_ANY_ID },
10949 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
10950 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
10951 PCI_ANY_ID, PCI_ANY_ID },
10952 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
10953 PCI_ANY_ID, PCI_ANY_ID },
10954 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
10955 PCI_ANY_ID, PCI_ANY_ID },
10956 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
10957 PCI_ANY_ID, PCI_ANY_ID },
10958 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
10959 PCI_ANY_ID, PCI_ANY_ID },
10960 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
10961 PCI_ANY_ID, PCI_ANY_ID },
10962 /* Mercator ~ 3616*/
10963 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10964 PCI_ANY_ID, PCI_ANY_ID },
10965
10966 /* Aero SI 0x00E1 Configurable Secure
10967 * 0x00E2 Hard Secure
10968 */
10969 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
10970 PCI_ANY_ID, PCI_ANY_ID },
10971 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
10972 PCI_ANY_ID, PCI_ANY_ID },
10973
10974 /* Atlas PCIe Switch Management Port */
10975 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
10976 PCI_ANY_ID, PCI_ANY_ID },
10977
10978 /* Sea SI 0x00E5 Configurable Secure
10979 * 0x00E6 Hard Secure
10980 */
10981 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
10982 PCI_ANY_ID, PCI_ANY_ID },
10983 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
10984 PCI_ANY_ID, PCI_ANY_ID },
10985
10986 {0} /* Terminating entry */
10987};
10988MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
10989
10990static struct pci_error_handlers _mpt3sas_err_handler = {
10991 .error_detected = scsih_pci_error_detected,
10992 .mmio_enabled = scsih_pci_mmio_enabled,
10993 .slot_reset = scsih_pci_slot_reset,
10994 .resume = scsih_pci_resume,
10995};
10996
10997static struct pci_driver mpt3sas_driver = {
10998 .name = MPT3SAS_DRIVER_NAME,
10999 .id_table = mpt3sas_pci_table,
11000 .probe = _scsih_probe,
11001 .remove = scsih_remove,
11002 .shutdown = scsih_shutdown,
11003 .err_handler = &_mpt3sas_err_handler,
11004#ifdef CONFIG_PM
11005 .suspend = scsih_suspend,
11006 .resume = scsih_resume,
11007#endif
11008};
11009
11010/**
11011 * scsih_init - main entry point for this driver.
11012 *
11013 * Return: 0 success, anything else error.
11014 */
11015static int
11016scsih_init(void)
11017{
11018 mpt2_ids = 0;
11019 mpt3_ids = 0;
11020
11021 mpt3sas_base_initialize_callback_handler();
11022
11023 /* queuecommand callback hander */
11024 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
11025
11026 /* task management callback handler */
11027 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
11028
11029 /* base internal commands callback handler */
11030 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
11031 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
11032 mpt3sas_port_enable_done);
11033
11034 /* transport internal commands callback handler */
11035 transport_cb_idx = mpt3sas_base_register_callback_handler(
11036 mpt3sas_transport_done);
11037
11038 /* scsih internal commands callback handler */
11039 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
11040
11041 /* configuration page API internal commands callback handler */
11042 config_cb_idx = mpt3sas_base_register_callback_handler(
11043 mpt3sas_config_done);
11044
11045 /* ctl module callback handler */
11046 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
11047
11048 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
11049 _scsih_tm_tr_complete);
11050
11051 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
11052 _scsih_tm_volume_tr_complete);
11053
11054 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
11055 _scsih_sas_control_complete);
11056
11057 return 0;
11058}
11059
11060/**
11061 * scsih_exit - exit point for this driver (when it is a module).
11062 *
11063 * Return: 0 success, anything else error.
11064 */
11065static void
11066scsih_exit(void)
11067{
11068
11069 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
11070 mpt3sas_base_release_callback_handler(tm_cb_idx);
11071 mpt3sas_base_release_callback_handler(base_cb_idx);
11072 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
11073 mpt3sas_base_release_callback_handler(transport_cb_idx);
11074 mpt3sas_base_release_callback_handler(scsih_cb_idx);
11075 mpt3sas_base_release_callback_handler(config_cb_idx);
11076 mpt3sas_base_release_callback_handler(ctl_cb_idx);
11077
11078 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
11079 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
11080 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
11081
11082/* raid transport support */
11083 if (hbas_to_enumerate != 1)
11084 raid_class_release(mpt3sas_raid_template);
11085 if (hbas_to_enumerate != 2)
11086 raid_class_release(mpt2sas_raid_template);
11087 sas_release_transport(mpt3sas_transport_template);
11088}
11089
11090/**
11091 * _mpt3sas_init - main entry point for this driver.
11092 *
11093 * Return: 0 success, anything else error.
11094 */
11095static int __init
11096_mpt3sas_init(void)
11097{
11098 int error;
11099
11100 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11101 MPT3SAS_DRIVER_VERSION);
11102
11103 mpt3sas_transport_template =
11104 sas_attach_transport(&mpt3sas_transport_functions);
11105 if (!mpt3sas_transport_template)
11106 return -ENODEV;
11107
11108 /* No need attach mpt3sas raid functions template
11109 * if hbas_to_enumarate value is one.
11110 */
11111 if (hbas_to_enumerate != 1) {
11112 mpt3sas_raid_template =
11113 raid_class_attach(&mpt3sas_raid_functions);
11114 if (!mpt3sas_raid_template) {
11115 sas_release_transport(mpt3sas_transport_template);
11116 return -ENODEV;
11117 }
11118 }
11119
11120 /* No need to attach mpt2sas raid functions template
11121 * if hbas_to_enumarate value is two
11122 */
11123 if (hbas_to_enumerate != 2) {
11124 mpt2sas_raid_template =
11125 raid_class_attach(&mpt2sas_raid_functions);
11126 if (!mpt2sas_raid_template) {
11127 sas_release_transport(mpt3sas_transport_template);
11128 return -ENODEV;
11129 }
11130 }
11131
11132 error = scsih_init();
11133 if (error) {
11134 scsih_exit();
11135 return error;
11136 }
11137
11138 mpt3sas_ctl_init(hbas_to_enumerate);
11139
11140 error = pci_register_driver(&mpt3sas_driver);
11141 if (error) {
11142 mpt3sas_ctl_exit(hbas_to_enumerate);
11143 scsih_exit();
11144 }
11145
11146 return error;
11147}
11148
11149/**
11150 * _mpt3sas_exit - exit point for this driver (when it is a module).
11151 *
11152 */
11153static void __exit
11154_mpt3sas_exit(void)
11155{
11156 pr_info("mpt3sas version %s unloading\n",
11157 MPT3SAS_DRIVER_VERSION);
11158
11159 mpt3sas_ctl_exit(hbas_to_enumerate);
11160
11161 pci_unregister_driver(&mpt3sas_driver);
11162
11163 scsih_exit();
11164}
11165
11166module_init(_mpt3sas_init);
11167module_exit(_mpt3sas_exit);