blob: fb37df690b9a0b659aca6ea9221d7f5d781f8572 [file] [log] [blame]
lh9ed821d2023-04-07 01:36:19 -07001/*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4 Written By: Adam Radford <linuxraid@lsi.com>
5 Modifications By: Tom Couch <linuxraid@lsi.com>
6
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; version 2 of the License.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 NO WARRANTY
20 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 solely responsible for determining the appropriateness of using and
25 distributing the Program and assumes all risks associated with its
26 exercise of rights under this Agreement, including but not limited to
27 the risks and costs of program errors, damage to or loss of data,
28 programs or equipment, and unavailability or interruption of operations.
29
30 DISCLAIMER OF LIABILITY
31 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 You should have received a copy of the GNU General Public License
40 along with this program; if not, write to the Free Software
41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42
43 Bugs/Comments/Suggestions should be mailed to:
44 linuxraid@lsi.com
45
46 For more information, goto:
47 http://www.lsi.com
48
49 Note: This version of the driver does not contain a bundled firmware
50 image.
51
52 History
53 -------
54 2.26.02.000 - Driver cleanup for kernel submission.
55 2.26.02.001 - Replace schedule_timeout() calls with msleep().
56 2.26.02.002 - Add support for PAE mode.
57 Add lun support.
58 Fix twa_remove() to free irq handler/unregister_chrdev()
59 before shutting down card.
60 Change to new 'change_queue_depth' api.
61 Fix 'handled=1' ISR usage, remove bogus IRQ check.
62 Remove un-needed eh_abort handler.
63 Add support for embedded firmware error strings.
64 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
65 2.26.02.004 - Add support for 9550SX controllers.
66 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
67 2.26.02.006 - Fix 9550SX pchip reset timeout.
68 Add big endian support.
69 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
70 2.26.02.008 - Free irq handler in __twa_shutdown().
71 Serialize reset code.
72 Add support for 9650SE controllers.
73 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
74 2.26.02.010 - Add support for 9690SA controllers.
75 2.26.02.011 - Increase max AENs drained to 256.
76 Add MSI support and "use_msi" module parameter.
77 Fix bug in twa_get_param() on 4GB+.
78 Use pci_resource_len() for ioremap().
79 2.26.02.012 - Add power management support.
80 2.26.02.013 - Fix bug in twa_load_sgl().
81 2.26.02.014 - Force 60 second timeout default.
82*/
83
84#include <linux/module.h>
85#include <linux/reboot.h>
86#include <linux/spinlock.h>
87#include <linux/interrupt.h>
88#include <linux/moduleparam.h>
89#include <linux/errno.h>
90#include <linux/types.h>
91#include <linux/delay.h>
92#include <linux/pci.h>
93#include <linux/time.h>
94#include <linux/mutex.h>
95#include <linux/slab.h>
96#include <asm/io.h>
97#include <asm/irq.h>
98#include <asm/uaccess.h>
99#include <scsi/scsi.h>
100#include <scsi/scsi_host.h>
101#include <scsi/scsi_tcq.h>
102#include <scsi/scsi_cmnd.h>
103#include "3w-9xxx.h"
104
/* Globals */
#define TW_DRIVER_VERSION "2.26.02.014"
/* Serializes character-device ioctls across all controllers */
static DEFINE_MUTEX(twa_chrdev_mutex);
/* One extension per probed card, indexed by chrdev minor number */
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;	/* chrdev major; -1 until registered */
extern struct timezone sys_tz;	/* kernel timezone, used for AEN timestamps */

/* Module parameters */
MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

/* Use MSI instead of legacy INTx interrupts when non-zero (read-only) */
static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");

/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
 			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152
153/* Functions */
154
/* Show some statistics about the card.
   sysfs 'show' callback for the per-host 'stats' attribute; formats the
   driver's counters into @buf.  Taken under host_lock so the counters are
   read as a consistent snapshot against the ISR/queuecommand paths. */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted:   %4d\n"
		       "Max commands posted:       %4d\n"
		       "Current pending commands:  %4d\n"
		       "Max pending commands:      %4d\n"
		       "Last sgl length:           %4d\n"
		       "Max sgl length:            %4d\n"
		       "Last sector count:         %4d\n"
		       "Max sector count:          %4d\n"
		       "SCSI Host Resets:          %4d\n"
		       "AEN's:                     %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */
190
191/* This function will set a devices queue depth */
192static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
193 int reason)
194{
195 if (reason != SCSI_QDEPTH_DEFAULT)
196 return -EOPNOTSUPP;
197
198 if (queue_depth > TW_Q_LENGTH-2)
199 queue_depth = TW_Q_LENGTH-2;
200 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
201 return queue_depth;
202} /* End twa_change_queue_depth() */
203
/* Create sysfs 'stats' entry (read-only, backed by twa_show_stats) */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,	/* world-readable, no store hook */
	},
	.show = twa_show_stats
};

/* Host attributes initializer (NULL-terminated for the SCSI midlayer) */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = twa_chrdev_ioctl,
	.open = twa_chrdev_open,
	.release = NULL,	/* no per-open state to tear down */
	.llseek = noop_llseek,	/* seeking is meaningless on this device */
};
227
/* This function will complete an aen request from the isr.
   Returns 0 when the AEN chain is still in progress (the request id stays
   in use by a follow-up command), non-zero/0 via out2 when the request id
   has been released and the attention loop flag cleared. */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	/* The AEN payload was DMA'd into this request's generic buffer */
	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;	/* read failed: release request id */
		else {
			retval = 0;
			goto out;	/* read in flight: keep request id */
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		/* Time sync reuses this request id for its own command */
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	/* Chain finished (or failed): retire the request and leave the loop */
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */
281
/* This function will drain aen queue.
   Repeatedly posts REQUEST SENSE (polled, 30s per try) until the firmware
   reports TW_AEN_QUEUE_EMPTY or TW_MAX_AEN_DRAIN iterations have passed.
   @no_check_reset: when non-zero, don't require seeing an initial
   TW_AEN_SOFT_RESET before accepting QUEUE_EMPTY as "done".
   Returns 0 on success, 1 on failure; always uses request id 0. */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	/* first_reset==1 means the expected soft reset AEN was already seen */
	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist: one entry covering the generic DMA buffer */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	/* Hardware rejects unaligned SGL addresses */
	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command (no SCSI command attached) */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			/* Empty before the expected soft reset is an error */
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			/* First soft reset is expected; later ones are events */
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			/* Ignored during drain; handled in normal operation */
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	/* Hitting the drain limit means the queue never emptied: fail */
	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */
372
373/* This function will queue an event */
374static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
375{
376 u32 local_time;
377 struct timeval time;
378 TW_Event *event;
379 unsigned short aen;
380 char host[16];
381 char *error_str;
382
383 tw_dev->aen_count++;
384
385 /* Fill out event info */
386 event = tw_dev->event_queue[tw_dev->error_index];
387
388 /* Check for clobber */
389 host[0] = '\0';
390 if (tw_dev->host) {
391 sprintf(host, " scsi%d:", tw_dev->host->host_no);
392 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
393 tw_dev->aen_clobber = 1;
394 }
395
396 aen = le16_to_cpu(header->status_block.error);
397 memset(event, 0, sizeof(TW_Event));
398
399 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
400 do_gettimeofday(&time);
401 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
402 event->time_stamp_sec = local_time;
403 event->aen_code = aen;
404 event->retrieved = TW_AEN_NOT_RETRIEVED;
405 event->sequence_id = tw_dev->error_sequence_id;
406 tw_dev->error_sequence_id++;
407
408 /* Check for embedded error string */
409 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
410
411 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
412 event->parameter_len = strlen(header->err_specific_desc);
413 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
414 if (event->severity != TW_AEN_SEVERITY_DEBUG)
415 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
416 host,
417 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
418 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
419 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
420 header->err_specific_desc);
421 else
422 tw_dev->aen_count--;
423
424 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
425 tw_dev->event_queue_wrapped = 1;
426 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
427} /* End twa_aen_queue_event() */
428
429/* This function will read the aen queue from the isr */
430static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
431{
432 char cdb[TW_MAX_CDB_LEN];
433 TW_SG_Entry sglist[1];
434 TW_Command_Full *full_command_packet;
435 int retval = 1;
436
437 full_command_packet = tw_dev->command_packet_virt[request_id];
438 memset(full_command_packet, 0, sizeof(TW_Command_Full));
439
440 /* Initialize cdb */
441 memset(&cdb, 0, TW_MAX_CDB_LEN);
442 cdb[0] = REQUEST_SENSE; /* opcode */
443 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
444
445 /* Initialize sglist */
446 memset(&sglist, 0, sizeof(TW_SG_Entry));
447 sglist[0].length = TW_SECTOR_SIZE;
448 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
449
450 /* Mark internal command */
451 tw_dev->srb[request_id] = NULL;
452
453 /* Now post the command packet */
454 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
455 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
456 goto out;
457 }
458 retval = 0;
459out:
460 return retval;
461} /* End twa_aen_read_queue() */
462
463/* This function will look up an AEN severity string */
464static char *twa_aen_severity_lookup(unsigned char severity_code)
465{
466 char *retval = NULL;
467
468 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
469 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
470 goto out;
471
472 retval = twa_aen_severity_table[severity_code];
473out:
474 return retval;
475} /* End twa_aen_severity_lookup() */
476
/* This function will sync firmware time with the host time.
   Builds a SET_PARAM command writing the controller's SchedulerTime
   parameter (seconds since last Sunday 12:00AM, local time) and posts it
   as an internal command on @request_id; completion is handled by the
   ISR path (twa_aen_complete). */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	struct timeval utc;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	u32 local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	/* Single SGL entry: parameter payload lives in the generic buffer */
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
	do_gettimeofday(&utc);
	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
	/* Epoch (Thursday) to Sunday offset = 3 days; 604800 s = one week */
	schedulertime = local_time - (3 * 86400);
	schedulertime = cpu_to_le32(schedulertime % 604800);

	memcpy(param->data, &schedulertime, sizeof(u32));

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */
520
/* This function will allocate memory and check if it is correctly aligned.
   Allocates one coherent DMA area of size*TW_Q_LENGTH bytes and slices it
   into per-request-id slots.
   @which: 0 = command packet buffers, 1 = generic (data/AEN) buffers.
   Returns 0 on success, 1 on allocation or alignment failure. */
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	/* Hardware requires TW_ALIGNMENT_9000-aligned buffers */
	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	/* Record the virt/phys address of each per-request slot */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_allocate_memory() */
559
560/* This function will check the status register for unexpected bits */
561static int twa_check_bits(u32 status_reg_value)
562{
563 int retval = 1;
564
565 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
566 goto out;
567 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
568 goto out;
569
570 retval = 0;
571out:
572 return retval;
573} /* End twa_check_bits() */
574
/* This function will check the srl and decide if we are compatible.
   Performs an extended init-connect advertising the driver's SRL; if the
   firmware rejects it, retries with the base-mode SRL.  On success fills
   tw_dev->tw_compat_info.  Returns 0 if compatible, 1 otherwise.
   @flashed is currently unused in this path. */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	/* First attempt: current driver SRL/branch/build */
	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			/* Base mode rejected too: tell the user which side to upgrade */
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct */
	/* NOTE(review): strncpy() with strlen(TW_DRIVER_VERSION) copies no NUL
	   terminator; presumably driver_version is zero-filled and larger than
	   the version string — verify against TW_Compatibility_Info in 3w-9xxx.h */
	strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
} /* End twa_check_srl() */
638
/* This function handles ioctl for the character device.
   Copies the user's TW_Ioctl_Driver_Command header, allocates a coherent
   DMA bounce buffer sized to a 512-byte multiple of the data length, then
   dispatches on @cmd (firmware pass-through, compatibility info, event
   queue queries, and the userspace cooperative lock).  The whole ioctl is
   serialized by twa_chrdev_mutex and per-card by tw_dev->ioctl_lock. */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	struct timeval current_time;
	u32 current_time_ms;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	/* Only let one of these through at a time */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	/* First copy down the driver command */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Check data buffer size (bounds the DMA allocation below) */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	/* Hardware can only do multiple of 512 byte transfers */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	/* Now allocate ioctl buf memory */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl */
	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
		goto out3;

	/* See which ioctl we are doing */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		/* Post the user's command packet and wait for completion */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		/* Flag internal command */
		tw_dev->srb[request_id] = NULL;

		/* Flag chrdev ioctl (ISR wakes us when this id completes) */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet = &tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types */
		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller */
		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Now wait for command to complete */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out, and didn't get an interrupt */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		/* Now copy in the command packet response */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;
		/* Copy compatibility struct into ioctl data buffer */
		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		/* Return the most recently queued event, flagging clobber */
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		/* Return the oldest event still held in the queue */
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_NEXT_EVENT:
		/* Return the event following the caller-supplied sequence id */
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		/* Return the event preceding the caller-supplied sequence id */
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_LOCK:
		/* Cooperative userspace lock with millisecond expiry */
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		do_gettimeofday(&current_time);
		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);

		/* Grant when forced, unheld, or the old hold has expired */
		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
} /* End twa_chrdev_ioctl() */
885
886/* This function handles open for the character device */
887/* NOTE that this function will race with remove. */
888static int twa_chrdev_open(struct inode *inode, struct file *file)
889{
890 unsigned int minor_number;
891 int retval = TW_IOCTL_ERROR_OS_ENODEV;
892
893 minor_number = iminor(inode);
894 if (minor_number >= twa_device_extension_count)
895 goto out;
896 retval = 0;
897out:
898 return retval;
899} /* End twa_chrdev_open() */
900
/* This function will print readable messages from status register errors */
/*
 * Decode error bits from a controller status register value, log them and
 * write the matching "clear" commands back to the hardware.
 *
 * Returns 1 when a microcontroller error is present (the controller
 * cannot continue until reset), 0 otherwise.
 */
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	/* Check for various error conditions and handle them appropriately */
	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		/* The abort bit must also be cleared in PCI config space */
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		/* Suppress the log message on 9650SE/9690SA while a reset is
		   in progress (presumably expected there — NOTE(review):
		   inferred from the condition, confirm with hardware docs);
		   the clear itself is always issued. */
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		/* Only log once per reset cycle; reset_print is cleared by
		   twa_reset_device_extension() */
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_decode_bits() */
937
938/* This function will empty the response queue */
939static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
940{
941 u32 status_reg_value, response_que_value;
942 int count = 0, retval = 1;
943
944 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
945
946 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
947 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
948 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
949 count++;
950 }
951 if (count == TW_MAX_RESPONSE_DRAIN)
952 goto out;
953
954 retval = 0;
955out:
956 return retval;
957} /* End twa_empty_response_queue() */
958
/* This function will clear the pchip/response queue on 9550SX */
/*
 * Waits up to 30 seconds for the controller to report drain completion
 * through the large response queue register.  9000-series boards do not
 * have this register, so the wait is skipped for them.
 * Returns 0 on success (or on 9000-series), 1 on timeout.
 */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			/* Give up after 30 seconds */
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}
		/* P-chip settle time */
		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue_large() */
982
983/* This function passes sense keys from firmware to scsi layer */
984static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
985{
986 TW_Command_Full *full_command_packet;
987 unsigned short error;
988 int retval = 1;
989 char *error_str;
990
991 full_command_packet = tw_dev->command_packet_virt[request_id];
992
993 /* Check for embedded error string */
994 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
995
996 /* Don't print error for Logical unit not supported during rollcall */
997 error = le16_to_cpu(full_command_packet->header.status_block.error);
998 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
999 if (print_host)
1000 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1001 tw_dev->host->host_no,
1002 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1003 full_command_packet->header.status_block.error,
1004 error_str[0] == '\0' ?
1005 twa_string_lookup(twa_error_table,
1006 full_command_packet->header.status_block.error) : error_str,
1007 full_command_packet->header.err_specific_desc);
1008 else
1009 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1010 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1011 full_command_packet->header.status_block.error,
1012 error_str[0] == '\0' ?
1013 twa_string_lookup(twa_error_table,
1014 full_command_packet->header.status_block.error) : error_str,
1015 full_command_packet->header.err_specific_desc);
1016 }
1017
1018 if (copy_sense) {
1019 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1020 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1021 retval = TW_ISR_DONT_RESULT;
1022 goto out;
1023 }
1024 retval = 0;
1025out:
1026 return retval;
1027} /* End twa_fill_sense() */
1028
1029/* This function will free up device extension resources */
1030static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1031{
1032 if (tw_dev->command_packet_virt[0])
1033 pci_free_consistent(tw_dev->tw_pci_dev,
1034 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1035 tw_dev->command_packet_virt[0],
1036 tw_dev->command_packet_phys[0]);
1037
1038 if (tw_dev->generic_buffer_virt[0])
1039 pci_free_consistent(tw_dev->tw_pci_dev,
1040 TW_SECTOR_SIZE*TW_Q_LENGTH,
1041 tw_dev->generic_buffer_virt[0],
1042 tw_dev->generic_buffer_phys[0]);
1043
1044 kfree(tw_dev->event_queue[0]);
1045} /* End twa_free_device_extension() */
1046
1047/* This function will free a request id */
1048static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1049{
1050 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1051 tw_dev->state[request_id] = TW_S_FINISHED;
1052 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1053} /* End twa_free_request_id() */
1054
/* This function will get parameter table entries from the firmware */
/*
 * Issues a TW_OP_GET_PARAM command for (table_id, parameter_id) using the
 * request's generic buffer as the single SG target, then polls up to 30
 * seconds for completion.
 *
 * Returns a pointer to the parameter payload inside the generic buffer on
 * success, NULL on timeout/no response.  The pointed-to data is only
 * valid until the generic buffer for this request id is reused.
 */
static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	/* Setup the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	/* Now setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	/* NOTE(review): 0x8000 appears to flag the table id for a GET —
	   confirm against firmware interface spec */
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);

	/* Single SG entry covering the request's generic buffer */
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	/* Post the command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	/* Internal command: undo the posted count and recycle the slot */
	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_get_param() */
1097
1098/* This function will assign an available request id */
1099static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1100{
1101 *request_id = tw_dev->free_queue[tw_dev->free_head];
1102 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1103 tw_dev->state[*request_id] = TW_S_STARTED;
1104} /* End twa_get_request_id() */
1105
/* This function will send an initconnection command to controller */
/*
 * Negotiates the driver/firmware connection.  When @set_features includes
 * TW_EXTENDED_INIT_CONNECT, the extended packet carries the driver's
 * compatibility info and the firmware's answers are returned through the
 * fw_on_ctlr_* / init_connect_result out-parameters (only written in that
 * case).  Always uses request id 0 and polls up to 30s for completion.
 * Returns 0 on success, 1 on timeout.
 */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);
	tw_initconnect->features = set_features;

	/* Turn on 64-bit sgl support if we need to */
	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	/* Convert features to wire (little-endian) byte order last */
	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		/* Firmware overwrote the packet in place with its answers */
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	/* Internal command: undo the posted count and recycle the slot */
	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_initconnection() */
1169
1170/* This function will initialize the fields of a device extension */
1171static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1172{
1173 int i, retval = 1;
1174
1175 /* Initialize command packet buffers */
1176 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1177 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1178 goto out;
1179 }
1180
1181 /* Initialize generic buffer */
1182 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1183 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1184 goto out;
1185 }
1186
1187 /* Allocate event info space */
1188 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1189 if (!tw_dev->event_queue[0]) {
1190 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1191 goto out;
1192 }
1193
1194
1195 for (i = 0; i < TW_Q_LENGTH; i++) {
1196 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1197 tw_dev->free_queue[i] = i;
1198 tw_dev->state[i] = TW_S_INITIAL;
1199 }
1200
1201 tw_dev->pending_head = TW_Q_START;
1202 tw_dev->pending_tail = TW_Q_START;
1203 tw_dev->free_head = TW_Q_START;
1204 tw_dev->free_tail = TW_Q_START;
1205 tw_dev->error_sequence_id = 1;
1206 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1207
1208 mutex_init(&tw_dev->ioctl_lock);
1209 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1210
1211 retval = 0;
1212out:
1213 return retval;
1214} /* End twa_initialize_device_extension() */
1215
/* This function is the interrupt service routine */
/*
 * Runs with the adapter's host lock held.  Handles, in order: interrupt
 * validation, controller error bits, host interrupts, attention (AEN)
 * interrupts, command interrupts (re-posting pended requests) and
 * response interrupts (completing finished requests).
 * Returns IRQ_HANDLED when the interrupt belonged to this adapter.
 */
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	/* Get the per adapter lock */
	spin_lock(tw_dev->host->host_lock);

	/* Read the registers */
	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		/* Only one AEN read chain may be in flight at a time */
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				/* Read failed: release the id so a later
				   attention interrupt can retry */
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending commands as we can */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}

	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					/* An ioctl command finished; wake the
					   waiter in twa_chrdev_ioctl() */
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
				}

				/* Now complete the io */
				scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
} /* End twa_interrupt() */
1376
/* This function will load the request id and various sgls for ioctls */
/*
 * Patches an ioctl-supplied command packet in place: inserts the driver's
 * request id and points the scatter/gather list at the ioctl data area
 * (which starts sizeof(TW_Ioctl_Buf_Apache)-1 bytes into the DMA buffer).
 * Handles both new (Apache) and old command formats.
 */
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

	/* 32-bit kernel with 64-bit dma_addr_t: each SG entry is one u32
	   wider, which shifts the old-format SG list location */
	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
		pae = 1;

	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
		newcommand = &full_command_packet->command.newcommand;
		/* Replace the request id while preserving the packed LUN */
		newcommand->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
		if (length) {
			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			newcommand->sg_list[0].length = cpu_to_le32(length);
		}
		/* Zero or one SG entries depending on data length */
		newcommand->sgl_entries__lunh =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
	} else {
		oldcommand = &full_command_packet->command.oldcommand;
		oldcommand->request_id = request_id;

		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
			/* Load the sg list */
			/* 9690SA keeps the SG list at the end of the packet;
			   other boards locate it via the sgl offset field */
			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
			else
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			sgl->length = cpu_to_le32(length);

			/* Account for the wider SG entry in the packet size */
			oldcommand->size += pae;
		}
	}
} /* End twa_load_sgl() */
1415
/* This function will poll for a response interrupt of a request */
/*
 * Waits up to @seconds for the response queue to become non-empty, pops
 * one response and verifies it belongs to @request_id with a zero status
 * (checking the status field of whichever command format was used).
 * Returns 0 on a good response, 1 on timeout, id mismatch or firmware
 * error (in which case the error is logged via twa_fill_sense()).
 */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
	int retval = 1, found = 0, response_request_id;
	TW_Response_Queue response_queue;
	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];

	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		response_request_id = TW_RESID_OUT(response_queue.response_id);
		if (request_id != response_request_id) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
			goto out;
		}
		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
			if (full_command_packet->command.newcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		} else {
			if (full_command_packet->command.oldcommand.status != 0) {
				/* bad response */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		}
	}

	if (found)
		retval = 0;
out:
	return retval;
} /* End twa_poll_response() */
1452
1453/* This function will poll the status register for a flag */
1454static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1455{
1456 u32 status_reg_value;
1457 unsigned long before;
1458 int retval = 1;
1459
1460 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1461 before = jiffies;
1462
1463 if (twa_check_bits(status_reg_value))
1464 twa_decode_bits(tw_dev, status_reg_value);
1465
1466 while ((status_reg_value & flag) != flag) {
1467 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1468
1469 if (twa_check_bits(status_reg_value))
1470 twa_decode_bits(tw_dev, status_reg_value);
1471
1472 if (time_after(jiffies, before + HZ * seconds))
1473 goto out;
1474
1475 msleep(50);
1476 }
1477 retval = 0;
1478out:
1479 return retval;
1480} /* End twa_poll_status() */
1481
1482/* This function will poll the status register for disappearance of a flag */
1483static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1484{
1485 u32 status_reg_value;
1486 unsigned long before;
1487 int retval = 1;
1488
1489 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 before = jiffies;
1491
1492 if (twa_check_bits(status_reg_value))
1493 twa_decode_bits(tw_dev, status_reg_value);
1494
1495 while ((status_reg_value & flag) != 0) {
1496 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1497 if (twa_check_bits(status_reg_value))
1498 twa_decode_bits(tw_dev, status_reg_value);
1499
1500 if (time_after(jiffies, before + HZ * seconds))
1501 goto out;
1502
1503 msleep(50);
1504 }
1505 retval = 0;
1506out:
1507 return retval;
1508} /* End twa_poll_status_gone() */
1509
/* This function will attempt to post a command packet to the board */
/*
 * Writes the request's command packet physical address to the command
 * queue register (two-part write on 9650SE/9690SA and 64-bit DMA setups).
 * If the controller queue is full, internal commands (@internal != 0) are
 * placed on the driver's pending queue for re-posting from the command
 * interrupt; external commands get SCSI_MLQUEUE_HOST_BUSY instead.
 * Returns 0 on success.
 */
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
{
	u32 status_reg_value;
	dma_addr_t command_que_value;
	int retval = 1;

	command_que_value = tw_dev->command_packet_phys[request_id];

	/* For 9650SE write low 4 bytes first */
	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
		command_que_value += TW_COMMAND_OFFSET;
		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
	}

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	/* Pend if other requests are already pending (preserve ordering)
	   or the hardware command queue is full */
	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {

		/* Only pend internal driver commands */
		if (!internal) {
			retval = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		/* Couldn't post the command packet, so we do it later */
		if (tw_dev->state[request_id] != TW_S_PENDING) {
			tw_dev->state[request_id] = TW_S_PENDING;
			tw_dev->pending_request_count++;
			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
			}
			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
		}
		/* Ask to be told when the queue drains */
		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
		goto out;
	} else {
		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
			/* Now write upper 4 bytes */
			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
		} else {
			if (sizeof(dma_addr_t) > 4) {
				command_que_value += TW_COMMAND_OFFSET;
				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
			} else {
				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
			}
		}
		tw_dev->state[request_id] = TW_S_POSTED;
		tw_dev->posted_request_count++;
		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_post_command_packet() */
1575
/* This function will reset a device extension */
/*
 * Full controller reset: aborts every in-flight scsi command with
 * DID_RESET, reinitializes the driver's queues/counters, then runs the
 * hardware reset sequence and re-enables interrupts.
 * Returns 0 on success, 1 if the reset sequence failed.
 */
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
	int i = 0;
	int retval = 1;
	unsigned long flags = 0;

	/* Block new activity and interrupts while we tear down state */
	set_bit(TW_IN_RESET, &tw_dev->flags);
	TW_DISABLE_INTERRUPTS(tw_dev);
	TW_MASK_COMMAND_INTERRUPT(tw_dev);
	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	/* Abort all requests that are in progress */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				struct scsi_cmnd *cmd = tw_dev->srb[i];

				cmd->result = (DID_RESET << 16);
				scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
			}
		}
	}

	/* Reset queues and counts */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;
	tw_dev->pending_request_count = 0;
	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	/* Allow the "Microcontroller Error" message to print again */
	tw_dev->reset_print = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twa_reset_sequence(tw_dev, 1))
		goto out;

	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
	clear_bit(TW_IN_RESET, &tw_dev->flags);
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	return retval;
} /* End twa_reset_device_extension() */
1629
/* This function will reset a controller */
/*
 * Runs the soft-reset / health-check / compatibility-check / AEN-drain
 * sequence, retrying up to TW_MAX_RESET_TRIES times.  After any failure
 * the next attempt always starts with a soft reset, regardless of the
 * initial @soft_reset argument.
 * Returns 0 once the controller reaches a good state, 1 on exhaustion.
 */
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		if (do_soft_reset) {
			TW_SOFT_RESET(tw_dev);
			/* Clear pchip/response queue on 9550SX */
			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

		/* Make sure controller is in a good state */
		/* A soft reset also raises an attention interrupt, so wait
		   for that bit too in that case */
		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Empty response queue */
		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		/* Check for compatibility/flash */
		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			/* Firmware was flashed: run the sequence again */
			if (flashed) {
				tries++;
				continue;
			}
		}

		/* Drain the AEN queue */
		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
} /* End twa_reset_sequence() */
1693
1694/* This funciton returns unit geometry in cylinders/heads/sectors */
1695static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1696{
1697 int heads, sectors, cylinders;
1698 TW_Device_Extension *tw_dev;
1699
1700 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1701
1702 if (capacity >= 0x200000) {
1703 heads = 255;
1704 sectors = 63;
1705 cylinders = sector_div(capacity, heads * sectors);
1706 } else {
1707 heads = 64;
1708 sectors = 32;
1709 cylinders = sector_div(capacity, heads * sectors);
1710 }
1711
1712 geom[0] = heads;
1713 geom[1] = sectors;
1714 geom[2] = cylinders;
1715
1716 return 0;
1717} /* End twa_scsi_biosparam() */
1718
1719/* This is the new scsi eh reset function */
1720static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1721{
1722 TW_Device_Extension *tw_dev = NULL;
1723 int retval = FAILED;
1724
1725 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1726
1727 tw_dev->num_resets++;
1728
1729 sdev_printk(KERN_WARNING, SCpnt->device,
1730 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1731 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1732
1733 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1734 mutex_lock(&tw_dev->ioctl_lock);
1735
1736 /* Now reset the card and some of the device extension data */
1737 if (twa_reset_device_extension(tw_dev)) {
1738 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1739 goto out;
1740 }
1741
1742 retval = SUCCESS;
1743out:
1744 mutex_unlock(&tw_dev->ioctl_lock);
1745 return retval;
1746} /* End twa_scsi_eh_reset() */
1747
/* This is the main scsi queue function to handle scsi opcodes */
/*
 * Locked queuecommand implementation (wrapped by DEF_SCSI_QCMD below).
 * Rejects work during reset, validates LUN support, then assigns a
 * request id and hands the command to twa_scsiop_execute_scsi().
 * Returns 0 or SCSI_MLQUEUE_HOST_BUSY per the scsi midlayer contract.
 */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	/* If we are resetting due to timed out ioctl, report as busy */
	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Check if this FW supports luns */
	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	/* Save done function into scsi_cmnd struct */
	SCpnt->scsi_done = done;

	/* Get a free request id */
	twa_get_request_id(tw_dev, &request_id);

	/* Save the scsi command for use by the ISR */
	tw_dev->srb[request_id] = SCpnt;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		/* Busy: release the id; midlayer will retry the command */
		scsi_dma_unmap(SCpnt);
		twa_free_request_id(tw_dev, request_id);
		break;
	case 1:
		/* Hard failure: complete the command with DID_ERROR */
		SCpnt->result = (DID_ERROR << 16);
		scsi_dma_unmap(SCpnt);
		done(SCpnt);
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsi_queue() */
1794
1795static DEF_SCSI_QCMD(twa_scsi_queue)
1796
/*
 * Build and post an EXECUTE_SCSI command packet to the firmware.
 *
 * @tw_dev:     per-host device extension
 * @request_id: slot previously reserved via twa_get_request_id()
 * @cdb:        CDB for an internal post, or NULL to use the midlayer
 *              command stashed in tw_dev->srb[request_id]
 * @use_sg:     number of entries in @sglistarg for an internal post
 * @sglistarg:  pre-built SG list for an internal post, or NULL to map
 *              the midlayer scatterlist
 *
 * Returns 0 on a successful internal post, SCSI_MLQUEUE_HOST_BUSY when
 * the controller queue is full, or 1 on internal failure.
 *
 * NOTE(review): callers must pass either a non-NULL cdb/sglistarg pair
 * (internal post) or have a valid srb in the slot — a NULL srb with a
 * NULL cdb would dereference NULL below; verify all call sites.
 */
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
{
	TW_Command_Full *full_command_packet;
	TW_Command_Apache *command_packet;
	u32 num_sectors = 0x0;
	int i, sg_count;
	struct scsi_cmnd *srb = NULL;
	struct scatterlist *sglist = NULL, *sg;
	int retval = 1;

	if (tw_dev->srb[request_id]) {
		srb = tw_dev->srb[request_id];
		if (scsi_sglist(srb))
			sglist = scsi_sglist(srb);
	}

	/* Initialize command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	full_command_packet->header.header_desc.size_header = 128;
	full_command_packet->header.status_block.error = 0;
	full_command_packet->header.status_block.severity__reserved = 0;

	command_packet = &full_command_packet->command.newcommand;
	command_packet->status = 0;
	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

	/* We forced 16 byte cdb use earlier */
	if (!cdb)
		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
	else
		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

	if (srb) {
		/* LUN is split across two packed fields: low nibble here,
		   high nibble with the SGL entry count below */
		command_packet->unit = srb->device->id;
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
	} else {
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
		command_packet->unit = 0;
	}

	command_packet->sgl_offset = 16;

	if (!sglistarg) {
		/* Map sglist from scsi layer to cmd packet */

		if (scsi_sg_count(srb)) {
			if ((scsi_sg_count(srb) == 1) &&
			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
				/* Transfers smaller than the minimum SGL length are
				   bounced through the per-request generic buffer;
				   the completion path copies data back for reads */
				if (srb->sc_data_direction == DMA_TO_DEVICE ||
				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
					scsi_sg_copy_to_buffer(srb,
							       tw_dev->generic_buffer_virt[request_id],
							       TW_SECTOR_SIZE);
				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
			} else {
				sg_count = scsi_dma_map(srb);
				if (sg_count < 0)
					goto out;

				/* NOTE(review): the unaligned-address bail-out below
				   leaves the DMA mapping in place; the caller's
				   retval==1 path performs scsi_dma_unmap() — confirm */
				scsi_for_each_sg(srb, sg, sg_count, i) {
					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
						goto out;
					}
				}
			}
			/* NOTE(review): uses the midlayer's scsi_sg_count(), not the
			   (possibly smaller) count returned by scsi_dma_map() — verify
			   this is intended on platforms that merge SG entries */
			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
		}
	} else {
		/* Internal cdb post */
		for (i = 0; i < use_sg; i++) {
			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
				goto out;
			}
		}
		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
	}

	/* Extract the transfer length from 6- and 10-byte READ/WRITE CDBs
	   purely for the statistics counters below */
	if (srb) {
		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
			num_sectors = (u32)srb->cmnd[4];

		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
	}

	/* Update sector statistic */
	tw_dev->sector_count = num_sectors;
	if (tw_dev->sector_count > tw_dev->max_sector_count)
		tw_dev->max_sector_count = tw_dev->sector_count;

	/* Update SG statistics */
	if (srb) {
		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
	}

	/* Now post the command to the board */
	if (srb) {
		retval = twa_post_command_packet(tw_dev, request_id, 0);
	} else {
		/* Internal posts use the internal flag and always report success */
		twa_post_command_packet(tw_dev, request_id, 1);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsiop_execute_scsi() */
1914
1915/* This function completes an execute scsi operation */
1916static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1917{
1918 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1919
1920 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1921 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1922 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1923 if (scsi_sg_count(cmd) == 1) {
1924 void *buf = tw_dev->generic_buffer_virt[request_id];
1925
1926 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1927 }
1928 }
1929} /* End twa_scsiop_execute_scsi_complete() */
1930
/*
 * Quiesce the controller: disable and release the interrupt first so no
 * ISR can run, then notify the firmware of the shutdown, and finally
 * clear any interrupt status the card may still have latched.
 */
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
	/* Disable interrupts */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Free up the IRQ */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
	} else {
		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
	}

	/* Clear all interrupts just before exit */
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
} /* End __twa_shutdown() */
1952
1953/* Wrapper for __twa_shutdown */
1954static void twa_shutdown(struct pci_dev *pdev)
1955{
1956 struct Scsi_Host *host = pci_get_drvdata(pdev);
1957 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1958
1959 __twa_shutdown(tw_dev);
1960} /* End twa_shutdown() */
1961
1962/* This function will look up a string */
1963static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1964{
1965 int index;
1966
1967 for (index = 0; ((code != table[index].code) &&
1968 (table[index].text != (char *)0)); index++);
1969 return(table[index].text);
1970} /* End twa_string_lookup() */
1971
/*
 * Per-device configuration hook, called when a disk comes on-line.
 * Forces a 60 second block-layer request timeout for every device on
 * this host.  Always returns 0.
 */
static int twa_slave_configure(struct scsi_device *sdev)
{
	/* Force 60 second timeout */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
} /* End twa_slave_configure() */
1980
/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "3ware 9000 Storage Controller",
	.queuecommand		= twa_scsi_queue,
	.eh_host_reset_handler	= twa_scsi_eh_reset,
	.bios_param		= twa_scsi_biosparam,
	.change_queue_depth	= twa_change_queue_depth,
	/* TW_Q_LENGTH-2: two request slots held back from the midlayer,
	   presumably for internal/ioctl commands — TODO confirm */
	.can_queue		= TW_Q_LENGTH-2,
	.slave_configure	= twa_slave_configure,
	.this_id		= -1,
	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
	.max_sectors		= TW_MAX_SECTORS,
	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= twa_host_attrs,
	.emulated		= 1
};
1999
2000/* This function will probe and initialize a card */
2001static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2002{
2003 struct Scsi_Host *host = NULL;
2004 TW_Device_Extension *tw_dev;
2005 unsigned long mem_addr, mem_len;
2006 int retval = -ENODEV;
2007
2008 retval = pci_enable_device(pdev);
2009 if (retval) {
2010 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2011 goto out_disable_device;
2012 }
2013
2014 pci_set_master(pdev);
2015 pci_try_set_mwi(pdev);
2016
2017 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2018 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2019 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2020 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2021 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2022 retval = -ENODEV;
2023 goto out_disable_device;
2024 }
2025
2026 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2027 if (!host) {
2028 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2029 retval = -ENOMEM;
2030 goto out_disable_device;
2031 }
2032 tw_dev = (TW_Device_Extension *)host->hostdata;
2033
2034 /* Save values to device extension */
2035 tw_dev->host = host;
2036 tw_dev->tw_pci_dev = pdev;
2037
2038 if (twa_initialize_device_extension(tw_dev)) {
2039 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2040 goto out_free_device_extension;
2041 }
2042
2043 /* Request IO regions */
2044 retval = pci_request_regions(pdev, "3w-9xxx");
2045 if (retval) {
2046 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2047 goto out_free_device_extension;
2048 }
2049
2050 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2051 mem_addr = pci_resource_start(pdev, 1);
2052 mem_len = pci_resource_len(pdev, 1);
2053 } else {
2054 mem_addr = pci_resource_start(pdev, 2);
2055 mem_len = pci_resource_len(pdev, 2);
2056 }
2057
2058 /* Save base address */
2059 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2060 if (!tw_dev->base_addr) {
2061 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2062 goto out_release_mem_region;
2063 }
2064
2065 /* Disable interrupts on the card */
2066 TW_DISABLE_INTERRUPTS(tw_dev);
2067
2068 /* Initialize the card */
2069 if (twa_reset_sequence(tw_dev, 0))
2070 goto out_iounmap;
2071
2072 /* Set host specific parameters */
2073 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2074 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2075 host->max_id = TW_MAX_UNITS_9650SE;
2076 else
2077 host->max_id = TW_MAX_UNITS;
2078
2079 host->max_cmd_len = TW_MAX_CDB_LEN;
2080
2081 /* Channels aren't supported by adapter */
2082 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2083 host->max_channel = 0;
2084
2085 /* Register the card with the kernel SCSI layer */
2086 retval = scsi_add_host(host, &pdev->dev);
2087 if (retval) {
2088 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2089 goto out_iounmap;
2090 }
2091
2092 pci_set_drvdata(pdev, host);
2093
2094 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2095 host->host_no, mem_addr, pdev->irq);
2096 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2097 host->host_no,
2098 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2099 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2100 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2101 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2102 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2103 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2104
2105 /* Try to enable MSI */
2106 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2107 !pci_enable_msi(pdev))
2108 set_bit(TW_USING_MSI, &tw_dev->flags);
2109
2110 /* Now setup the interrupt handler */
2111 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2112 if (retval) {
2113 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2114 goto out_remove_host;
2115 }
2116
2117 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2118 twa_device_extension_count++;
2119
2120 /* Re-enable interrupts on the card */
2121 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2122
2123 /* Finally, scan the host */
2124 scsi_scan_host(host);
2125
2126 if (twa_major == -1) {
2127 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2128 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2129 }
2130 return 0;
2131
2132out_remove_host:
2133 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2134 pci_disable_msi(pdev);
2135 scsi_remove_host(host);
2136out_iounmap:
2137 iounmap(tw_dev->base_addr);
2138out_release_mem_region:
2139 pci_release_regions(pdev);
2140out_free_device_extension:
2141 twa_free_device_extension(tw_dev);
2142 scsi_host_put(host);
2143out_disable_device:
2144 pci_disable_device(pdev);
2145
2146 return retval;
2147} /* End twa_probe() */
2148
/*
 * PCI remove hook: tear down in reverse order of twa_probe().
 * Unregisters from the SCSI midlayer first so no new commands arrive,
 * then quiesces the card, releases IRQ/MSI, mappings, regions and the
 * device extension, and finally drops the host reference.
 */
static void twa_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	scsi_remove_host(tw_dev->host);

	/* Unregister character device */
	if (twa_major >= 0) {
		unregister_chrdev(twa_major, "twa");
		twa_major = -1;
	}

	/* Shutdown the card (also disables interrupts and frees the IRQ) */
	__twa_shutdown(tw_dev);

	/* Disable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twa_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	twa_device_extension_count--;
} /* End twa_remove() */
2183
#ifdef CONFIG_PM
/*
 * Legacy PCI suspend hook: silence the card (interrupts off, IRQ freed,
 * MSI disabled), tell the firmware we are going down, then save PCI
 * state and enter the requested low-power state.  Always returns 0.
 */
static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

	TW_DISABLE_INTERRUPTS(tw_dev);
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
	}
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
} /* End twa_suspend() */
2213
/*
 * Legacy PCI resume hook: restore power/PCI state, re-enable the
 * device, reconfigure DMA masks, re-run the controller reset sequence
 * and reinstall the interrupt handler.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int twa_resume(struct pci_dev *pdev)
{
	int retval = 0;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
		return retval;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Same 64-bit-with-32-bit-fallback DMA policy as probe */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
			retval = -ENODEV;
			goto out_disable_device;
		}

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now enable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_enable_msi(pdev);

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
	return 0;

out_disable_device:
	/* NOTE(review): removing the scsi host from a resume failure path is
	   unusual — the host was added at probe time and the remove hook will
	   try to remove it again; verify against the PM core's expectations */
	scsi_remove_host(host);
	pci_disable_device(pdev);

	return retval;
} /* End twa_resume() */
#endif
2274#endif
2275
/* PCI Devices supported by this driver: 9000, 9550SX, 9650SE and 9690SA
   families, matched on vendor/device id only */
static struct pci_device_id twa_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2289
/* pci_driver initializer */
static struct pci_driver twa_driver = {
	.name		= "3w-9xxx",
	.id_table	= twa_pci_tbl,
	.probe		= twa_probe,
	.remove		= twa_remove,
#ifdef CONFIG_PM
	.suspend	= twa_suspend,
	.resume		= twa_resume,
#endif
	.shutdown	= twa_shutdown
};
2302
/* Module init: announce the driver version and register with the PCI
   core; returns pci_register_driver()'s result (0 or negative errno) */
static int __init twa_init(void)
{
	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);

	return pci_register_driver(&twa_driver);
} /* End twa_init() */
2310
/* Module exit: unregister from the PCI core (triggers twa_remove for
   each bound controller) */
static void __exit twa_exit(void)
{
	pci_unregister_driver(&twa_driver);
} /* End twa_exit() */
2316
/* Module entry/exit points */
module_init(twa_init);
module_exit(twa_exit);
2319