/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  rx.c
 *
 * Abstract: Hardware miniport for Drawbridge specific hardware functions.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>

#include <scsi/scsi_host.h>

#include "aacraid.h"

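/**
 * aac_rx_intr_producer - producer mode interrupt handler
 * @irq: interrupt number
 * @dev_id: adapter the interrupt was raised for
 *
 * Services the outbound doorbell interrupts used by the legacy
 * producer/consumer comm interface.
 */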
static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits;
	u8 intstat = rx_readb(dev, MUnit.OISR);

	/*
	 * Read mask and invert because drawbridge is reversed.
	 * This allows us to only service interrupts that have
	 * been enabled.
	 * Check to see if this is our interrupt.  If it isn't just return
	 */
	if (likely(intstat & ~(dev->OIMR))) {
		bellbits = rx_readl(dev, OutboundDoorbellReg);
		if (unlikely(bellbits & DoorBellPrintfReady)) {
			aac_printf(dev, readl(&dev->IndexRegs->Mailbox[5]));
			rx_writel(dev, MUnit.ODR, DoorBellPrintfReady);
			rx_writel(dev, InboundDoorbellReg, DoorBellPrintfDone);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
		}
		else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
		}
		else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

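/**
 * aac_rx_intr_message - message (new comm) mode interrupt handler
 * @irq: interrupt number
 * @dev_id: adapter the interrupt was raised for
 *
 * Drains the outbound message queue, handing each entry (command, AIF
 * or fast response) to aac_intr_normal() until the queue reads back
 * empty (0xFFFFFFFF).
 */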
static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
{
	int isAif, isFastResponse, isSpecial;
	struct aac_dev *dev = dev_id;
	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
	if (unlikely(Index == 0xFFFFFFFFL))
		Index = rx_readl(dev, MUnit.OutboundQueue);
	if (likely(Index != 0xFFFFFFFFL)) {
		do {
			isAif = isFastResponse = isSpecial = 0;
			if (Index & 0x00000002L) {
				isAif = 1;
				if (Index == 0xFFFFFFFEL)
					isSpecial = 1;
				Index &= ~0x00000002L;
			} else {
				if (Index & 0x00000001L)
					isFastResponse = 1;
				Index >>= 2;
			}
			if (!isSpecial) {
				if (unlikely(aac_intr_normal(dev,
						Index, isAif,
						isFastResponse, NULL))) {
					rx_writel(dev,
						MUnit.OutboundQueue,
						Index);
					rx_writel(dev,
						MUnit.ODR,
						DoorBellAdapterNormRespReady);
				}
			}
			Index = rx_readl(dev, MUnit.OutboundQueue);
		} while (Index != 0xFFFFFFFFL);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

/**
 * aac_rx_disable_interrupt - Disable interrupts
 * @dev: Adapter
 */

static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}

/**
 * aac_rx_enable_interrupt_producer - Enable interrupts
 * @dev: Adapter
 *
 * Unmask the doorbell interrupt used by the legacy producer/consumer
 * comm interface.
 */

static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
}

/**
 * aac_rx_enable_interrupt_message - Enable interrupts
 * @dev: Adapter
 *
 * Unmask the outbound queue interrupt used by the message based
 * (new comm) interface.
 */

static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
{
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
}

/**
 * rx_sync_cmd - send a command and wait
 * @dev: Adapter
 * @command: Command to execute
 * @p1: first parameter
 * @p2: second parameter
 * @p3: third parameter
 * @p4: fourth parameter
 * @p5: fifth parameter (not used by this interface)
 * @p6: sixth parameter (not used by this interface)
 * @status: adapter status returned in Mailbox 0
 * @r1: first return value
 * @r2: second return value
 * @r3: third return value
 * @r4: fourth return value
 *
 * This routine will send a synchronous command to the adapter and wait
 * for its completion.
 */

static int rx_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
{
	unsigned long start;
	int ok;
	/*
	 * Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 * Write the parameters into Mailboxes 1 - 4
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);
	/*
	 * Clear the synch command doorbell to start on a clean slate.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 * Disable doorbell interrupts
	 */
	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
	/*
	 * Force the completion of the mask register write before issuing
	 * the interrupt.
	 */
	rx_readb(dev, MUnit.OIMR);
	/*
	 * Signal that there is a new synch command
	 */
	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);

	ok = 0;
	start = jiffies;

	/*
	 * Wait up to 30 seconds
	 */
	while (time_before(jiffies, start+30*HZ))
	{
		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
		/*
		 * Mon960 will set doorbell0 bit when it has completed the command.
		 */
		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
			/*
			 * Clear the doorbell.
			 */
			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
			ok = 1;
			break;
		}
		/*
		 * Yield the processor in case we are slow
		 */
		msleep(1);
	}
	if (unlikely(ok != 1)) {
		/*
		 * Restore interrupt mask even though we timed out
		 */
		aac_adapter_enable_int(dev);
		return -ETIMEDOUT;
	}
	/*
	 * Pull the synch status from Mailbox 0.
	 */
	if (status)
		*status = readl(&dev->IndexRegs->Mailbox[0]);
	if (r1)
		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
	if (r2)
		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
	if (r3)
		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
	if (r4)
		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
	/*
	 * Clear the synch command doorbell.
	 */
	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
	/*
	 * Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;

}

/**
 * aac_rx_interrupt_adapter - interrupt adapter
 * @dev: Adapter
 *
 * Send an interrupt to the i960 and breakpoint it.
 */

static void aac_rx_interrupt_adapter(struct aac_dev *dev)
{
	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 * aac_rx_notify_adapter - send an event to the adapter
 * @dev: Adapter
 * @event: Event to send
 *
 * Notify the i960 that something it probably cares about has
 * happened.
 */

static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_1);
		break;
	case HostNormRespNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_4);
		break;
	case AdapNormRespQue:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_2);
		break;
	case HostNormCmdNotFull:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_3);
		break;
	case HostShutdown:
		break;
	case FastIo:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_6);
		break;
	case AdapPrintfDone:
		rx_writel(dev, MUnit.IDR, INBOUNDDOORBELL_5);
		break;
	default:
		BUG();
		break;
	}
}

/**
 * aac_rx_start_adapter - activate adapter
 * @dev: Adapter
 *
 * Start up processing on an i960 based AAC adapter
 */

static void aac_rx_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());
	// We can only use a 32 bit address here
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}

/**
 * aac_rx_check_health
 * @dev: device to check if healthy
 *
 * Will attempt to determine if the specified adapter is alive and
 * capable of handling requests, returning 0 if alive.
 */
static int aac_rx_check_health(struct aac_dev *dev)
{
	u32 status = rx_readl(dev, MUnit.OMRx[0]);

	/*
	 * Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;
	/*
	 * Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC)) {
		char *buffer;
		struct POSTSTATUS {
			__le32 Post_Command;
			__le32 Post_Address;
		} *post;
		dma_addr_t paddr, baddr;
		int ret;

		if (likely((status & 0xFF000000L) == 0xBC000000L))
			return (status >> 16) & 0xFF;
		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
		ret = -2;
		if (unlikely(buffer == NULL))
			return ret;
		post = pci_alloc_consistent(dev->pdev,
			sizeof(struct POSTSTATUS), &paddr);
		if (unlikely(post == NULL)) {
			pci_free_consistent(dev->pdev, 512, buffer, baddr);
			return ret;
		}
		memset(buffer, 0, 512);
		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
		post->Post_Address = cpu_to_le32(baddr);
		rx_writel(dev, MUnit.IMRx[0], paddr);
		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
			NULL, NULL, NULL, NULL, NULL);
		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
			post, paddr);
		if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
			ret = (hex_to_bin(buffer[2]) << 4) +
				hex_to_bin(buffer[3]);
		}
		pci_free_consistent(dev->pdev, 512, buffer, baddr);
		return ret;
	}
	/*
	 * Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 * Everything is OK
	 */
	return 0;
}

/**
 * aac_rx_deliver_producer
 * @fib: fib to issue
 *
 * Will send a fib, returning 0 if successful.
 */
int aac_rx_deliver_producer(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	unsigned long qflags;
	u32 Index;
	unsigned long nointr = 0;

	spin_lock_irqsave(q->lock, qflags);
	aac_queue_get(dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);

	q->numpending++;
	*(q->headers.producer) = cpu_to_le32(Index + 1);
	spin_unlock_irqrestore(q->lock, qflags);
	if (!(nointr & aac_config.irq_mod))
		aac_adapter_notify(dev, AdapNormCmdQueue);

	return 0;
}

/**
 * aac_rx_deliver_message
 * @fib: fib to issue
 *
 * Will send a fib, returning 0 if successful.
 */
static int aac_rx_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	unsigned long qflags;
	u32 Index;
	u64 addr;
	volatile void __iomem *device;

	unsigned long count = 10000000L; /* 50 seconds */
	spin_lock_irqsave(q->lock, qflags);
	q->numpending++;
	spin_unlock_irqrestore(q->lock, qflags);
	for (;;) {
		Index = rx_readl(dev, MUnit.InboundQueue);
		if (unlikely(Index == 0xFFFFFFFFL))
			Index = rx_readl(dev, MUnit.InboundQueue);
		if (likely(Index != 0xFFFFFFFFL))
			break;
		if (--count == 0) {
			spin_lock_irqsave(q->lock, qflags);
			q->numpending--;
			spin_unlock_irqrestore(q->lock, qflags);
			return -ETIMEDOUT;
		}
		udelay(5);
	}
	device = dev->base + Index;
	addr = fib->hw_fib_pa;
	writel((u32)(addr & 0xffffffff), device);
	device += sizeof(u32);
	writel((u32)(addr >> 32), device);
	device += sizeof(u32);
	writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
	rx_writel(dev, MUnit.InboundQueue, Index);
	return 0;
}

/**
 * aac_rx_ioremap
 * @dev: adapter
 * @size: mapping resize request
 *
 * Remap (or, when @size is zero, unmap) the adapter register window.
 */
static int aac_rx_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.rx);
		return 0;
	}
	dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &dev->regs.rx->IndexRegs;
	return 0;
}

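/**
 * aac_rx_restart_adapter - reset and restart the adapter
 * @dev: Adapter
 * @bled: health status from aac_rx_check_health(), or 0 to request the
 *        reset unconditionally
 *
 * Issues an IOP reset (or the alternate reset method the firmware asks
 * for) and returns 0 once the adapter has come back without a kernel
 * panic, a negative error code otherwise.
 */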
static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var;

	if (!(dev->supplement_adapter_info.SupportedOptions2 &
	  AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		else {
			bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
			if (!bled && (var != 0x00000001) && (var != 0x3803000F))
				bled = -EINVAL;
		}
		if (bled && (bled != -ETIMEDOUT))
			bled = aac_adapter_sync_cmd(dev, IOP_RESET,
			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);

		if (bled && (bled != -ETIMEDOUT))
			return -EINVAL;
	}
	if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
		rx_writel(dev, MUnit.reserved2, 3);
		msleep(5000); /* Delay 5 seconds */
		var = 0x00000001;
	}
	if (var != 0x00000001)
		return -EINVAL;
	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
		return -ENODEV;
	if (startup_timeout < 300)
		startup_timeout = 300;
	return 0;
}

/**
 * aac_rx_select_comm - Select communications method
 * @dev: Adapter
 * @comm: communications method
 */

int aac_rx_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_PRODUCER:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
		dev->a_ops.adapter_intr = aac_rx_intr_producer;
		dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
		break;
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
		dev->a_ops.adapter_intr = aac_rx_intr_message;
		dev->a_ops.adapter_deliver = aac_rx_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}

/**
 * _aac_rx_init - initialize an i960 based AAC card
 * @dev: device to configure
 *
 * Allocate and set up resources for the i960 based AAC variants. The
 * device_interface in the commregion will be allocated and linked
 * to the comm region.
 */

int _aac_rx_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
	dev->OIMR = status = rx_readb(dev, MUnit.OIMR);
	if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
	  !aac_rx_restart_adapter(dev, 0))
		/* Make sure the Hardware FIFO is empty */
		while ((++restart < 512) &&
		  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
	/*
	 * Check to see if the board panic'd while booting.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & KERNEL_PANIC) {
		if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
			goto error_iounmap;
		++restart;
	}
	/*
	 * Check to see if the board failed any self tests.
	 */
	status = rx_readl(dev, MUnit.OMRx[0]);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}
	/*
	 * Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 * Wait for the adapter to be up and running. Wait up to 3 minutes
	 */
	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
	{
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;
	/*
	 * Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
	dev->a_ops.adapter_check_health = aac_rx_check_health;
	dev->a_ops.adapter_restart = aac_rx_restart_adapter;

	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
	aac_adapter_disable_int(dev);
	rx_writel(dev, MUnit.ODR, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	aac_adapter_comm(dev, dev->comm_interface);
	dev->sync_mode = 0;	/* sync. mode not supported */
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = dev->scsi_host_ptr->base;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;

	aac_adapter_enable_int(dev);
	/*
	 * Tell the adapter that all is configured, and it can
	 * start accepting requests
	 */
	aac_rx_start_adapter(dev);

	return 0;

error_iounmap:

	return -1;
}

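/**
 * aac_rx_init - initialize an i960 Rx based AAC card
 * @dev: device to configure
 *
 * Fills in the Rx specific ioremap and comm selection methods, then
 * hands off to _aac_rx_init() for the common bring-up sequence.
 */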
int aac_rx_init(struct aac_dev *dev)
{
	/*
	 * Fill in the function dispatch table.
	 */
	dev->a_ops.adapter_ioremap = aac_rx_ioremap;
	dev->a_ops.adapter_comm = aac_rx_select_comm;

	return _aac_rx_init(dev);
}