/*
 * nicstar.c
 *
 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
 *
 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
 *            It was taken from the frle-0.22 device driver.
 *            As the file doesn't have a copyright notice, in the file
 *            nicstarmac.copyright I put the copyright notice from the
 *            frle-0.22 device driver.
 *            Some code is based on the nicstar driver by M. Welsh.
 *
 * Author: Rui Prior (rprior@inescn.pt)
 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
 *
 *
 * (C) INESC 1999
 */

/*
 * IMPORTANT INFORMATION
 *
 * There are currently three types of spinlocks:
 *
 * 1 - Per card interrupt spinlock (to protect structures and such)
 * 2 - Per SCQ scq spinlock
 * 3 - Per card resource spinlock (to access registers, etc.)
 *
 * These must NEVER be grabbed in reverse order.
 *
 */
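
/*
 * A minimal sketch of the allowed nesting, for illustration only (the calls
 * below are not copied verbatim from this driver): the interrupt lock may be
 * held while taking an SCQ lock, and either may be held while taking the
 * resource lock, but never the other way around.
 *
 *	spin_lock_irqsave(&card->int_lock, flags);
 *	spin_lock(&scq->lock);
 *	spin_lock(&card->res_lock);
 *	...
 *	unlock in the reverse order
 */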

/* Header files */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/etherdevice.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
#include "idt77105.h"
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */

/* Additional code */

#include "nicstarmac.c"

/* Configurable parameters */

#undef PHY_LOOPBACK
#undef TX_DEBUG
#undef RX_DEBUG
#undef GENERAL_DEBUG
#undef EXTRA_DEBUG

/* Do not touch these */

#ifdef TX_DEBUG
#define TXPRINTK(args...) printk(args)
#else
#define TXPRINTK(args...)
#endif /* TX_DEBUG */

#ifdef RX_DEBUG
#define RXPRINTK(args...) printk(args)
#else
#define RXPRINTK(args...)
#endif /* RX_DEBUG */

#ifdef GENERAL_DEBUG
#define PRINTK(args...) printk(args)
#else
#define PRINTK(args...)
#endif /* GENERAL_DEBUG */

#ifdef EXTRA_DEBUG
#define XPRINTK(args...) printk(args)
#else
#define XPRINTK(args...)
#endif /* EXTRA_DEBUG */

/* Macros */

#define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)

#define NS_DELAY mdelay(1)

#define PTR_DIFF(a, b)	((u32)((unsigned long)(a) - (unsigned long)(b)))

#ifndef ATM_SKB
#define ATM_SKB(s) (&(s)->atm)
#endif

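/*
 * Translate a pointer into an SCQ ring into the bus address seen by the SAR.
 * scq->org is the kernel virtual base of the coherent allocation made in
 * get_scq() and scq->dma is its bus address.
 */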
#define scq_virt_to_bus(scq, p) \
		(scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))

/* Function declarations */

static u32 ns_read_sram(ns_dev * card, u32 sram_address);
static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
			  int count);
static int ns_init_card(int i, struct pci_dev *pcidev);
static void ns_init_card_error(ns_dev * card, int error);
static scq_info *get_scq(ns_dev *card, int size, u32 scd);
static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
static void push_rxbufs(ns_dev *, struct sk_buff *);
static irqreturn_t ns_irq_handler(int irq, void *dev_id);
static int ns_open(struct atm_vcc *vcc);
static void ns_close(struct atm_vcc *vcc);
static void fill_tst(ns_dev * card, int n, vc_map * vc);
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
		     struct sk_buff *skb);
static void process_tsq(ns_dev * card);
static void drain_scq(ns_dev * card, scq_info * scq, int pos);
static void process_rsq(ns_dev * card);
static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
#ifdef EXTRA_DEBUG
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
static void ns_poll(unsigned long arg);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
		       unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);

/* Global variables */

static struct ns_dev *cards[NS_MAX_CARDS];
static unsigned num_cards;
static const struct atmdev_ops atm_ops = {
	.open = ns_open,
	.close = ns_close,
	.ioctl = ns_ioctl,
	.send = ns_send,
	.phy_put = ns_phy_put,
	.phy_get = ns_phy_get,
	.proc_read = ns_proc_read,
	.owner = THIS_MODULE,
};

static struct timer_list ns_timer;
static char *mac[NS_MAX_CARDS];
module_param_array(mac, charp, NULL, 0);
MODULE_LICENSE("GPL");

/* Functions */

static int nicstar_init_one(struct pci_dev *pcidev,
			    const struct pci_device_id *ent)
{
	static int index = -1;
	unsigned int error;

	index++;
	cards[index] = NULL;

	error = ns_init_card(index, pcidev);
	if (error) {
		cards[index--] = NULL;	/* don't increment index */
		goto err_out;
	}

	return 0;
err_out:
	return -ENODEV;
}

static void nicstar_remove_one(struct pci_dev *pcidev)
{
	int i, j;
	ns_dev *card = pci_get_drvdata(pcidev);
	struct sk_buff *hb;
	struct sk_buff *iovb;
	struct sk_buff *lb;
	struct sk_buff *sb;

	i = card->index;

	if (cards[i] == NULL)
		return;

	if (card->atmdev->phy && card->atmdev->phy->stop)
		card->atmdev->phy->stop(card->atmdev);

	/* Stop everything */
	writel(0x00000000, card->membase + CFG);

	/* De-register device */
	atm_dev_deregister(card->atmdev);

	/* Disable PCI device */
	pci_disable_device(pcidev);

	/* Free up resources */
	j = 0;
	PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
	while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
		dev_kfree_skb_any(hb);
		j++;
	}
	PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
	j = 0;
	PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
	       card->iovpool.count);
	while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
		dev_kfree_skb_any(iovb);
		j++;
	}
	PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
	while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
		dev_kfree_skb_any(lb);
	while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
		dev_kfree_skb_any(sb);
	free_scq(card, card->scq0, NULL);
	for (j = 0; j < NS_FRSCD_NUM; j++) {
		if (card->scd2vc[j] != NULL)
			free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
	}
	idr_destroy(&card->idr);
	dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
			  card->rsq.org, card->rsq.dma);
	dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
			  card->tsq.org, card->tsq.dma);
	free_irq(card->pcidev->irq, card);
	iounmap(card->membase);
	kfree(card);
}

static const struct pci_device_id nicstar_pci_tbl[] = {
	{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
	{0,}			/* terminate list */
};

MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);

static struct pci_driver nicstar_driver = {
	.name = "nicstar",
	.id_table = nicstar_pci_tbl,
	.probe = nicstar_init_one,
	.remove = nicstar_remove_one,
};

static int __init nicstar_init(void)
{
	unsigned error = 0;	/* Initialized to remove compile warning */

	XPRINTK("nicstar: nicstar_init() called.\n");

	error = pci_register_driver(&nicstar_driver);

	TXPRINTK("nicstar: TX debug enabled.\n");
	RXPRINTK("nicstar: RX debug enabled.\n");
	PRINTK("nicstar: General debug enabled.\n");
#ifdef PHY_LOOPBACK
	printk("nicstar: using PHY loopback.\n");
#endif /* PHY_LOOPBACK */
	XPRINTK("nicstar: nicstar_init() returned.\n");

	if (!error) {
		init_timer(&ns_timer);
		ns_timer.expires = jiffies + NS_POLL_PERIOD;
		ns_timer.data = 0UL;
		ns_timer.function = ns_poll;
		add_timer(&ns_timer);
	}

	return error;
}

static void __exit nicstar_cleanup(void)
{
	XPRINTK("nicstar: nicstar_cleanup() called.\n");

	del_timer(&ns_timer);

	pci_unregister_driver(&nicstar_driver);

	XPRINTK("nicstar: nicstar_cleanup() returned.\n");
}

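/*
 * SRAM accessors.  The SAR's internal SRAM is reached through the CMD and
 * DR0-DR3 registers: the dword-aligned word address is combined with a read
 * (0x5xxxxxxx) or write (0x4xxxxxxx) opcode and written to CMD, and the data
 * itself moves through the DR registers.  Both helpers serialize on the
 * per-card resource lock.
 */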
static u32 ns_read_sram(ns_dev * card, u32 sram_address)
{
	unsigned long flags;
	u32 data;
	sram_address <<= 2;
	sram_address &= 0x0007FFFC;	/* address must be dword aligned */
	sram_address |= 0x50000000;	/* SRAM read command */
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card)) ;
	writel(sram_address, card->membase + CMD);
	while (CMD_BUSY(card)) ;
	data = readl(card->membase + DR0);
	spin_unlock_irqrestore(&card->res_lock, flags);
	return data;
}

static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
			  int count)
{
	unsigned long flags;
	int i, c;
	count--;		/* count range now is 0..3 instead of 1..4 */
	c = count;
	c <<= 2;		/* to use increments of 4 */
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card)) ;
	for (i = 0; i <= c; i += 4)
		writel(*(value++), card->membase + i);
	/* Note: DR# registers are the first 4 dwords in nicstar's memspace,
	   so card->membase + DR0 == card->membase */
	sram_address <<= 2;
	sram_address &= 0x0007FFFC;
	sram_address |= (0x40000000 | count);
	writel(sram_address, card->membase + CMD);
	spin_unlock_irqrestore(&card->res_lock, flags);
}

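/*
 * Probe-time initialization of one card: enable the PCI device, map the SAR
 * registers, detect the PHY (25 Mbps or 155 Mbps), size the SRAM, set up the
 * TSQ/RSQ/SCQ0 rings and the receive buffer pools, request the interrupt and
 * finally register the ATM device.  On failure the error code records how far
 * we got so that ns_init_card_error() can unwind exactly what was set up.
 */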
static int ns_init_card(int i, struct pci_dev *pcidev)
{
	int j;
	struct ns_dev *card = NULL;
	unsigned char pci_latency;
	unsigned error;
	u32 data;
	u32 u32d[4];
	u32 ns_cfg_rctsize;
	int bcount;
	unsigned long membase;

	error = 0;

	if (pci_enable_device(pcidev)) {
		printk("nicstar%d: can't enable PCI device\n", i);
		error = 2;
		ns_init_card_error(card, error);
		return error;
	}
	if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_WARNING
		       "nicstar%d: No suitable DMA available.\n", i);
		error = 2;
		ns_init_card_error(card, error);
		return error;
	}

	card = kmalloc(sizeof(*card), GFP_KERNEL);
	if (!card) {
		printk
		    ("nicstar%d: can't allocate memory for device structure.\n",
		     i);
		error = 2;
		ns_init_card_error(card, error);
		return error;
	}
	cards[i] = card;
	spin_lock_init(&card->int_lock);
	spin_lock_init(&card->res_lock);

	pci_set_drvdata(pcidev, card);

	card->index = i;
	card->atmdev = NULL;
	card->pcidev = pcidev;
	membase = pci_resource_start(pcidev, 1);
	card->membase = ioremap(membase, NS_IOREMAP_SIZE);
	if (!card->membase) {
		printk("nicstar%d: can't ioremap() membase.\n", i);
		error = 3;
		ns_init_card_error(card, error);
		return error;
	}
	PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);

	pci_set_master(pcidev);

	if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
		printk("nicstar%d: can't read PCI latency timer.\n", i);
		error = 6;
		ns_init_card_error(card, error);
		return error;
	}
#ifdef NS_PCI_LATENCY
	if (pci_latency < NS_PCI_LATENCY) {
		PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
		       NS_PCI_LATENCY);
		for (j = 1; j < 4; j++) {
			if (pci_write_config_byte
			    (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
				break;
		}
		if (j == 4) {
			printk
			    ("nicstar%d: can't set PCI latency timer to %d.\n",
			     i, NS_PCI_LATENCY);
			error = 7;
			ns_init_card_error(card, error);
			return error;
		}
	}
#endif /* NS_PCI_LATENCY */

	/* Clear timer overflow */
	data = readl(card->membase + STAT);
	if (data & NS_STAT_TMROF)
		writel(NS_STAT_TMROF, card->membase + STAT);

	/* Software reset */
	writel(NS_CFG_SWRST, card->membase + CFG);
	NS_DELAY;
	writel(0x00000000, card->membase + CFG);

	/* PHY reset */
	writel(0x00000008, card->membase + GP);
	NS_DELAY;
	writel(0x00000001, card->membase + GP);
	NS_DELAY;
	while (CMD_BUSY(card)) ;
	writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD);	/* Sync UTOPIA with SAR clock */
	NS_DELAY;

	/* Detect PHY type */
	while (CMD_BUSY(card)) ;
	writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
	while (CMD_BUSY(card)) ;
	data = readl(card->membase + DR0);
	switch (data) {
	case 0x00000009:
		printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
		card->max_pcr = ATM_25_PCR;
		while (CMD_BUSY(card)) ;
		writel(0x00000008, card->membase + DR0);
		writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
		/* Clear any pending interrupt */
		writel(NS_STAT_SFBQF, card->membase + STAT);
#ifdef PHY_LOOPBACK
		while (CMD_BUSY(card)) ;
		writel(0x00000022, card->membase + DR0);
		writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
#endif /* PHY_LOOPBACK */
		break;
	case 0x00000030:
	case 0x00000031:
		printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
		card->max_pcr = ATM_OC3_PCR;
#ifdef PHY_LOOPBACK
		while (CMD_BUSY(card)) ;
		writel(0x00000002, card->membase + DR0);
		writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
#endif /* PHY_LOOPBACK */
		break;
	default:
		printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
		error = 8;
		ns_init_card_error(card, error);
		return error;
	}
	writel(0x00000000, card->membase + GP);

	/* Determine SRAM size */
	data = 0x76543210;
	ns_write_sram(card, 0x1C003, &data, 1);
	data = 0x89ABCDEF;
	ns_write_sram(card, 0x14003, &data, 1);
	if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
	    ns_read_sram(card, 0x1C003) == 0x76543210)
		card->sram_size = 128;
	else
		card->sram_size = 32;
	PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);

	card->rct_size = NS_MAX_RCTSIZE;

#if (NS_MAX_RCTSIZE == 4096)
	if (card->sram_size == 128)
		printk
		    ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
		     i);
#elif (NS_MAX_RCTSIZE == 16384)
	if (card->sram_size == 32) {
		printk
		    ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
		     i);
		card->rct_size = 4096;
	}
#else
#error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
#endif

	card->vpibits = NS_VPIBITS;
	if (card->rct_size == 4096)
		card->vcibits = 12 - NS_VPIBITS;
	else			/* card->rct_size == 16384 */
		card->vcibits = 14 - NS_VPIBITS;

	/* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
	if (mac[i] == NULL)
		nicstar_init_eprom(card->membase);

	/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
	writel(0x00000000, card->membase + VPM);

	/* Initialize TSQ */
	card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
					   NS_TSQSIZE + NS_TSQ_ALIGNMENT,
					   &card->tsq.dma, GFP_KERNEL);
	if (card->tsq.org == NULL) {
		printk("nicstar%d: can't allocate TSQ.\n", i);
		error = 10;
		ns_init_card_error(card, error);
		return error;
	}
	card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
	card->tsq.next = card->tsq.base;
	card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
	for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
		ns_tsi_init(card->tsq.base + j);
	writel(0x00000000, card->membase + TSQH);
	writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
	PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);

	/* Initialize RSQ */
	card->rsq.org = dma_alloc_coherent(&card->pcidev->dev,
					   NS_RSQSIZE + NS_RSQ_ALIGNMENT,
					   &card->rsq.dma, GFP_KERNEL);
	if (card->rsq.org == NULL) {
		printk("nicstar%d: can't allocate RSQ.\n", i);
		error = 11;
		ns_init_card_error(card, error);
		return error;
	}
	card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
	card->rsq.next = card->rsq.base;
	card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
	for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
		ns_rsqe_init(card->rsq.base + j);
	writel(0x00000000, card->membase + RSQH);
	writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
	PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);

	/* Initialize SCQ0, the only VBR SCQ used */
	card->scq1 = NULL;
	card->scq2 = NULL;
	card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
	if (card->scq0 == NULL) {
		printk("nicstar%d: can't get SCQ0.\n", i);
		error = 12;
		ns_init_card_error(card, error);
		return error;
	}
	u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
	u32d[1] = (u32) 0x00000000;
	u32d[2] = (u32) 0xffffffff;
	u32d[3] = (u32) 0x00000000;
	ns_write_sram(card, NS_VRSCD0, u32d, 4);
	ns_write_sram(card, NS_VRSCD1, u32d, 4);	/* These last two won't be used */
	ns_write_sram(card, NS_VRSCD2, u32d, 4);	/* but are initialized, just in case... */
	card->scq0->scd = NS_VRSCD0;
	PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);

	/* Initialize TSTs */
	card->tst_addr = NS_TST0;
	card->tst_free_entries = NS_TST_NUM_ENTRIES;
	data = NS_TST_OPCODE_VARIABLE;
	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
		ns_write_sram(card, NS_TST0 + j, &data, 1);
	data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
	ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
		ns_write_sram(card, NS_TST1 + j, &data, 1);
	data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
	ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
		card->tste2vc[j] = NULL;
	writel(NS_TST0 << 2, card->membase + TSTB);

	/* Initialize RCT. AAL type is set on opening the VC. */
#ifdef RCQ_SUPPORT
	u32d[0] = NS_RCTE_RAWCELLINTEN;
#else
	u32d[0] = 0x00000000;
#endif /* RCQ_SUPPORT */
	u32d[1] = 0x00000000;
	u32d[2] = 0x00000000;
	u32d[3] = 0xFFFFFFFF;
	for (j = 0; j < card->rct_size; j++)
		ns_write_sram(card, j * 4, u32d, 4);

	memset(card->vcmap, 0, sizeof(card->vcmap));

	for (j = 0; j < NS_FRSCD_NUM; j++)
		card->scd2vc[j] = NULL;

	/* Initialize buffer levels */
	card->sbnr.min = MIN_SB;
	card->sbnr.init = NUM_SB;
	card->sbnr.max = MAX_SB;
	card->lbnr.min = MIN_LB;
	card->lbnr.init = NUM_LB;
	card->lbnr.max = MAX_LB;
	card->iovnr.min = MIN_IOVB;
	card->iovnr.init = NUM_IOVB;
	card->iovnr.max = MAX_IOVB;
	card->hbnr.min = MIN_HB;
	card->hbnr.init = NUM_HB;
	card->hbnr.max = MAX_HB;

	card->sm_handle = NULL;
	card->sm_addr = 0x00000000;
	card->lg_handle = NULL;
	card->lg_addr = 0x00000000;

	card->efbie = 1;	/* To prevent push_rxbufs from enabling the interrupt */

	idr_init(&card->idr);

	/* Pre-allocate some huge buffers */
	skb_queue_head_init(&card->hbpool.queue);
	card->hbpool.count = 0;
	for (j = 0; j < NUM_HB; j++) {
		struct sk_buff *hb;
		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
		if (hb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
			     i, j, NUM_HB);
			error = 13;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(hb) = BUF_NONE;
		skb_queue_tail(&card->hbpool.queue, hb);
		card->hbpool.count++;
	}

	/* Allocate large buffers */
	skb_queue_head_init(&card->lbpool.queue);
	card->lbpool.count = 0;	/* Not used */
	for (j = 0; j < NUM_LB; j++) {
		struct sk_buff *lb;
		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
		if (lb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d large buffers.\n",
			     i, j, NUM_LB);
			error = 14;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(lb) = BUF_LG;
		skb_queue_tail(&card->lbpool.queue, lb);
		skb_reserve(lb, NS_SMBUFSIZE);
		push_rxbufs(card, lb);
		/* Due to the implementation of push_rxbufs() this is 1, not 0 */
		if (j == 1) {
			card->rcbuf = lb;
			card->rawcell = (struct ns_rcqe *) lb->data;
			card->rawch = NS_PRV_DMA(lb);
		}
	}
	/* Test for strange behaviour which leads to crashes */
	if ((bcount =
	     ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
		printk
		    ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
		     i, j, bcount);
		error = 14;
		ns_init_card_error(card, error);
		return error;
	}

	/* Allocate small buffers */
	skb_queue_head_init(&card->sbpool.queue);
	card->sbpool.count = 0;	/* Not used */
	for (j = 0; j < NUM_SB; j++) {
		struct sk_buff *sb;
		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
		if (sb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d small buffers.\n",
			     i, j, NUM_SB);
			error = 15;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(sb) = BUF_SM;
		skb_queue_tail(&card->sbpool.queue, sb);
		skb_reserve(sb, NS_AAL0_HEADER);
		push_rxbufs(card, sb);
	}
	/* Test for strange behaviour which leads to crashes */
	if ((bcount =
	     ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
		printk
		    ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
		     i, j, bcount);
		error = 15;
		ns_init_card_error(card, error);
		return error;
	}

	/* Allocate iovec buffers */
	skb_queue_head_init(&card->iovpool.queue);
	card->iovpool.count = 0;
	for (j = 0; j < NUM_IOVB; j++) {
		struct sk_buff *iovb;
		iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
		if (iovb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
			     i, j, NUM_IOVB);
			error = 16;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(iovb) = BUF_NONE;
		skb_queue_tail(&card->iovpool.queue, iovb);
		card->iovpool.count++;
	}

	/* Configure NICStAR */
	if (card->rct_size == 4096)
		ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
	else			/* (card->rct_size == 16384) */
		ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;

	card->efbie = 1;

	card->intcnt = 0;
	if (request_irq
	    (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
		printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
		error = 9;
		ns_init_card_error(card, error);
		return error;
	}

	/* Register device */
	card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
					-1, NULL);
	if (card->atmdev == NULL) {
		printk("nicstar%d: can't register device.\n", i);
		error = 17;
		ns_init_card_error(card, error);
		return error;
	}

	if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
				   card->atmdev->esi, 6);
		if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) {
			nicstar_read_eprom(card->membase,
					   NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
					   card->atmdev->esi, 6);
		}
	}

	printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);

	card->atmdev->dev_data = card;
	card->atmdev->ci_range.vpi_bits = card->vpibits;
	card->atmdev->ci_range.vci_bits = card->vcibits;
	card->atmdev->link_rate = card->max_pcr;
	card->atmdev->phy = NULL;

#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
	if (card->max_pcr == ATM_OC3_PCR)
		suni_init(card->atmdev);
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */

#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
	if (card->max_pcr == ATM_25_PCR)
		idt77105_init(card->atmdev);
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */

	if (card->atmdev->phy && card->atmdev->phy->start)
		card->atmdev->phy->start(card->atmdev);

	writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE |	/* Only enabled if RCQ_SUPPORT */
	       NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT |	/* Only enabled if ENABLE_TSQFIE */
	       NS_CFG_PHYIE, card->membase + CFG);

	num_cards++;

	return error;
}

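/*
 * Unwind a partially initialized card.  The error code passed in records how
 * far ns_init_card() got; each threshold below releases one of the resources
 * acquired before that point.
 */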
static void ns_init_card_error(ns_dev *card, int error)
{
	if (error >= 17) {
		writel(0x00000000, card->membase + CFG);
	}
	if (error >= 16) {
		struct sk_buff *iovb;
		while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
			dev_kfree_skb_any(iovb);
	}
	if (error >= 15) {
		struct sk_buff *sb;
		while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
			dev_kfree_skb_any(sb);
		free_scq(card, card->scq0, NULL);
	}
	if (error >= 14) {
		struct sk_buff *lb;
		while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
			dev_kfree_skb_any(lb);
	}
	if (error >= 13) {
		struct sk_buff *hb;
		while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
			dev_kfree_skb_any(hb);
	}
	if (error >= 12) {
		/* The RSQ and TSQ come from dma_alloc_coherent(), so free
		   them the same way as nicstar_remove_one() does. */
		dma_free_coherent(&card->pcidev->dev,
				  NS_RSQSIZE + NS_RSQ_ALIGNMENT,
				  card->rsq.org, card->rsq.dma);
	}
	if (error >= 11) {
		dma_free_coherent(&card->pcidev->dev,
				  NS_TSQSIZE + NS_TSQ_ALIGNMENT,
				  card->tsq.org, card->tsq.dma);
	}
	if (error >= 10) {
		free_irq(card->pcidev->irq, card);
	}
	if (error >= 4) {
		iounmap(card->membase);
	}
	if (error >= 3) {
		pci_disable_device(card->pcidev);
		kfree(card);
	}
}

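/*
 * Allocate a segmentation channel queue: a coherent ring of SCQ entries plus
 * a parallel array of sk_buff pointers that drain_scq() uses to complete
 * transmissions.  size must be VBR_SCQSIZE or CBR_SCQSIZE; scd is the SRAM
 * address of the segmentation channel descriptor that will point at the ring.
 */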
static scq_info *get_scq(ns_dev *card, int size, u32 scd)
{
	scq_info *scq;
	int i;

	if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
		return NULL;

	scq = kmalloc(sizeof(*scq), GFP_KERNEL);
	if (!scq)
		return NULL;
	scq->org = dma_alloc_coherent(&card->pcidev->dev,
				      2 * size, &scq->dma, GFP_KERNEL);
	if (!scq->org) {
		kfree(scq);
		return NULL;
	}
	scq->skb = kmalloc_array(size / NS_SCQE_SIZE,
				 sizeof(*scq->skb),
				 GFP_KERNEL);
	if (!scq->skb) {
		dma_free_coherent(&card->pcidev->dev,
				  2 * size, scq->org, scq->dma);
		kfree(scq);
		return NULL;
	}
	scq->num_entries = size / NS_SCQE_SIZE;
	scq->base = PTR_ALIGN(scq->org, size);
	scq->next = scq->base;
	scq->last = scq->base + (scq->num_entries - 1);
	scq->tail = scq->last;
	scq->scd = scd;
	scq->num_entries = size / NS_SCQE_SIZE;
	scq->tbd_count = 0;
	init_waitqueue_head(&scq->scqfull_waitq);
	scq->full = 0;
	spin_lock_init(&scq->lock);

	for (i = 0; i < scq->num_entries; i++)
		scq->skb[i] = NULL;

	return scq;
}

/* For variable rate SCQ vcc must be NULL */
static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
{
	int i;

	if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
		for (i = 0; i < scq->num_entries; i++) {
			if (scq->skb[i] != NULL) {
				vcc = ATM_SKB(scq->skb[i])->vcc;
				if (vcc->pop != NULL)
					vcc->pop(vcc, scq->skb[i]);
				else
					dev_kfree_skb_any(scq->skb[i]);
			}
	} else {		/* vcc must be != NULL */

		if (vcc == NULL) {
			printk
			    ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
			for (i = 0; i < scq->num_entries; i++)
				dev_kfree_skb_any(scq->skb[i]);
		} else
			for (i = 0; i < scq->num_entries; i++) {
				if (scq->skb[i] != NULL) {
					if (vcc->pop != NULL)
						vcc->pop(vcc, scq->skb[i]);
					else
						dev_kfree_skb_any(scq->skb[i]);
				}
			}
	}
	kfree(scq->skb);
	dma_free_coherent(&card->pcidev->dev,
			  2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
			       VBR_SCQSIZE : CBR_SCQSIZE),
			  scq->org, scq->dma);
	kfree(scq);
}

/* The handles passed must be pointers to the sk_buff containing the small
   or large buffer(s) cast to u32. */
static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
{
	struct sk_buff *handle1, *handle2;
	int id1, id2;
	u32 addr1, addr2;
	u32 stat;
	unsigned long flags;

	/* *BARF* */
	handle2 = NULL;
	addr2 = 0;
	handle1 = skb;
	addr1 = dma_map_single(&card->pcidev->dev,
			       skb->data,
			       (NS_PRV_BUFTYPE(skb) == BUF_SM
				? NS_SMSKBSIZE : NS_LGSKBSIZE),
			       DMA_TO_DEVICE);
	NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */

#ifdef GENERAL_DEBUG
	if (!addr1)
		printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
		       card->index);
#endif /* GENERAL_DEBUG */

	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);
	if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
		if (!addr2) {
			if (card->sm_addr) {
				addr2 = card->sm_addr;
				handle2 = card->sm_handle;
				card->sm_addr = 0x00000000;
				card->sm_handle = NULL;
			} else {	/* (!sm_addr) */

				card->sm_addr = addr1;
				card->sm_handle = handle1;
			}
		}
	} else {		/* buf_type == BUF_LG */

		if (!addr2) {
			if (card->lg_addr) {
				addr2 = card->lg_addr;
				handle2 = card->lg_handle;
				card->lg_addr = 0x00000000;
				card->lg_handle = NULL;
			} else {	/* (!lg_addr) */

				card->lg_addr = addr1;
				card->lg_handle = handle1;
			}
		}
	}

	if (addr2) {
		if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
			if (card->sbfqc >= card->sbnr.max) {
				skb_unlink(handle1, &card->sbpool.queue);
				dev_kfree_skb_any(handle1);
				skb_unlink(handle2, &card->sbpool.queue);
				dev_kfree_skb_any(handle2);
				return;
			} else
				card->sbfqc += 2;
		} else {	/* (buf_type == BUF_LG) */

			if (card->lbfqc >= card->lbnr.max) {
				skb_unlink(handle1, &card->lbpool.queue);
				dev_kfree_skb_any(handle1);
				skb_unlink(handle2, &card->lbpool.queue);
				dev_kfree_skb_any(handle2);
				return;
			} else
				card->lbfqc += 2;
		}

		id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
		if (id1 < 0)
			goto out;

		id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
		if (id2 < 0)
			goto out;

		spin_lock_irqsave(&card->res_lock, flags);
		while (CMD_BUSY(card)) ;
		writel(addr2, card->membase + DR3);
		writel(id2, card->membase + DR2);
		writel(addr1, card->membase + DR1);
		writel(id1, card->membase + DR0);
		writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
		       card->membase + CMD);
		spin_unlock_irqrestore(&card->res_lock, flags);

		XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
			card->index,
			(NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
			addr1, addr2);
	}

	if (!card->efbie && card->sbfqc >= card->sbnr.min &&
	    card->lbfqc >= card->lbnr.min) {
		card->efbie = 1;
		writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
		       card->membase + CFG);
	}

out:
	return;
}

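/*
 * Interrupt handler.  Runs with the per-card interrupt lock held; each
 * condition reported in STAT is serviced and then acknowledged by writing
 * the corresponding bit back to the STAT register.
 */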
static irqreturn_t ns_irq_handler(int irq, void *dev_id)
{
	u32 stat_r;
	ns_dev *card;
	struct atm_dev *dev;
	unsigned long flags;

	card = (ns_dev *) dev_id;
	dev = card->atmdev;
	card->intcnt++;

	PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);

	spin_lock_irqsave(&card->int_lock, flags);

	stat_r = readl(card->membase + STAT);

	/* Transmit Status Indicator has been written to T. S. Queue */
	if (stat_r & NS_STAT_TSIF) {
		TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
		process_tsq(card);
		writel(NS_STAT_TSIF, card->membase + STAT);
	}

	/* Incomplete CS-PDU has been transmitted */
	if (stat_r & NS_STAT_TXICP) {
		writel(NS_STAT_TXICP, card->membase + STAT);
		TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
			 card->index);
	}

	/* Transmit Status Queue 7/8 full */
	if (stat_r & NS_STAT_TSQF) {
		writel(NS_STAT_TSQF, card->membase + STAT);
		PRINTK("nicstar%d: TSQ full.\n", card->index);
		process_tsq(card);
	}

	/* Timer overflow */
	if (stat_r & NS_STAT_TMROF) {
		writel(NS_STAT_TMROF, card->membase + STAT);
		PRINTK("nicstar%d: Timer overflow.\n", card->index);
	}

	/* PHY device interrupt signal active */
	if (stat_r & NS_STAT_PHYI) {
		writel(NS_STAT_PHYI, card->membase + STAT);
		PRINTK("nicstar%d: PHY interrupt.\n", card->index);
		if (dev->phy && dev->phy->interrupt) {
			dev->phy->interrupt(dev);
		}
	}

	/* Small Buffer Queue is full */
	if (stat_r & NS_STAT_SFBQF) {
		writel(NS_STAT_SFBQF, card->membase + STAT);
		printk("nicstar%d: Small free buffer queue is full.\n",
		       card->index);
	}

	/* Large Buffer Queue is full */
	if (stat_r & NS_STAT_LFBQF) {
		writel(NS_STAT_LFBQF, card->membase + STAT);
		printk("nicstar%d: Large free buffer queue is full.\n",
		       card->index);
	}

	/* Receive Status Queue is full */
	if (stat_r & NS_STAT_RSQF) {
		writel(NS_STAT_RSQF, card->membase + STAT);
		printk("nicstar%d: RSQ full.\n", card->index);
		process_rsq(card);
	}

	/* Complete CS-PDU received */
	if (stat_r & NS_STAT_EOPDU) {
		RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
		process_rsq(card);
		writel(NS_STAT_EOPDU, card->membase + STAT);
	}

	/* Raw cell received */
	if (stat_r & NS_STAT_RAWCF) {
		writel(NS_STAT_RAWCF, card->membase + STAT);
#ifndef RCQ_SUPPORT
		printk("nicstar%d: Raw cell received and no support yet...\n",
		       card->index);
#endif /* RCQ_SUPPORT */
		/* NOTE: the following procedure may keep a raw cell pending
		   until the next interrupt. As this preliminary support is
		   only meant to avoid buffer leakage, this is not an issue. */
		while (readl(card->membase + RAWCT) != card->rawch) {

			if (ns_rcqe_islast(card->rawcell)) {
				struct sk_buff *oldbuf;

				oldbuf = card->rcbuf;
				card->rcbuf = idr_find(&card->idr,
						       ns_rcqe_nextbufhandle(card->rawcell));
				card->rawch = NS_PRV_DMA(card->rcbuf);
				card->rawcell = (struct ns_rcqe *)
						card->rcbuf->data;
				recycle_rx_buf(card, oldbuf);
			} else {
				card->rawch += NS_RCQE_SIZE;
				card->rawcell++;
			}
		}
	}

	/* Small buffer queue is empty */
	if (stat_r & NS_STAT_SFBQE) {
		int i;
		struct sk_buff *sb;

		writel(NS_STAT_SFBQE, card->membase + STAT);
		printk("nicstar%d: Small free buffer queue empty.\n",
		       card->index);
		for (i = 0; i < card->sbnr.min; i++) {
			sb = dev_alloc_skb(NS_SMSKBSIZE);
			if (sb == NULL) {
				writel(readl(card->membase + CFG) &
				       ~NS_CFG_EFBIE, card->membase + CFG);
				card->efbie = 0;
				break;
			}
			NS_PRV_BUFTYPE(sb) = BUF_SM;
			skb_queue_tail(&card->sbpool.queue, sb);
			skb_reserve(sb, NS_AAL0_HEADER);
			push_rxbufs(card, sb);
		}
		card->sbfqc = i;
		process_rsq(card);
	}

	/* Large buffer queue empty */
	if (stat_r & NS_STAT_LFBQE) {
		int i;
		struct sk_buff *lb;

		writel(NS_STAT_LFBQE, card->membase + STAT);
		printk("nicstar%d: Large free buffer queue empty.\n",
		       card->index);
		for (i = 0; i < card->lbnr.min; i++) {
			lb = dev_alloc_skb(NS_LGSKBSIZE);
			if (lb == NULL) {
				writel(readl(card->membase + CFG) &
				       ~NS_CFG_EFBIE, card->membase + CFG);
				card->efbie = 0;
				break;
			}
			NS_PRV_BUFTYPE(lb) = BUF_LG;
			skb_queue_tail(&card->lbpool.queue, lb);
			skb_reserve(lb, NS_SMBUFSIZE);
			push_rxbufs(card, lb);
		}
		card->lbfqc = i;
		process_rsq(card);
	}

	/* Receive Status Queue is 7/8 full */
	if (stat_r & NS_STAT_RSQAF) {
		writel(NS_STAT_RSQAF, card->membase + STAT);
		RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
		process_rsq(card);
	}

	spin_unlock_irqrestore(&card->int_lock, flags);
	PRINTK("nicstar%d: end of interrupt service\n", card->index);
	return IRQ_HANDLED;
}

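/*
 * Open a VC.  For a CBR transmit connection a private SCQ and the required
 * number of TST entries are reserved; UBR transmit traffic shares SCQ0.  For
 * receive, the connection is opened in hardware by writing its entry in the
 * receive connection table (RCT).
 */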
static int ns_open(struct atm_vcc *vcc)
{
	ns_dev *card;
	vc_map *vc;
	unsigned long tmpl, modl;
	int tcr, tcra;		/* target cell rate, and absolute value */
	int n = 0;		/* Number of entries in the TST. Initialized to remove
				   the compiler warning. */
	u32 u32d[4];
	int frscdi = 0;		/* Index of the SCD. Initialized to remove the compiler
				   warning. How I wish compilers were clever enough to
				   tell which variables can truly be used
				   uninitialized... */
	int inuse;		/* tx or rx vc already in use by another vcc */
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	card = (ns_dev *) vcc->dev->dev_data;
	PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
	       vci);
	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
		PRINTK("nicstar%d: unsupported AAL.\n", card->index);
		return -EINVAL;
	}

	vc = &(card->vcmap[vpi << card->vcibits | vci]);
	vcc->dev_data = vc;

	inuse = 0;
	if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
		inuse = 1;
	if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
		inuse += 2;
	if (inuse) {
		printk("nicstar%d: %s vci already in use.\n", card->index,
		       inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
		return -EINVAL;
	}

	set_bit(ATM_VF_ADDR, &vcc->flags);

	/* NOTE: You are not allowed to modify an open connection's QOS. To change
	   that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
	   needed to do that. */
	if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
		scq_info *scq;

		set_bit(ATM_VF_PARTIAL, &vcc->flags);
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			/* Check requested cell rate and availability of SCD */
			if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
			    && vcc->qos.txtp.min_pcr == 0) {
				PRINTK
				    ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			tcr = atm_pcr_goal(&(vcc->qos.txtp));
			tcra = tcr >= 0 ? tcr : -tcr;

			PRINTK("nicstar%d: target cell rate = %d.\n",
			       card->index, vcc->qos.txtp.max_pcr);

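			/* For a positive target rate, the number of TST
			   entries reserved below works out to
			   ceil(tcra * NS_TST_NUM_ENTRIES / max_pcr), i.e. the
			   requested fraction of the link rate rounded up to
			   whole table slots. */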
			tmpl =
			    (unsigned long)tcra *(unsigned long)
			    NS_TST_NUM_ENTRIES;
			modl = tmpl % card->max_pcr;

			n = (int)(tmpl / card->max_pcr);
			if (tcr > 0) {
				if (modl > 0)
					n++;
			} else if (tcr == 0) {
				if ((n =
				     (card->tst_free_entries -
				      NS_TST_RESERVED)) <= 0) {
					PRINTK
					    ("nicstar%d: no CBR bandwidth free.\n",
					     card->index);
					clear_bit(ATM_VF_PARTIAL, &vcc->flags);
					clear_bit(ATM_VF_ADDR, &vcc->flags);
					return -EINVAL;
				}
			}

			if (n == 0) {
				printk
				    ("nicstar%d: selected bandwidth < granularity.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
				PRINTK
				    ("nicstar%d: not enough free CBR bandwidth.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			} else
				card->tst_free_entries -= n;

			XPRINTK("nicstar%d: writing %d tst entries.\n",
				card->index, n);
			for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
				if (card->scd2vc[frscdi] == NULL) {
					card->scd2vc[frscdi] = vc;
					break;
				}
			}
			if (frscdi == NS_FRSCD_NUM) {
				PRINTK
				    ("nicstar%d: no SCD available for CBR channel.\n",
				     card->index);
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EBUSY;
			}

			vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;

			scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
			if (scq == NULL) {
				PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
				       card->index);
				card->scd2vc[frscdi] = NULL;
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -ENOMEM;
			}
			vc->scq = scq;
			u32d[0] = scq_virt_to_bus(scq, scq->base);
			u32d[1] = (u32) 0x00000000;
			u32d[2] = (u32) 0xffffffff;
			u32d[3] = (u32) 0x00000000;
			ns_write_sram(card, vc->cbr_scd, u32d, 4);

			fill_tst(card, n, vc);
		} else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
			vc->cbr_scd = 0x00000000;
			vc->scq = card->scq0;
		}

		if (vcc->qos.txtp.traffic_class != ATM_NONE) {
			vc->tx = 1;
			vc->tx_vcc = vcc;
			vc->tbd_count = 0;
		}
		if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
			u32 status;

			vc->rx = 1;
			vc->rx_vcc = vcc;
			vc->rx_iov = NULL;

			/* Open the connection in hardware */
			if (vcc->qos.aal == ATM_AAL5)
				status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
			else	/* vcc->qos.aal == ATM_AAL0 */
				status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
#ifdef RCQ_SUPPORT
			status |= NS_RCTE_RAWCELLINTEN;
#endif /* RCQ_SUPPORT */
			ns_write_sram(card,
				      NS_RCT +
				      (vpi << card->vcibits | vci) *
				      NS_RCT_ENTRY_SIZE, &status, 1);
		}

	}

	set_bit(ATM_VF_READY, &vcc->flags);
	return 0;
}

static void ns_close(struct atm_vcc *vcc)
{
	vc_map *vc;
	ns_dev *card;
	u32 data;
	int i;

	vc = vcc->dev_data;
	card = vcc->dev->dev_data;
	PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
	       (int)vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		u32 addr;
		unsigned long flags;

		addr =
		    NS_RCT +
		    (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
		spin_lock_irqsave(&card->res_lock, flags);
		while (CMD_BUSY(card)) ;
		writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
		       card->membase + CMD);
		spin_unlock_irqrestore(&card->res_lock, flags);

		vc->rx = 0;
		if (vc->rx_iov != NULL) {
			struct sk_buff *iovb;
			u32 stat;

			stat = readl(card->membase + STAT);
			card->sbfqc = ns_stat_sfbqc_get(stat);
			card->lbfqc = ns_stat_lfbqc_get(stat);

			PRINTK
			    ("nicstar%d: closing a VC with pending rx buffers.\n",
			     card->index);
			iovb = vc->rx_iov;
			recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
					      NS_PRV_IOVCNT(iovb));
			NS_PRV_IOVCNT(iovb) = 0;
			spin_lock_irqsave(&card->int_lock, flags);
			recycle_iov_buf(card, iovb);
			spin_unlock_irqrestore(&card->int_lock, flags);
			vc->rx_iov = NULL;
		}
	}

	if (vcc->qos.txtp.traffic_class != ATM_NONE) {
		vc->tx = 0;
	}

	if (vcc->qos.txtp.traffic_class == ATM_CBR) {
		unsigned long flags;
		ns_scqe *scqep;
		scq_info *scq;

		scq = vc->scq;

		for (;;) {
			spin_lock_irqsave(&scq->lock, flags);
			scqep = scq->next;
			if (scqep == scq->base)
				scqep = scq->last;
			else
				scqep--;
			if (scqep == scq->tail) {
				spin_unlock_irqrestore(&scq->lock, flags);
				break;
			}
			/* If the last entry is not a TSR, place one in the SCQ in order to
			   be able to completely drain it and then close. */
			if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
				ns_scqe tsr;
				u32 scdi, scqi;
				u32 data;
				int index;

				tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
				scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
				scqi = scq->next - scq->base;
				tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
				tsr.word_3 = 0x00000000;
				tsr.word_4 = 0x00000000;
				*scq->next = tsr;
				index = (int)scqi;
				scq->skb[index] = NULL;
				if (scq->next == scq->last)
					scq->next = scq->base;
				else
					scq->next++;
				data = scq_virt_to_bus(scq, scq->next);
				ns_write_sram(card, scq->scd, &data, 1);
			}
			spin_unlock_irqrestore(&scq->lock, flags);
			schedule();
		}

		/* Free all TST entries */
		data = NS_TST_OPCODE_VARIABLE;
		for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
			if (card->tste2vc[i] == vc) {
				ns_write_sram(card, card->tst_addr + i, &data,
					      1);
				card->tste2vc[i] = NULL;
				card->tst_free_entries++;
|  | 1520 | } | 
|  | 1521 |  | 
|  | 1522 | card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; | 
|  | 1523 | free_scq(card, vc->scq, vcc); | 
|  | 1524 | } | 
|  | 1525 |  | 
|  | 1526 | /* remove all references to vcc before deleting it */ | 
|  | 1527 | if (vcc->qos.txtp.traffic_class != ATM_NONE) { | 
|  | 1528 | unsigned long flags; | 
|  | 1529 | scq_info *scq = card->scq0; | 
|  | 1530 |  | 
|  | 1531 | spin_lock_irqsave(&scq->lock, flags); | 
|  | 1532 |  | 
|  | 1533 | for (i = 0; i < scq->num_entries; i++) { | 
|  | 1534 | if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { | 
|  | 1535 | ATM_SKB(scq->skb[i])->vcc = NULL; | 
|  | 1536 | atm_return(vcc, scq->skb[i]->truesize); | 
|  | 1537 | PRINTK | 
|  | 1538 | ("nicstar: deleted pending vcc mapping\n"); | 
|  | 1539 | } | 
|  | 1540 | } | 
|  | 1541 |  | 
|  | 1542 | spin_unlock_irqrestore(&scq->lock, flags); | 
|  | 1543 | } | 
|  | 1544 |  | 
|  | 1545 | vcc->dev_data = NULL; | 
|  | 1546 | clear_bit(ATM_VF_PARTIAL, &vcc->flags); | 
|  | 1547 | clear_bit(ATM_VF_ADDR, &vcc->flags); | 
|  | 1548 |  | 
|  | 1549 | #ifdef RX_DEBUG | 
|  | 1550 | { | 
|  | 1551 | u32 stat, cfg; | 
|  | 1552 | stat = readl(card->membase + STAT); | 
|  | 1553 | cfg = readl(card->membase + CFG); | 
|  | 1554 | printk("STAT = 0x%08X  CFG = 0x%08X  \n", stat, cfg); | 
|  | 1555 | printk | 
|  | 1556 | ("TSQ: base = 0x%p  next = 0x%p  last = 0x%p  TSQT = 0x%08X \n", | 
|  | 1557 | card->tsq.base, card->tsq.next, | 
|  | 1558 | card->tsq.last, readl(card->membase + TSQT)); | 
|  | 1559 | printk | 
|  | 1560 | ("RSQ: base = 0x%p  next = 0x%p  last = 0x%p  RSQT = 0x%08X \n", | 
|  | 1561 | card->rsq.base, card->rsq.next, | 
|  | 1562 | card->rsq.last, readl(card->membase + RSQT)); | 
|  | 1563 | printk("Empty free buffer queue interrupt %s \n", | 
|  | 1564 | card->efbie ? "enabled" : "disabled"); | 
|  | 1565 | printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n", | 
|  | 1566 | ns_stat_sfbqc_get(stat), card->sbpool.count, | 
|  | 1567 | ns_stat_lfbqc_get(stat), card->lbpool.count); | 
|  | 1568 | printk("hbpool.count = %d  iovpool.count = %d \n", | 
|  | 1569 | card->hbpool.count, card->iovpool.count); | 
|  | 1570 | } | 
|  | 1571 | #endif /* RX_DEBUG */ | 
|  | 1572 | } | 
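|  |  | /* | 
|  |  |  * Teardown order used above for a CBR connection, summarised as a reading | 
|  |  |  * aid (no new logic): | 
|  |  |  * | 
|  |  |  *  1. If the newest SCQ entry is not already a TSR, queue one so that the | 
|  |  |  *     card reports when the queue has drained; schedule() and re-check | 
|  |  |  *     until the tail catches up with the entry just before next. | 
|  |  |  *  2. Return the connection's TST slots to the variable-rate opcode and | 
|  |  |  *     give the entries back to tst_free_entries. | 
|  |  |  *  3. Clear the scd2vc[] mapping and free the fixed-rate SCQ. | 
|  |  |  */ | 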
|  | 1573 |  | 
|  | 1574 | static void fill_tst(ns_dev * card, int n, vc_map * vc) | 
|  | 1575 | { | 
|  | 1576 | u32 new_tst; | 
|  | 1577 | unsigned long cl; | 
|  | 1578 | int e, r; | 
|  | 1579 | u32 data; | 
|  | 1580 |  | 
|  | 1581 | /* It would be very complicated to keep the two TSTs synchronized while | 
|  | 1582 | ensuring that writes are only made to the inactive TST. So, for now I | 
|  | 1583 | will use only one TST. If problems occur, I will change this again. */ | 
|  | 1584 |  | 
|  | 1585 | new_tst = card->tst_addr; | 
|  | 1586 |  | 
|  | 1587 | /* Fill procedure */ | 
|  | 1588 |  | 
|  | 1589 | for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { | 
|  | 1590 | if (card->tste2vc[e] == NULL) | 
|  | 1591 | break; | 
|  | 1592 | } | 
|  | 1593 | if (e == NS_TST_NUM_ENTRIES) { | 
|  | 1594 | printk("nicstar%d: No free TST entries found. \n", card->index); | 
|  | 1595 | return; | 
|  | 1596 | } | 
|  | 1597 |  | 
|  | 1598 | r = n; | 
|  | 1599 | cl = NS_TST_NUM_ENTRIES; | 
|  | 1600 | data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); | 
|  | 1601 |  | 
|  | 1602 | while (r > 0) { | 
|  | 1603 | if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { | 
|  | 1604 | card->tste2vc[e] = vc; | 
|  | 1605 | ns_write_sram(card, new_tst + e, &data, 1); | 
|  | 1606 | cl -= NS_TST_NUM_ENTRIES; | 
|  | 1607 | r--; | 
|  | 1608 | } | 
|  | 1609 |  | 
|  | 1610 | if (++e == NS_TST_NUM_ENTRIES) { | 
|  | 1611 | e = 0; | 
|  | 1612 | } | 
|  | 1613 | cl += n; | 
|  | 1614 | } | 
|  | 1615 |  | 
|  | 1616 | /* End of fill procedure */ | 
|  | 1617 |  | 
|  | 1618 | data = ns_tste_make(NS_TST_OPCODE_END, new_tst); | 
|  | 1619 | ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); | 
|  | 1620 | ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); | 
|  | 1621 | card->tst_addr = new_tst; | 
|  | 1622 | } | 
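|  |  | /* | 
|  |  |  * The loop above spreads the n CBR slots as evenly as possible over the | 
|  |  |  * NS_TST_NUM_ENTRIES positions of the schedule table, Bresenham-style: | 
|  |  |  * "cl" is an error accumulator that gains n per table position and loses | 
|  |  |  * NS_TST_NUM_ENTRIES for every slot written.  A minimal stand-alone model | 
|  |  |  * of that distribution (illustrative sketch only, not driver code; it | 
|  |  |  * ignores the already-occupied-entry check and the SRAM write): | 
|  |  |  * | 
|  |  |  *	void model_fill(int n, int num_entries) | 
|  |  |  *	{ | 
|  |  |  *		int e = 0, r = n; | 
|  |  |  *		long cl = num_entries; | 
|  |  |  * | 
|  |  |  *		while (r > 0) { | 
|  |  |  *			if (cl >= num_entries) { | 
|  |  |  *				printf("TST entry %d -> CBR\n", e); | 
|  |  |  *				cl -= num_entries; | 
|  |  |  *				r--; | 
|  |  |  *			} | 
|  |  |  *			if (++e == num_entries) | 
|  |  |  *				e = 0; | 
|  |  |  *			cl += n; | 
|  |  |  *		} | 
|  |  |  *	} | 
|  |  |  * | 
|  |  |  * With n = 3 and num_entries = 8 this marks entries 0, 3 and 6, so the | 
|  |  |  * CBR transmit opportunities end up roughly equidistant in the table. | 
|  |  |  */ | 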
|  | 1623 |  | 
|  | 1624 | static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) | 
|  | 1625 | { | 
|  | 1626 | ns_dev *card; | 
|  | 1627 | vc_map *vc; | 
|  | 1628 | scq_info *scq; | 
|  | 1629 | unsigned long buflen; | 
|  | 1630 | ns_scqe scqe; | 
|  | 1631 | u32 flags;		/* TBD flags, not CPU flags */ | 
|  | 1632 |  | 
|  | 1633 | card = vcc->dev->dev_data; | 
|  | 1634 | TXPRINTK("nicstar%d: ns_send() called.\n", card->index); | 
|  | 1635 | if ((vc = (vc_map *) vcc->dev_data) == NULL) { | 
|  | 1636 | printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", | 
|  | 1637 | card->index); | 
|  | 1638 | atomic_inc(&vcc->stats->tx_err); | 
|  | 1639 | dev_kfree_skb_any(skb); | 
|  | 1640 | return -EINVAL; | 
|  | 1641 | } | 
|  | 1642 |  | 
|  | 1643 | if (!vc->tx) { | 
|  | 1644 | printk("nicstar%d: Trying to transmit on a non-tx VC.\n", | 
|  | 1645 | card->index); | 
|  | 1646 | atomic_inc(&vcc->stats->tx_err); | 
|  | 1647 | dev_kfree_skb_any(skb); | 
|  | 1648 | return -EINVAL; | 
|  | 1649 | } | 
|  | 1650 |  | 
|  | 1651 | if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { | 
|  | 1652 | printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", | 
|  | 1653 | card->index); | 
|  | 1654 | atomic_inc(&vcc->stats->tx_err); | 
|  | 1655 | dev_kfree_skb_any(skb); | 
|  | 1656 | return -EINVAL; | 
|  | 1657 | } | 
|  | 1658 |  | 
|  | 1659 | if (skb_shinfo(skb)->nr_frags != 0) { | 
|  | 1660 | printk("nicstar%d: No scatter-gather yet.\n", card->index); | 
|  | 1661 | atomic_inc(&vcc->stats->tx_err); | 
|  | 1662 | dev_kfree_skb_any(skb); | 
|  | 1663 | return -EINVAL; | 
|  | 1664 | } | 
|  | 1665 |  | 
|  | 1666 | ATM_SKB(skb)->vcc = vcc; | 
|  | 1667 |  | 
|  | 1668 | NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data, | 
|  | 1669 | skb->len, DMA_TO_DEVICE); | 
|  | 1670 |  | 
|  | 1671 | if (vcc->qos.aal == ATM_AAL5) { | 
|  | 1672 | buflen = (skb->len + 47 + 8) / 48 * 48;	/* PDU + 8-byte AAL5 trailer, padded to a multiple of 48 */ | 
|  | 1673 | flags = NS_TBD_AAL5; | 
|  | 1674 | scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb)); | 
|  | 1675 | scqe.word_3 = cpu_to_le32(skb->len); | 
|  | 1676 | scqe.word_4 = | 
|  | 1677 | ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, | 
|  | 1678 | ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0); | 
|  | 1680 | flags |= NS_TBD_EOPDU; | 
|  | 1681 | } else {		/* (vcc->qos.aal == ATM_AAL0) */ | 
|  | 1682 |  | 
|  | 1683 | buflen = ATM_CELL_PAYLOAD;	/* i.e., 48 bytes */ | 
|  | 1684 | flags = NS_TBD_AAL0; | 
|  | 1685 | scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER); | 
|  | 1686 | scqe.word_3 = cpu_to_le32(0x00000000); | 
|  | 1687 | if (*skb->data & 0x02)	/* Payload type 1 - end of pdu */ | 
|  | 1688 | flags |= NS_TBD_EOPDU; | 
|  | 1689 | scqe.word_4 = | 
|  | 1690 | cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); | 
|  | 1691 | /* Force the VPI/VCI to be the same as in VCC struct */ | 
|  | 1692 | scqe.word_4 |= | 
|  | 1693 | cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT | | 
|  | 1694 | ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) & | 
|  | 1695 | NS_TBD_VC_MASK); | 
|  | 1697 | } | 
|  | 1698 |  | 
|  | 1699 | if (vcc->qos.txtp.traffic_class == ATM_CBR) { | 
|  | 1700 | scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); | 
|  | 1701 | scq = ((vc_map *) vcc->dev_data)->scq; | 
|  | 1702 | } else { | 
|  | 1703 | scqe.word_1 = | 
|  | 1704 | ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); | 
|  | 1705 | scq = card->scq0; | 
|  | 1706 | } | 
|  | 1707 |  | 
|  | 1708 | if (push_scqe(card, vc, scq, &scqe, skb) != 0) { | 
|  | 1709 | atomic_inc(&vcc->stats->tx_err); | 
|  | 1710 | dev_kfree_skb_any(skb); | 
|  | 1711 | return -EIO; | 
|  | 1712 | } | 
|  | 1713 | atomic_inc(&vcc->stats->tx); | 
|  | 1714 |  | 
|  | 1715 | return 0; | 
|  | 1716 | } | 
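|  |  | /* | 
|  |  |  * Worked example for the AAL5 length computed in ns_send() above: the TBD | 
|  |  |  * is given the PDU length rounded up so that payload plus the 8-byte AAL5 | 
|  |  |  * trailer fills a whole number of 48-byte cell payloads.  For | 
|  |  |  * skb->len = 100: | 
|  |  |  * | 
|  |  |  *	buflen = (100 + 47 + 8) / 48 * 48 = 155 / 48 * 48 = 3 * 48 = 144 | 
|  |  |  * | 
|  |  |  * i.e. three cells, with 144 - 100 - 8 = 36 bytes of padding between the | 
|  |  |  * user data and the trailer.  For AAL0 exactly one 48-byte cell payload is | 
|  |  |  * sent per skb instead, with the cell header rebuilt from the first word | 
|  |  |  * of the skb data. | 
|  |  |  */ | 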
|  | 1717 |  | 
|  | 1718 | static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, | 
|  | 1719 | struct sk_buff *skb) | 
|  | 1720 | { | 
|  | 1721 | unsigned long flags; | 
|  | 1722 | ns_scqe tsr; | 
|  | 1723 | u32 scdi, scqi; | 
|  | 1724 | int scq_is_vbr; | 
|  | 1725 | u32 data; | 
|  | 1726 | int index; | 
|  | 1727 |  | 
|  | 1728 | spin_lock_irqsave(&scq->lock, flags); | 
|  | 1729 | while (scq->tail == scq->next) { | 
|  | 1730 | if (in_interrupt()) { | 
|  | 1731 | spin_unlock_irqrestore(&scq->lock, flags); | 
|  | 1732 | printk("nicstar%d: Error pushing TBD.\n", card->index); | 
|  | 1733 | return 1; | 
|  | 1734 | } | 
|  | 1735 |  | 
|  | 1736 | scq->full = 1; | 
|  | 1737 | wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq, | 
|  | 1738 | scq->tail != scq->next, | 
|  | 1739 | scq->lock, | 
|  | 1740 | SCQFULL_TIMEOUT); | 
|  | 1741 |  | 
|  | 1742 | if (scq->full) { | 
|  | 1743 | spin_unlock_irqrestore(&scq->lock, flags); | 
|  | 1744 | printk("nicstar%d: Timeout pushing TBD.\n", | 
|  | 1745 | card->index); | 
|  | 1746 | return 1; | 
|  | 1747 | } | 
|  | 1748 | } | 
|  | 1749 | *scq->next = *tbd; | 
|  | 1750 | index = (int)(scq->next - scq->base); | 
|  | 1751 | scq->skb[index] = skb; | 
|  | 1752 | XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n", | 
|  | 1753 | card->index, skb, index); | 
|  | 1754 | XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", | 
|  | 1755 | card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), | 
|  | 1756 | le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), | 
|  | 1757 | scq->next); | 
|  | 1758 | if (scq->next == scq->last) | 
|  | 1759 | scq->next = scq->base; | 
|  | 1760 | else | 
|  | 1761 | scq->next++; | 
|  | 1762 |  | 
|  | 1763 | vc->tbd_count++; | 
|  | 1764 | if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { | 
|  | 1765 | scq->tbd_count++; | 
|  | 1766 | scq_is_vbr = 1; | 
|  | 1767 | } else | 
|  | 1768 | scq_is_vbr = 0; | 
|  | 1769 |  | 
|  | 1770 | if (vc->tbd_count >= MAX_TBD_PER_VC | 
|  | 1771 | || scq->tbd_count >= MAX_TBD_PER_SCQ) { | 
|  | 1772 | int has_run = 0; | 
|  | 1773 |  | 
|  | 1774 | while (scq->tail == scq->next) { | 
|  | 1775 | if (in_interrupt()) { | 
|  | 1776 | data = scq_virt_to_bus(scq, scq->next); | 
|  | 1777 | ns_write_sram(card, scq->scd, &data, 1); | 
|  | 1778 | spin_unlock_irqrestore(&scq->lock, flags); | 
|  | 1779 | printk("nicstar%d: Error pushing TSR.\n", | 
|  | 1780 | card->index); | 
|  | 1781 | return 0; | 
|  | 1782 | } | 
|  | 1783 |  | 
|  | 1784 | scq->full = 1; | 
|  | 1785 | if (has_run++) | 
|  | 1786 | break; | 
|  | 1787 | wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq, | 
|  | 1788 | scq->tail != scq->next, | 
|  | 1789 | scq->lock, | 
|  | 1790 | SCQFULL_TIMEOUT); | 
|  | 1791 | } | 
|  | 1792 |  | 
|  | 1793 | if (!scq->full) { | 
|  | 1794 | tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); | 
|  | 1795 | if (scq_is_vbr) | 
|  | 1796 | scdi = NS_TSR_SCDISVBR; | 
|  | 1797 | else | 
|  | 1798 | scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; | 
|  | 1799 | scqi = scq->next - scq->base; | 
|  | 1800 | tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); | 
|  | 1801 | tsr.word_3 = 0x00000000; | 
|  | 1802 | tsr.word_4 = 0x00000000; | 
|  | 1803 |  | 
|  | 1804 | *scq->next = tsr; | 
|  | 1805 | index = (int)scqi; | 
|  | 1806 | scq->skb[index] = NULL; | 
|  | 1807 | XPRINTK | 
|  | 1808 | ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", | 
|  | 1809 | card->index, le32_to_cpu(tsr.word_1), | 
|  | 1810 | le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), | 
|  | 1811 | le32_to_cpu(tsr.word_4), scq->next); | 
|  | 1812 | if (scq->next == scq->last) | 
|  | 1813 | scq->next = scq->base; | 
|  | 1814 | else | 
|  | 1815 | scq->next++; | 
|  | 1816 | vc->tbd_count = 0; | 
|  | 1817 | scq->tbd_count = 0; | 
|  | 1818 | } else | 
|  | 1819 | PRINTK("nicstar%d: Timeout pushing TSR.\n", | 
|  | 1820 | card->index); | 
|  | 1821 | } | 
|  | 1822 | data = scq_virt_to_bus(scq, scq->next); | 
|  | 1823 | ns_write_sram(card, scq->scd, &data, 1); | 
|  | 1824 |  | 
|  | 1825 | spin_unlock_irqrestore(&scq->lock, flags); | 
|  | 1826 |  | 
|  | 1827 | return 0; | 
|  | 1828 | } | 
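|  |  | /* | 
|  |  |  * How push_scqe() and drain_scq() share the SCQ ring, as inferred from the | 
|  |  |  * code (informal sketch, not taken from the hardware documentation): | 
|  |  |  * | 
|  |  |  *	base/last : first and last entry of the ring | 
|  |  |  *	next      : entry the driver will fill next | 
|  |  |  *	tail      : position of the newest TSR the card has reported back | 
|  |  |  *	            (entries after tail and before next are still owned by | 
|  |  |  *	            the card) | 
|  |  |  *	full      : tail == next, i.e. the producer has wrapped around to | 
|  |  |  *	            the last reported position | 
|  |  |  * | 
|  |  |  *	// advance-with-wrap step used after every entry written: | 
|  |  |  *	if (scq->next == scq->last) | 
|  |  |  *		scq->next = scq->base; | 
|  |  |  *	else | 
|  |  |  *		scq->next++; | 
|  |  |  * | 
|  |  |  * A TSR is queued after every MAX_TBD_PER_VC TBDs on a VC or | 
|  |  |  * MAX_TBD_PER_SCQ TBDs on an SCQ, so the card keeps reporting progress and | 
|  |  |  * drain_scq() can free the transmitted skbs. | 
|  |  |  */ | 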
|  | 1829 |  | 
|  | 1830 | static void process_tsq(ns_dev * card) | 
|  | 1831 | { | 
|  | 1832 | u32 scdi; | 
|  | 1833 | scq_info *scq; | 
|  | 1834 | ns_tsi *previous = NULL, *one_ahead, *two_ahead; | 
|  | 1835 | int serviced_entries;	/* flag indicating at least one entry was serviced */ | 
|  | 1836 |  | 
|  | 1837 | serviced_entries = 0; | 
|  | 1838 |  | 
|  | 1839 | if (card->tsq.next == card->tsq.last) | 
|  | 1840 | one_ahead = card->tsq.base; | 
|  | 1841 | else | 
|  | 1842 | one_ahead = card->tsq.next + 1; | 
|  | 1843 |  | 
|  | 1844 | if (one_ahead == card->tsq.last) | 
|  | 1845 | two_ahead = card->tsq.base; | 
|  | 1846 | else | 
|  | 1847 | two_ahead = one_ahead + 1; | 
|  | 1848 |  | 
|  | 1849 | while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || | 
|  | 1850 | !ns_tsi_isempty(two_ahead)) | 
|  | 1851 | /* At most two empty, as stated in the 77201 errata */ | 
|  | 1852 | { | 
|  | 1853 | serviced_entries = 1; | 
|  | 1854 |  | 
|  | 1855 | /* Skip the one or two possible empty entries */ | 
|  | 1856 | while (ns_tsi_isempty(card->tsq.next)) { | 
|  | 1857 | if (card->tsq.next == card->tsq.last) | 
|  | 1858 | card->tsq.next = card->tsq.base; | 
|  | 1859 | else | 
|  | 1860 | card->tsq.next++; | 
|  | 1861 | } | 
|  | 1862 |  | 
|  | 1863 | if (!ns_tsi_tmrof(card->tsq.next)) { | 
|  | 1864 | scdi = ns_tsi_getscdindex(card->tsq.next); | 
|  | 1865 | if (scdi == NS_TSI_SCDISVBR) | 
|  | 1866 | scq = card->scq0; | 
|  | 1867 | else { | 
|  | 1868 | if (card->scd2vc[scdi] == NULL) { | 
|  | 1869 | printk | 
|  | 1870 | ("nicstar%d: could not find VC from SCD index.\n", | 
|  | 1871 | card->index); | 
|  | 1872 | ns_tsi_init(card->tsq.next); | 
|  | 1873 | return; | 
|  | 1874 | } | 
|  | 1875 | scq = card->scd2vc[scdi]->scq; | 
|  | 1876 | } | 
|  | 1877 | drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); | 
|  | 1878 | scq->full = 0; | 
|  | 1879 | wake_up_interruptible(&(scq->scqfull_waitq)); | 
|  | 1880 | } | 
|  | 1881 |  | 
|  | 1882 | ns_tsi_init(card->tsq.next); | 
|  | 1883 | previous = card->tsq.next; | 
|  | 1884 | if (card->tsq.next == card->tsq.last) | 
|  | 1885 | card->tsq.next = card->tsq.base; | 
|  | 1886 | else | 
|  | 1887 | card->tsq.next++; | 
|  | 1888 |  | 
|  | 1889 | if (card->tsq.next == card->tsq.last) | 
|  | 1890 | one_ahead = card->tsq.base; | 
|  | 1891 | else | 
|  | 1892 | one_ahead = card->tsq.next + 1; | 
|  | 1893 |  | 
|  | 1894 | if (one_ahead == card->tsq.last) | 
|  | 1895 | two_ahead = card->tsq.base; | 
|  | 1896 | else | 
|  | 1897 | two_ahead = one_ahead + 1; | 
|  | 1898 | } | 
|  | 1899 |  | 
|  | 1900 | if (serviced_entries) | 
|  | 1901 | writel(PTR_DIFF(previous, card->tsq.base), | 
|  | 1902 | card->membase + TSQH); | 
|  | 1903 | } | 
|  | 1904 |  | 
|  | 1905 | static void drain_scq(ns_dev * card, scq_info * scq, int pos) | 
|  | 1906 | { | 
|  | 1907 | struct atm_vcc *vcc; | 
|  | 1908 | struct sk_buff *skb; | 
|  | 1909 | int i; | 
|  | 1910 | unsigned long flags; | 
|  | 1911 |  | 
|  | 1912 | XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n", | 
|  | 1913 | card->index, scq, pos); | 
|  | 1914 | if (pos >= scq->num_entries) { | 
|  | 1915 | printk("nicstar%d: Bad index on drain_scq().\n", card->index); | 
|  | 1916 | return; | 
|  | 1917 | } | 
|  | 1918 |  | 
|  | 1919 | spin_lock_irqsave(&scq->lock, flags); | 
|  | 1920 | i = (int)(scq->tail - scq->base); | 
|  | 1921 | if (++i == scq->num_entries) | 
|  | 1922 | i = 0; | 
|  | 1923 | while (i != pos) { | 
|  | 1924 | skb = scq->skb[i]; | 
|  | 1925 | XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n", | 
|  | 1926 | card->index, skb, i); | 
|  | 1927 | if (skb != NULL) { | 
|  | 1928 | dma_unmap_single(&card->pcidev->dev, | 
|  | 1929 | NS_PRV_DMA(skb), | 
|  | 1930 | skb->len, | 
|  | 1931 | DMA_TO_DEVICE); | 
|  | 1932 | vcc = ATM_SKB(skb)->vcc; | 
|  | 1933 | if (vcc && vcc->pop != NULL) { | 
|  | 1934 | vcc->pop(vcc, skb); | 
|  | 1935 | } else { | 
|  | 1936 | dev_kfree_skb_irq(skb); | 
|  | 1937 | } | 
|  | 1938 | scq->skb[i] = NULL; | 
|  | 1939 | } | 
|  | 1940 | if (++i == scq->num_entries) | 
|  | 1941 | i = 0; | 
|  | 1942 | } | 
|  | 1943 | scq->tail = scq->base + pos; | 
|  | 1944 | spin_unlock_irqrestore(&scq->lock, flags); | 
|  | 1945 | } | 
|  | 1946 |  | 
|  | 1947 | static void process_rsq(ns_dev * card) | 
|  | 1948 | { | 
|  | 1949 | ns_rsqe *previous; | 
|  | 1950 |  | 
|  | 1951 | if (!ns_rsqe_valid(card->rsq.next)) | 
|  | 1952 | return; | 
|  | 1953 | do { | 
|  | 1954 | dequeue_rx(card, card->rsq.next); | 
|  | 1955 | ns_rsqe_init(card->rsq.next); | 
|  | 1956 | previous = card->rsq.next; | 
|  | 1957 | if (card->rsq.next == card->rsq.last) | 
|  | 1958 | card->rsq.next = card->rsq.base; | 
|  | 1959 | else | 
|  | 1960 | card->rsq.next++; | 
|  | 1961 | } while (ns_rsqe_valid(card->rsq.next)); | 
|  | 1962 | writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH); | 
|  | 1963 | } | 
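|  |  | /* | 
|  |  |  * Both status queues are consumed the same way: entries are handled and | 
|  |  |  * re-initialised in place, then the matching head register (TSQH above, | 
|  |  |  * RSQH here) is written with the offset of the last serviced entry | 
|  |  |  * relative to the queue base (PTR_DIFF() is assumed to compute exactly | 
|  |  |  * that, see nicstar.h), telling the card how far the driver has read. | 
|  |  |  */ | 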
|  | 1964 |  | 
|  | 1965 | static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) | 
|  | 1966 | { | 
|  | 1967 | u32 vpi, vci; | 
|  | 1968 | vc_map *vc; | 
|  | 1969 | struct sk_buff *iovb; | 
|  | 1970 | struct iovec *iov; | 
|  | 1971 | struct atm_vcc *vcc; | 
|  | 1972 | struct sk_buff *skb; | 
|  | 1973 | unsigned short aal5_len; | 
|  | 1974 | int len; | 
|  | 1975 | u32 stat; | 
|  | 1976 | u32 id; | 
|  | 1977 |  | 
|  | 1978 | stat = readl(card->membase + STAT); | 
|  | 1979 | card->sbfqc = ns_stat_sfbqc_get(stat); | 
|  | 1980 | card->lbfqc = ns_stat_lfbqc_get(stat); | 
|  | 1981 |  | 
|  | 1982 | id = le32_to_cpu(rsqe->buffer_handle); | 
|  | 1983 | skb = idr_remove(&card->idr, id); | 
|  | 1984 | if (!skb) { | 
|  | 1985 | RXPRINTK(KERN_ERR | 
|  | 1986 | "nicstar%d: skb not found!\n", card->index); | 
|  | 1987 | return; | 
|  | 1988 | } | 
|  | 1989 | dma_sync_single_for_cpu(&card->pcidev->dev, | 
|  | 1990 | NS_PRV_DMA(skb), | 
|  | 1991 | (NS_PRV_BUFTYPE(skb) == BUF_SM | 
|  | 1992 | ? NS_SMSKBSIZE : NS_LGSKBSIZE), | 
|  | 1993 | DMA_FROM_DEVICE); | 
|  | 1994 | dma_unmap_single(&card->pcidev->dev, | 
|  | 1995 | NS_PRV_DMA(skb), | 
|  | 1996 | (NS_PRV_BUFTYPE(skb) == BUF_SM | 
|  | 1997 | ? NS_SMSKBSIZE : NS_LGSKBSIZE), | 
|  | 1998 | DMA_FROM_DEVICE); | 
|  | 1999 | vpi = ns_rsqe_vpi(rsqe); | 
|  | 2000 | vci = ns_rsqe_vci(rsqe); | 
|  | 2001 | if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { | 
|  | 2002 | printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", | 
|  | 2003 | card->index, vpi, vci); | 
|  | 2004 | recycle_rx_buf(card, skb); | 
|  | 2005 | return; | 
|  | 2006 | } | 
|  | 2007 |  | 
|  | 2008 | vc = &(card->vcmap[vpi << card->vcibits | vci]); | 
|  | 2009 | if (!vc->rx) { | 
|  | 2010 | RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", | 
|  | 2011 | card->index, vpi, vci); | 
|  | 2012 | recycle_rx_buf(card, skb); | 
|  | 2013 | return; | 
|  | 2014 | } | 
|  | 2015 |  | 
|  | 2016 | vcc = vc->rx_vcc; | 
|  | 2017 |  | 
|  | 2018 | if (vcc->qos.aal == ATM_AAL0) { | 
|  | 2019 | struct sk_buff *sb; | 
|  | 2020 | unsigned char *cell; | 
|  | 2021 | int i; | 
|  | 2022 |  | 
|  | 2023 | cell = skb->data; | 
|  | 2024 | for (i = ns_rsqe_cellcount(rsqe); i; i--) { | 
|  | 2025 | sb = dev_alloc_skb(NS_SMSKBSIZE); | 
|  | 2026 | if (!sb) { | 
|  | 2027 | printk | 
|  | 2028 | ("nicstar%d: Can't allocate buffers for aal0.\n", | 
|  | 2029 | card->index); | 
|  | 2030 | atomic_add(i, &vcc->stats->rx_drop); | 
|  | 2031 | break; | 
|  | 2032 | } | 
|  | 2033 | if (!atm_charge(vcc, sb->truesize)) { | 
|  | 2034 | RXPRINTK | 
|  | 2035 | ("nicstar%d: atm_charge() dropped aal0 packets.\n", | 
|  | 2036 | card->index); | 
|  | 2037 | atomic_add(i - 1, &vcc->stats->rx_drop);	/* already increased by 1 */ | 
|  | 2038 | dev_kfree_skb_any(sb); | 
|  | 2039 | break; | 
|  | 2040 | } | 
|  | 2041 | /* Rebuild the header */ | 
|  | 2042 | *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | | 
|  | 2043 | (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000); | 
|  | 2044 | if (i == 1 && ns_rsqe_eopdu(rsqe)) | 
|  | 2045 | *((u32 *) sb->data) |= 0x00000002; | 
|  | 2046 | skb_put(sb, NS_AAL0_HEADER); | 
|  | 2047 | memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); | 
|  | 2048 | skb_put(sb, ATM_CELL_PAYLOAD); | 
|  | 2049 | ATM_SKB(sb)->vcc = vcc; | 
|  | 2050 | __net_timestamp(sb); | 
|  | 2051 | vcc->push(vcc, sb); | 
|  | 2052 | atomic_inc(&vcc->stats->rx); | 
|  | 2053 | cell += ATM_CELL_PAYLOAD; | 
|  | 2054 | } | 
|  | 2055 |  | 
|  | 2056 | recycle_rx_buf(card, skb); | 
|  | 2057 | return; | 
|  | 2058 | } | 
|  | 2059 |  | 
|  | 2060 | /* To reach this point, the AAL layer can only be AAL5 */ | 
|  | 2061 |  | 
|  | 2062 | if ((iovb = vc->rx_iov) == NULL) { | 
|  | 2063 | iovb = skb_dequeue(&(card->iovpool.queue)); | 
|  | 2064 | if (iovb == NULL) {	/* No buffers in the queue */ | 
|  | 2065 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); | 
|  | 2066 | if (iovb == NULL) { | 
|  | 2067 | printk("nicstar%d: Out of iovec buffers.\n", | 
|  | 2068 | card->index); | 
|  | 2069 | atomic_inc(&vcc->stats->rx_drop); | 
|  | 2070 | recycle_rx_buf(card, skb); | 
|  | 2071 | return; | 
|  | 2072 | } | 
|  | 2073 | NS_PRV_BUFTYPE(iovb) = BUF_NONE; | 
|  | 2074 | } else if (--card->iovpool.count < card->iovnr.min) { | 
|  | 2075 | struct sk_buff *new_iovb; | 
|  | 2076 | if ((new_iovb = | 
|  | 2077 | alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) { | 
|  | 2078 | NS_PRV_BUFTYPE(iovb) = BUF_NONE; | 
|  | 2079 | skb_queue_tail(&card->iovpool.queue, new_iovb); | 
|  | 2080 | card->iovpool.count++; | 
|  | 2081 | } | 
|  | 2082 | } | 
|  | 2083 | vc->rx_iov = iovb; | 
|  | 2084 | NS_PRV_IOVCNT(iovb) = 0; | 
|  | 2085 | iovb->len = 0; | 
|  | 2086 | iovb->data = iovb->head; | 
|  | 2087 | skb_reset_tail_pointer(iovb); | 
|  | 2088 | /* IMPORTANT: a pointer to the sk_buff containing the small or large | 
|  | 2089 | buffer is stored as iovec base, NOT a pointer to the | 
|  | 2090 | small or large buffer itself. */ | 
|  | 2091 | } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { | 
|  | 2092 | printk("nicstar%d: received too big AAL5 SDU.\n", card->index); | 
|  | 2093 | atomic_inc(&vcc->stats->rx_err); | 
|  | 2094 | recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, | 
|  | 2095 | NS_MAX_IOVECS); | 
|  | 2096 | NS_PRV_IOVCNT(iovb) = 0; | 
|  | 2097 | iovb->len = 0; | 
|  | 2098 | iovb->data = iovb->head; | 
|  | 2099 | skb_reset_tail_pointer(iovb); | 
|  | 2100 | } | 
|  | 2101 | iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++]; | 
|  | 2102 | iov->iov_base = (void *)skb; | 
|  | 2103 | iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; | 
|  | 2104 | iovb->len += iov->iov_len; | 
|  | 2105 |  | 
|  | 2106 | #ifdef EXTRA_DEBUG | 
|  | 2107 | if (NS_PRV_IOVCNT(iovb) == 1) { | 
|  | 2108 | if (NS_PRV_BUFTYPE(skb) != BUF_SM) { | 
|  | 2109 | printk | 
|  | 2110 | ("nicstar%d: Expected a small buffer, and this is not one.\n", | 
|  | 2111 | card->index); | 
|  | 2112 | which_list(card, skb); | 
|  | 2113 | atomic_inc(&vcc->stats->rx_err); | 
|  | 2114 | recycle_rx_buf(card, skb); | 
|  | 2115 | vc->rx_iov = NULL; | 
|  | 2116 | recycle_iov_buf(card, iovb); | 
|  | 2117 | return; | 
|  | 2118 | } | 
|  | 2119 | } else {		/* NS_PRV_IOVCNT(iovb) >= 2 */ | 
|  | 2120 |  | 
|  | 2121 | if (NS_PRV_BUFTYPE(skb) != BUF_LG) { | 
|  | 2122 | printk | 
|  | 2123 | ("nicstar%d: Expected a large buffer, and this is not one.\n", | 
|  | 2124 | card->index); | 
|  | 2125 | which_list(card, skb); | 
|  | 2126 | atomic_inc(&vcc->stats->rx_err); | 
|  | 2127 | recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, | 
|  | 2128 | NS_PRV_IOVCNT(iovb)); | 
|  | 2129 | vc->rx_iov = NULL; | 
|  | 2130 | recycle_iov_buf(card, iovb); | 
|  | 2131 | return; | 
|  | 2132 | } | 
|  | 2133 | } | 
|  | 2134 | #endif /* EXTRA_DEBUG */ | 
|  | 2135 |  | 
|  | 2136 | if (ns_rsqe_eopdu(rsqe)) { | 
|  | 2137 | /* This works correctly regardless of the endianness of the host */ | 
|  | 2138 | unsigned char *L1L2 = (unsigned char *) | 
|  | 2139 | (skb->data + iov->iov_len - 6); | 
|  | 2140 | aal5_len = L1L2[0] << 8 | L1L2[1]; | 
|  | 2141 | len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; | 
|  | 2142 | if (ns_rsqe_crcerr(rsqe) || | 
|  | 2143 | len + 8 > iovb->len || len + (47 + 8) < iovb->len) { | 
|  | 2144 | printk("nicstar%d: AAL5 CRC error", card->index); | 
|  | 2145 | if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) | 
|  | 2146 | printk(" - PDU size mismatch.\n"); | 
|  | 2147 | else | 
|  | 2148 | printk(".\n"); | 
|  | 2149 | atomic_inc(&vcc->stats->rx_err); | 
|  | 2150 | recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, | 
|  | 2151 | NS_PRV_IOVCNT(iovb)); | 
|  | 2152 | vc->rx_iov = NULL; | 
|  | 2153 | recycle_iov_buf(card, iovb); | 
|  | 2154 | return; | 
|  | 2155 | } | 
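|  |  | /* | 
|  |  |  * Trailer layout assumed for the padded AAL5 CPCS-PDU, which is why the | 
|  |  |  * length is read 6 bytes from the end of the last buffer: | 
|  |  |  * | 
|  |  |  *	... payload ... pad | UU | CPI | Length (2 bytes, BE) | CRC-32 (4) | 
|  |  |  * | 
|  |  |  * For example, a PDU whose last eight bytes are | 
|  |  |  *	00 00 01 2c aa bb cc dd | 
|  |  |  * yields aal5_len = 0x012c = 300 bytes; a stored length of 0x0000 encodes | 
|  |  |  * the maximum SDU size of 65536 bytes, which the code above restores. | 
|  |  |  */ | 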
|  | 2156 |  | 
|  | 2157 | /* By this point we (hopefully) have a complete SDU without errors. */ | 
|  | 2158 |  | 
|  | 2159 | if (NS_PRV_IOVCNT(iovb) == 1) {	/* Just a small buffer */ | 
|  | 2160 | /* skb points to a small buffer */ | 
|  | 2161 | if (!atm_charge(vcc, skb->truesize)) { | 
|  | 2162 | push_rxbufs(card, skb); | 
|  | 2163 | atomic_inc(&vcc->stats->rx_drop); | 
|  | 2164 | } else { | 
|  | 2165 | skb_put(skb, len); | 
|  | 2166 | dequeue_sm_buf(card, skb); | 
|  | 2167 | ATM_SKB(skb)->vcc = vcc; | 
|  | 2168 | __net_timestamp(skb); | 
|  | 2169 | vcc->push(vcc, skb); | 
|  | 2170 | atomic_inc(&vcc->stats->rx); | 
|  | 2171 | } | 
|  | 2172 | } else if (NS_PRV_IOVCNT(iovb) == 2) {	/* One small plus one large buffer */ | 
|  | 2173 | struct sk_buff *sb; | 
|  | 2174 |  | 
|  | 2175 | sb = (struct sk_buff *)(iov - 1)->iov_base; | 
|  | 2176 | /* skb points to a large buffer */ | 
|  | 2177 |  | 
|  | 2178 | if (len <= NS_SMBUFSIZE) { | 
|  | 2179 | if (!atm_charge(vcc, sb->truesize)) { | 
|  | 2180 | push_rxbufs(card, sb); | 
|  | 2181 | atomic_inc(&vcc->stats->rx_drop); | 
|  | 2182 | } else { | 
|  | 2183 | skb_put(sb, len); | 
|  | 2184 | dequeue_sm_buf(card, sb); | 
|  | 2185 | ATM_SKB(sb)->vcc = vcc; | 
|  | 2186 | __net_timestamp(sb); | 
|  | 2187 | vcc->push(vcc, sb); | 
|  | 2188 | atomic_inc(&vcc->stats->rx); | 
|  | 2189 | } | 
|  | 2190 |  | 
|  | 2191 | push_rxbufs(card, skb); | 
|  | 2192 |  | 
|  | 2193 | } else {	/* len > NS_SMBUFSIZE, the usual case */ | 
|  | 2194 |  | 
|  | 2195 | if (!atm_charge(vcc, skb->truesize)) { | 
|  | 2196 | push_rxbufs(card, skb); | 
|  | 2197 | atomic_inc(&vcc->stats->rx_drop); | 
|  | 2198 | } else { | 
|  | 2199 | dequeue_lg_buf(card, skb); | 
|  | 2200 | skb_push(skb, NS_SMBUFSIZE); | 
|  | 2201 | skb_copy_from_linear_data(sb, skb->data, | 
|  | 2202 | NS_SMBUFSIZE); | 
|  | 2203 | skb_put(skb, len - NS_SMBUFSIZE); | 
|  | 2204 | ATM_SKB(skb)->vcc = vcc; | 
|  | 2205 | __net_timestamp(skb); | 
|  | 2206 | vcc->push(vcc, skb); | 
|  | 2207 | atomic_inc(&vcc->stats->rx); | 
|  | 2208 | } | 
|  | 2209 |  | 
|  | 2210 | push_rxbufs(card, sb); | 
|  | 2211 |  | 
|  | 2212 | } | 
|  | 2213 |  | 
|  | 2214 | } else {	/* Must push a huge buffer */ | 
|  | 2215 |  | 
|  | 2216 | struct sk_buff *hb, *sb, *lb; | 
|  | 2217 | int remaining, tocopy; | 
|  | 2218 | int j; | 
|  | 2219 |  | 
|  | 2220 | hb = skb_dequeue(&(card->hbpool.queue)); | 
|  | 2221 | if (hb == NULL) {	/* No buffers in the queue */ | 
|  | 2222 |  | 
|  | 2223 | hb = dev_alloc_skb(NS_HBUFSIZE); | 
|  | 2224 | if (hb == NULL) { | 
|  | 2225 | printk | 
|  | 2226 | ("nicstar%d: Out of huge buffers.\n", | 
|  | 2227 | card->index); | 
|  | 2228 | atomic_inc(&vcc->stats->rx_drop); | 
|  | 2229 | recycle_iovec_rx_bufs(card, | 
|  | 2230 | (struct iovec *) | 
|  | 2231 | iovb->data, | 
|  | 2232 | NS_PRV_IOVCNT(iovb)); | 
|  | 2233 | vc->rx_iov = NULL; | 
|  | 2234 | recycle_iov_buf(card, iovb); | 
|  | 2235 | return; | 
|  | 2236 | } else if (card->hbpool.count < card->hbnr.min) { | 
|  | 2237 | struct sk_buff *new_hb; | 
|  | 2238 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { | 
|  | 2239 | skb_queue_tail(&card->hbpool.queue, new_hb); | 
|  | 2243 | card->hbpool.count++; | 
|  | 2244 | } | 
|  | 2245 | } | 
|  | 2246 | NS_PRV_BUFTYPE(hb) = BUF_NONE; | 
|  | 2247 | } else if (--card->hbpool.count < card->hbnr.min) { | 
|  | 2248 | struct sk_buff *new_hb; | 
|  | 2249 | if ((new_hb = | 
|  | 2250 | dev_alloc_skb(NS_HBUFSIZE)) != NULL) { | 
|  | 2251 | NS_PRV_BUFTYPE(new_hb) = BUF_NONE; | 
|  | 2252 | skb_queue_tail(&card->hbpool.queue, | 
|  | 2253 | new_hb); | 
|  | 2254 | card->hbpool.count++; | 
|  | 2255 | } | 
|  | 2256 | if (card->hbpool.count < card->hbnr.min) { | 
|  | 2257 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { | 
|  | 2258 | NS_PRV_BUFTYPE(new_hb) = BUF_NONE; | 
|  | 2259 | skb_queue_tail(&card->hbpool.queue, new_hb); | 
|  | 2264 | card->hbpool.count++; | 
|  | 2265 | } | 
|  | 2266 | } | 
|  | 2267 | } | 
|  | 2268 |  | 
|  | 2269 | iov = (struct iovec *)iovb->data; | 
|  | 2270 |  | 
|  | 2271 | if (!atm_charge(vcc, hb->truesize)) { | 
|  | 2272 | recycle_iovec_rx_bufs(card, iov, | 
|  | 2273 | NS_PRV_IOVCNT(iovb)); | 
|  | 2274 | if (card->hbpool.count < card->hbnr.max) { | 
|  | 2275 | skb_queue_tail(&card->hbpool.queue, hb); | 
|  | 2276 | card->hbpool.count++; | 
|  | 2277 | } else | 
|  | 2278 | dev_kfree_skb_any(hb); | 
|  | 2279 | atomic_inc(&vcc->stats->rx_drop); | 
|  | 2280 | } else { | 
|  | 2281 | /* Copy the small buffer to the huge buffer */ | 
|  | 2282 | sb = (struct sk_buff *)iov->iov_base; | 
|  | 2283 | skb_copy_from_linear_data(sb, hb->data, | 
|  | 2284 | iov->iov_len); | 
|  | 2285 | skb_put(hb, iov->iov_len); | 
|  | 2286 | remaining = len - iov->iov_len; | 
|  | 2287 | iov++; | 
|  | 2288 | /* Free the small buffer */ | 
|  | 2289 | push_rxbufs(card, sb); | 
|  | 2290 |  | 
|  | 2291 | /* Copy all large buffers to the huge buffer and free them */ | 
|  | 2292 | for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) { | 
|  | 2293 | lb = (struct sk_buff *)iov->iov_base; | 
|  | 2294 | tocopy = | 
|  | 2295 | min_t(int, remaining, iov->iov_len); | 
|  | 2296 | skb_copy_from_linear_data(lb, skb_tail_pointer(hb), | 
|  | 2297 | tocopy); | 
|  | 2299 | skb_put(hb, tocopy); | 
|  | 2300 | iov++; | 
|  | 2301 | remaining -= tocopy; | 
|  | 2302 | push_rxbufs(card, lb); | 
|  | 2303 | } | 
|  | 2304 | #ifdef EXTRA_DEBUG | 
|  | 2305 | if (remaining != 0 || hb->len != len) | 
|  | 2306 | printk | 
|  | 2307 | ("nicstar%d: Huge buffer len mismatch.\n", | 
|  | 2308 | card->index); | 
|  | 2309 | #endif /* EXTRA_DEBUG */ | 
|  | 2310 | ATM_SKB(hb)->vcc = vcc; | 
|  | 2311 | __net_timestamp(hb); | 
|  | 2312 | vcc->push(vcc, hb); | 
|  | 2313 | atomic_inc(&vcc->stats->rx); | 
|  | 2314 | } | 
|  | 2315 | } | 
|  | 2316 |  | 
|  | 2317 | vc->rx_iov = NULL; | 
|  | 2318 | recycle_iov_buf(card, iovb); | 
|  | 2319 | } | 
|  | 2320 |  | 
|  | 2321 | } | 
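|  |  | /* | 
|  |  |  * Reassembly strategy used by dequeue_rx() above, in short: the iovec skb | 
|  |  |  * only collects pointers to the small/large buffers that make up one AAL5 | 
|  |  |  * PDU.  On end-of-PDU, a PDU held in a single small buffer is pushed up | 
|  |  |  * as-is; for a small+large pair the data is delivered in the small buffer | 
|  |  |  * if it fits there, otherwise in the large buffer after the small buffer's | 
|  |  |  * bytes are copied into its reserved headroom; anything spread over more | 
|  |  |  * buffers is copied wholesale into a "huge" buffer.  Unused buffers go | 
|  |  |  * straight back to the card via push_rxbufs(). | 
|  |  |  */ | 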
|  | 2322 |  | 
|  | 2323 | static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) | 
|  | 2324 | { | 
|  | 2325 | if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) { | 
|  | 2326 | printk("nicstar%d: What kind of rx buffer is this?\n", | 
|  | 2327 | card->index); | 
|  | 2328 | dev_kfree_skb_any(skb); | 
|  | 2329 | } else | 
|  | 2330 | push_rxbufs(card, skb); | 
|  | 2331 | } | 
|  | 2332 |  | 
|  | 2333 | static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count) | 
|  | 2334 | { | 
|  | 2335 | while (count-- > 0) | 
|  | 2336 | recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); | 
|  | 2337 | } | 
|  | 2338 |  | 
|  | 2339 | static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb) | 
|  | 2340 | { | 
|  | 2341 | if (card->iovpool.count < card->iovnr.max) { | 
|  | 2342 | skb_queue_tail(&card->iovpool.queue, iovb); | 
|  | 2343 | card->iovpool.count++; | 
|  | 2344 | } else | 
|  | 2345 | dev_kfree_skb_any(iovb); | 
|  | 2346 | } | 
|  | 2347 |  | 
|  | 2348 | static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) | 
|  | 2349 | { | 
|  | 2350 | skb_unlink(sb, &card->sbpool.queue); | 
|  | 2351 | if (card->sbfqc < card->sbnr.init) { | 
|  | 2352 | struct sk_buff *new_sb; | 
|  | 2353 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { | 
|  | 2354 | NS_PRV_BUFTYPE(new_sb) = BUF_SM; | 
|  | 2355 | skb_queue_tail(&card->sbpool.queue, new_sb); | 
|  | 2356 | skb_reserve(new_sb, NS_AAL0_HEADER); | 
|  | 2357 | push_rxbufs(card, new_sb); | 
|  | 2358 | } | 
|  | 2359 | } | 
|  | 2360 | if (card->sbfqc < card->sbnr.init) | 
|  | 2361 | { | 
|  | 2362 | struct sk_buff *new_sb; | 
|  | 2363 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { | 
|  | 2364 | NS_PRV_BUFTYPE(new_sb) = BUF_SM; | 
|  | 2365 | skb_queue_tail(&card->sbpool.queue, new_sb); | 
|  | 2366 | skb_reserve(new_sb, NS_AAL0_HEADER); | 
|  | 2367 | push_rxbufs(card, new_sb); | 
|  | 2368 | } | 
|  | 2369 | } | 
|  | 2370 | } | 
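|  |  | /* | 
|  |  |  * Note that the replenish check above appears twice, so a single dequeue | 
|  |  |  * can queue up to two replacement buffers whenever the free-buffer count | 
|  |  |  * has dropped below the configured init level; the same pattern is used | 
|  |  |  * in dequeue_lg_buf() below. | 
|  |  |  */ | 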
|  | 2371 |  | 
|  | 2372 | static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) | 
|  | 2373 | { | 
|  | 2374 | skb_unlink(lb, &card->lbpool.queue); | 
|  | 2375 | if (card->lbfqc < card->lbnr.init) { | 
|  | 2376 | struct sk_buff *new_lb; | 
|  | 2377 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { | 
|  | 2378 | NS_PRV_BUFTYPE(new_lb) = BUF_LG; | 
|  | 2379 | skb_queue_tail(&card->lbpool.queue, new_lb); | 
|  | 2380 | skb_reserve(new_lb, NS_SMBUFSIZE); | 
|  | 2381 | push_rxbufs(card, new_lb); | 
|  | 2382 | } | 
|  | 2383 | } | 
|  | 2384 | if (card->lbfqc < card->lbnr.init) | 
|  | 2385 | { | 
|  | 2386 | struct sk_buff *new_lb; | 
|  | 2387 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { | 
|  | 2388 | NS_PRV_BUFTYPE(new_lb) = BUF_LG; | 
|  | 2389 | skb_queue_tail(&card->lbpool.queue, new_lb); | 
|  | 2390 | skb_reserve(new_lb, NS_SMBUFSIZE); | 
|  | 2391 | push_rxbufs(card, new_lb); | 
|  | 2392 | } | 
|  | 2393 | } | 
|  | 2394 | } | 
|  | 2395 |  | 
|  | 2396 | static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page) | 
|  | 2397 | { | 
|  | 2398 | u32 stat; | 
|  | 2399 | ns_dev *card; | 
|  | 2400 | int left; | 
|  | 2401 |  | 
|  | 2402 | left = (int)*pos; | 
|  | 2403 | card = (ns_dev *) dev->dev_data; | 
|  | 2404 | stat = readl(card->membase + STAT); | 
|  | 2405 | if (!left--) | 
|  | 2406 | return sprintf(page, "Pool   count    min   init    max \n"); | 
|  | 2407 | if (!left--) | 
|  | 2408 | return sprintf(page, "Small  %5d  %5d  %5d  %5d \n", | 
|  | 2409 | ns_stat_sfbqc_get(stat), card->sbnr.min, | 
|  | 2410 | card->sbnr.init, card->sbnr.max); | 
|  | 2411 | if (!left--) | 
|  | 2412 | return sprintf(page, "Large  %5d  %5d  %5d  %5d \n", | 
|  | 2413 | ns_stat_lfbqc_get(stat), card->lbnr.min, | 
|  | 2414 | card->lbnr.init, card->lbnr.max); | 
|  | 2415 | if (!left--) | 
|  | 2416 | return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n", | 
|  | 2417 | card->hbpool.count, card->hbnr.min, | 
|  | 2418 | card->hbnr.init, card->hbnr.max); | 
|  | 2419 | if (!left--) | 
|  | 2420 | return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n", | 
|  | 2421 | card->iovpool.count, card->iovnr.min, | 
|  | 2422 | card->iovnr.init, card->iovnr.max); | 
|  | 2423 | if (!left--) { | 
|  | 2424 | int retval; | 
|  | 2425 | retval = | 
|  | 2426 | sprintf(page, "Interrupt counter: %u \n", card->intcnt); | 
|  | 2427 | card->intcnt = 0; | 
|  | 2428 | return retval; | 
|  | 2429 | } | 
|  | 2430 | #if 0 | 
|  | 2431 | /* Dump 25.6 Mbps PHY registers */ | 
|  | 2432 | /* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. I left | 
|  | 2433 | it here just in case it's needed for debugging. */ | 
|  | 2434 | if (card->max_pcr == ATM_25_PCR && !left--) { | 
|  | 2435 | u32 phy_regs[4]; | 
|  | 2436 | u32 i; | 
|  | 2437 |  | 
|  | 2438 | for (i = 0; i < 4; i++) { | 
|  | 2439 | while (CMD_BUSY(card)) ; | 
|  | 2440 | writel(NS_CMD_READ_UTILITY | 0x00000200 | i, | 
|  | 2441 | card->membase + CMD); | 
|  | 2442 | while (CMD_BUSY(card)) ; | 
|  | 2443 | phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; | 
|  | 2444 | } | 
|  | 2445 |  | 
|  | 2446 | return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", | 
|  | 2447 | phy_regs[0], phy_regs[1], phy_regs[2], | 
|  | 2448 | phy_regs[3]); | 
|  | 2449 | } | 
|  | 2450 | #endif /* 0 - Dump 25.6 Mbps PHY registers */ | 
|  | 2451 | #if 0 | 
|  | 2452 | /* Dump TST */ | 
|  | 2453 | if (left-- < NS_TST_NUM_ENTRIES) { | 
|  | 2454 | if (card->tste2vc[left + 1] == NULL) | 
|  | 2455 | return sprintf(page, "%5d - VBR/UBR \n", left + 1); | 
|  | 2456 | else | 
|  | 2457 | return sprintf(page, "%5d - %d %d \n", left + 1, | 
|  | 2458 | card->tste2vc[left + 1]->tx_vcc->vpi, | 
|  | 2459 | card->tste2vc[left + 1]->tx_vcc->vci); | 
|  | 2460 | } | 
|  | 2461 | #endif /* 0 */ | 
|  | 2462 | return 0; | 
|  | 2463 | } | 
|  | 2464 |  | 
|  | 2465 | static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg) | 
|  | 2466 | { | 
|  | 2467 | ns_dev *card; | 
|  | 2468 | pool_levels pl; | 
|  | 2469 | long btype; | 
|  | 2470 | unsigned long flags; | 
|  | 2471 |  | 
|  | 2472 | card = dev->dev_data; | 
|  | 2473 | switch (cmd) { | 
|  | 2474 | case NS_GETPSTAT: | 
|  | 2475 | if (get_user | 
|  | 2476 | (pl.buftype, &((pool_levels __user *) arg)->buftype)) | 
|  | 2477 | return -EFAULT; | 
|  | 2478 | switch (pl.buftype) { | 
|  | 2479 | case NS_BUFTYPE_SMALL: | 
|  | 2480 | pl.count = | 
|  | 2481 | ns_stat_sfbqc_get(readl(card->membase + STAT)); | 
|  | 2482 | pl.level.min = card->sbnr.min; | 
|  | 2483 | pl.level.init = card->sbnr.init; | 
|  | 2484 | pl.level.max = card->sbnr.max; | 
|  | 2485 | break; | 
|  | 2486 |  | 
|  | 2487 | case NS_BUFTYPE_LARGE: | 
|  | 2488 | pl.count = | 
|  | 2489 | ns_stat_lfbqc_get(readl(card->membase + STAT)); | 
|  | 2490 | pl.level.min = card->lbnr.min; | 
|  | 2491 | pl.level.init = card->lbnr.init; | 
|  | 2492 | pl.level.max = card->lbnr.max; | 
|  | 2493 | break; | 
|  | 2494 |  | 
|  | 2495 | case NS_BUFTYPE_HUGE: | 
|  | 2496 | pl.count = card->hbpool.count; | 
|  | 2497 | pl.level.min = card->hbnr.min; | 
|  | 2498 | pl.level.init = card->hbnr.init; | 
|  | 2499 | pl.level.max = card->hbnr.max; | 
|  | 2500 | break; | 
|  | 2501 |  | 
|  | 2502 | case NS_BUFTYPE_IOVEC: | 
|  | 2503 | pl.count = card->iovpool.count; | 
|  | 2504 | pl.level.min = card->iovnr.min; | 
|  | 2505 | pl.level.init = card->iovnr.init; | 
|  | 2506 | pl.level.max = card->iovnr.max; | 
|  | 2507 | break; | 
|  | 2508 |  | 
|  | 2509 | default: | 
|  | 2510 | return -ENOIOCTLCMD; | 
|  | 2511 |  | 
|  | 2512 | } | 
|  | 2513 | if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) | 
|  | 2514 | return (sizeof(pl)); | 
|  | 2515 | else | 
|  | 2516 | return -EFAULT; | 
|  | 2517 |  | 
|  | 2518 | case NS_SETBUFLEV: | 
|  | 2519 | if (!capable(CAP_NET_ADMIN)) | 
|  | 2520 | return -EPERM; | 
|  | 2521 | if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) | 
|  | 2522 | return -EFAULT; | 
|  | 2523 | if (pl.level.min >= pl.level.init | 
|  | 2524 | || pl.level.init >= pl.level.max) | 
|  | 2525 | return -EINVAL; | 
|  | 2526 | if (pl.level.min == 0) | 
|  | 2527 | return -EINVAL; | 
|  | 2528 | switch (pl.buftype) { | 
|  | 2529 | case NS_BUFTYPE_SMALL: | 
|  | 2530 | if (pl.level.max > TOP_SB) | 
|  | 2531 | return -EINVAL; | 
|  | 2532 | card->sbnr.min = pl.level.min; | 
|  | 2533 | card->sbnr.init = pl.level.init; | 
|  | 2534 | card->sbnr.max = pl.level.max; | 
|  | 2535 | break; | 
|  | 2536 |  | 
|  | 2537 | case NS_BUFTYPE_LARGE: | 
|  | 2538 | if (pl.level.max > TOP_LB) | 
|  | 2539 | return -EINVAL; | 
|  | 2540 | card->lbnr.min = pl.level.min; | 
|  | 2541 | card->lbnr.init = pl.level.init; | 
|  | 2542 | card->lbnr.max = pl.level.max; | 
|  | 2543 | break; | 
|  | 2544 |  | 
|  | 2545 | case NS_BUFTYPE_HUGE: | 
|  | 2546 | if (pl.level.max > TOP_HB) | 
|  | 2547 | return -EINVAL; | 
|  | 2548 | card->hbnr.min = pl.level.min; | 
|  | 2549 | card->hbnr.init = pl.level.init; | 
|  | 2550 | card->hbnr.max = pl.level.max; | 
|  | 2551 | break; | 
|  | 2552 |  | 
|  | 2553 | case NS_BUFTYPE_IOVEC: | 
|  | 2554 | if (pl.level.max > TOP_IOVB) | 
|  | 2555 | return -EINVAL; | 
|  | 2556 | card->iovnr.min = pl.level.min; | 
|  | 2557 | card->iovnr.init = pl.level.init; | 
|  | 2558 | card->iovnr.max = pl.level.max; | 
|  | 2559 | break; | 
|  | 2560 |  | 
|  | 2561 | default: | 
|  | 2562 | return -EINVAL; | 
|  | 2563 |  | 
|  | 2564 | } | 
|  | 2565 | return 0; | 
|  | 2566 |  | 
|  | 2567 | case NS_ADJBUFLEV: | 
|  | 2568 | if (!capable(CAP_NET_ADMIN)) | 
|  | 2569 | return -EPERM; | 
|  | 2570 | btype = (long)arg;	/* a long is the same size as a pointer or bigger */ | 
|  | 2571 | switch (btype) { | 
|  | 2572 | case NS_BUFTYPE_SMALL: | 
|  | 2573 | while (card->sbfqc < card->sbnr.init) { | 
|  | 2574 | struct sk_buff *sb; | 
|  | 2575 |  | 
|  | 2576 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | 
|  | 2577 | if (sb == NULL) | 
|  | 2578 | return -ENOMEM; | 
|  | 2579 | NS_PRV_BUFTYPE(sb) = BUF_SM; | 
|  | 2580 | skb_queue_tail(&card->sbpool.queue, sb); | 
|  | 2581 | skb_reserve(sb, NS_AAL0_HEADER); | 
|  | 2582 | push_rxbufs(card, sb); | 
|  | 2583 | } | 
|  | 2584 | break; | 
|  | 2585 |  | 
|  | 2586 | case NS_BUFTYPE_LARGE: | 
|  | 2587 | while (card->lbfqc < card->lbnr.init) { | 
|  | 2588 | struct sk_buff *lb; | 
|  | 2589 |  | 
|  | 2590 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | 
|  | 2591 | if (lb == NULL) | 
|  | 2592 | return -ENOMEM; | 
|  | 2593 | NS_PRV_BUFTYPE(lb) = BUF_LG; | 
|  | 2594 | skb_queue_tail(&card->lbpool.queue, lb); | 
|  | 2595 | skb_reserve(lb, NS_SMBUFSIZE); | 
|  | 2596 | push_rxbufs(card, lb); | 
|  | 2597 | } | 
|  | 2598 | break; | 
|  | 2599 |  | 
|  | 2600 | case NS_BUFTYPE_HUGE: | 
|  | 2601 | while (card->hbpool.count > card->hbnr.init) { | 
|  | 2602 | struct sk_buff *hb; | 
|  | 2603 |  | 
|  | 2604 | spin_lock_irqsave(&card->int_lock, flags); | 
|  | 2605 | hb = skb_dequeue(&card->hbpool.queue); | 
|  | 2606 | card->hbpool.count--; | 
|  | 2607 | spin_unlock_irqrestore(&card->int_lock, flags); | 
|  | 2608 | if (hb == NULL) | 
|  | 2609 | printk | 
|  | 2610 | ("nicstar%d: huge buffer count inconsistent.\n", | 
|  | 2611 | card->index); | 
|  | 2612 | else | 
|  | 2613 | dev_kfree_skb_any(hb); | 
|  | 2614 |  | 
|  | 2615 | } | 
|  | 2616 | while (card->hbpool.count < card->hbnr.init) { | 
|  | 2617 | struct sk_buff *hb; | 
|  | 2618 |  | 
|  | 2619 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | 
|  | 2620 | if (hb == NULL) | 
|  | 2621 | return -ENOMEM; | 
|  | 2622 | NS_PRV_BUFTYPE(hb) = BUF_NONE; | 
|  | 2623 | spin_lock_irqsave(&card->int_lock, flags); | 
|  | 2624 | skb_queue_tail(&card->hbpool.queue, hb); | 
|  | 2625 | card->hbpool.count++; | 
|  | 2626 | spin_unlock_irqrestore(&card->int_lock, flags); | 
|  | 2627 | } | 
|  | 2628 | break; | 
|  | 2629 |  | 
|  | 2630 | case NS_BUFTYPE_IOVEC: | 
|  | 2631 | while (card->iovpool.count > card->iovnr.init) { | 
|  | 2632 | struct sk_buff *iovb; | 
|  | 2633 |  | 
|  | 2634 | spin_lock_irqsave(&card->int_lock, flags); | 
|  | 2635 | iovb = skb_dequeue(&card->iovpool.queue); | 
|  | 2636 | card->iovpool.count--; | 
|  | 2637 | spin_unlock_irqrestore(&card->int_lock, flags); | 
|  | 2638 | if (iovb == NULL) | 
|  | 2639 | printk | 
|  | 2640 | ("nicstar%d: iovec buffer count inconsistent.\n", | 
|  | 2641 | card->index); | 
|  | 2642 | else | 
|  | 2643 | dev_kfree_skb_any(iovb); | 
|  | 2644 |  | 
|  | 2645 | } | 
|  | 2646 | while (card->iovpool.count < card->iovnr.init) { | 
|  | 2647 | struct sk_buff *iovb; | 
|  | 2648 |  | 
|  | 2649 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); | 
|  | 2650 | if (iovb == NULL) | 
|  | 2651 | return -ENOMEM; | 
|  | 2652 | NS_PRV_BUFTYPE(iovb) = BUF_NONE; | 
|  | 2653 | spin_lock_irqsave(&card->int_lock, flags); | 
|  | 2654 | skb_queue_tail(&card->iovpool.queue, iovb); | 
|  | 2655 | card->iovpool.count++; | 
|  | 2656 | spin_unlock_irqrestore(&card->int_lock, flags); | 
|  | 2657 | } | 
|  | 2658 | break; | 
|  | 2659 |  | 
|  | 2660 | default: | 
|  | 2661 | return -EINVAL; | 
|  | 2662 |  | 
|  | 2663 | } | 
|  | 2664 | return 0; | 
|  | 2665 |  | 
|  | 2666 | default: | 
|  | 2667 | if (dev->phy && dev->phy->ioctl) { | 
|  | 2668 | return dev->phy->ioctl(dev, cmd, arg); | 
|  | 2669 | } else { | 
|  | 2670 | printk("nicstar%d: %s == NULL \n", card->index, | 
|  | 2671 | dev->phy ? "dev->phy->ioctl" : "dev->phy"); | 
|  | 2672 | return -ENOIOCTLCMD; | 
|  | 2673 | } | 
|  | 2674 | } | 
|  | 2675 | } | 
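|  |  | /* | 
|  |  |  * Shape of a valid NS_SETBUFLEV request as enforced above (illustrative | 
|  |  |  * only; the numeric levels are made up, the code merely requires | 
|  |  |  * 0 < min < init < max, with max <= TOP_SB for small buffers): | 
|  |  |  * | 
|  |  |  *	pool_levels pl = { | 
|  |  |  *		.buftype = NS_BUFTYPE_SMALL, | 
|  |  |  *		.level   = { .min = 4, .init = 8, .max = TOP_SB }, | 
|  |  |  *	}; | 
|  |  |  * | 
|  |  |  * NS_GETPSTAT fills the same structure in the opposite direction, and | 
|  |  |  * NS_ADJBUFLEV then brings the pools back to their init levels. | 
|  |  |  */ | 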
|  | 2676 |  | 
|  | 2677 | #ifdef EXTRA_DEBUG | 
|  | 2678 | static void which_list(ns_dev * card, struct sk_buff *skb) | 
|  | 2679 | { | 
|  | 2680 | printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb)); | 
|  | 2681 | } | 
|  | 2682 | #endif /* EXTRA_DEBUG */ | 
|  | 2683 |  | 
|  | 2684 | static void ns_poll(unsigned long arg) | 
|  | 2685 | { | 
|  | 2686 | int i; | 
|  | 2687 | ns_dev *card; | 
|  | 2688 | unsigned long flags; | 
|  | 2689 | u32 stat_r, stat_w; | 
|  | 2690 |  | 
|  | 2691 | PRINTK("nicstar: Entering ns_poll().\n"); | 
|  | 2692 | for (i = 0; i < num_cards; i++) { | 
|  | 2693 | card = cards[i]; | 
|  | 2694 | if (spin_is_locked(&card->int_lock)) { | 
|  | 2695 | /* Probably it isn't worth spinning */ | 
|  | 2696 | continue; | 
|  | 2697 | } | 
|  | 2698 | spin_lock_irqsave(&card->int_lock, flags); | 
|  | 2699 |  | 
|  | 2700 | stat_w = 0; | 
|  | 2701 | stat_r = readl(card->membase + STAT); | 
|  | 2702 | if (stat_r & NS_STAT_TSIF) | 
|  | 2703 | stat_w |= NS_STAT_TSIF; | 
|  | 2704 | if (stat_r & NS_STAT_EOPDU) | 
|  | 2705 | stat_w |= NS_STAT_EOPDU; | 
|  | 2706 |  | 
|  | 2707 | process_tsq(card); | 
|  | 2708 | process_rsq(card); | 
|  | 2709 |  | 
|  | 2710 | writel(stat_w, card->membase + STAT); | 
|  | 2711 | spin_unlock_irqrestore(&card->int_lock, flags); | 
|  | 2712 | } | 
|  | 2713 | mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); | 
|  | 2714 | PRINTK("nicstar: Leaving ns_poll().\n"); | 
|  | 2715 | } | 
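|  |  | /* | 
|  |  |  * ns_poll() mirrors the TSIF/EOPDU handling of the interrupt path from a | 
|  |  |  * periodic timer (re-armed every NS_POLL_PERIOD jiffies), so transmit and | 
|  |  |  * receive status entries still get serviced if an interrupt is missed or | 
|  |  |  * delayed.  Cards whose int_lock is already held are simply skipped until | 
|  |  |  * the next tick. | 
|  |  |  */ | 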
|  | 2716 |  | 
|  | 2717 | static void ns_phy_put(struct atm_dev *dev, unsigned char value, | 
|  | 2718 | unsigned long addr) | 
|  | 2719 | { | 
|  | 2720 | ns_dev *card; | 
|  | 2721 | unsigned long flags; | 
|  | 2722 |  | 
|  | 2723 | card = dev->dev_data; | 
|  | 2724 | spin_lock_irqsave(&card->res_lock, flags); | 
|  | 2725 | while (CMD_BUSY(card)) ; | 
|  | 2726 | writel((u32) value, card->membase + DR0); | 
|  | 2727 | writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), | 
|  | 2728 | card->membase + CMD); | 
|  | 2729 | spin_unlock_irqrestore(&card->res_lock, flags); | 
|  | 2730 | } | 
|  | 2731 |  | 
|  | 2732 | static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) | 
|  | 2733 | { | 
|  | 2734 | ns_dev *card; | 
|  | 2735 | unsigned long flags; | 
|  | 2736 | u32 data; | 
|  | 2737 |  | 
|  | 2738 | card = dev->dev_data; | 
|  | 2739 | spin_lock_irqsave(&card->res_lock, flags); | 
|  | 2740 | while (CMD_BUSY(card)) ; | 
|  | 2741 | writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), | 
|  | 2742 | card->membase + CMD); | 
|  | 2743 | while (CMD_BUSY(card)) ; | 
|  | 2744 | data = readl(card->membase + DR0) & 0x000000FF; | 
|  | 2745 | spin_unlock_irqrestore(&card->res_lock, flags); | 
|  | 2746 | return (unsigned char)data; | 
|  | 2747 | } | 
|  | 2748 |  | 
|  | 2749 | module_init(nicstar_init); | 
|  | 2750 | module_exit(nicstar_cleanup); |