blob: 5ce1a4d8fcee79b4a05c668c1128226de2b467f8 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001// SPDX-License-Identifier: GPL-2.0-only
2/******************************************************************************
3
4 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5
6 802.11 status code portion of this file from ethereal-0.10.6:
7 Copyright 2000, Axis Communications AB
8 Ethereal - Network traffic analyzer
9 By Gerald Combs <gerald@ethereal.com>
10 Copyright 1998 Gerald Combs
11
12
13 Contact Information:
14 Intel Linux Wireless <ilw@linux.intel.com>
15 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
16
17******************************************************************************/
18
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <net/cfg80211-wext.h>
22#include "ipw2200.h"
23#include "ipw.h"
24
25
26#ifndef KBUILD_EXTMOD
27#define VK "k"
28#else
29#define VK
30#endif
31
32#ifdef CONFIG_IPW2200_DEBUG
33#define VD "d"
34#else
35#define VD
36#endif
37
38#ifdef CONFIG_IPW2200_MONITOR
39#define VM "m"
40#else
41#define VM
42#endif
43
44#ifdef CONFIG_IPW2200_PROMISCUOUS
45#define VP "p"
46#else
47#define VP
48#endif
49
50#ifdef CONFIG_IPW2200_RADIOTAP
51#define VR "r"
52#else
53#define VR
54#endif
55
56#ifdef CONFIG_IPW2200_QOS
57#define VQ "q"
58#else
59#define VQ
60#endif
61
62#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
63#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
64#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
65#define DRV_VERSION IPW2200_VERSION
66
67#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
68
69MODULE_DESCRIPTION(DRV_DESCRIPTION);
70MODULE_VERSION(DRV_VERSION);
71MODULE_AUTHOR(DRV_COPYRIGHT);
72MODULE_LICENSE("GPL");
73MODULE_FIRMWARE("ipw2200-ibss.fw");
74#ifdef CONFIG_IPW2200_MONITOR
75MODULE_FIRMWARE("ipw2200-sniffer.fw");
76#endif
77MODULE_FIRMWARE("ipw2200-bss.fw");
78
79static int cmdlog = 0;
80static int debug = 0;
81static int default_channel = 0;
82static int network_mode = 0;
83
84static u32 ipw_debug_level;
85static int associate;
86static int auto_create = 1;
87static int led_support = 1;
88static int disable = 0;
89static int bt_coexist = 0;
90static int hwcrypto = 0;
91static int roaming = 1;
92static const char ipw_modes[] = {
93 'a', 'b', 'g', '?'
94};
95static int antenna = CFG_SYS_ANTENNA_BOTH;
96
97#ifdef CONFIG_IPW2200_PROMISCUOUS
98static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
99#endif
100
101static struct ieee80211_rate ipw2200_rates[] = {
102 { .bitrate = 10 },
103 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
104 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
105 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
106 { .bitrate = 60 },
107 { .bitrate = 90 },
108 { .bitrate = 120 },
109 { .bitrate = 180 },
110 { .bitrate = 240 },
111 { .bitrate = 360 },
112 { .bitrate = 480 },
113 { .bitrate = 540 }
114};
115
116#define ipw2200_a_rates (ipw2200_rates + 4)
117#define ipw2200_num_a_rates 8
118#define ipw2200_bg_rates (ipw2200_rates + 0)
119#define ipw2200_num_bg_rates 12
120
121/* Ugly macro to convert literal channel numbers into their mhz equivalents
122 * There are certianly some conditions that will break this (like feeding it '30')
123 * but they shouldn't arise since nothing talks on channel 30. */
124#define ieee80211chan2mhz(x) \
125 (((x) <= 14) ? \
126 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
127 ((x) + 1000) * 5)
128
129#ifdef CONFIG_IPW2200_QOS
130static int qos_enable = 0;
131static int qos_burst_enable = 0;
132static int qos_no_ack_mask = 0;
133static int burst_duration_CCK = 0;
134static int burst_duration_OFDM = 0;
135
136static struct libipw_qos_parameters def_qos_parameters_OFDM = {
137 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
138 QOS_TX3_CW_MIN_OFDM},
139 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
140 QOS_TX3_CW_MAX_OFDM},
141 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
142 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
143 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
144 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
145};
146
147static struct libipw_qos_parameters def_qos_parameters_CCK = {
148 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
149 QOS_TX3_CW_MIN_CCK},
150 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
151 QOS_TX3_CW_MAX_CCK},
152 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
153 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
154 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
155 QOS_TX3_TXOP_LIMIT_CCK}
156};
157
158static struct libipw_qos_parameters def_parameters_OFDM = {
159 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
160 DEF_TX3_CW_MIN_OFDM},
161 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
162 DEF_TX3_CW_MAX_OFDM},
163 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
164 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
165 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
166 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
167};
168
169static struct libipw_qos_parameters def_parameters_CCK = {
170 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
171 DEF_TX3_CW_MIN_CCK},
172 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
173 DEF_TX3_CW_MAX_CCK},
174 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
175 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
176 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
177 DEF_TX3_TXOP_LIMIT_CCK}
178};
179
180static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
181
182static int from_priority_to_tx_queue[] = {
183 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
184 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
185};
186
187static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
188
189static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
190 *qos_param);
191static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
192 *qos_param);
193#endif /* CONFIG_IPW2200_QOS */
194
195static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
196static void ipw_remove_current_network(struct ipw_priv *priv);
197static void ipw_rx(struct ipw_priv *priv);
198static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
199 struct clx2_tx_queue *txq, int qindex);
200static int ipw_queue_reset(struct ipw_priv *priv);
201
202static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
203 int len, int sync);
204
205static void ipw_tx_queue_free(struct ipw_priv *);
206
207static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
208static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
209static void ipw_rx_queue_replenish(void *);
210static int ipw_up(struct ipw_priv *);
211static void ipw_bg_up(struct work_struct *work);
212static void ipw_down(struct ipw_priv *);
213static void ipw_bg_down(struct work_struct *work);
214static int ipw_config(struct ipw_priv *);
215static int init_supported_rates(struct ipw_priv *priv,
216 struct ipw_supported_rates *prates);
217static void ipw_set_hwcrypto_keys(struct ipw_priv *);
218static void ipw_send_wep_keys(struct ipw_priv *, int);
219
220static int snprint_line(char *buf, size_t count,
221 const u8 * data, u32 len, u32 ofs)
222{
223 int out, i, j, l;
224 char c;
225
226 out = snprintf(buf, count, "%08X", ofs);
227
228 for (l = 0, i = 0; i < 2; i++) {
229 out += snprintf(buf + out, count - out, " ");
230 for (j = 0; j < 8 && l < len; j++, l++)
231 out += snprintf(buf + out, count - out, "%02X ",
232 data[(i * 8 + j)]);
233 for (; j < 8; j++)
234 out += snprintf(buf + out, count - out, " ");
235 }
236
237 out += snprintf(buf + out, count - out, " ");
238 for (l = 0, i = 0; i < 2; i++) {
239 out += snprintf(buf + out, count - out, " ");
240 for (j = 0; j < 8 && l < len; j++, l++) {
241 c = data[(i * 8 + j)];
242 if (!isascii(c) || !isprint(c))
243 c = '.';
244
245 out += snprintf(buf + out, count - out, "%c", c);
246 }
247
248 for (; j < 8; j++)
249 out += snprintf(buf + out, count - out, " ");
250 }
251
252 return out;
253}
254
255static void printk_buf(int level, const u8 * data, u32 len)
256{
257 char line[81];
258 u32 ofs = 0;
259 if (!(ipw_debug_level & level))
260 return;
261
262 while (len) {
263 snprint_line(line, sizeof(line), &data[ofs],
264 min(len, 16U), ofs);
265 printk(KERN_DEBUG "%s\n", line);
266 ofs += 16;
267 len -= min(len, 16U);
268 }
269}
270
271static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
272{
273 size_t out = size;
274 u32 ofs = 0;
275 int total = 0;
276
277 while (size && len) {
278 out = snprint_line(output, size, &data[ofs],
279 min_t(size_t, len, 16U), ofs);
280
281 ofs += 16;
282 output += out;
283 size -= out;
284 len -= min_t(size_t, len, 16U);
285 total += out;
286 }
287 return total;
288}
289
290/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
291static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
292#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
293
294/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
295static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
296#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
297
298/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
299static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
300static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
301{
302 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
303 __LINE__, (u32) (b), (u32) (c));
304 _ipw_write_reg8(a, b, c);
305}
306
307/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
308static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
309static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
310{
311 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
312 __LINE__, (u32) (b), (u32) (c));
313 _ipw_write_reg16(a, b, c);
314}
315
316/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
317static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
318static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
319{
320 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
321 __LINE__, (u32) (b), (u32) (c));
322 _ipw_write_reg32(a, b, c);
323}
324
325/* 8-bit direct write (low 4K) */
326static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
327 u8 val)
328{
329 writeb(val, ipw->hw_base + ofs);
330}
331
332/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
333#define ipw_write8(ipw, ofs, val) do { \
334 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
335 __LINE__, (u32)(ofs), (u32)(val)); \
336 _ipw_write8(ipw, ofs, val); \
337} while (0)
338
339/* 16-bit direct write (low 4K) */
340static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
341 u16 val)
342{
343 writew(val, ipw->hw_base + ofs);
344}
345
346/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347#define ipw_write16(ipw, ofs, val) do { \
348 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
349 __LINE__, (u32)(ofs), (u32)(val)); \
350 _ipw_write16(ipw, ofs, val); \
351} while (0)
352
353/* 32-bit direct write (low 4K) */
354static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
355 u32 val)
356{
357 writel(val, ipw->hw_base + ofs);
358}
359
360/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361#define ipw_write32(ipw, ofs, val) do { \
362 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
363 __LINE__, (u32)(ofs), (u32)(val)); \
364 _ipw_write32(ipw, ofs, val); \
365} while (0)
366
367/* 8-bit direct read (low 4K) */
368static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
369{
370 return readb(ipw->hw_base + ofs);
371}
372
373/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
374#define ipw_read8(ipw, ofs) ({ \
375 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
376 (u32)(ofs)); \
377 _ipw_read8(ipw, ofs); \
378})
379
380/* 16-bit direct read (low 4K) */
381static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
382{
383 return readw(ipw->hw_base + ofs);
384}
385
386/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
387#define ipw_read16(ipw, ofs) ({ \
388 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
389 (u32)(ofs)); \
390 _ipw_read16(ipw, ofs); \
391})
392
393/* 32-bit direct read (low 4K) */
394static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
395{
396 return readl(ipw->hw_base + ofs);
397}
398
399/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
400#define ipw_read32(ipw, ofs) ({ \
401 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
402 (u32)(ofs)); \
403 _ipw_read32(ipw, ofs); \
404})
405
406static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
407/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
408#define ipw_read_indirect(a, b, c, d) ({ \
409 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
410 __LINE__, (u32)(b), (u32)(d)); \
411 _ipw_read_indirect(a, b, c, d); \
412})
413
414/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
415static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
416 int num);
417#define ipw_write_indirect(a, b, c, d) do { \
418 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
419 __LINE__, (u32)(b), (u32)(d)); \
420 _ipw_write_indirect(a, b, c, d); \
421} while (0)
422
423/* 32-bit indirect write (above 4K) */
424static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
425{
426 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
427 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
428 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
429}
430
431/* 8-bit indirect write (above 4K) */
432static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
433{
434 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
435 u32 dif_len = reg - aligned_addr;
436
437 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
438 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
439 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
440}
441
442/* 16-bit indirect write (above 4K) */
443static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
444{
445 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
447
448 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
449 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
450 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
451}
452
453/* 8-bit indirect read (above 4K) */
454static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
455{
456 u32 word;
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
458 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
459 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
460 return (word >> ((reg & 0x3) * 8)) & 0xff;
461}
462
463/* 32-bit indirect read (above 4K) */
464static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
465{
466 u32 value;
467
468 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
469
470 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
471 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
472 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
473 return value;
474}
475
476/* General purpose, no alignment requirement, iterative (multi-byte) read, */
477/* for area above 1st 4K of SRAM/reg space */
478static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
479 int num)
480{
481 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
482 u32 dif_len = addr - aligned_addr;
483 u32 i;
484
485 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
486
487 if (num <= 0) {
488 return;
489 }
490
491 /* Read the first dword (or portion) byte by byte */
492 if (unlikely(dif_len)) {
493 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
494 /* Start reading at aligned_addr + dif_len */
495 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
496 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
497 aligned_addr += 4;
498 }
499
500 /* Read all of the middle dwords as dwords, with auto-increment */
501 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
502 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
503 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
504
505 /* Read the last dword (or portion) byte by byte */
506 if (unlikely(num)) {
507 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 for (i = 0; num > 0; i++, num--)
509 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
510 }
511}
512
513/* General purpose, no alignment requirement, iterative (multi-byte) write, */
514/* for area above 1st 4K of SRAM/reg space */
515static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
516 int num)
517{
518 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
519 u32 dif_len = addr - aligned_addr;
520 u32 i;
521
522 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
523
524 if (num <= 0) {
525 return;
526 }
527
528 /* Write the first dword (or portion) byte by byte */
529 if (unlikely(dif_len)) {
530 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
531 /* Start writing at aligned_addr + dif_len */
532 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
533 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
534 aligned_addr += 4;
535 }
536
537 /* Write all of the middle dwords as dwords, with auto-increment */
538 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
539 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
540 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
541
542 /* Write the last dword (or portion) byte by byte */
543 if (unlikely(num)) {
544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 for (i = 0; num > 0; i++, num--, buf++)
546 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
547 }
548}
549
550/* General purpose, no alignment requirement, iterative (multi-byte) write, */
551/* for 1st 4K of SRAM/regs space */
552static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
553 int num)
554{
555 memcpy_toio((priv->hw_base + addr), buf, num);
556}
557
558/* Set bit(s) in low 4K of SRAM/regs */
559static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
560{
561 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
562}
563
564/* Clear bit(s) in low 4K of SRAM/regs */
565static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
566{
567 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
568}
569
570static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
571{
572 if (priv->status & STATUS_INT_ENABLED)
573 return;
574 priv->status |= STATUS_INT_ENABLED;
575 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
576}
577
578static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
579{
580 if (!(priv->status & STATUS_INT_ENABLED))
581 return;
582 priv->status &= ~STATUS_INT_ENABLED;
583 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
584}
585
586static inline void ipw_enable_interrupts(struct ipw_priv *priv)
587{
588 unsigned long flags;
589
590 spin_lock_irqsave(&priv->irq_lock, flags);
591 __ipw_enable_interrupts(priv);
592 spin_unlock_irqrestore(&priv->irq_lock, flags);
593}
594
595static inline void ipw_disable_interrupts(struct ipw_priv *priv)
596{
597 unsigned long flags;
598
599 spin_lock_irqsave(&priv->irq_lock, flags);
600 __ipw_disable_interrupts(priv);
601 spin_unlock_irqrestore(&priv->irq_lock, flags);
602}
603
604static char *ipw_error_desc(u32 val)
605{
606 switch (val) {
607 case IPW_FW_ERROR_OK:
608 return "ERROR_OK";
609 case IPW_FW_ERROR_FAIL:
610 return "ERROR_FAIL";
611 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
612 return "MEMORY_UNDERFLOW";
613 case IPW_FW_ERROR_MEMORY_OVERFLOW:
614 return "MEMORY_OVERFLOW";
615 case IPW_FW_ERROR_BAD_PARAM:
616 return "BAD_PARAM";
617 case IPW_FW_ERROR_BAD_CHECKSUM:
618 return "BAD_CHECKSUM";
619 case IPW_FW_ERROR_NMI_INTERRUPT:
620 return "NMI_INTERRUPT";
621 case IPW_FW_ERROR_BAD_DATABASE:
622 return "BAD_DATABASE";
623 case IPW_FW_ERROR_ALLOC_FAIL:
624 return "ALLOC_FAIL";
625 case IPW_FW_ERROR_DMA_UNDERRUN:
626 return "DMA_UNDERRUN";
627 case IPW_FW_ERROR_DMA_STATUS:
628 return "DMA_STATUS";
629 case IPW_FW_ERROR_DINO_ERROR:
630 return "DINO_ERROR";
631 case IPW_FW_ERROR_EEPROM_ERROR:
632 return "EEPROM_ERROR";
633 case IPW_FW_ERROR_SYSASSERT:
634 return "SYSASSERT";
635 case IPW_FW_ERROR_FATAL_ERROR:
636 return "FATAL_ERROR";
637 default:
638 return "UNKNOWN_ERROR";
639 }
640}
641
642static void ipw_dump_error_log(struct ipw_priv *priv,
643 struct ipw_fw_error *error)
644{
645 u32 i;
646
647 if (!error) {
648 IPW_ERROR("Error allocating and capturing error log. "
649 "Nothing to dump.\n");
650 return;
651 }
652
653 IPW_ERROR("Start IPW Error Log Dump:\n");
654 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
655 error->status, error->config);
656
657 for (i = 0; i < error->elem_len; i++)
658 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
659 ipw_error_desc(error->elem[i].desc),
660 error->elem[i].time,
661 error->elem[i].blink1,
662 error->elem[i].blink2,
663 error->elem[i].link1,
664 error->elem[i].link2, error->elem[i].data);
665 for (i = 0; i < error->log_len; i++)
666 IPW_ERROR("%i\t0x%08x\t%i\n",
667 error->log[i].time,
668 error->log[i].data, error->log[i].event);
669}
670
671static inline int ipw_is_init(struct ipw_priv *priv)
672{
673 return (priv->status & STATUS_INIT) ? 1 : 0;
674}
675
676static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
677{
678 u32 addr, field_info, field_len, field_count, total_len;
679
680 IPW_DEBUG_ORD("ordinal = %i\n", ord);
681
682 if (!priv || !val || !len) {
683 IPW_DEBUG_ORD("Invalid argument\n");
684 return -EINVAL;
685 }
686
687 /* verify device ordinal tables have been initialized */
688 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
689 IPW_DEBUG_ORD("Access ordinals before initialization\n");
690 return -EINVAL;
691 }
692
693 switch (IPW_ORD_TABLE_ID_MASK & ord) {
694 case IPW_ORD_TABLE_0_MASK:
695 /*
696 * TABLE 0: Direct access to a table of 32 bit values
697 *
698 * This is a very simple table with the data directly
699 * read from the table
700 */
701
702 /* remove the table id from the ordinal */
703 ord &= IPW_ORD_TABLE_VALUE_MASK;
704
705 /* boundary check */
706 if (ord > priv->table0_len) {
707 IPW_DEBUG_ORD("ordinal value (%i) longer then "
708 "max (%i)\n", ord, priv->table0_len);
709 return -EINVAL;
710 }
711
712 /* verify we have enough room to store the value */
713 if (*len < sizeof(u32)) {
714 IPW_DEBUG_ORD("ordinal buffer length too small, "
715 "need %zd\n", sizeof(u32));
716 return -EINVAL;
717 }
718
719 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
720 ord, priv->table0_addr + (ord << 2));
721
722 *len = sizeof(u32);
723 ord <<= 2;
724 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
725 break;
726
727 case IPW_ORD_TABLE_1_MASK:
728 /*
729 * TABLE 1: Indirect access to a table of 32 bit values
730 *
731 * This is a fairly large table of u32 values each
732 * representing starting addr for the data (which is
733 * also a u32)
734 */
735
736 /* remove the table id from the ordinal */
737 ord &= IPW_ORD_TABLE_VALUE_MASK;
738
739 /* boundary check */
740 if (ord > priv->table1_len) {
741 IPW_DEBUG_ORD("ordinal value too long\n");
742 return -EINVAL;
743 }
744
745 /* verify we have enough room to store the value */
746 if (*len < sizeof(u32)) {
747 IPW_DEBUG_ORD("ordinal buffer length too small, "
748 "need %zd\n", sizeof(u32));
749 return -EINVAL;
750 }
751
752 *((u32 *) val) =
753 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
754 *len = sizeof(u32);
755 break;
756
757 case IPW_ORD_TABLE_2_MASK:
758 /*
759 * TABLE 2: Indirect access to a table of variable sized values
760 *
761 * This table consist of six values, each containing
762 * - dword containing the starting offset of the data
763 * - dword containing the lengh in the first 16bits
764 * and the count in the second 16bits
765 */
766
767 /* remove the table id from the ordinal */
768 ord &= IPW_ORD_TABLE_VALUE_MASK;
769
770 /* boundary check */
771 if (ord > priv->table2_len) {
772 IPW_DEBUG_ORD("ordinal value too long\n");
773 return -EINVAL;
774 }
775
776 /* get the address of statistic */
777 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
778
779 /* get the second DW of statistics ;
780 * two 16-bit words - first is length, second is count */
781 field_info =
782 ipw_read_reg32(priv,
783 priv->table2_addr + (ord << 3) +
784 sizeof(u32));
785
786 /* get each entry length */
787 field_len = *((u16 *) & field_info);
788
789 /* get number of entries */
790 field_count = *(((u16 *) & field_info) + 1);
791
792 /* abort if not enough memory */
793 total_len = field_len * field_count;
794 if (total_len > *len) {
795 *len = total_len;
796 return -EINVAL;
797 }
798
799 *len = total_len;
800 if (!total_len)
801 return 0;
802
803 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
804 "field_info = 0x%08x\n",
805 addr, total_len, field_info);
806 ipw_read_indirect(priv, addr, val, total_len);
807 break;
808
809 default:
810 IPW_DEBUG_ORD("Invalid ordinal!\n");
811 return -EINVAL;
812
813 }
814
815 return 0;
816}
817
818static void ipw_init_ordinals(struct ipw_priv *priv)
819{
820 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
821 priv->table0_len = ipw_read32(priv, priv->table0_addr);
822
823 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
824 priv->table0_addr, priv->table0_len);
825
826 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
827 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
828
829 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
830 priv->table1_addr, priv->table1_len);
831
832 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
833 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
834 priv->table2_len &= 0x0000ffff; /* use first two bytes */
835
836 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
837 priv->table2_addr, priv->table2_len);
838
839}
840
841static u32 ipw_register_toggle(u32 reg)
842{
843 reg &= ~IPW_START_STANDBY;
844 if (reg & IPW_GATE_ODMA)
845 reg &= ~IPW_GATE_ODMA;
846 if (reg & IPW_GATE_IDMA)
847 reg &= ~IPW_GATE_IDMA;
848 if (reg & IPW_GATE_ADMA)
849 reg &= ~IPW_GATE_ADMA;
850 return reg;
851}
852
853/*
854 * LED behavior:
855 * - On radio ON, turn on any LEDs that require to be on during start
856 * - On initialization, start unassociated blink
857 * - On association, disable unassociated blink
858 * - On disassociation, start unassociated blink
859 * - On radio OFF, turn off any LEDs started during radio on
860 *
861 */
862#define LD_TIME_LINK_ON msecs_to_jiffies(300)
863#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
864#define LD_TIME_ACT_ON msecs_to_jiffies(250)
865
866static void ipw_led_link_on(struct ipw_priv *priv)
867{
868 unsigned long flags;
869 u32 led;
870
871 /* If configured to not use LEDs, or nic_type is 1,
872 * then we don't toggle a LINK led */
873 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
874 return;
875
876 spin_lock_irqsave(&priv->lock, flags);
877
878 if (!(priv->status & STATUS_RF_KILL_MASK) &&
879 !(priv->status & STATUS_LED_LINK_ON)) {
880 IPW_DEBUG_LED("Link LED On\n");
881 led = ipw_read_reg32(priv, IPW_EVENT_REG);
882 led |= priv->led_association_on;
883
884 led = ipw_register_toggle(led);
885
886 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
887 ipw_write_reg32(priv, IPW_EVENT_REG, led);
888
889 priv->status |= STATUS_LED_LINK_ON;
890
891 /* If we aren't associated, schedule turning the LED off */
892 if (!(priv->status & STATUS_ASSOCIATED))
893 schedule_delayed_work(&priv->led_link_off,
894 LD_TIME_LINK_ON);
895 }
896
897 spin_unlock_irqrestore(&priv->lock, flags);
898}
899
900static void ipw_bg_led_link_on(struct work_struct *work)
901{
902 struct ipw_priv *priv =
903 container_of(work, struct ipw_priv, led_link_on.work);
904 mutex_lock(&priv->mutex);
905 ipw_led_link_on(priv);
906 mutex_unlock(&priv->mutex);
907}
908
909static void ipw_led_link_off(struct ipw_priv *priv)
910{
911 unsigned long flags;
912 u32 led;
913
914 /* If configured not to use LEDs, or nic type is 1,
915 * then we don't goggle the LINK led. */
916 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
917 return;
918
919 spin_lock_irqsave(&priv->lock, flags);
920
921 if (priv->status & STATUS_LED_LINK_ON) {
922 led = ipw_read_reg32(priv, IPW_EVENT_REG);
923 led &= priv->led_association_off;
924 led = ipw_register_toggle(led);
925
926 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
927 ipw_write_reg32(priv, IPW_EVENT_REG, led);
928
929 IPW_DEBUG_LED("Link LED Off\n");
930
931 priv->status &= ~STATUS_LED_LINK_ON;
932
933 /* If we aren't associated and the radio is on, schedule
934 * turning the LED on (blink while unassociated) */
935 if (!(priv->status & STATUS_RF_KILL_MASK) &&
936 !(priv->status & STATUS_ASSOCIATED))
937 schedule_delayed_work(&priv->led_link_on,
938 LD_TIME_LINK_OFF);
939
940 }
941
942 spin_unlock_irqrestore(&priv->lock, flags);
943}
944
945static void ipw_bg_led_link_off(struct work_struct *work)
946{
947 struct ipw_priv *priv =
948 container_of(work, struct ipw_priv, led_link_off.work);
949 mutex_lock(&priv->mutex);
950 ipw_led_link_off(priv);
951 mutex_unlock(&priv->mutex);
952}
953
954static void __ipw_led_activity_on(struct ipw_priv *priv)
955{
956 u32 led;
957
958 if (priv->config & CFG_NO_LED)
959 return;
960
961 if (priv->status & STATUS_RF_KILL_MASK)
962 return;
963
964 if (!(priv->status & STATUS_LED_ACT_ON)) {
965 led = ipw_read_reg32(priv, IPW_EVENT_REG);
966 led |= priv->led_activity_on;
967
968 led = ipw_register_toggle(led);
969
970 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
971 ipw_write_reg32(priv, IPW_EVENT_REG, led);
972
973 IPW_DEBUG_LED("Activity LED On\n");
974
975 priv->status |= STATUS_LED_ACT_ON;
976
977 cancel_delayed_work(&priv->led_act_off);
978 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
979 } else {
980 /* Reschedule LED off for full time period */
981 cancel_delayed_work(&priv->led_act_off);
982 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
983 }
984}
985
986#if 0
987void ipw_led_activity_on(struct ipw_priv *priv)
988{
989 unsigned long flags;
990 spin_lock_irqsave(&priv->lock, flags);
991 __ipw_led_activity_on(priv);
992 spin_unlock_irqrestore(&priv->lock, flags);
993}
994#endif /* 0 */
995
996static void ipw_led_activity_off(struct ipw_priv *priv)
997{
998 unsigned long flags;
999 u32 led;
1000
1001 if (priv->config & CFG_NO_LED)
1002 return;
1003
1004 spin_lock_irqsave(&priv->lock, flags);
1005
1006 if (priv->status & STATUS_LED_ACT_ON) {
1007 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1008 led &= priv->led_activity_off;
1009
1010 led = ipw_register_toggle(led);
1011
1012 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1013 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1014
1015 IPW_DEBUG_LED("Activity LED Off\n");
1016
1017 priv->status &= ~STATUS_LED_ACT_ON;
1018 }
1019
1020 spin_unlock_irqrestore(&priv->lock, flags);
1021}
1022
1023static void ipw_bg_led_activity_off(struct work_struct *work)
1024{
1025 struct ipw_priv *priv =
1026 container_of(work, struct ipw_priv, led_act_off.work);
1027 mutex_lock(&priv->mutex);
1028 ipw_led_activity_off(priv);
1029 mutex_unlock(&priv->mutex);
1030}
1031
1032static void ipw_led_band_on(struct ipw_priv *priv)
1033{
1034 unsigned long flags;
1035 u32 led;
1036
1037 /* Only nic type 1 supports mode LEDs */
1038 if (priv->config & CFG_NO_LED ||
1039 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1040 return;
1041
1042 spin_lock_irqsave(&priv->lock, flags);
1043
1044 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1045 if (priv->assoc_network->mode == IEEE_A) {
1046 led |= priv->led_ofdm_on;
1047 led &= priv->led_association_off;
1048 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1049 } else if (priv->assoc_network->mode == IEEE_G) {
1050 led |= priv->led_ofdm_on;
1051 led |= priv->led_association_on;
1052 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1053 } else {
1054 led &= priv->led_ofdm_off;
1055 led |= priv->led_association_on;
1056 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1057 }
1058
1059 led = ipw_register_toggle(led);
1060
1061 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1062 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1063
1064 spin_unlock_irqrestore(&priv->lock, flags);
1065}
1066
1067static void ipw_led_band_off(struct ipw_priv *priv)
1068{
1069 unsigned long flags;
1070 u32 led;
1071
1072 /* Only nic type 1 supports mode LEDs */
1073 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1074 return;
1075
1076 spin_lock_irqsave(&priv->lock, flags);
1077
1078 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1079 led &= priv->led_ofdm_off;
1080 led &= priv->led_association_off;
1081
1082 led = ipw_register_toggle(led);
1083
1084 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1085 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1086
1087 spin_unlock_irqrestore(&priv->lock, flags);
1088}
1089
1090static void ipw_led_radio_on(struct ipw_priv *priv)
1091{
1092 ipw_led_link_on(priv);
1093}
1094
1095static void ipw_led_radio_off(struct ipw_priv *priv)
1096{
1097 ipw_led_activity_off(priv);
1098 ipw_led_link_off(priv);
1099}
1100
1101static void ipw_led_link_up(struct ipw_priv *priv)
1102{
1103 /* Set the Link Led on for all nic types */
1104 ipw_led_link_on(priv);
1105}
1106
1107static void ipw_led_link_down(struct ipw_priv *priv)
1108{
1109 ipw_led_activity_off(priv);
1110 ipw_led_link_off(priv);
1111
1112 if (priv->status & STATUS_RF_KILL_MASK)
1113 ipw_led_radio_off(priv);
1114}
1115
1116static void ipw_led_init(struct ipw_priv *priv)
1117{
1118 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1119
1120 /* Set the default PINs for the link and activity leds */
1121 priv->led_activity_on = IPW_ACTIVITY_LED;
1122 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1123
1124 priv->led_association_on = IPW_ASSOCIATED_LED;
1125 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1126
1127 /* Set the default PINs for the OFDM leds */
1128 priv->led_ofdm_on = IPW_OFDM_LED;
1129 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1130
1131 switch (priv->nic_type) {
1132 case EEPROM_NIC_TYPE_1:
1133 /* In this NIC type, the LEDs are reversed.... */
1134 priv->led_activity_on = IPW_ASSOCIATED_LED;
1135 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1136 priv->led_association_on = IPW_ACTIVITY_LED;
1137 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1138
1139 if (!(priv->config & CFG_NO_LED))
1140 ipw_led_band_on(priv);
1141
1142 /* And we don't blink link LEDs for this nic, so
1143 * just return here */
1144 return;
1145
1146 case EEPROM_NIC_TYPE_3:
1147 case EEPROM_NIC_TYPE_2:
1148 case EEPROM_NIC_TYPE_4:
1149 case EEPROM_NIC_TYPE_0:
1150 break;
1151
1152 default:
1153 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1154 priv->nic_type);
1155 priv->nic_type = EEPROM_NIC_TYPE_0;
1156 break;
1157 }
1158
1159 if (!(priv->config & CFG_NO_LED)) {
1160 if (priv->status & STATUS_ASSOCIATED)
1161 ipw_led_link_on(priv);
1162 else
1163 ipw_led_link_off(priv);
1164 }
1165}
1166
1167static void ipw_led_shutdown(struct ipw_priv *priv)
1168{
1169 ipw_led_activity_off(priv);
1170 ipw_led_link_off(priv);
1171 ipw_led_band_off(priv);
1172 cancel_delayed_work(&priv->led_link_on);
1173 cancel_delayed_work(&priv->led_link_off);
1174 cancel_delayed_work(&priv->led_act_off);
1175}
1176
1177/*
1178 * The following adds a new attribute to the sysfs representation
1179 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1180 * used for controlling the debug level.
1181 *
1182 * See the level definitions in ipw for details.
1183 */
1184static ssize_t debug_level_show(struct device_driver *d, char *buf)
1185{
1186 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1187}
1188
1189static ssize_t debug_level_store(struct device_driver *d, const char *buf,
1190 size_t count)
1191{
1192 char *p = (char *)buf;
1193 u32 val;
1194
1195 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1196 p++;
1197 if (p[0] == 'x' || p[0] == 'X')
1198 p++;
1199 val = simple_strtoul(p, &p, 16);
1200 } else
1201 val = simple_strtoul(p, &p, 10);
1202 if (p == buf)
1203 printk(KERN_INFO DRV_NAME
1204 ": %s is not in hex or decimal form.\n", buf);
1205 else
1206 ipw_debug_level = val;
1207
1208 return strnlen(buf, count);
1209}
1210static DRIVER_ATTR_RW(debug_level);
1211
1212static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1213{
1214 /* length = 1st dword in log */
1215 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1216}
1217
1218static void ipw_capture_event_log(struct ipw_priv *priv,
1219 u32 log_len, struct ipw_event *log)
1220{
1221 u32 base;
1222
1223 if (log_len) {
1224 base = ipw_read32(priv, IPW_EVENT_LOG);
1225 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1226 (u8 *) log, sizeof(*log) * log_len);
1227 }
1228}
1229
1230static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1231{
1232 struct ipw_fw_error *error;
1233 u32 log_len = ipw_get_event_log_len(priv);
1234 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1235 u32 elem_len = ipw_read_reg32(priv, base);
1236
1237 error = kmalloc(sizeof(*error) +
1238 sizeof(*error->elem) * elem_len +
1239 sizeof(*error->log) * log_len, GFP_ATOMIC);
1240 if (!error) {
1241 IPW_ERROR("Memory allocation for firmware error log "
1242 "failed.\n");
1243 return NULL;
1244 }
1245 error->jiffies = jiffies;
1246 error->status = priv->status;
1247 error->config = priv->config;
1248 error->elem_len = elem_len;
1249 error->log_len = log_len;
1250 error->elem = (struct ipw_error_elem *)error->payload;
1251 error->log = (struct ipw_event *)(error->elem + elem_len);
1252
1253 ipw_capture_event_log(priv, log_len, error->log);
1254
1255 if (elem_len)
1256 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1257 sizeof(*error->elem) * elem_len);
1258
1259 return error;
1260}
1261
1262static ssize_t show_event_log(struct device *d,
1263 struct device_attribute *attr, char *buf)
1264{
1265 struct ipw_priv *priv = dev_get_drvdata(d);
1266 u32 log_len = ipw_get_event_log_len(priv);
1267 u32 log_size;
1268 struct ipw_event *log;
1269 u32 len = 0, i;
1270
1271 /* not using min() because of its strict type checking */
1272 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1273 sizeof(*log) * log_len : PAGE_SIZE;
1274 log = kzalloc(log_size, GFP_KERNEL);
1275 if (!log) {
1276 IPW_ERROR("Unable to allocate memory for log\n");
1277 return 0;
1278 }
1279 log_len = log_size / sizeof(*log);
1280 ipw_capture_event_log(priv, log_len, log);
1281
1282 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1283 for (i = 0; i < log_len; i++)
1284 len += snprintf(buf + len, PAGE_SIZE - len,
1285 "\n%08X%08X%08X",
1286 log[i].time, log[i].event, log[i].data);
1287 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1288 kfree(log);
1289 return len;
1290}
1291
1292static DEVICE_ATTR(event_log, 0444, show_event_log, NULL);
1293
1294static ssize_t show_error(struct device *d,
1295 struct device_attribute *attr, char *buf)
1296{
1297 struct ipw_priv *priv = dev_get_drvdata(d);
1298 u32 len = 0, i;
1299 if (!priv->error)
1300 return 0;
1301 len += snprintf(buf + len, PAGE_SIZE - len,
1302 "%08lX%08X%08X%08X",
1303 priv->error->jiffies,
1304 priv->error->status,
1305 priv->error->config, priv->error->elem_len);
1306 for (i = 0; i < priv->error->elem_len; i++)
1307 len += snprintf(buf + len, PAGE_SIZE - len,
1308 "\n%08X%08X%08X%08X%08X%08X%08X",
1309 priv->error->elem[i].time,
1310 priv->error->elem[i].desc,
1311 priv->error->elem[i].blink1,
1312 priv->error->elem[i].blink2,
1313 priv->error->elem[i].link1,
1314 priv->error->elem[i].link2,
1315 priv->error->elem[i].data);
1316
1317 len += snprintf(buf + len, PAGE_SIZE - len,
1318 "\n%08X", priv->error->log_len);
1319 for (i = 0; i < priv->error->log_len; i++)
1320 len += snprintf(buf + len, PAGE_SIZE - len,
1321 "\n%08X%08X%08X",
1322 priv->error->log[i].time,
1323 priv->error->log[i].event,
1324 priv->error->log[i].data);
1325 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1326 return len;
1327}
1328
1329static ssize_t clear_error(struct device *d,
1330 struct device_attribute *attr,
1331 const char *buf, size_t count)
1332{
1333 struct ipw_priv *priv = dev_get_drvdata(d);
1334
1335 kfree(priv->error);
1336 priv->error = NULL;
1337 return count;
1338}
1339
1340static DEVICE_ATTR(error, 0644, show_error, clear_error);
1341
1342static ssize_t show_cmd_log(struct device *d,
1343 struct device_attribute *attr, char *buf)
1344{
1345 struct ipw_priv *priv = dev_get_drvdata(d);
1346 u32 len = 0, i;
1347 if (!priv->cmdlog)
1348 return 0;
1349 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1350 (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
1351 i = (i + 1) % priv->cmdlog_len) {
1352 len +=
1353 snprintf(buf + len, PAGE_SIZE - len,
1354 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1355 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1356 priv->cmdlog[i].cmd.len);
1357 len +=
1358 snprintk_buf(buf + len, PAGE_SIZE - len,
1359 (u8 *) priv->cmdlog[i].cmd.param,
1360 priv->cmdlog[i].cmd.len);
1361 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1362 }
1363 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1364 return len;
1365}
1366
1367static DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL);
1368
1369#ifdef CONFIG_IPW2200_PROMISCUOUS
1370static void ipw_prom_free(struct ipw_priv *priv);
1371static int ipw_prom_alloc(struct ipw_priv *priv);
1372static ssize_t store_rtap_iface(struct device *d,
1373 struct device_attribute *attr,
1374 const char *buf, size_t count)
1375{
1376 struct ipw_priv *priv = dev_get_drvdata(d);
1377 int rc = 0;
1378
1379 if (count < 1)
1380 return -EINVAL;
1381
1382 switch (buf[0]) {
1383 case '0':
1384 if (!rtap_iface)
1385 return count;
1386
1387 if (netif_running(priv->prom_net_dev)) {
1388 IPW_WARNING("Interface is up. Cannot unregister.\n");
1389 return count;
1390 }
1391
1392 ipw_prom_free(priv);
1393 rtap_iface = 0;
1394 break;
1395
1396 case '1':
1397 if (rtap_iface)
1398 return count;
1399
1400 rc = ipw_prom_alloc(priv);
1401 if (!rc)
1402 rtap_iface = 1;
1403 break;
1404
1405 default:
1406 return -EINVAL;
1407 }
1408
1409 if (rc) {
1410 IPW_ERROR("Failed to register promiscuous network "
1411 "device (error %d).\n", rc);
1412 }
1413
1414 return count;
1415}
1416
1417static ssize_t show_rtap_iface(struct device *d,
1418 struct device_attribute *attr,
1419 char *buf)
1420{
1421 struct ipw_priv *priv = dev_get_drvdata(d);
1422 if (rtap_iface)
1423 return sprintf(buf, "%s", priv->prom_net_dev->name);
1424 else {
1425 buf[0] = '-';
1426 buf[1] = '1';
1427 buf[2] = '\0';
1428 return 3;
1429 }
1430}
1431
1432static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface);
1433
1434static ssize_t store_rtap_filter(struct device *d,
1435 struct device_attribute *attr,
1436 const char *buf, size_t count)
1437{
1438 struct ipw_priv *priv = dev_get_drvdata(d);
1439
1440 if (!priv->prom_priv) {
1441 IPW_ERROR("Attempting to set filter without "
1442 "rtap_iface enabled.\n");
1443 return -EPERM;
1444 }
1445
1446 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1447
1448 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1449 BIT_ARG16(priv->prom_priv->filter));
1450
1451 return count;
1452}
1453
1454static ssize_t show_rtap_filter(struct device *d,
1455 struct device_attribute *attr,
1456 char *buf)
1457{
1458 struct ipw_priv *priv = dev_get_drvdata(d);
1459 return sprintf(buf, "0x%04X",
1460 priv->prom_priv ? priv->prom_priv->filter : 0);
1461}
1462
1463static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter);
1464#endif
1465
1466static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1467 char *buf)
1468{
1469 struct ipw_priv *priv = dev_get_drvdata(d);
1470 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1471}
1472
1473static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1474 const char *buf, size_t count)
1475{
1476 struct ipw_priv *priv = dev_get_drvdata(d);
1477 struct net_device *dev = priv->net_dev;
1478 char buffer[] = "00000000";
1479 unsigned long len =
1480 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1481 unsigned long val;
1482 char *p = buffer;
1483
1484 IPW_DEBUG_INFO("enter\n");
1485
1486 strncpy(buffer, buf, len);
1487 buffer[len] = 0;
1488
1489 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1490 p++;
1491 if (p[0] == 'x' || p[0] == 'X')
1492 p++;
1493 val = simple_strtoul(p, &p, 16);
1494 } else
1495 val = simple_strtoul(p, &p, 10);
1496 if (p == buffer) {
1497 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1498 } else {
1499 priv->ieee->scan_age = val;
1500 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1501 }
1502
1503 IPW_DEBUG_INFO("exit\n");
1504 return len;
1505}
1506
1507static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
1508
1509static ssize_t show_led(struct device *d, struct device_attribute *attr,
1510 char *buf)
1511{
1512 struct ipw_priv *priv = dev_get_drvdata(d);
1513 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1514}
1515
1516static ssize_t store_led(struct device *d, struct device_attribute *attr,
1517 const char *buf, size_t count)
1518{
1519 struct ipw_priv *priv = dev_get_drvdata(d);
1520
1521 IPW_DEBUG_INFO("enter\n");
1522
1523 if (count == 0)
1524 return 0;
1525
1526 if (*buf == 0) {
1527 IPW_DEBUG_LED("Disabling LED control.\n");
1528 priv->config |= CFG_NO_LED;
1529 ipw_led_shutdown(priv);
1530 } else {
1531 IPW_DEBUG_LED("Enabling LED control.\n");
1532 priv->config &= ~CFG_NO_LED;
1533 ipw_led_init(priv);
1534 }
1535
1536 IPW_DEBUG_INFO("exit\n");
1537 return count;
1538}
1539
1540static DEVICE_ATTR(led, 0644, show_led, store_led);
1541
1542static ssize_t show_status(struct device *d,
1543 struct device_attribute *attr, char *buf)
1544{
1545 struct ipw_priv *p = dev_get_drvdata(d);
1546 return sprintf(buf, "0x%08x\n", (int)p->status);
1547}
1548
1549static DEVICE_ATTR(status, 0444, show_status, NULL);
1550
1551static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1552 char *buf)
1553{
1554 struct ipw_priv *p = dev_get_drvdata(d);
1555 return sprintf(buf, "0x%08x\n", (int)p->config);
1556}
1557
1558static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
1559
1560static ssize_t show_nic_type(struct device *d,
1561 struct device_attribute *attr, char *buf)
1562{
1563 struct ipw_priv *priv = dev_get_drvdata(d);
1564 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1565}
1566
1567static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL);
1568
1569static ssize_t show_ucode_version(struct device *d,
1570 struct device_attribute *attr, char *buf)
1571{
1572 u32 len = sizeof(u32), tmp = 0;
1573 struct ipw_priv *p = dev_get_drvdata(d);
1574
1575 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1576 return 0;
1577
1578 return sprintf(buf, "0x%08x\n", tmp);
1579}
1580
1581static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL);
1582
1583static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1584 char *buf)
1585{
1586 u32 len = sizeof(u32), tmp = 0;
1587 struct ipw_priv *p = dev_get_drvdata(d);
1588
1589 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1590 return 0;
1591
1592 return sprintf(buf, "0x%08x\n", tmp);
1593}
1594
1595static DEVICE_ATTR(rtc, 0644, show_rtc, NULL);
1596
1597/*
1598 * Add a device attribute to view/control the delay between eeprom
1599 * operations.
1600 */
1601static ssize_t show_eeprom_delay(struct device *d,
1602 struct device_attribute *attr, char *buf)
1603{
1604 struct ipw_priv *p = dev_get_drvdata(d);
1605 int n = p->eeprom_delay;
1606 return sprintf(buf, "%i\n", n);
1607}
1608static ssize_t store_eeprom_delay(struct device *d,
1609 struct device_attribute *attr,
1610 const char *buf, size_t count)
1611{
1612 struct ipw_priv *p = dev_get_drvdata(d);
1613 sscanf(buf, "%i", &p->eeprom_delay);
1614 return strnlen(buf, count);
1615}
1616
1617static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay);
1618
1619static ssize_t show_command_event_reg(struct device *d,
1620 struct device_attribute *attr, char *buf)
1621{
1622 u32 reg = 0;
1623 struct ipw_priv *p = dev_get_drvdata(d);
1624
1625 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1626 return sprintf(buf, "0x%08x\n", reg);
1627}
1628static ssize_t store_command_event_reg(struct device *d,
1629 struct device_attribute *attr,
1630 const char *buf, size_t count)
1631{
1632 u32 reg;
1633 struct ipw_priv *p = dev_get_drvdata(d);
1634
1635 sscanf(buf, "%x", &reg);
1636 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1637 return strnlen(buf, count);
1638}
1639
1640static DEVICE_ATTR(command_event_reg, 0644,
1641 show_command_event_reg, store_command_event_reg);
1642
1643static ssize_t show_mem_gpio_reg(struct device *d,
1644 struct device_attribute *attr, char *buf)
1645{
1646 u32 reg = 0;
1647 struct ipw_priv *p = dev_get_drvdata(d);
1648
1649 reg = ipw_read_reg32(p, 0x301100);
1650 return sprintf(buf, "0x%08x\n", reg);
1651}
1652static ssize_t store_mem_gpio_reg(struct device *d,
1653 struct device_attribute *attr,
1654 const char *buf, size_t count)
1655{
1656 u32 reg;
1657 struct ipw_priv *p = dev_get_drvdata(d);
1658
1659 sscanf(buf, "%x", &reg);
1660 ipw_write_reg32(p, 0x301100, reg);
1661 return strnlen(buf, count);
1662}
1663
1664static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, store_mem_gpio_reg);
1665
1666static ssize_t show_indirect_dword(struct device *d,
1667 struct device_attribute *attr, char *buf)
1668{
1669 u32 reg = 0;
1670 struct ipw_priv *priv = dev_get_drvdata(d);
1671
1672 if (priv->status & STATUS_INDIRECT_DWORD)
1673 reg = ipw_read_reg32(priv, priv->indirect_dword);
1674 else
1675 reg = 0;
1676
1677 return sprintf(buf, "0x%08x\n", reg);
1678}
1679static ssize_t store_indirect_dword(struct device *d,
1680 struct device_attribute *attr,
1681 const char *buf, size_t count)
1682{
1683 struct ipw_priv *priv = dev_get_drvdata(d);
1684
1685 sscanf(buf, "%x", &priv->indirect_dword);
1686 priv->status |= STATUS_INDIRECT_DWORD;
1687 return strnlen(buf, count);
1688}
1689
1690static DEVICE_ATTR(indirect_dword, 0644,
1691 show_indirect_dword, store_indirect_dword);
1692
1693static ssize_t show_indirect_byte(struct device *d,
1694 struct device_attribute *attr, char *buf)
1695{
1696 u8 reg = 0;
1697 struct ipw_priv *priv = dev_get_drvdata(d);
1698
1699 if (priv->status & STATUS_INDIRECT_BYTE)
1700 reg = ipw_read_reg8(priv, priv->indirect_byte);
1701 else
1702 reg = 0;
1703
1704 return sprintf(buf, "0x%02x\n", reg);
1705}
1706static ssize_t store_indirect_byte(struct device *d,
1707 struct device_attribute *attr,
1708 const char *buf, size_t count)
1709{
1710 struct ipw_priv *priv = dev_get_drvdata(d);
1711
1712 sscanf(buf, "%x", &priv->indirect_byte);
1713 priv->status |= STATUS_INDIRECT_BYTE;
1714 return strnlen(buf, count);
1715}
1716
1717static DEVICE_ATTR(indirect_byte, 0644,
1718 show_indirect_byte, store_indirect_byte);
1719
1720static ssize_t show_direct_dword(struct device *d,
1721 struct device_attribute *attr, char *buf)
1722{
1723 u32 reg = 0;
1724 struct ipw_priv *priv = dev_get_drvdata(d);
1725
1726 if (priv->status & STATUS_DIRECT_DWORD)
1727 reg = ipw_read32(priv, priv->direct_dword);
1728 else
1729 reg = 0;
1730
1731 return sprintf(buf, "0x%08x\n", reg);
1732}
1733static ssize_t store_direct_dword(struct device *d,
1734 struct device_attribute *attr,
1735 const char *buf, size_t count)
1736{
1737 struct ipw_priv *priv = dev_get_drvdata(d);
1738
1739 sscanf(buf, "%x", &priv->direct_dword);
1740 priv->status |= STATUS_DIRECT_DWORD;
1741 return strnlen(buf, count);
1742}
1743
1744static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword);
1745
1746static int rf_kill_active(struct ipw_priv *priv)
1747{
1748 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1749 priv->status |= STATUS_RF_KILL_HW;
1750 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1751 } else {
1752 priv->status &= ~STATUS_RF_KILL_HW;
1753 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1754 }
1755
1756 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1757}
1758
1759static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1760 char *buf)
1761{
1762 /* 0 - RF kill not enabled
1763 1 - SW based RF kill active (sysfs)
1764 2 - HW based RF kill active
1765 3 - Both HW and SW baed RF kill active */
1766 struct ipw_priv *priv = dev_get_drvdata(d);
1767 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1768 (rf_kill_active(priv) ? 0x2 : 0x0);
1769 return sprintf(buf, "%i\n", val);
1770}
1771
1772static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1773{
1774 if ((disable_radio ? 1 : 0) ==
1775 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1776 return 0;
1777
1778 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1779 disable_radio ? "OFF" : "ON");
1780
1781 if (disable_radio) {
1782 priv->status |= STATUS_RF_KILL_SW;
1783
1784 cancel_delayed_work(&priv->request_scan);
1785 cancel_delayed_work(&priv->request_direct_scan);
1786 cancel_delayed_work(&priv->request_passive_scan);
1787 cancel_delayed_work(&priv->scan_event);
1788 schedule_work(&priv->down);
1789 } else {
1790 priv->status &= ~STATUS_RF_KILL_SW;
1791 if (rf_kill_active(priv)) {
1792 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1793 "disabled by HW switch\n");
1794 /* Make sure the RF_KILL check timer is running */
1795 cancel_delayed_work(&priv->rf_kill);
1796 schedule_delayed_work(&priv->rf_kill,
1797 round_jiffies_relative(2 * HZ));
1798 } else
1799 schedule_work(&priv->up);
1800 }
1801
1802 return 1;
1803}
1804
1805static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1806 const char *buf, size_t count)
1807{
1808 struct ipw_priv *priv = dev_get_drvdata(d);
1809
1810 ipw_radio_kill_sw(priv, buf[0] == '1');
1811
1812 return count;
1813}
1814
1815static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
1816
1817static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1818 char *buf)
1819{
1820 struct ipw_priv *priv = dev_get_drvdata(d);
1821 int pos = 0, len = 0;
1822 if (priv->config & CFG_SPEED_SCAN) {
1823 while (priv->speed_scan[pos] != 0)
1824 len += sprintf(&buf[len], "%d ",
1825 priv->speed_scan[pos++]);
1826 return len + sprintf(&buf[len], "\n");
1827 }
1828
1829 return sprintf(buf, "0\n");
1830}
1831
1832static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1833 const char *buf, size_t count)
1834{
1835 struct ipw_priv *priv = dev_get_drvdata(d);
1836 int channel, pos = 0;
1837 const char *p = buf;
1838
1839 /* list of space separated channels to scan, optionally ending with 0 */
1840 while ((channel = simple_strtol(p, NULL, 0))) {
1841 if (pos == MAX_SPEED_SCAN - 1) {
1842 priv->speed_scan[pos] = 0;
1843 break;
1844 }
1845
1846 if (libipw_is_valid_channel(priv->ieee, channel))
1847 priv->speed_scan[pos++] = channel;
1848 else
1849 IPW_WARNING("Skipping invalid channel request: %d\n",
1850 channel);
1851 p = strchr(p, ' ');
1852 if (!p)
1853 break;
1854 while (*p == ' ' || *p == '\t')
1855 p++;
1856 }
1857
1858 if (pos == 0)
1859 priv->config &= ~CFG_SPEED_SCAN;
1860 else {
1861 priv->speed_scan_pos = 0;
1862 priv->config |= CFG_SPEED_SCAN;
1863 }
1864
1865 return count;
1866}
1867
1868static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan);
1869
1870static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1871 char *buf)
1872{
1873 struct ipw_priv *priv = dev_get_drvdata(d);
1874 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1875}
1876
1877static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1878 const char *buf, size_t count)
1879{
1880 struct ipw_priv *priv = dev_get_drvdata(d);
1881 if (buf[0] == '1')
1882 priv->config |= CFG_NET_STATS;
1883 else
1884 priv->config &= ~CFG_NET_STATS;
1885
1886 return count;
1887}
1888
1889static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats);
1890
1891static ssize_t show_channels(struct device *d,
1892 struct device_attribute *attr,
1893 char *buf)
1894{
1895 struct ipw_priv *priv = dev_get_drvdata(d);
1896 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1897 int len = 0, i;
1898
1899 len = sprintf(&buf[len],
1900 "Displaying %d channels in 2.4Ghz band "
1901 "(802.11bg):\n", geo->bg_channels);
1902
1903 for (i = 0; i < geo->bg_channels; i++) {
1904 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1905 geo->bg[i].channel,
1906 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1907 " (radar spectrum)" : "",
1908 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1909 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1910 ? "" : ", IBSS",
1911 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1912 "passive only" : "active/passive",
1913 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1914 "B" : "B/G");
1915 }
1916
1917 len += sprintf(&buf[len],
1918 "Displaying %d channels in 5.2Ghz band "
1919 "(802.11a):\n", geo->a_channels);
1920 for (i = 0; i < geo->a_channels; i++) {
1921 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1922 geo->a[i].channel,
1923 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1924 " (radar spectrum)" : "",
1925 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1926 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1927 ? "" : ", IBSS",
1928 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1929 "passive only" : "active/passive");
1930 }
1931
1932 return len;
1933}
1934
1935static DEVICE_ATTR(channels, 0400, show_channels, NULL);
1936
1937static void notify_wx_assoc_event(struct ipw_priv *priv)
1938{
1939 union iwreq_data wrqu;
1940 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1941 if (priv->status & STATUS_ASSOCIATED)
1942 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1943 else
1944 eth_zero_addr(wrqu.ap_addr.sa_data);
1945 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1946}
1947
1948static void ipw_irq_tasklet(unsigned long data)
1949{
1950 struct ipw_priv *priv = (struct ipw_priv *)data;
1951 u32 inta, inta_mask, handled = 0;
1952 unsigned long flags;
1953 int rc = 0;
1954
1955 spin_lock_irqsave(&priv->irq_lock, flags);
1956
1957 inta = ipw_read32(priv, IPW_INTA_RW);
1958 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1959
1960 if (inta == 0xFFFFFFFF) {
1961 /* Hardware disappeared */
1962 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1963 /* Only handle the cached INTA values */
1964 inta = 0;
1965 }
1966 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1967
1968 /* Add any cached INTA values that need to be handled */
1969 inta |= priv->isr_inta;
1970
1971 spin_unlock_irqrestore(&priv->irq_lock, flags);
1972
1973 spin_lock_irqsave(&priv->lock, flags);
1974
1975 /* handle all the justifications for the interrupt */
1976 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1977 ipw_rx(priv);
1978 handled |= IPW_INTA_BIT_RX_TRANSFER;
1979 }
1980
1981 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1982 IPW_DEBUG_HC("Command completed.\n");
1983 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1984 priv->status &= ~STATUS_HCMD_ACTIVE;
1985 wake_up_interruptible(&priv->wait_command_queue);
1986 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1987 }
1988
1989 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1990 IPW_DEBUG_TX("TX_QUEUE_1\n");
1991 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1992 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1993 }
1994
1995 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1996 IPW_DEBUG_TX("TX_QUEUE_2\n");
1997 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1998 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1999 }
2000
2001 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2002 IPW_DEBUG_TX("TX_QUEUE_3\n");
2003 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2004 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2005 }
2006
2007 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2008 IPW_DEBUG_TX("TX_QUEUE_4\n");
2009 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2010 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2011 }
2012
2013 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2014 IPW_WARNING("STATUS_CHANGE\n");
2015 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2016 }
2017
2018 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2019 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2020 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2021 }
2022
2023 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2024 IPW_WARNING("HOST_CMD_DONE\n");
2025 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2026 }
2027
2028 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2029 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2030 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2031 }
2032
2033 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2034 IPW_WARNING("PHY_OFF_DONE\n");
2035 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2036 }
2037
2038 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2039 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2040 priv->status |= STATUS_RF_KILL_HW;
2041 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2042 wake_up_interruptible(&priv->wait_command_queue);
2043 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2044 cancel_delayed_work(&priv->request_scan);
2045 cancel_delayed_work(&priv->request_direct_scan);
2046 cancel_delayed_work(&priv->request_passive_scan);
2047 cancel_delayed_work(&priv->scan_event);
2048 schedule_work(&priv->link_down);
2049 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2050 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2051 }
2052
2053 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2054 IPW_WARNING("Firmware error detected. Restarting.\n");
2055 if (priv->error) {
2056 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2057 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2058 struct ipw_fw_error *error =
2059 ipw_alloc_error_log(priv);
2060 ipw_dump_error_log(priv, error);
2061 kfree(error);
2062 }
2063 } else {
2064 priv->error = ipw_alloc_error_log(priv);
2065 if (priv->error)
2066 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2067 else
2068 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2069 "log.\n");
2070 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2071 ipw_dump_error_log(priv, priv->error);
2072 }
2073
2074 /* XXX: If hardware encryption is for WPA/WPA2,
2075 * we have to notify the supplicant. */
2076 if (priv->ieee->sec.encrypt) {
2077 priv->status &= ~STATUS_ASSOCIATED;
2078 notify_wx_assoc_event(priv);
2079 }
2080
2081 /* Keep the restart process from trying to send host
2082 * commands by clearing the INIT status bit */
2083 priv->status &= ~STATUS_INIT;
2084
2085 /* Cancel currently queued command. */
2086 priv->status &= ~STATUS_HCMD_ACTIVE;
2087 wake_up_interruptible(&priv->wait_command_queue);
2088
2089 schedule_work(&priv->adapter_restart);
2090 handled |= IPW_INTA_BIT_FATAL_ERROR;
2091 }
2092
2093 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2094 IPW_ERROR("Parity error\n");
2095 handled |= IPW_INTA_BIT_PARITY_ERROR;
2096 }
2097
2098 if (handled != inta) {
2099 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2100 }
2101
2102 spin_unlock_irqrestore(&priv->lock, flags);
2103
2104 /* enable all interrupts */
2105 ipw_enable_interrupts(priv);
2106}
2107
2108#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2109static char *get_cmd_string(u8 cmd)
2110{
2111 switch (cmd) {
2112 IPW_CMD(HOST_COMPLETE);
2113 IPW_CMD(POWER_DOWN);
2114 IPW_CMD(SYSTEM_CONFIG);
2115 IPW_CMD(MULTICAST_ADDRESS);
2116 IPW_CMD(SSID);
2117 IPW_CMD(ADAPTER_ADDRESS);
2118 IPW_CMD(PORT_TYPE);
2119 IPW_CMD(RTS_THRESHOLD);
2120 IPW_CMD(FRAG_THRESHOLD);
2121 IPW_CMD(POWER_MODE);
2122 IPW_CMD(WEP_KEY);
2123 IPW_CMD(TGI_TX_KEY);
2124 IPW_CMD(SCAN_REQUEST);
2125 IPW_CMD(SCAN_REQUEST_EXT);
2126 IPW_CMD(ASSOCIATE);
2127 IPW_CMD(SUPPORTED_RATES);
2128 IPW_CMD(SCAN_ABORT);
2129 IPW_CMD(TX_FLUSH);
2130 IPW_CMD(QOS_PARAMETERS);
2131 IPW_CMD(DINO_CONFIG);
2132 IPW_CMD(RSN_CAPABILITIES);
2133 IPW_CMD(RX_KEY);
2134 IPW_CMD(CARD_DISABLE);
2135 IPW_CMD(SEED_NUMBER);
2136 IPW_CMD(TX_POWER);
2137 IPW_CMD(COUNTRY_INFO);
2138 IPW_CMD(AIRONET_INFO);
2139 IPW_CMD(AP_TX_POWER);
2140 IPW_CMD(CCKM_INFO);
2141 IPW_CMD(CCX_VER_INFO);
2142 IPW_CMD(SET_CALIBRATION);
2143 IPW_CMD(SENSITIVITY_CALIB);
2144 IPW_CMD(RETRY_LIMIT);
2145 IPW_CMD(IPW_PRE_POWER_DOWN);
2146 IPW_CMD(VAP_BEACON_TEMPLATE);
2147 IPW_CMD(VAP_DTIM_PERIOD);
2148 IPW_CMD(EXT_SUPPORTED_RATES);
2149 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2150 IPW_CMD(VAP_QUIET_INTERVALS);
2151 IPW_CMD(VAP_CHANNEL_SWITCH);
2152 IPW_CMD(VAP_MANDATORY_CHANNELS);
2153 IPW_CMD(VAP_CELL_PWR_LIMIT);
2154 IPW_CMD(VAP_CF_PARAM_SET);
2155 IPW_CMD(VAP_SET_BEACONING_STATE);
2156 IPW_CMD(MEASUREMENT);
2157 IPW_CMD(POWER_CAPABILITY);
2158 IPW_CMD(SUPPORTED_CHANNELS);
2159 IPW_CMD(TPC_REPORT);
2160 IPW_CMD(WME_INFO);
2161 IPW_CMD(PRODUCTION_COMMAND);
2162 default:
2163 return "UNKNOWN";
2164 }
2165}
2166
2167#define HOST_COMPLETE_TIMEOUT HZ
2168
2169static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2170{
2171 int rc = 0;
2172 unsigned long flags;
2173 unsigned long now, end;
2174
2175 spin_lock_irqsave(&priv->lock, flags);
2176 if (priv->status & STATUS_HCMD_ACTIVE) {
2177 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2178 get_cmd_string(cmd->cmd));
2179 spin_unlock_irqrestore(&priv->lock, flags);
2180 return -EAGAIN;
2181 }
2182
2183 priv->status |= STATUS_HCMD_ACTIVE;
2184
2185 if (priv->cmdlog) {
2186 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2187 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2188 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2189 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2190 cmd->len);
2191 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2192 }
2193
2194 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2195 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2196 priv->status);
2197
2198#ifndef DEBUG_CMD_WEP_KEY
2199 if (cmd->cmd == IPW_CMD_WEP_KEY)
2200 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2201 else
2202#endif
2203 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2204
2205 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2206 if (rc) {
2207 priv->status &= ~STATUS_HCMD_ACTIVE;
2208 IPW_ERROR("Failed to send %s: Reason %d\n",
2209 get_cmd_string(cmd->cmd), rc);
2210 spin_unlock_irqrestore(&priv->lock, flags);
2211 goto exit;
2212 }
2213 spin_unlock_irqrestore(&priv->lock, flags);
2214
2215 now = jiffies;
2216 end = now + HOST_COMPLETE_TIMEOUT;
2217again:
2218 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2219 !(priv->
2220 status & STATUS_HCMD_ACTIVE),
2221 end - now);
2222 if (rc < 0) {
2223 now = jiffies;
2224 if (time_before(now, end))
2225 goto again;
2226 rc = 0;
2227 }
2228
2229 if (rc == 0) {
2230 spin_lock_irqsave(&priv->lock, flags);
2231 if (priv->status & STATUS_HCMD_ACTIVE) {
2232 IPW_ERROR("Failed to send %s: Command timed out.\n",
2233 get_cmd_string(cmd->cmd));
2234 priv->status &= ~STATUS_HCMD_ACTIVE;
2235 spin_unlock_irqrestore(&priv->lock, flags);
2236 rc = -EIO;
2237 goto exit;
2238 }
2239 spin_unlock_irqrestore(&priv->lock, flags);
2240 } else
2241 rc = 0;
2242
2243 if (priv->status & STATUS_RF_KILL_HW) {
2244 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2245 get_cmd_string(cmd->cmd));
2246 rc = -EIO;
2247 goto exit;
2248 }
2249
2250 exit:
2251 if (priv->cmdlog) {
2252 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2253 priv->cmdlog_pos %= priv->cmdlog_len;
2254 }
2255 return rc;
2256}
2257
2258static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2259{
2260 struct host_cmd cmd = {
2261 .cmd = command,
2262 };
2263
2264 return __ipw_send_cmd(priv, &cmd);
2265}
2266
2267static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2268 void *data)
2269{
2270 struct host_cmd cmd = {
2271 .cmd = command,
2272 .len = len,
2273 .param = data,
2274 };
2275
2276 return __ipw_send_cmd(priv, &cmd);
2277}
2278
2279static int ipw_send_host_complete(struct ipw_priv *priv)
2280{
2281 if (!priv) {
2282 IPW_ERROR("Invalid args\n");
2283 return -1;
2284 }
2285
2286 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2287}
2288
2289static int ipw_send_system_config(struct ipw_priv *priv)
2290{
2291 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2292 sizeof(priv->sys_config),
2293 &priv->sys_config);
2294}
2295
2296static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2297{
2298 if (!priv || !ssid) {
2299 IPW_ERROR("Invalid args\n");
2300 return -1;
2301 }
2302
2303 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2304 ssid);
2305}
2306
2307static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2308{
2309 if (!priv || !mac) {
2310 IPW_ERROR("Invalid args\n");
2311 return -1;
2312 }
2313
2314 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2315 priv->net_dev->name, mac);
2316
2317 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2318}
2319
2320static void ipw_adapter_restart(void *adapter)
2321{
2322 struct ipw_priv *priv = adapter;
2323
2324 if (priv->status & STATUS_RF_KILL_MASK)
2325 return;
2326
2327 ipw_down(priv);
2328
2329 if (priv->assoc_network &&
2330 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2331 ipw_remove_current_network(priv);
2332
2333 if (ipw_up(priv)) {
2334 IPW_ERROR("Failed to up device\n");
2335 return;
2336 }
2337}
2338
2339static void ipw_bg_adapter_restart(struct work_struct *work)
2340{
2341 struct ipw_priv *priv =
2342 container_of(work, struct ipw_priv, adapter_restart);
2343 mutex_lock(&priv->mutex);
2344 ipw_adapter_restart(priv);
2345 mutex_unlock(&priv->mutex);
2346}
2347
2348static void ipw_abort_scan(struct ipw_priv *priv);
2349
2350#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2351
2352static void ipw_scan_check(void *data)
2353{
2354 struct ipw_priv *priv = data;
2355
2356 if (priv->status & STATUS_SCAN_ABORTING) {
2357 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2358 "adapter after (%dms).\n",
2359 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2360 schedule_work(&priv->adapter_restart);
2361 } else if (priv->status & STATUS_SCANNING) {
2362 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2363 "after (%dms).\n",
2364 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2365 ipw_abort_scan(priv);
2366 schedule_delayed_work(&priv->scan_check, HZ);
2367 }
2368}
2369
2370static void ipw_bg_scan_check(struct work_struct *work)
2371{
2372 struct ipw_priv *priv =
2373 container_of(work, struct ipw_priv, scan_check.work);
2374 mutex_lock(&priv->mutex);
2375 ipw_scan_check(priv);
2376 mutex_unlock(&priv->mutex);
2377}
2378
2379static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2380 struct ipw_scan_request_ext *request)
2381{
2382 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2383 sizeof(*request), request);
2384}
2385
2386static int ipw_send_scan_abort(struct ipw_priv *priv)
2387{
2388 if (!priv) {
2389 IPW_ERROR("Invalid args\n");
2390 return -1;
2391 }
2392
2393 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2394}
2395
2396static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2397{
2398 struct ipw_sensitivity_calib calib = {
2399 .beacon_rssi_raw = cpu_to_le16(sens),
2400 };
2401
2402 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2403 &calib);
2404}
2405
2406static int ipw_send_associate(struct ipw_priv *priv,
2407 struct ipw_associate *associate)
2408{
2409 if (!priv || !associate) {
2410 IPW_ERROR("Invalid args\n");
2411 return -1;
2412 }
2413
2414 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2415 associate);
2416}
2417
2418static int ipw_send_supported_rates(struct ipw_priv *priv,
2419 struct ipw_supported_rates *rates)
2420{
2421 if (!priv || !rates) {
2422 IPW_ERROR("Invalid args\n");
2423 return -1;
2424 }
2425
2426 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2427 rates);
2428}
2429
2430static int ipw_set_random_seed(struct ipw_priv *priv)
2431{
2432 u32 val;
2433
2434 if (!priv) {
2435 IPW_ERROR("Invalid args\n");
2436 return -1;
2437 }
2438
2439 get_random_bytes(&val, sizeof(val));
2440
2441 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2442}
2443
2444static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2445{
2446 __le32 v = cpu_to_le32(phy_off);
2447 if (!priv) {
2448 IPW_ERROR("Invalid args\n");
2449 return -1;
2450 }
2451
2452 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2453}
2454
2455static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2456{
2457 if (!priv || !power) {
2458 IPW_ERROR("Invalid args\n");
2459 return -1;
2460 }
2461
2462 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2463}
2464
2465static int ipw_set_tx_power(struct ipw_priv *priv)
2466{
2467 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2468 struct ipw_tx_power tx_power;
2469 s8 max_power;
2470 int i;
2471
2472 memset(&tx_power, 0, sizeof(tx_power));
2473
2474 /* configure device for 'G' band */
2475 tx_power.ieee_mode = IPW_G_MODE;
2476 tx_power.num_channels = geo->bg_channels;
2477 for (i = 0; i < geo->bg_channels; i++) {
2478 max_power = geo->bg[i].max_power;
2479 tx_power.channels_tx_power[i].channel_number =
2480 geo->bg[i].channel;
2481 tx_power.channels_tx_power[i].tx_power = max_power ?
2482 min(max_power, priv->tx_power) : priv->tx_power;
2483 }
2484 if (ipw_send_tx_power(priv, &tx_power))
2485 return -EIO;
2486
2487 /* configure device to also handle 'B' band */
2488 tx_power.ieee_mode = IPW_B_MODE;
2489 if (ipw_send_tx_power(priv, &tx_power))
2490 return -EIO;
2491
2492 /* configure device to also handle 'A' band */
2493 if (priv->ieee->abg_true) {
2494 tx_power.ieee_mode = IPW_A_MODE;
2495 tx_power.num_channels = geo->a_channels;
2496 for (i = 0; i < tx_power.num_channels; i++) {
2497 max_power = geo->a[i].max_power;
2498 tx_power.channels_tx_power[i].channel_number =
2499 geo->a[i].channel;
2500 tx_power.channels_tx_power[i].tx_power = max_power ?
2501 min(max_power, priv->tx_power) : priv->tx_power;
2502 }
2503 if (ipw_send_tx_power(priv, &tx_power))
2504 return -EIO;
2505 }
2506 return 0;
2507}
2508
2509static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2510{
2511 struct ipw_rts_threshold rts_threshold = {
2512 .rts_threshold = cpu_to_le16(rts),
2513 };
2514
2515 if (!priv) {
2516 IPW_ERROR("Invalid args\n");
2517 return -1;
2518 }
2519
2520 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2521 sizeof(rts_threshold), &rts_threshold);
2522}
2523
2524static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2525{
2526 struct ipw_frag_threshold frag_threshold = {
2527 .frag_threshold = cpu_to_le16(frag),
2528 };
2529
2530 if (!priv) {
2531 IPW_ERROR("Invalid args\n");
2532 return -1;
2533 }
2534
2535 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2536 sizeof(frag_threshold), &frag_threshold);
2537}
2538
2539static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2540{
2541 __le32 param;
2542
2543 if (!priv) {
2544 IPW_ERROR("Invalid args\n");
2545 return -1;
2546 }
2547
2548 /* If on battery, set to 3, if AC set to CAM, else user
2549 * level */
2550 switch (mode) {
2551 case IPW_POWER_BATTERY:
2552 param = cpu_to_le32(IPW_POWER_INDEX_3);
2553 break;
2554 case IPW_POWER_AC:
2555 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2556 break;
2557 default:
2558 param = cpu_to_le32(mode);
2559 break;
2560 }
2561
2562 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2563 &param);
2564}
2565
2566static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2567{
2568 struct ipw_retry_limit retry_limit = {
2569 .short_retry_limit = slimit,
2570 .long_retry_limit = llimit
2571 };
2572
2573 if (!priv) {
2574 IPW_ERROR("Invalid args\n");
2575 return -1;
2576 }
2577
2578 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2579 &retry_limit);
2580}
2581
2582/*
2583 * The IPW device contains a Microwire compatible EEPROM that stores
2584 * various data like the MAC address. Usually the firmware has exclusive
2585 * access to the eeprom, but during device initialization (before the
2586 * device driver has sent the HostComplete command to the firmware) the
2587 * device driver has read access to the EEPROM by way of indirect addressing
2588 * through a couple of memory mapped registers.
2589 *
2590 * The following is a simplified implementation for pulling data out of the
2591 * the eeprom, along with some helper functions to find information in
2592 * the per device private data's copy of the eeprom.
2593 *
2594 * NOTE: To better understand how these functions work (i.e what is a chip
2595 * select and why do have to keep driving the eeprom clock?), read
2596 * just about any data sheet for a Microwire compatible EEPROM.
2597 */
2598
2599/* write a 32 bit value into the indirect accessor register */
2600static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2601{
2602 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2603
2604 /* the eeprom requires some time to complete the operation */
2605 udelay(p->eeprom_delay);
2606}
2607
2608/* perform a chip select operation */
2609static void eeprom_cs(struct ipw_priv *priv)
2610{
2611 eeprom_write_reg(priv, 0);
2612 eeprom_write_reg(priv, EEPROM_BIT_CS);
2613 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2614 eeprom_write_reg(priv, EEPROM_BIT_CS);
2615}
2616
2617/* perform a chip select operation */
2618static void eeprom_disable_cs(struct ipw_priv *priv)
2619{
2620 eeprom_write_reg(priv, EEPROM_BIT_CS);
2621 eeprom_write_reg(priv, 0);
2622 eeprom_write_reg(priv, EEPROM_BIT_SK);
2623}
2624
2625/* push a single bit down to the eeprom */
2626static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2627{
2628 int d = (bit ? EEPROM_BIT_DI : 0);
2629 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2630 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2631}
2632
2633/* push an opcode followed by an address down to the eeprom */
2634static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2635{
2636 int i;
2637
2638 eeprom_cs(priv);
2639 eeprom_write_bit(priv, 1);
2640 eeprom_write_bit(priv, op & 2);
2641 eeprom_write_bit(priv, op & 1);
2642 for (i = 7; i >= 0; i--) {
2643 eeprom_write_bit(priv, addr & (1 << i));
2644 }
2645}
2646
2647/* pull 16 bits off the eeprom, one bit at a time */
2648static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2649{
2650 int i;
2651 u16 r = 0;
2652
2653 /* Send READ Opcode */
2654 eeprom_op(priv, EEPROM_CMD_READ, addr);
2655
2656 /* Send dummy bit */
2657 eeprom_write_reg(priv, EEPROM_BIT_CS);
2658
2659 /* Read the byte off the eeprom one bit at a time */
2660 for (i = 0; i < 16; i++) {
2661 u32 data = 0;
2662 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2663 eeprom_write_reg(priv, EEPROM_BIT_CS);
2664 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2665 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2666 }
2667
2668 /* Send another dummy bit */
2669 eeprom_write_reg(priv, 0);
2670 eeprom_disable_cs(priv);
2671
2672 return r;
2673}
2674
2675/* helper function for pulling the mac address out of the private */
2676/* data's copy of the eeprom data */
2677static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2678{
2679 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2680}
2681
2682static void ipw_read_eeprom(struct ipw_priv *priv)
2683{
2684 int i;
2685 __le16 *eeprom = (__le16 *) priv->eeprom;
2686
2687 IPW_DEBUG_TRACE(">>\n");
2688
2689 /* read entire contents of eeprom into private buffer */
2690 for (i = 0; i < 128; i++)
2691 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2692
2693 IPW_DEBUG_TRACE("<<\n");
2694}
2695
2696/*
2697 * Either the device driver (i.e. the host) or the firmware can
2698 * load eeprom data into the designated region in SRAM. If neither
2699 * happens then the FW will shutdown with a fatal error.
2700 *
2701 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2702 * bit needs region of shared SRAM needs to be non-zero.
2703 */
2704static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2705{
2706 int i;
2707
2708 IPW_DEBUG_TRACE(">>\n");
2709
2710 /*
2711 If the data looks correct, then copy it to our private
2712 copy. Otherwise let the firmware know to perform the operation
2713 on its own.
2714 */
2715 if (priv->eeprom[EEPROM_VERSION] != 0) {
2716 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2717
2718 /* write the eeprom data to sram */
2719 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2720 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2721
2722 /* Do not load eeprom data on fatal error or suspend */
2723 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2724 } else {
2725 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2726
2727 /* Load eeprom data on fatal error or suspend */
2728 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2729 }
2730
2731 IPW_DEBUG_TRACE("<<\n");
2732}
2733
2734static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2735{
2736 count >>= 2;
2737 if (!count)
2738 return;
2739 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2740 while (count--)
2741 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2742}
2743
2744static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2745{
2746 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2747 CB_NUMBER_OF_ELEMENTS_SMALL *
2748 sizeof(struct command_block));
2749}
2750
2751static int ipw_fw_dma_enable(struct ipw_priv *priv)
2752{ /* start dma engine but no transfers yet */
2753
2754 IPW_DEBUG_FW(">> :\n");
2755
2756 /* Start the dma */
2757 ipw_fw_dma_reset_command_blocks(priv);
2758
2759 /* Write CB base address */
2760 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2761
2762 IPW_DEBUG_FW("<< :\n");
2763 return 0;
2764}
2765
2766static void ipw_fw_dma_abort(struct ipw_priv *priv)
2767{
2768 u32 control = 0;
2769
2770 IPW_DEBUG_FW(">> :\n");
2771
2772 /* set the Stop and Abort bit */
2773 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2774 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2775 priv->sram_desc.last_cb_index = 0;
2776
2777 IPW_DEBUG_FW("<<\n");
2778}
2779
2780static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2781 struct command_block *cb)
2782{
2783 u32 address =
2784 IPW_SHARED_SRAM_DMA_CONTROL +
2785 (sizeof(struct command_block) * index);
2786 IPW_DEBUG_FW(">> :\n");
2787
2788 ipw_write_indirect(priv, address, (u8 *) cb,
2789 (int)sizeof(struct command_block));
2790
2791 IPW_DEBUG_FW("<< :\n");
2792 return 0;
2793
2794}
2795
2796static int ipw_fw_dma_kick(struct ipw_priv *priv)
2797{
2798 u32 control = 0;
2799 u32 index = 0;
2800
2801 IPW_DEBUG_FW(">> :\n");
2802
2803 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2804 ipw_fw_dma_write_command_block(priv, index,
2805 &priv->sram_desc.cb_list[index]);
2806
2807 /* Enable the DMA in the CSR register */
2808 ipw_clear_bit(priv, IPW_RESET_REG,
2809 IPW_RESET_REG_MASTER_DISABLED |
2810 IPW_RESET_REG_STOP_MASTER);
2811
2812 /* Set the Start bit. */
2813 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2814 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2815
2816 IPW_DEBUG_FW("<< :\n");
2817 return 0;
2818}
2819
2820static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2821{
2822 u32 address;
2823 u32 register_value = 0;
2824 u32 cb_fields_address = 0;
2825
2826 IPW_DEBUG_FW(">> :\n");
2827 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2828 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2829
2830 /* Read the DMA Controlor register */
2831 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2832 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2833
2834 /* Print the CB values */
2835 cb_fields_address = address;
2836 register_value = ipw_read_reg32(priv, cb_fields_address);
2837 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2838
2839 cb_fields_address += sizeof(u32);
2840 register_value = ipw_read_reg32(priv, cb_fields_address);
2841 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2842
2843 cb_fields_address += sizeof(u32);
2844 register_value = ipw_read_reg32(priv, cb_fields_address);
2845 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2846 register_value);
2847
2848 cb_fields_address += sizeof(u32);
2849 register_value = ipw_read_reg32(priv, cb_fields_address);
2850 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2851
2852 IPW_DEBUG_FW(">> :\n");
2853}
2854
2855static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2856{
2857 u32 current_cb_address = 0;
2858 u32 current_cb_index = 0;
2859
2860 IPW_DEBUG_FW("<< :\n");
2861 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2862
2863 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2864 sizeof(struct command_block);
2865
2866 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2867 current_cb_index, current_cb_address);
2868
2869 IPW_DEBUG_FW(">> :\n");
2870 return current_cb_index;
2871
2872}
2873
2874static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2875 u32 src_address,
2876 u32 dest_address,
2877 u32 length,
2878 int interrupt_enabled, int is_last)
2879{
2880
2881 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2882 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2883 CB_DEST_SIZE_LONG;
2884 struct command_block *cb;
2885 u32 last_cb_element = 0;
2886
2887 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2888 src_address, dest_address, length);
2889
2890 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2891 return -1;
2892
2893 last_cb_element = priv->sram_desc.last_cb_index;
2894 cb = &priv->sram_desc.cb_list[last_cb_element];
2895 priv->sram_desc.last_cb_index++;
2896
2897 /* Calculate the new CB control word */
2898 if (interrupt_enabled)
2899 control |= CB_INT_ENABLED;
2900
2901 if (is_last)
2902 control |= CB_LAST_VALID;
2903
2904 control |= length;
2905
2906 /* Calculate the CB Element's checksum value */
2907 cb->status = control ^ src_address ^ dest_address;
2908
2909 /* Copy the Source and Destination addresses */
2910 cb->dest_addr = dest_address;
2911 cb->source_addr = src_address;
2912
2913 /* Copy the Control Word last */
2914 cb->control = control;
2915
2916 return 0;
2917}
2918
2919static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2920 int nr, u32 dest_address, u32 len)
2921{
2922 int ret, i;
2923 u32 size;
2924
2925 IPW_DEBUG_FW(">>\n");
2926 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2927 nr, dest_address, len);
2928
2929 for (i = 0; i < nr; i++) {
2930 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2931 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2932 dest_address +
2933 i * CB_MAX_LENGTH, size,
2934 0, 0);
2935 if (ret) {
2936 IPW_DEBUG_FW_INFO(": Failed\n");
2937 return -1;
2938 } else
2939 IPW_DEBUG_FW_INFO(": Added new cb\n");
2940 }
2941
2942 IPW_DEBUG_FW("<<\n");
2943 return 0;
2944}
2945
2946static int ipw_fw_dma_wait(struct ipw_priv *priv)
2947{
2948 u32 current_index = 0, previous_index;
2949 u32 watchdog = 0;
2950
2951 IPW_DEBUG_FW(">> :\n");
2952
2953 current_index = ipw_fw_dma_command_block_index(priv);
2954 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2955 (int)priv->sram_desc.last_cb_index);
2956
2957 while (current_index < priv->sram_desc.last_cb_index) {
2958 udelay(50);
2959 previous_index = current_index;
2960 current_index = ipw_fw_dma_command_block_index(priv);
2961
2962 if (previous_index < current_index) {
2963 watchdog = 0;
2964 continue;
2965 }
2966 if (++watchdog > 400) {
2967 IPW_DEBUG_FW_INFO("Timeout\n");
2968 ipw_fw_dma_dump_command_block(priv);
2969 ipw_fw_dma_abort(priv);
2970 return -1;
2971 }
2972 }
2973
2974 ipw_fw_dma_abort(priv);
2975
2976 /*Disable the DMA in the CSR register */
2977 ipw_set_bit(priv, IPW_RESET_REG,
2978 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2979
2980 IPW_DEBUG_FW("<< dmaWaitSync\n");
2981 return 0;
2982}
2983
2984static void ipw_remove_current_network(struct ipw_priv *priv)
2985{
2986 struct list_head *element, *safe;
2987 struct libipw_network *network = NULL;
2988 unsigned long flags;
2989
2990 spin_lock_irqsave(&priv->ieee->lock, flags);
2991 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2992 network = list_entry(element, struct libipw_network, list);
2993 if (ether_addr_equal(network->bssid, priv->bssid)) {
2994 list_del(element);
2995 list_add_tail(&network->list,
2996 &priv->ieee->network_free_list);
2997 }
2998 }
2999 spin_unlock_irqrestore(&priv->ieee->lock, flags);
3000}
3001
3002/**
3003 * Check that card is still alive.
3004 * Reads debug register from domain0.
3005 * If card is present, pre-defined value should
3006 * be found there.
3007 *
3008 * @param priv
3009 * @return 1 if card is present, 0 otherwise
3010 */
3011static inline int ipw_alive(struct ipw_priv *priv)
3012{
3013 return ipw_read32(priv, 0x90) == 0xd55555d5;
3014}
3015
3016/* timeout in msec, attempted in 10-msec quanta */
3017static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3018 int timeout)
3019{
3020 int i = 0;
3021
3022 do {
3023 if ((ipw_read32(priv, addr) & mask) == mask)
3024 return i;
3025 mdelay(10);
3026 i += 10;
3027 } while (i < timeout);
3028
3029 return -ETIME;
3030}
3031
3032/* These functions load the firmware and micro code for the operation of
3033 * the ipw hardware. It assumes the buffer has all the bits for the
3034 * image and the caller is handling the memory allocation and clean up.
3035 */
3036
3037static int ipw_stop_master(struct ipw_priv *priv)
3038{
3039 int rc;
3040
3041 IPW_DEBUG_TRACE(">>\n");
3042 /* stop master. typical delay - 0 */
3043 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3044
3045 /* timeout is in msec, polled in 10-msec quanta */
3046 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3047 IPW_RESET_REG_MASTER_DISABLED, 100);
3048 if (rc < 0) {
3049 IPW_ERROR("wait for stop master failed after 100ms\n");
3050 return -1;
3051 }
3052
3053 IPW_DEBUG_INFO("stop master %dms\n", rc);
3054
3055 return rc;
3056}
3057
3058static void ipw_arc_release(struct ipw_priv *priv)
3059{
3060 IPW_DEBUG_TRACE(">>\n");
3061 mdelay(5);
3062
3063 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3064
3065 /* no one knows timing, for safety add some delay */
3066 mdelay(5);
3067}
3068
3069struct fw_chunk {
3070 __le32 address;
3071 __le32 length;
3072};
3073
3074static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3075{
3076 int rc = 0, i, addr;
3077 u8 cr = 0;
3078 __le16 *image;
3079
3080 image = (__le16 *) data;
3081
3082 IPW_DEBUG_TRACE(">>\n");
3083
3084 rc = ipw_stop_master(priv);
3085
3086 if (rc < 0)
3087 return rc;
3088
3089 for (addr = IPW_SHARED_LOWER_BOUND;
3090 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3091 ipw_write32(priv, addr, 0);
3092 }
3093
3094 /* no ucode (yet) */
3095 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3096 /* destroy DMA queues */
3097 /* reset sequence */
3098
3099 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3100 ipw_arc_release(priv);
3101 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3102 mdelay(1);
3103
3104 /* reset PHY */
3105 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3106 mdelay(1);
3107
3108 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3109 mdelay(1);
3110
3111 /* enable ucode store */
3112 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3113 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3114 mdelay(1);
3115
3116 /* write ucode */
3117 /**
3118 * @bug
3119 * Do NOT set indirect address register once and then
3120 * store data to indirect data register in the loop.
3121 * It seems very reasonable, but in this case DINO do not
3122 * accept ucode. It is essential to set address each time.
3123 */
3124 /* load new ipw uCode */
3125 for (i = 0; i < len / 2; i++)
3126 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3127 le16_to_cpu(image[i]));
3128
3129 /* enable DINO */
3130 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3131 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3132
3133 /* this is where the igx / win driver deveates from the VAP driver. */
3134
3135 /* wait for alive response */
3136 for (i = 0; i < 100; i++) {
3137 /* poll for incoming data */
3138 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3139 if (cr & DINO_RXFIFO_DATA)
3140 break;
3141 mdelay(1);
3142 }
3143
3144 if (cr & DINO_RXFIFO_DATA) {
3145 /* alive_command_responce size is NOT multiple of 4 */
3146 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3147
3148 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3149 response_buffer[i] =
3150 cpu_to_le32(ipw_read_reg32(priv,
3151 IPW_BASEBAND_RX_FIFO_READ));
3152 memcpy(&priv->dino_alive, response_buffer,
3153 sizeof(priv->dino_alive));
3154 if (priv->dino_alive.alive_command == 1
3155 && priv->dino_alive.ucode_valid == 1) {
3156 rc = 0;
3157 IPW_DEBUG_INFO
3158 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3159 "of %02d/%02d/%02d %02d:%02d\n",
3160 priv->dino_alive.software_revision,
3161 priv->dino_alive.software_revision,
3162 priv->dino_alive.device_identifier,
3163 priv->dino_alive.device_identifier,
3164 priv->dino_alive.time_stamp[0],
3165 priv->dino_alive.time_stamp[1],
3166 priv->dino_alive.time_stamp[2],
3167 priv->dino_alive.time_stamp[3],
3168 priv->dino_alive.time_stamp[4]);
3169 } else {
3170 IPW_DEBUG_INFO("Microcode is not alive\n");
3171 rc = -EINVAL;
3172 }
3173 } else {
3174 IPW_DEBUG_INFO("No alive response from DINO\n");
3175 rc = -ETIME;
3176 }
3177
3178 /* disable DINO, otherwise for some reason
3179 firmware have problem getting alive resp. */
3180 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3181
3182 return rc;
3183}
3184
3185static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3186{
3187 int ret = -1;
3188 int offset = 0;
3189 struct fw_chunk *chunk;
3190 int total_nr = 0;
3191 int i;
3192 struct dma_pool *pool;
3193 void **virts;
3194 dma_addr_t *phys;
3195
3196 IPW_DEBUG_TRACE("<< :\n");
3197
3198 virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *),
3199 GFP_KERNEL);
3200 if (!virts)
3201 return -ENOMEM;
3202
3203 phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t),
3204 GFP_KERNEL);
3205 if (!phys) {
3206 kfree(virts);
3207 return -ENOMEM;
3208 }
3209 pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
3210 0);
3211 if (!pool) {
3212 IPW_ERROR("dma_pool_create failed\n");
3213 kfree(phys);
3214 kfree(virts);
3215 return -ENOMEM;
3216 }
3217
3218 /* Start the Dma */
3219 ret = ipw_fw_dma_enable(priv);
3220
3221 /* the DMA is already ready this would be a bug. */
3222 BUG_ON(priv->sram_desc.last_cb_index > 0);
3223
3224 do {
3225 u32 chunk_len;
3226 u8 *start;
3227 int size;
3228 int nr = 0;
3229
3230 chunk = (struct fw_chunk *)(data + offset);
3231 offset += sizeof(struct fw_chunk);
3232 chunk_len = le32_to_cpu(chunk->length);
3233 start = data + offset;
3234
3235 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3236 for (i = 0; i < nr; i++) {
3237 virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
3238 &phys[total_nr]);
3239 if (!virts[total_nr]) {
3240 ret = -ENOMEM;
3241 goto out;
3242 }
3243 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3244 CB_MAX_LENGTH);
3245 memcpy(virts[total_nr], start, size);
3246 start += size;
3247 total_nr++;
3248 /* We don't support fw chunk larger than 64*8K */
3249 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3250 }
3251
3252 /* build DMA packet and queue up for sending */
3253 /* dma to chunk->address, the chunk->length bytes from data +
3254 * offeset*/
3255 /* Dma loading */
3256 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3257 nr, le32_to_cpu(chunk->address),
3258 chunk_len);
3259 if (ret) {
3260 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3261 goto out;
3262 }
3263
3264 offset += chunk_len;
3265 } while (offset < len);
3266
3267 /* Run the DMA and wait for the answer */
3268 ret = ipw_fw_dma_kick(priv);
3269 if (ret) {
3270 IPW_ERROR("dmaKick Failed\n");
3271 goto out;
3272 }
3273
3274 ret = ipw_fw_dma_wait(priv);
3275 if (ret) {
3276 IPW_ERROR("dmaWaitSync Failed\n");
3277 goto out;
3278 }
3279 out:
3280 for (i = 0; i < total_nr; i++)
3281 dma_pool_free(pool, virts[i], phys[i]);
3282
3283 dma_pool_destroy(pool);
3284 kfree(phys);
3285 kfree(virts);
3286
3287 return ret;
3288}
3289
3290/* stop nic */
3291static int ipw_stop_nic(struct ipw_priv *priv)
3292{
3293 int rc = 0;
3294
3295 /* stop */
3296 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3297
3298 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3299 IPW_RESET_REG_MASTER_DISABLED, 500);
3300 if (rc < 0) {
3301 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3302 return rc;
3303 }
3304
3305 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3306
3307 return rc;
3308}
3309
3310static void ipw_start_nic(struct ipw_priv *priv)
3311{
3312 IPW_DEBUG_TRACE(">>\n");
3313
3314 /* prvHwStartNic release ARC */
3315 ipw_clear_bit(priv, IPW_RESET_REG,
3316 IPW_RESET_REG_MASTER_DISABLED |
3317 IPW_RESET_REG_STOP_MASTER |
3318 CBD_RESET_REG_PRINCETON_RESET);
3319
3320 /* enable power management */
3321 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3322 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3323
3324 IPW_DEBUG_TRACE("<<\n");
3325}
3326
3327static int ipw_init_nic(struct ipw_priv *priv)
3328{
3329 int rc;
3330
3331 IPW_DEBUG_TRACE(">>\n");
3332 /* reset */
3333 /*prvHwInitNic */
3334 /* set "initialization complete" bit to move adapter to D0 state */
3335 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3336
3337 /* low-level PLL activation */
3338 ipw_write32(priv, IPW_READ_INT_REGISTER,
3339 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3340
3341 /* wait for clock stabilization */
3342 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3343 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3344 if (rc < 0)
3345 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3346
3347 /* assert SW reset */
3348 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3349
3350 udelay(10);
3351
3352 /* set "initialization complete" bit to move adapter to D0 state */
3353 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3354
3355 IPW_DEBUG_TRACE(">>\n");
3356 return 0;
3357}
3358
3359/* Call this function from process context, it will sleep in request_firmware.
3360 * Probe is an ok place to call this from.
3361 */
3362static int ipw_reset_nic(struct ipw_priv *priv)
3363{
3364 int rc = 0;
3365 unsigned long flags;
3366
3367 IPW_DEBUG_TRACE(">>\n");
3368
3369 rc = ipw_init_nic(priv);
3370
3371 spin_lock_irqsave(&priv->lock, flags);
3372 /* Clear the 'host command active' bit... */
3373 priv->status &= ~STATUS_HCMD_ACTIVE;
3374 wake_up_interruptible(&priv->wait_command_queue);
3375 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3376 wake_up_interruptible(&priv->wait_state);
3377 spin_unlock_irqrestore(&priv->lock, flags);
3378
3379 IPW_DEBUG_TRACE("<<\n");
3380 return rc;
3381}
3382
3383
3384struct ipw_fw {
3385 __le32 ver;
3386 __le32 boot_size;
3387 __le32 ucode_size;
3388 __le32 fw_size;
3389 u8 data[0];
3390};
3391
3392static int ipw_get_fw(struct ipw_priv *priv,
3393 const struct firmware **raw, const char *name)
3394{
3395 struct ipw_fw *fw;
3396 int rc;
3397
3398 /* ask firmware_class module to get the boot firmware off disk */
3399 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3400 if (rc < 0) {
3401 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3402 return rc;
3403 }
3404
3405 if ((*raw)->size < sizeof(*fw)) {
3406 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3407 return -EINVAL;
3408 }
3409
3410 fw = (void *)(*raw)->data;
3411
3412 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3413 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3414 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3415 name, (*raw)->size);
3416 return -EINVAL;
3417 }
3418
3419 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3420 name,
3421 le32_to_cpu(fw->ver) >> 16,
3422 le32_to_cpu(fw->ver) & 0xff,
3423 (*raw)->size - sizeof(*fw));
3424 return 0;
3425}
3426
3427#define IPW_RX_BUF_SIZE (3000)
3428
3429static void ipw_rx_queue_reset(struct ipw_priv *priv,
3430 struct ipw_rx_queue *rxq)
3431{
3432 unsigned long flags;
3433 int i;
3434
3435 spin_lock_irqsave(&rxq->lock, flags);
3436
3437 INIT_LIST_HEAD(&rxq->rx_free);
3438 INIT_LIST_HEAD(&rxq->rx_used);
3439
3440 /* Fill the rx_used queue with _all_ of the Rx buffers */
3441 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3442 /* In the reset function, these buffers may have been allocated
3443 * to an SKB, so we need to unmap and free potential storage */
3444 if (rxq->pool[i].skb != NULL) {
3445 dma_unmap_single(&priv->pci_dev->dev,
3446 rxq->pool[i].dma_addr,
3447 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
3448 dev_kfree_skb_irq(rxq->pool[i].skb);
3449 rxq->pool[i].skb = NULL;
3450 }
3451 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3452 }
3453
3454 /* Set us so that we have processed and used all buffers, but have
3455 * not restocked the Rx queue with fresh buffers */
3456 rxq->read = rxq->write = 0;
3457 rxq->free_count = 0;
3458 spin_unlock_irqrestore(&rxq->lock, flags);
3459}
3460
3461#ifdef CONFIG_PM
3462static int fw_loaded = 0;
3463static const struct firmware *raw = NULL;
3464
3465static void free_firmware(void)
3466{
3467 if (fw_loaded) {
3468 release_firmware(raw);
3469 raw = NULL;
3470 fw_loaded = 0;
3471 }
3472}
3473#else
3474#define free_firmware() do {} while (0)
3475#endif
3476
3477static int ipw_load(struct ipw_priv *priv)
3478{
3479#ifndef CONFIG_PM
3480 const struct firmware *raw = NULL;
3481#endif
3482 struct ipw_fw *fw;
3483 u8 *boot_img, *ucode_img, *fw_img;
3484 u8 *name = NULL;
3485 int rc = 0, retries = 3;
3486
3487 switch (priv->ieee->iw_mode) {
3488 case IW_MODE_ADHOC:
3489 name = "ipw2200-ibss.fw";
3490 break;
3491#ifdef CONFIG_IPW2200_MONITOR
3492 case IW_MODE_MONITOR:
3493 name = "ipw2200-sniffer.fw";
3494 break;
3495#endif
3496 case IW_MODE_INFRA:
3497 name = "ipw2200-bss.fw";
3498 break;
3499 }
3500
3501 if (!name) {
3502 rc = -EINVAL;
3503 goto error;
3504 }
3505
3506#ifdef CONFIG_PM
3507 if (!fw_loaded) {
3508#endif
3509 rc = ipw_get_fw(priv, &raw, name);
3510 if (rc < 0)
3511 goto error;
3512#ifdef CONFIG_PM
3513 }
3514#endif
3515
3516 fw = (void *)raw->data;
3517 boot_img = &fw->data[0];
3518 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3519 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3520 le32_to_cpu(fw->ucode_size)];
3521
3522 if (!priv->rxq)
3523 priv->rxq = ipw_rx_queue_alloc(priv);
3524 else
3525 ipw_rx_queue_reset(priv, priv->rxq);
3526 if (!priv->rxq) {
3527 IPW_ERROR("Unable to initialize Rx queue\n");
3528 rc = -ENOMEM;
3529 goto error;
3530 }
3531
3532 retry:
3533 /* Ensure interrupts are disabled */
3534 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3535 priv->status &= ~STATUS_INT_ENABLED;
3536
3537 /* ack pending interrupts */
3538 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3539
3540 ipw_stop_nic(priv);
3541
3542 rc = ipw_reset_nic(priv);
3543 if (rc < 0) {
3544 IPW_ERROR("Unable to reset NIC\n");
3545 goto error;
3546 }
3547
3548 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3549 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3550
3551 /* DMA the initial boot firmware into the device */
3552 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3553 if (rc < 0) {
3554 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3555 goto error;
3556 }
3557
3558 /* kick start the device */
3559 ipw_start_nic(priv);
3560
3561 /* wait for the device to finish its initial startup sequence */
3562 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3563 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3564 if (rc < 0) {
3565 IPW_ERROR("device failed to boot initial fw image\n");
3566 goto error;
3567 }
3568 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3569
3570 /* ack fw init done interrupt */
3571 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3572
3573 /* DMA the ucode into the device */
3574 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3575 if (rc < 0) {
3576 IPW_ERROR("Unable to load ucode: %d\n", rc);
3577 goto error;
3578 }
3579
3580 /* stop nic */
3581 ipw_stop_nic(priv);
3582
3583 /* DMA bss firmware into the device */
3584 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3585 if (rc < 0) {
3586 IPW_ERROR("Unable to load firmware: %d\n", rc);
3587 goto error;
3588 }
3589#ifdef CONFIG_PM
3590 fw_loaded = 1;
3591#endif
3592
3593 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3594
3595 rc = ipw_queue_reset(priv);
3596 if (rc < 0) {
3597 IPW_ERROR("Unable to initialize queues\n");
3598 goto error;
3599 }
3600
3601 /* Ensure interrupts are disabled */
3602 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3603 /* ack pending interrupts */
3604 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3605
3606 /* kick start the device */
3607 ipw_start_nic(priv);
3608
3609 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3610 if (retries > 0) {
3611 IPW_WARNING("Parity error. Retrying init.\n");
3612 retries--;
3613 goto retry;
3614 }
3615
3616 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3617 rc = -EIO;
3618 goto error;
3619 }
3620
3621 /* wait for the device */
3622 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3623 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3624 if (rc < 0) {
3625 IPW_ERROR("device failed to start within 500ms\n");
3626 goto error;
3627 }
3628 IPW_DEBUG_INFO("device response after %dms\n", rc);
3629
3630 /* ack fw init done interrupt */
3631 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3632
3633 /* read eeprom data */
3634 priv->eeprom_delay = 1;
3635 ipw_read_eeprom(priv);
3636 /* initialize the eeprom region of sram */
3637 ipw_eeprom_init_sram(priv);
3638
3639 /* enable interrupts */
3640 ipw_enable_interrupts(priv);
3641
3642 /* Ensure our queue has valid packets */
3643 ipw_rx_queue_replenish(priv);
3644
3645 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3646
3647 /* ack pending interrupts */
3648 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3649
3650#ifndef CONFIG_PM
3651 release_firmware(raw);
3652#endif
3653 return 0;
3654
3655 error:
3656 if (priv->rxq) {
3657 ipw_rx_queue_free(priv, priv->rxq);
3658 priv->rxq = NULL;
3659 }
3660 ipw_tx_queue_free(priv);
3661 release_firmware(raw);
3662#ifdef CONFIG_PM
3663 fw_loaded = 0;
3664 raw = NULL;
3665#endif
3666
3667 return rc;
3668}
3669
3670/**
3671 * DMA services
3672 *
3673 * Theory of operation
3674 *
3675 * A queue is a circular buffers with 'Read' and 'Write' pointers.
3676 * 2 empty entries always kept in the buffer to protect from overflow.
3677 *
3678 * For Tx queue, there are low mark and high mark limits. If, after queuing
3679 * the packet for Tx, free space become < low mark, Tx queue stopped. When
3680 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
3681 * Tx queue resumed.
3682 *
3683 * The IPW operates with six queues, one receive queue in the device's
3684 * sram, one transmit queue for sending commands to the device firmware,
3685 * and four transmit queues for data.
3686 *
3687 * The four transmit queues allow for performing quality of service (qos)
3688 * transmissions as per the 802.11 protocol. Currently Linux does not
3689 * provide a mechanism to the user for utilizing prioritized queues, so
3690 * we only utilize the first data transmit queue (queue1).
3691 */
3692
3693/**
3694 * Driver allocates buffers of this size for Rx
3695 */
3696
3697/**
3698 * ipw_rx_queue_space - Return number of free slots available in queue.
3699 */
3700static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3701{
3702 int s = q->read - q->write;
3703 if (s <= 0)
3704 s += RX_QUEUE_SIZE;
3705 /* keep some buffer to not confuse full and empty queue */
3706 s -= 2;
3707 if (s < 0)
3708 s = 0;
3709 return s;
3710}
3711
3712static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3713{
3714 int s = q->last_used - q->first_empty;
3715 if (s <= 0)
3716 s += q->n_bd;
3717 s -= 2; /* keep some reserve to not confuse empty and full situations */
3718 if (s < 0)
3719 s = 0;
3720 return s;
3721}
3722
3723static inline int ipw_queue_inc_wrap(int index, int n_bd)
3724{
3725 return (++index == n_bd) ? 0 : index;
3726}
3727
3728/**
3729 * Initialize common DMA queue structure
3730 *
3731 * @param q queue to init
3732 * @param count Number of BD's to allocate. Should be power of 2
3733 * @param read_register Address for 'read' register
3734 * (not offset within BAR, full address)
3735 * @param write_register Address for 'write' register
3736 * (not offset within BAR, full address)
3737 * @param base_register Address for 'base' register
3738 * (not offset within BAR, full address)
3739 * @param size Address for 'size' register
3740 * (not offset within BAR, full address)
3741 */
3742static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3743 int count, u32 read, u32 write, u32 base, u32 size)
3744{
3745 q->n_bd = count;
3746
3747 q->low_mark = q->n_bd / 4;
3748 if (q->low_mark < 4)
3749 q->low_mark = 4;
3750
3751 q->high_mark = q->n_bd / 8;
3752 if (q->high_mark < 2)
3753 q->high_mark = 2;
3754
3755 q->first_empty = q->last_used = 0;
3756 q->reg_r = read;
3757 q->reg_w = write;
3758
3759 ipw_write32(priv, base, q->dma_addr);
3760 ipw_write32(priv, size, count);
3761 ipw_write32(priv, read, 0);
3762 ipw_write32(priv, write, 0);
3763
3764 _ipw_read32(priv, 0x90);
3765}
3766
3767static int ipw_queue_tx_init(struct ipw_priv *priv,
3768 struct clx2_tx_queue *q,
3769 int count, u32 read, u32 write, u32 base, u32 size)
3770{
3771 struct pci_dev *dev = priv->pci_dev;
3772
3773 q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
3774 if (!q->txb) {
3775 IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
3776 return -ENOMEM;
3777 }
3778
3779 q->bd =
3780 dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
3781 &q->q.dma_addr, GFP_KERNEL);
3782 if (!q->bd) {
3783 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3784 sizeof(q->bd[0]) * count);
3785 kfree(q->txb);
3786 q->txb = NULL;
3787 return -ENOMEM;
3788 }
3789
3790 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3791 return 0;
3792}
3793
3794/**
3795 * Free one TFD, those at index [txq->q.last_used].
3796 * Do NOT advance any indexes
3797 *
3798 * @param dev
3799 * @param txq
3800 */
3801static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3802 struct clx2_tx_queue *txq)
3803{
3804 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3805 struct pci_dev *dev = priv->pci_dev;
3806 int i;
3807
3808 /* classify bd */
3809 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3810 /* nothing to cleanup after for host commands */
3811 return;
3812
3813 /* sanity check */
3814 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3815 IPW_ERROR("Too many chunks: %i\n",
3816 le32_to_cpu(bd->u.data.num_chunks));
3817 /** @todo issue fatal error, it is quite serious situation */
3818 return;
3819 }
3820
3821 /* unmap chunks if any */
3822 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3823 dma_unmap_single(&dev->dev,
3824 le32_to_cpu(bd->u.data.chunk_ptr[i]),
3825 le16_to_cpu(bd->u.data.chunk_len[i]),
3826 DMA_TO_DEVICE);
3827 if (txq->txb[txq->q.last_used]) {
3828 libipw_txb_free(txq->txb[txq->q.last_used]);
3829 txq->txb[txq->q.last_used] = NULL;
3830 }
3831 }
3832}
3833
3834/**
3835 * Deallocate DMA queue.
3836 *
3837 * Empty queue by removing and destroying all BD's.
3838 * Free all buffers.
3839 *
3840 * @param dev
3841 * @param q
3842 */
3843static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3844{
3845 struct clx2_queue *q = &txq->q;
3846 struct pci_dev *dev = priv->pci_dev;
3847
3848 if (q->n_bd == 0)
3849 return;
3850
3851 /* first, empty all BD's */
3852 for (; q->first_empty != q->last_used;
3853 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3854 ipw_queue_tx_free_tfd(priv, txq);
3855 }
3856
3857 /* free buffers belonging to queue itself */
3858 dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3859 q->dma_addr);
3860 kfree(txq->txb);
3861
3862 /* 0 fill whole structure */
3863 memset(txq, 0, sizeof(*txq));
3864}
3865
3866/**
3867 * Destroy all DMA queues and structures
3868 *
3869 * @param priv
3870 */
3871static void ipw_tx_queue_free(struct ipw_priv *priv)
3872{
3873 /* Tx CMD queue */
3874 ipw_queue_tx_free(priv, &priv->txq_cmd);
3875
3876 /* Tx queues */
3877 ipw_queue_tx_free(priv, &priv->txq[0]);
3878 ipw_queue_tx_free(priv, &priv->txq[1]);
3879 ipw_queue_tx_free(priv, &priv->txq[2]);
3880 ipw_queue_tx_free(priv, &priv->txq[3]);
3881}
3882
3883static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3884{
3885 /* First 3 bytes are manufacturer */
3886 bssid[0] = priv->mac_addr[0];
3887 bssid[1] = priv->mac_addr[1];
3888 bssid[2] = priv->mac_addr[2];
3889
3890 /* Last bytes are random */
3891 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3892
3893 bssid[0] &= 0xfe; /* clear multicast bit */
3894 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3895}
3896
3897static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3898{
3899 struct ipw_station_entry entry;
3900 int i;
3901
3902 for (i = 0; i < priv->num_stations; i++) {
3903 if (ether_addr_equal(priv->stations[i], bssid)) {
3904 /* Another node is active in network */
3905 priv->missed_adhoc_beacons = 0;
3906 if (!(priv->config & CFG_STATIC_CHANNEL))
3907 /* when other nodes drop out, we drop out */
3908 priv->config &= ~CFG_ADHOC_PERSIST;
3909
3910 return i;
3911 }
3912 }
3913
3914 if (i == MAX_STATIONS)
3915 return IPW_INVALID_STATION;
3916
3917 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3918
3919 entry.reserved = 0;
3920 entry.support_mode = 0;
3921 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3922 memcpy(priv->stations[i], bssid, ETH_ALEN);
3923 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3924 &entry, sizeof(entry));
3925 priv->num_stations++;
3926
3927 return i;
3928}
3929
3930static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3931{
3932 int i;
3933
3934 for (i = 0; i < priv->num_stations; i++)
3935 if (ether_addr_equal(priv->stations[i], bssid))
3936 return i;
3937
3938 return IPW_INVALID_STATION;
3939}
3940
3941static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3942{
3943 int err;
3944
3945 if (priv->status & STATUS_ASSOCIATING) {
3946 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3947 schedule_work(&priv->disassociate);
3948 return;
3949 }
3950
3951 if (!(priv->status & STATUS_ASSOCIATED)) {
3952 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3953 return;
3954 }
3955
3956 IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3957 "on channel %d.\n",
3958 priv->assoc_request.bssid,
3959 priv->assoc_request.channel);
3960
3961 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3962 priv->status |= STATUS_DISASSOCIATING;
3963
3964 if (quiet)
3965 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3966 else
3967 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3968
3969 err = ipw_send_associate(priv, &priv->assoc_request);
3970 if (err) {
3971 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3972 "failed.\n");
3973 return;
3974 }
3975
3976}
3977
3978static int ipw_disassociate(void *data)
3979{
3980 struct ipw_priv *priv = data;
3981 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3982 return 0;
3983 ipw_send_disassociate(data, 0);
3984 netif_carrier_off(priv->net_dev);
3985 return 1;
3986}
3987
3988static void ipw_bg_disassociate(struct work_struct *work)
3989{
3990 struct ipw_priv *priv =
3991 container_of(work, struct ipw_priv, disassociate);
3992 mutex_lock(&priv->mutex);
3993 ipw_disassociate(priv);
3994 mutex_unlock(&priv->mutex);
3995}
3996
3997static void ipw_system_config(struct work_struct *work)
3998{
3999 struct ipw_priv *priv =
4000 container_of(work, struct ipw_priv, system_config);
4001
4002#ifdef CONFIG_IPW2200_PROMISCUOUS
4003 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4004 priv->sys_config.accept_all_data_frames = 1;
4005 priv->sys_config.accept_non_directed_frames = 1;
4006 priv->sys_config.accept_all_mgmt_bcpr = 1;
4007 priv->sys_config.accept_all_mgmt_frames = 1;
4008 }
4009#endif
4010
4011 ipw_send_system_config(priv);
4012}
4013
4014struct ipw_status_code {
4015 u16 status;
4016 const char *reason;
4017};
4018
4019static const struct ipw_status_code ipw_status_codes[] = {
4020 {0x00, "Successful"},
4021 {0x01, "Unspecified failure"},
4022 {0x0A, "Cannot support all requested capabilities in the "
4023 "Capability information field"},
4024 {0x0B, "Reassociation denied due to inability to confirm that "
4025 "association exists"},
4026 {0x0C, "Association denied due to reason outside the scope of this "
4027 "standard"},
4028 {0x0D,
4029 "Responding station does not support the specified authentication "
4030 "algorithm"},
4031 {0x0E,
4032 "Received an Authentication frame with authentication sequence "
4033 "transaction sequence number out of expected sequence"},
4034 {0x0F, "Authentication rejected because of challenge failure"},
4035 {0x10, "Authentication rejected due to timeout waiting for next "
4036 "frame in sequence"},
4037 {0x11, "Association denied because AP is unable to handle additional "
4038 "associated stations"},
4039 {0x12,
4040 "Association denied due to requesting station not supporting all "
4041 "of the datarates in the BSSBasicServiceSet Parameter"},
4042 {0x13,
4043 "Association denied due to requesting station not supporting "
4044 "short preamble operation"},
4045 {0x14,
4046 "Association denied due to requesting station not supporting "
4047 "PBCC encoding"},
4048 {0x15,
4049 "Association denied due to requesting station not supporting "
4050 "channel agility"},
4051 {0x19,
4052 "Association denied due to requesting station not supporting "
4053 "short slot operation"},
4054 {0x1A,
4055 "Association denied due to requesting station not supporting "
4056 "DSSS-OFDM operation"},
4057 {0x28, "Invalid Information Element"},
4058 {0x29, "Group Cipher is not valid"},
4059 {0x2A, "Pairwise Cipher is not valid"},
4060 {0x2B, "AKMP is not valid"},
4061 {0x2C, "Unsupported RSN IE version"},
4062 {0x2D, "Invalid RSN IE Capabilities"},
4063 {0x2E, "Cipher suite is rejected per security policy"},
4064};
4065
4066static const char *ipw_get_status_code(u16 status)
4067{
4068 int i;
4069 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4070 if (ipw_status_codes[i].status == (status & 0xff))
4071 return ipw_status_codes[i].reason;
4072 return "Unknown status value.";
4073}
4074
4075static inline void average_init(struct average *avg)
4076{
4077 memset(avg, 0, sizeof(*avg));
4078}
4079
4080#define DEPTH_RSSI 8
4081#define DEPTH_NOISE 16
4082static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4083{
4084 return ((depth-1)*prev_avg + val)/depth;
4085}
4086
4087static void average_add(struct average *avg, s16 val)
4088{
4089 avg->sum -= avg->entries[avg->pos];
4090 avg->sum += val;
4091 avg->entries[avg->pos++] = val;
4092 if (unlikely(avg->pos == AVG_ENTRIES)) {
4093 avg->init = 1;
4094 avg->pos = 0;
4095 }
4096}
4097
4098static s16 average_value(struct average *avg)
4099{
4100 if (!unlikely(avg->init)) {
4101 if (avg->pos)
4102 return avg->sum / avg->pos;
4103 return 0;
4104 }
4105
4106 return avg->sum / AVG_ENTRIES;
4107}
4108
4109static void ipw_reset_stats(struct ipw_priv *priv)
4110{
4111 u32 len = sizeof(u32);
4112
4113 priv->quality = 0;
4114
4115 average_init(&priv->average_missed_beacons);
4116 priv->exp_avg_rssi = -60;
4117 priv->exp_avg_noise = -85 + 0x100;
4118
4119 priv->last_rate = 0;
4120 priv->last_missed_beacons = 0;
4121 priv->last_rx_packets = 0;
4122 priv->last_tx_packets = 0;
4123 priv->last_tx_failures = 0;
4124
4125 /* Firmware managed, reset only when NIC is restarted, so we have to
4126 * normalize on the current value */
4127 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4128 &priv->last_rx_err, &len);
4129 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4130 &priv->last_tx_failures, &len);
4131
4132 /* Driver managed, reset with each association */
4133 priv->missed_adhoc_beacons = 0;
4134 priv->missed_beacons = 0;
4135 priv->tx_packets = 0;
4136 priv->rx_packets = 0;
4137
4138}
4139
4140static u32 ipw_get_max_rate(struct ipw_priv *priv)
4141{
4142 u32 i = 0x80000000;
4143 u32 mask = priv->rates_mask;
4144 /* If currently associated in B mode, restrict the maximum
4145 * rate match to B rates */
4146 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4147 mask &= LIBIPW_CCK_RATES_MASK;
4148
4149 /* TODO: Verify that the rate is supported by the current rates
4150 * list. */
4151
4152 while (i && !(mask & i))
4153 i >>= 1;
4154 switch (i) {
4155 case LIBIPW_CCK_RATE_1MB_MASK:
4156 return 1000000;
4157 case LIBIPW_CCK_RATE_2MB_MASK:
4158 return 2000000;
4159 case LIBIPW_CCK_RATE_5MB_MASK:
4160 return 5500000;
4161 case LIBIPW_OFDM_RATE_6MB_MASK:
4162 return 6000000;
4163 case LIBIPW_OFDM_RATE_9MB_MASK:
4164 return 9000000;
4165 case LIBIPW_CCK_RATE_11MB_MASK:
4166 return 11000000;
4167 case LIBIPW_OFDM_RATE_12MB_MASK:
4168 return 12000000;
4169 case LIBIPW_OFDM_RATE_18MB_MASK:
4170 return 18000000;
4171 case LIBIPW_OFDM_RATE_24MB_MASK:
4172 return 24000000;
4173 case LIBIPW_OFDM_RATE_36MB_MASK:
4174 return 36000000;
4175 case LIBIPW_OFDM_RATE_48MB_MASK:
4176 return 48000000;
4177 case LIBIPW_OFDM_RATE_54MB_MASK:
4178 return 54000000;
4179 }
4180
4181 if (priv->ieee->mode == IEEE_B)
4182 return 11000000;
4183 else
4184 return 54000000;
4185}
4186
4187static u32 ipw_get_current_rate(struct ipw_priv *priv)
4188{
4189 u32 rate, len = sizeof(rate);
4190 int err;
4191
4192 if (!(priv->status & STATUS_ASSOCIATED))
4193 return 0;
4194
4195 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4196 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4197 &len);
4198 if (err) {
4199 IPW_DEBUG_INFO("failed querying ordinals.\n");
4200 return 0;
4201 }
4202 } else
4203 return ipw_get_max_rate(priv);
4204
4205 switch (rate) {
4206 case IPW_TX_RATE_1MB:
4207 return 1000000;
4208 case IPW_TX_RATE_2MB:
4209 return 2000000;
4210 case IPW_TX_RATE_5MB:
4211 return 5500000;
4212 case IPW_TX_RATE_6MB:
4213 return 6000000;
4214 case IPW_TX_RATE_9MB:
4215 return 9000000;
4216 case IPW_TX_RATE_11MB:
4217 return 11000000;
4218 case IPW_TX_RATE_12MB:
4219 return 12000000;
4220 case IPW_TX_RATE_18MB:
4221 return 18000000;
4222 case IPW_TX_RATE_24MB:
4223 return 24000000;
4224 case IPW_TX_RATE_36MB:
4225 return 36000000;
4226 case IPW_TX_RATE_48MB:
4227 return 48000000;
4228 case IPW_TX_RATE_54MB:
4229 return 54000000;
4230 }
4231
4232 return 0;
4233}
4234
4235#define IPW_STATS_INTERVAL (2 * HZ)
4236static void ipw_gather_stats(struct ipw_priv *priv)
4237{
4238 u32 rx_err, rx_err_delta, rx_packets_delta;
4239 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4240 u32 missed_beacons_percent, missed_beacons_delta;
4241 u32 quality = 0;
4242 u32 len = sizeof(u32);
4243 s16 rssi;
4244 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4245 rate_quality;
4246 u32 max_rate;
4247
4248 if (!(priv->status & STATUS_ASSOCIATED)) {
4249 priv->quality = 0;
4250 return;
4251 }
4252
4253 /* Update the statistics */
4254 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4255 &priv->missed_beacons, &len);
4256 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4257 priv->last_missed_beacons = priv->missed_beacons;
4258 if (priv->assoc_request.beacon_interval) {
4259 missed_beacons_percent = missed_beacons_delta *
4260 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4261 (IPW_STATS_INTERVAL * 10);
4262 } else {
4263 missed_beacons_percent = 0;
4264 }
4265 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4266
4267 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4268 rx_err_delta = rx_err - priv->last_rx_err;
4269 priv->last_rx_err = rx_err;
4270
4271 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4272 tx_failures_delta = tx_failures - priv->last_tx_failures;
4273 priv->last_tx_failures = tx_failures;
4274
4275 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4276 priv->last_rx_packets = priv->rx_packets;
4277
4278 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4279 priv->last_tx_packets = priv->tx_packets;
4280
4281 /* Calculate quality based on the following:
4282 *
4283 * Missed beacon: 100% = 0, 0% = 70% missed
4284 * Rate: 60% = 1Mbs, 100% = Max
4285 * Rx and Tx errors represent a straight % of total Rx/Tx
4286 * RSSI: 100% = > -50, 0% = < -80
4287 * Rx errors: 100% = 0, 0% = 50% missed
4288 *
4289 * The lowest computed quality is used.
4290 *
4291 */
4292#define BEACON_THRESHOLD 5
4293 beacon_quality = 100 - missed_beacons_percent;
4294 if (beacon_quality < BEACON_THRESHOLD)
4295 beacon_quality = 0;
4296 else
4297 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4298 (100 - BEACON_THRESHOLD);
4299 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4300 beacon_quality, missed_beacons_percent);
4301
4302 priv->last_rate = ipw_get_current_rate(priv);
4303 max_rate = ipw_get_max_rate(priv);
4304 rate_quality = priv->last_rate * 40 / max_rate + 60;
4305 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4306 rate_quality, priv->last_rate / 1000000);
4307
4308 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4309 rx_quality = 100 - (rx_err_delta * 100) /
4310 (rx_packets_delta + rx_err_delta);
4311 else
4312 rx_quality = 100;
4313 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4314 rx_quality, rx_err_delta, rx_packets_delta);
4315
4316 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4317 tx_quality = 100 - (tx_failures_delta * 100) /
4318 (tx_packets_delta + tx_failures_delta);
4319 else
4320 tx_quality = 100;
4321 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4322 tx_quality, tx_failures_delta, tx_packets_delta);
4323
4324 rssi = priv->exp_avg_rssi;
4325 signal_quality =
4326 (100 *
4327 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4328 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4329 (priv->ieee->perfect_rssi - rssi) *
4330 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4331 62 * (priv->ieee->perfect_rssi - rssi))) /
4332 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4333 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4334 if (signal_quality > 100)
4335 signal_quality = 100;
4336 else if (signal_quality < 1)
4337 signal_quality = 0;
4338
4339 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4340 signal_quality, rssi);
4341
4342 quality = min(rx_quality, signal_quality);
4343 quality = min(tx_quality, quality);
4344 quality = min(rate_quality, quality);
4345 quality = min(beacon_quality, quality);
4346 if (quality == beacon_quality)
4347 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4348 quality);
4349 if (quality == rate_quality)
4350 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4351 quality);
4352 if (quality == tx_quality)
4353 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4354 quality);
4355 if (quality == rx_quality)
4356 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4357 quality);
4358 if (quality == signal_quality)
4359 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4360 quality);
4361
4362 priv->quality = quality;
4363
4364 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4365}
4366
4367static void ipw_bg_gather_stats(struct work_struct *work)
4368{
4369 struct ipw_priv *priv =
4370 container_of(work, struct ipw_priv, gather_stats.work);
4371 mutex_lock(&priv->mutex);
4372 ipw_gather_stats(priv);
4373 mutex_unlock(&priv->mutex);
4374}
4375
4376/* Missed beacon behavior:
4377 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4378 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4379 * Above disassociate threshold, give up and stop scanning.
4380 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4381static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4382 int missed_count)
4383{
4384 priv->notif_missed_beacons = missed_count;
4385
4386 if (missed_count > priv->disassociate_threshold &&
4387 priv->status & STATUS_ASSOCIATED) {
4388 /* If associated and we've hit the missed
4389 * beacon threshold, disassociate, turn
4390 * off roaming, and abort any active scans */
4391 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4392 IPW_DL_STATE | IPW_DL_ASSOC,
4393 "Missed beacon: %d - disassociate\n", missed_count);
4394 priv->status &= ~STATUS_ROAMING;
4395 if (priv->status & STATUS_SCANNING) {
4396 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4397 IPW_DL_STATE,
4398 "Aborting scan with missed beacon.\n");
4399 schedule_work(&priv->abort_scan);
4400 }
4401
4402 schedule_work(&priv->disassociate);
4403 return;
4404 }
4405
4406 if (priv->status & STATUS_ROAMING) {
4407 /* If we are currently roaming, then just
4408 * print a debug statement... */
4409 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4410 "Missed beacon: %d - roam in progress\n",
4411 missed_count);
4412 return;
4413 }
4414
4415 if (roaming &&
4416 (missed_count > priv->roaming_threshold &&
4417 missed_count <= priv->disassociate_threshold)) {
4418 /* If we are not already roaming, set the ROAM
4419 * bit in the status and kick off a scan.
4420 * This can happen several times before we reach
4421 * disassociate_threshold. */
4422 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4423 "Missed beacon: %d - initiate "
4424 "roaming\n", missed_count);
4425 if (!(priv->status & STATUS_ROAMING)) {
4426 priv->status |= STATUS_ROAMING;
4427 if (!(priv->status & STATUS_SCANNING))
4428 schedule_delayed_work(&priv->request_scan, 0);
4429 }
4430 return;
4431 }
4432
4433 if (priv->status & STATUS_SCANNING &&
4434 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4435 /* Stop scan to keep fw from getting
4436 * stuck (only if we aren't roaming --
4437 * otherwise we'll never scan more than 2 or 3
4438 * channels..) */
4439 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4440 "Aborting scan with missed beacon.\n");
4441 schedule_work(&priv->abort_scan);
4442 }
4443
4444 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4445}
4446
4447static void ipw_scan_event(struct work_struct *work)
4448{
4449 union iwreq_data wrqu;
4450
4451 struct ipw_priv *priv =
4452 container_of(work, struct ipw_priv, scan_event.work);
4453
4454 wrqu.data.length = 0;
4455 wrqu.data.flags = 0;
4456 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4457}
4458
4459static void handle_scan_event(struct ipw_priv *priv)
4460{
4461 /* Only userspace-requested scan completion events go out immediately */
4462 if (!priv->user_requested_scan) {
4463 schedule_delayed_work(&priv->scan_event,
4464 round_jiffies_relative(msecs_to_jiffies(4000)));
4465 } else {
4466 priv->user_requested_scan = 0;
4467 mod_delayed_work(system_wq, &priv->scan_event, 0);
4468 }
4469}
4470
4471/**
4472 * Handle host notification packet.
4473 * Called from interrupt routine
4474 */
4475static void ipw_rx_notification(struct ipw_priv *priv,
4476 struct ipw_rx_notification *notif)
4477{
4478 u16 size = le16_to_cpu(notif->size);
4479
4480 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4481
4482 switch (notif->subtype) {
4483 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4484 struct notif_association *assoc = &notif->u.assoc;
4485
4486 switch (assoc->state) {
4487 case CMAS_ASSOCIATED:{
4488 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4489 IPW_DL_ASSOC,
4490 "associated: '%*pE' %pM\n",
4491 priv->essid_len, priv->essid,
4492 priv->bssid);
4493
4494 switch (priv->ieee->iw_mode) {
4495 case IW_MODE_INFRA:
4496 memcpy(priv->ieee->bssid,
4497 priv->bssid, ETH_ALEN);
4498 break;
4499
4500 case IW_MODE_ADHOC:
4501 memcpy(priv->ieee->bssid,
4502 priv->bssid, ETH_ALEN);
4503
4504 /* clear out the station table */
4505 priv->num_stations = 0;
4506
4507 IPW_DEBUG_ASSOC
4508 ("queueing adhoc check\n");
4509 schedule_delayed_work(
4510 &priv->adhoc_check,
4511 le16_to_cpu(priv->
4512 assoc_request.
4513 beacon_interval));
4514 break;
4515 }
4516
4517 priv->status &= ~STATUS_ASSOCIATING;
4518 priv->status |= STATUS_ASSOCIATED;
4519 schedule_work(&priv->system_config);
4520
4521#ifdef CONFIG_IPW2200_QOS
4522#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4523 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4524 if ((priv->status & STATUS_AUTH) &&
4525 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4526 == IEEE80211_STYPE_ASSOC_RESP)) {
4527 if ((sizeof
4528 (struct
4529 libipw_assoc_response)
4530 <= size)
4531 && (size <= 2314)) {
4532 struct
4533 libipw_rx_stats
4534 stats = {
4535 .len = size - 1,
4536 };
4537
4538 IPW_DEBUG_QOS
4539 ("QoS Associate "
4540 "size %d\n", size);
4541 libipw_rx_mgt(priv->
4542 ieee,
4543 (struct
4544 libipw_hdr_4addr
4545 *)
4546 &notif->u.raw, &stats);
4547 }
4548 }
4549#endif
4550
4551 schedule_work(&priv->link_up);
4552
4553 break;
4554 }
4555
4556 case CMAS_AUTHENTICATED:{
4557 if (priv->
4558 status & (STATUS_ASSOCIATED |
4559 STATUS_AUTH)) {
4560 struct notif_authenticate *auth
4561 = &notif->u.auth;
4562 IPW_DEBUG(IPW_DL_NOTIF |
4563 IPW_DL_STATE |
4564 IPW_DL_ASSOC,
4565 "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4566 priv->essid_len,
4567 priv->essid,
4568 priv->bssid,
4569 le16_to_cpu(auth->status),
4570 ipw_get_status_code
4571 (le16_to_cpu
4572 (auth->status)));
4573
4574 priv->status &=
4575 ~(STATUS_ASSOCIATING |
4576 STATUS_AUTH |
4577 STATUS_ASSOCIATED);
4578
4579 schedule_work(&priv->link_down);
4580 break;
4581 }
4582
4583 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4584 IPW_DL_ASSOC,
4585 "authenticated: '%*pE' %pM\n",
4586 priv->essid_len, priv->essid,
4587 priv->bssid);
4588 break;
4589 }
4590
4591 case CMAS_INIT:{
4592 if (priv->status & STATUS_AUTH) {
4593 struct
4594 libipw_assoc_response
4595 *resp;
4596 resp =
4597 (struct
4598 libipw_assoc_response
4599 *)&notif->u.raw;
4600 IPW_DEBUG(IPW_DL_NOTIF |
4601 IPW_DL_STATE |
4602 IPW_DL_ASSOC,
4603 "association failed (0x%04X): %s\n",
4604 le16_to_cpu(resp->status),
4605 ipw_get_status_code
4606 (le16_to_cpu
4607 (resp->status)));
4608 }
4609
4610 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4611 IPW_DL_ASSOC,
4612 "disassociated: '%*pE' %pM\n",
4613 priv->essid_len, priv->essid,
4614 priv->bssid);
4615
4616 priv->status &=
4617 ~(STATUS_DISASSOCIATING |
4618 STATUS_ASSOCIATING |
4619 STATUS_ASSOCIATED | STATUS_AUTH);
4620 if (priv->assoc_network
4621 && (priv->assoc_network->
4622 capability &
4623 WLAN_CAPABILITY_IBSS))
4624 ipw_remove_current_network
4625 (priv);
4626
4627 schedule_work(&priv->link_down);
4628
4629 break;
4630 }
4631
4632 case CMAS_RX_ASSOC_RESP:
4633 break;
4634
4635 default:
4636 IPW_ERROR("assoc: unknown (%d)\n",
4637 assoc->state);
4638 break;
4639 }
4640
4641 break;
4642 }
4643
4644 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4645 struct notif_authenticate *auth = &notif->u.auth;
4646 switch (auth->state) {
4647 case CMAS_AUTHENTICATED:
4648 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4649 "authenticated: '%*pE' %pM\n",
4650 priv->essid_len, priv->essid,
4651 priv->bssid);
4652 priv->status |= STATUS_AUTH;
4653 break;
4654
4655 case CMAS_INIT:
4656 if (priv->status & STATUS_AUTH) {
4657 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4658 IPW_DL_ASSOC,
4659 "authentication failed (0x%04X): %s\n",
4660 le16_to_cpu(auth->status),
4661 ipw_get_status_code(le16_to_cpu
4662 (auth->
4663 status)));
4664 }
4665 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4666 IPW_DL_ASSOC,
4667 "deauthenticated: '%*pE' %pM\n",
4668 priv->essid_len, priv->essid,
4669 priv->bssid);
4670
4671 priv->status &= ~(STATUS_ASSOCIATING |
4672 STATUS_AUTH |
4673 STATUS_ASSOCIATED);
4674
4675 schedule_work(&priv->link_down);
4676 break;
4677
4678 case CMAS_TX_AUTH_SEQ_1:
4679 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4680 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4681 break;
4682 case CMAS_RX_AUTH_SEQ_2:
4683 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4684 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4685 break;
4686 case CMAS_AUTH_SEQ_1_PASS:
4687 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4688 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4689 break;
4690 case CMAS_AUTH_SEQ_1_FAIL:
4691 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4692 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4693 break;
4694 case CMAS_TX_AUTH_SEQ_3:
4695 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4696 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4697 break;
4698 case CMAS_RX_AUTH_SEQ_4:
4699 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4700 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4701 break;
4702 case CMAS_AUTH_SEQ_2_PASS:
4703 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4704 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4705 break;
4706 case CMAS_AUTH_SEQ_2_FAIL:
4707 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4708 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4709 break;
4710 case CMAS_TX_ASSOC:
4711 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4712 IPW_DL_ASSOC, "TX_ASSOC\n");
4713 break;
4714 case CMAS_RX_ASSOC_RESP:
4715 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4716 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4717
4718 break;
4719 case CMAS_ASSOCIATED:
4720 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4721 IPW_DL_ASSOC, "ASSOCIATED\n");
4722 break;
4723 default:
4724 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4725 auth->state);
4726 break;
4727 }
4728 break;
4729 }
4730
4731 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4732 struct notif_channel_result *x =
4733 &notif->u.channel_result;
4734
4735 if (size == sizeof(*x)) {
4736 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4737 x->channel_num);
4738 } else {
4739 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4740 "(should be %zd)\n",
4741 size, sizeof(*x));
4742 }
4743 break;
4744 }
4745
4746 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4747 struct notif_scan_complete *x = &notif->u.scan_complete;
4748 if (size == sizeof(*x)) {
4749 IPW_DEBUG_SCAN
4750 ("Scan completed: type %d, %d channels, "
4751 "%d status\n", x->scan_type,
4752 x->num_channels, x->status);
4753 } else {
4754 IPW_ERROR("Scan completed of wrong size %d "
4755 "(should be %zd)\n",
4756 size, sizeof(*x));
4757 }
4758
4759 priv->status &=
4760 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4761
4762 wake_up_interruptible(&priv->wait_state);
4763 cancel_delayed_work(&priv->scan_check);
4764
4765 if (priv->status & STATUS_EXIT_PENDING)
4766 break;
4767
4768 priv->ieee->scans++;
4769
4770#ifdef CONFIG_IPW2200_MONITOR
4771 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4772 priv->status |= STATUS_SCAN_FORCED;
4773 schedule_delayed_work(&priv->request_scan, 0);
4774 break;
4775 }
4776 priv->status &= ~STATUS_SCAN_FORCED;
4777#endif /* CONFIG_IPW2200_MONITOR */
4778
4779 /* Do queued direct scans first */
4780 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4781 schedule_delayed_work(&priv->request_direct_scan, 0);
4782
4783 if (!(priv->status & (STATUS_ASSOCIATED |
4784 STATUS_ASSOCIATING |
4785 STATUS_ROAMING |
4786 STATUS_DISASSOCIATING)))
4787 schedule_work(&priv->associate);
4788 else if (priv->status & STATUS_ROAMING) {
4789 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4790 /* If a scan completed and we are in roam mode, then
4791 * the scan that completed was the one requested as a
4792 * result of entering roam... so, schedule the
4793 * roam work */
4794 schedule_work(&priv->roam);
4795 else
4796 /* Don't schedule if we aborted the scan */
4797 priv->status &= ~STATUS_ROAMING;
4798 } else if (priv->status & STATUS_SCAN_PENDING)
4799 schedule_delayed_work(&priv->request_scan, 0);
4800 else if (priv->config & CFG_BACKGROUND_SCAN
4801 && priv->status & STATUS_ASSOCIATED)
4802 schedule_delayed_work(&priv->request_scan,
4803 round_jiffies_relative(HZ));
4804
4805 /* Send an empty event to user space.
4806 * We don't send the received data on the event because
4807 * it would require us to do complex transcoding, and
4808 * we want to minimise the work done in the irq handler
4809 * Use a request to extract the data.
4810 * Also, we generate this even for any scan, regardless
4811 * on how the scan was initiated. User space can just
4812 * sync on periodic scan to get fresh data...
4813 * Jean II */
4814 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4815 handle_scan_event(priv);
4816 break;
4817 }
4818
4819 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4820 struct notif_frag_length *x = &notif->u.frag_len;
4821
4822 if (size == sizeof(*x))
4823 IPW_ERROR("Frag length: %d\n",
4824 le16_to_cpu(x->frag_length));
4825 else
4826 IPW_ERROR("Frag length of wrong size %d "
4827 "(should be %zd)\n",
4828 size, sizeof(*x));
4829 break;
4830 }
4831
4832 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4833 struct notif_link_deterioration *x =
4834 &notif->u.link_deterioration;
4835
4836 if (size == sizeof(*x)) {
4837 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4838 "link deterioration: type %d, cnt %d\n",
4839 x->silence_notification_type,
4840 x->silence_count);
4841 memcpy(&priv->last_link_deterioration, x,
4842 sizeof(*x));
4843 } else {
4844 IPW_ERROR("Link Deterioration of wrong size %d "
4845 "(should be %zd)\n",
4846 size, sizeof(*x));
4847 }
4848 break;
4849 }
4850
4851 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4852 IPW_ERROR("Dino config\n");
4853 if (priv->hcmd
4854 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4855 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4856
4857 break;
4858 }
4859
4860 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4861 struct notif_beacon_state *x = &notif->u.beacon_state;
4862 if (size != sizeof(*x)) {
4863 IPW_ERROR
4864 ("Beacon state of wrong size %d (should "
4865 "be %zd)\n", size, sizeof(*x));
4866 break;
4867 }
4868
4869 if (le32_to_cpu(x->state) ==
4870 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4871 ipw_handle_missed_beacon(priv,
4872 le32_to_cpu(x->
4873 number));
4874
4875 break;
4876 }
4877
4878 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4879 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4880 if (size == sizeof(*x)) {
4881 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4882 "0x%02x station %d\n",
4883 x->key_state, x->security_type,
4884 x->station_index);
4885 break;
4886 }
4887
4888 IPW_ERROR
4889 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4890 size, sizeof(*x));
4891 break;
4892 }
4893
4894 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4895 struct notif_calibration *x = &notif->u.calibration;
4896
4897 if (size == sizeof(*x)) {
4898 memcpy(&priv->calib, x, sizeof(*x));
4899 IPW_DEBUG_INFO("TODO: Calibration\n");
4900 break;
4901 }
4902
4903 IPW_ERROR
4904 ("Calibration of wrong size %d (should be %zd)\n",
4905 size, sizeof(*x));
4906 break;
4907 }
4908
4909 case HOST_NOTIFICATION_NOISE_STATS:{
4910 if (size == sizeof(u32)) {
4911 priv->exp_avg_noise =
4912 exponential_average(priv->exp_avg_noise,
4913 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4914 DEPTH_NOISE);
4915 break;
4916 }
4917
4918 IPW_ERROR
4919 ("Noise stat is wrong size %d (should be %zd)\n",
4920 size, sizeof(u32));
4921 break;
4922 }
4923
4924 default:
4925 IPW_DEBUG_NOTIF("Unknown notification: "
4926 "subtype=%d,flags=0x%2x,size=%d\n",
4927 notif->subtype, notif->flags, size);
4928 }
4929}
4930
4931/**
4932 * Destroys all DMA structures and initialise them again
4933 *
4934 * @param priv
4935 * @return error code
4936 */
4937static int ipw_queue_reset(struct ipw_priv *priv)
4938{
4939 int rc = 0;
4940 /** @todo customize queue sizes */
4941 int nTx = 64, nTxCmd = 8;
4942 ipw_tx_queue_free(priv);
4943 /* Tx CMD queue */
4944 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4945 IPW_TX_CMD_QUEUE_READ_INDEX,
4946 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4947 IPW_TX_CMD_QUEUE_BD_BASE,
4948 IPW_TX_CMD_QUEUE_BD_SIZE);
4949 if (rc) {
4950 IPW_ERROR("Tx Cmd queue init failed\n");
4951 goto error;
4952 }
4953 /* Tx queue(s) */
4954 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4955 IPW_TX_QUEUE_0_READ_INDEX,
4956 IPW_TX_QUEUE_0_WRITE_INDEX,
4957 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4958 if (rc) {
4959 IPW_ERROR("Tx 0 queue init failed\n");
4960 goto error;
4961 }
4962 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4963 IPW_TX_QUEUE_1_READ_INDEX,
4964 IPW_TX_QUEUE_1_WRITE_INDEX,
4965 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4966 if (rc) {
4967 IPW_ERROR("Tx 1 queue init failed\n");
4968 goto error;
4969 }
4970 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4971 IPW_TX_QUEUE_2_READ_INDEX,
4972 IPW_TX_QUEUE_2_WRITE_INDEX,
4973 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4974 if (rc) {
4975 IPW_ERROR("Tx 2 queue init failed\n");
4976 goto error;
4977 }
4978 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4979 IPW_TX_QUEUE_3_READ_INDEX,
4980 IPW_TX_QUEUE_3_WRITE_INDEX,
4981 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4982 if (rc) {
4983 IPW_ERROR("Tx 3 queue init failed\n");
4984 goto error;
4985 }
4986 /* statistics */
4987 priv->rx_bufs_min = 0;
4988 priv->rx_pend_max = 0;
4989 return rc;
4990
4991 error:
4992 ipw_tx_queue_free(priv);
4993 return rc;
4994}
4995
4996/**
4997 * Reclaim Tx queue entries no more used by NIC.
4998 *
4999 * When FW advances 'R' index, all entries between old and
5000 * new 'R' index need to be reclaimed. As result, some free space
5001 * forms. If there is enough free space (> low mark), wake Tx queue.
5002 *
5003 * @note Need to protect against garbage in 'R' index
5004 * @param priv
5005 * @param txq
5006 * @param qindex
5007 * @return Number of used entries remains in the queue
5008 */
5009static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5010 struct clx2_tx_queue *txq, int qindex)
5011{
5012 u32 hw_tail;
5013 int used;
5014 struct clx2_queue *q = &txq->q;
5015
5016 hw_tail = ipw_read32(priv, q->reg_r);
5017 if (hw_tail >= q->n_bd) {
5018 IPW_ERROR
5019 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5020 hw_tail, q->n_bd);
5021 goto done;
5022 }
5023 for (; q->last_used != hw_tail;
5024 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5025 ipw_queue_tx_free_tfd(priv, txq);
5026 priv->tx_packets++;
5027 }
5028 done:
5029 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5030 (qindex >= 0))
5031 netif_wake_queue(priv->net_dev);
5032 used = q->first_empty - q->last_used;
5033 if (used < 0)
5034 used += q->n_bd;
5035
5036 return used;
5037}
5038
5039static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5040 int len, int sync)
5041{
5042 struct clx2_tx_queue *txq = &priv->txq_cmd;
5043 struct clx2_queue *q = &txq->q;
5044 struct tfd_frame *tfd;
5045
5046 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5047 IPW_ERROR("No space for Tx\n");
5048 return -EBUSY;
5049 }
5050
5051 tfd = &txq->bd[q->first_empty];
5052 txq->txb[q->first_empty] = NULL;
5053
5054 memset(tfd, 0, sizeof(*tfd));
5055 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5056 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5057 priv->hcmd_seq++;
5058 tfd->u.cmd.index = hcmd;
5059 tfd->u.cmd.length = len;
5060 memcpy(tfd->u.cmd.payload, buf, len);
5061 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5062 ipw_write32(priv, q->reg_w, q->first_empty);
5063 _ipw_read32(priv, 0x90);
5064
5065 return 0;
5066}
5067
5068/*
5069 * Rx theory of operation
5070 *
5071 * The host allocates 32 DMA target addresses and passes the host address
5072 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5073 * 0 to 31
5074 *
5075 * Rx Queue Indexes
5076 * The host/firmware share two index registers for managing the Rx buffers.
5077 *
5078 * The READ index maps to the first position that the firmware may be writing
5079 * to -- the driver can read up to (but not including) this position and get
5080 * good data.
5081 * The READ index is managed by the firmware once the card is enabled.
5082 *
5083 * The WRITE index maps to the last position the driver has read from -- the
5084 * position preceding WRITE is the last slot the firmware can place a packet.
5085 *
5086 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5087 * WRITE = READ.
5088 *
5089 * During initialization the host sets up the READ queue position to the first
5090 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5091 *
5092 * When the firmware places a packet in a buffer it will advance the READ index
5093 * and fire the RX interrupt. The driver can then query the READ index and
5094 * process as many packets as possible, moving the WRITE index forward as it
5095 * resets the Rx queue buffers with new memory.
5096 *
5097 * The management in the driver is as follows:
5098 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5099 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5100 * to replensish the ipw->rxq->rx_free.
5101 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5102 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5103 * 'processed' and 'read' driver indexes as well)
5104 * + A received packet is processed and handed to the kernel network stack,
5105 * detached from the ipw->rxq. The driver 'processed' index is updated.
5106 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5107 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5108 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5109 * were enough free buffers and RX_STALLED is set it is cleared.
5110 *
5111 *
5112 * Driver sequence:
5113 *
5114 * ipw_rx_queue_alloc() Allocates rx_free
5115 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5116 * ipw_rx_queue_restock
5117 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5118 * queue, updates firmware pointers, and updates
5119 * the WRITE index. If insufficient rx_free buffers
5120 * are available, schedules ipw_rx_queue_replenish
5121 *
5122 * -- enable interrupts --
5123 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5124 * READ INDEX, detaching the SKB from the pool.
5125 * Moves the packet buffer from queue to rx_used.
5126 * Calls ipw_rx_queue_restock to refill any empty
5127 * slots.
5128 * ...
5129 *
5130 */
5131
5132/*
5133 * If there are slots in the RX queue that need to be restocked,
5134 * and we have free pre-allocated buffers, fill the ranks as much
5135 * as we can pulling from rx_free.
5136 *
5137 * This moves the 'write' index forward to catch up with 'processed', and
5138 * also updates the memory address in the firmware to reference the new
5139 * target buffer.
5140 */
5141static void ipw_rx_queue_restock(struct ipw_priv *priv)
5142{
5143 struct ipw_rx_queue *rxq = priv->rxq;
5144 struct list_head *element;
5145 struct ipw_rx_mem_buffer *rxb;
5146 unsigned long flags;
5147 int write;
5148
5149 spin_lock_irqsave(&rxq->lock, flags);
5150 write = rxq->write;
5151 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5152 element = rxq->rx_free.next;
5153 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5154 list_del(element);
5155
5156 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5157 rxb->dma_addr);
5158 rxq->queue[rxq->write] = rxb;
5159 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5160 rxq->free_count--;
5161 }
5162 spin_unlock_irqrestore(&rxq->lock, flags);
5163
5164 /* If the pre-allocated buffer pool is dropping low, schedule to
5165 * refill it */
5166 if (rxq->free_count <= RX_LOW_WATERMARK)
5167 schedule_work(&priv->rx_replenish);
5168
5169 /* If we've added more space for the firmware to place data, tell it */
5170 if (write != rxq->write)
5171 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5172}
5173
5174/*
5175 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5176 * Also restock the Rx queue via ipw_rx_queue_restock.
5177 *
5178 * This is called as a scheduled work item (except for during initialization)
5179 */
5180static void ipw_rx_queue_replenish(void *data)
5181{
5182 struct ipw_priv *priv = data;
5183 struct ipw_rx_queue *rxq = priv->rxq;
5184 struct list_head *element;
5185 struct ipw_rx_mem_buffer *rxb;
5186 unsigned long flags;
5187
5188 spin_lock_irqsave(&rxq->lock, flags);
5189 while (!list_empty(&rxq->rx_used)) {
5190 element = rxq->rx_used.next;
5191 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5192 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5193 if (!rxb->skb) {
5194 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5195 priv->net_dev->name);
5196 /* We don't reschedule replenish work here -- we will
5197 * call the restock method and if it still needs
5198 * more buffers it will schedule replenish */
5199 break;
5200 }
5201 list_del(element);
5202
5203 rxb->dma_addr =
5204 dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
5205 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
5206
5207 list_add_tail(&rxb->list, &rxq->rx_free);
5208 rxq->free_count++;
5209 }
5210 spin_unlock_irqrestore(&rxq->lock, flags);
5211
5212 ipw_rx_queue_restock(priv);
5213}
5214
5215static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5216{
5217 struct ipw_priv *priv =
5218 container_of(work, struct ipw_priv, rx_replenish);
5219 mutex_lock(&priv->mutex);
5220 ipw_rx_queue_replenish(priv);
5221 mutex_unlock(&priv->mutex);
5222}
5223
5224/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5225 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5226 * This free routine walks the list of POOL entries and if SKB is set to
5227 * non NULL it is unmapped and freed
5228 */
5229static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5230{
5231 int i;
5232
5233 if (!rxq)
5234 return;
5235
5236 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5237 if (rxq->pool[i].skb != NULL) {
5238 dma_unmap_single(&priv->pci_dev->dev,
5239 rxq->pool[i].dma_addr,
5240 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
5241 dev_kfree_skb(rxq->pool[i].skb);
5242 }
5243 }
5244
5245 kfree(rxq);
5246}
5247
5248static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5249{
5250 struct ipw_rx_queue *rxq;
5251 int i;
5252
5253 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5254 if (unlikely(!rxq)) {
5255 IPW_ERROR("memory allocation failed\n");
5256 return NULL;
5257 }
5258 spin_lock_init(&rxq->lock);
5259 INIT_LIST_HEAD(&rxq->rx_free);
5260 INIT_LIST_HEAD(&rxq->rx_used);
5261
5262 /* Fill the rx_used queue with _all_ of the Rx buffers */
5263 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5264 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5265
5266 /* Set us so that we have processed and used all buffers, but have
5267 * not restocked the Rx queue with fresh buffers */
5268 rxq->read = rxq->write = 0;
5269 rxq->free_count = 0;
5270
5271 return rxq;
5272}
5273
5274static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5275{
5276 rate &= ~LIBIPW_BASIC_RATE_MASK;
5277 if (ieee_mode == IEEE_A) {
5278 switch (rate) {
5279 case LIBIPW_OFDM_RATE_6MB:
5280 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5281 1 : 0;
5282 case LIBIPW_OFDM_RATE_9MB:
5283 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5284 1 : 0;
5285 case LIBIPW_OFDM_RATE_12MB:
5286 return priv->
5287 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5288 case LIBIPW_OFDM_RATE_18MB:
5289 return priv->
5290 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5291 case LIBIPW_OFDM_RATE_24MB:
5292 return priv->
5293 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5294 case LIBIPW_OFDM_RATE_36MB:
5295 return priv->
5296 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5297 case LIBIPW_OFDM_RATE_48MB:
5298 return priv->
5299 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5300 case LIBIPW_OFDM_RATE_54MB:
5301 return priv->
5302 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5303 default:
5304 return 0;
5305 }
5306 }
5307
5308 /* B and G mixed */
5309 switch (rate) {
5310 case LIBIPW_CCK_RATE_1MB:
5311 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5312 case LIBIPW_CCK_RATE_2MB:
5313 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5314 case LIBIPW_CCK_RATE_5MB:
5315 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5316 case LIBIPW_CCK_RATE_11MB:
5317 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5318 }
5319
5320 /* If we are limited to B modulations, bail at this point */
5321 if (ieee_mode == IEEE_B)
5322 return 0;
5323
5324 /* G */
5325 switch (rate) {
5326 case LIBIPW_OFDM_RATE_6MB:
5327 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5328 case LIBIPW_OFDM_RATE_9MB:
5329 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5330 case LIBIPW_OFDM_RATE_12MB:
5331 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5332 case LIBIPW_OFDM_RATE_18MB:
5333 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5334 case LIBIPW_OFDM_RATE_24MB:
5335 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5336 case LIBIPW_OFDM_RATE_36MB:
5337 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5338 case LIBIPW_OFDM_RATE_48MB:
5339 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5340 case LIBIPW_OFDM_RATE_54MB:
5341 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5342 }
5343
5344 return 0;
5345}
5346
5347static int ipw_compatible_rates(struct ipw_priv *priv,
5348 const struct libipw_network *network,
5349 struct ipw_supported_rates *rates)
5350{
5351 int num_rates, i;
5352
5353 memset(rates, 0, sizeof(*rates));
5354 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5355 rates->num_rates = 0;
5356 for (i = 0; i < num_rates; i++) {
5357 if (!ipw_is_rate_in_mask(priv, network->mode,
5358 network->rates[i])) {
5359
5360 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5361 IPW_DEBUG_SCAN("Adding masked mandatory "
5362 "rate %02X\n",
5363 network->rates[i]);
5364 rates->supported_rates[rates->num_rates++] =
5365 network->rates[i];
5366 continue;
5367 }
5368
5369 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5370 network->rates[i], priv->rates_mask);
5371 continue;
5372 }
5373
5374 rates->supported_rates[rates->num_rates++] = network->rates[i];
5375 }
5376
5377 num_rates = min(network->rates_ex_len,
5378 (u8) (IPW_MAX_RATES - num_rates));
5379 for (i = 0; i < num_rates; i++) {
5380 if (!ipw_is_rate_in_mask(priv, network->mode,
5381 network->rates_ex[i])) {
5382 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5383 IPW_DEBUG_SCAN("Adding masked mandatory "
5384 "rate %02X\n",
5385 network->rates_ex[i]);
5386 rates->supported_rates[rates->num_rates++] =
5387 network->rates[i];
5388 continue;
5389 }
5390
5391 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5392 network->rates_ex[i], priv->rates_mask);
5393 continue;
5394 }
5395
5396 rates->supported_rates[rates->num_rates++] =
5397 network->rates_ex[i];
5398 }
5399
5400 return 1;
5401}
5402
5403static void ipw_copy_rates(struct ipw_supported_rates *dest,
5404 const struct ipw_supported_rates *src)
5405{
5406 u8 i;
5407 for (i = 0; i < src->num_rates; i++)
5408 dest->supported_rates[i] = src->supported_rates[i];
5409 dest->num_rates = src->num_rates;
5410}
5411
5412/* TODO: Look at sniffed packets in the air to determine if the basic rate
5413 * mask should ever be used -- right now all callers to add the scan rates are
5414 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5415static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5416 u8 modulation, u32 rate_mask)
5417{
5418 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5419 LIBIPW_BASIC_RATE_MASK : 0;
5420
5421 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5422 rates->supported_rates[rates->num_rates++] =
5423 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5424
5425 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5426 rates->supported_rates[rates->num_rates++] =
5427 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5428
5429 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5430 rates->supported_rates[rates->num_rates++] = basic_mask |
5431 LIBIPW_CCK_RATE_5MB;
5432
5433 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5434 rates->supported_rates[rates->num_rates++] = basic_mask |
5435 LIBIPW_CCK_RATE_11MB;
5436}
5437
5438static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5439 u8 modulation, u32 rate_mask)
5440{
5441 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5442 LIBIPW_BASIC_RATE_MASK : 0;
5443
5444 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5445 rates->supported_rates[rates->num_rates++] = basic_mask |
5446 LIBIPW_OFDM_RATE_6MB;
5447
5448 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5449 rates->supported_rates[rates->num_rates++] =
5450 LIBIPW_OFDM_RATE_9MB;
5451
5452 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5453 rates->supported_rates[rates->num_rates++] = basic_mask |
5454 LIBIPW_OFDM_RATE_12MB;
5455
5456 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5457 rates->supported_rates[rates->num_rates++] =
5458 LIBIPW_OFDM_RATE_18MB;
5459
5460 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5461 rates->supported_rates[rates->num_rates++] = basic_mask |
5462 LIBIPW_OFDM_RATE_24MB;
5463
5464 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5465 rates->supported_rates[rates->num_rates++] =
5466 LIBIPW_OFDM_RATE_36MB;
5467
5468 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5469 rates->supported_rates[rates->num_rates++] =
5470 LIBIPW_OFDM_RATE_48MB;
5471
5472 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5473 rates->supported_rates[rates->num_rates++] =
5474 LIBIPW_OFDM_RATE_54MB;
5475}
5476
5477struct ipw_network_match {
5478 struct libipw_network *network;
5479 struct ipw_supported_rates rates;
5480};
5481
5482static int ipw_find_adhoc_network(struct ipw_priv *priv,
5483 struct ipw_network_match *match,
5484 struct libipw_network *network,
5485 int roaming)
5486{
5487 struct ipw_supported_rates rates;
5488
5489 /* Verify that this network's capability is compatible with the
5490 * current mode (AdHoc or Infrastructure) */
5491 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5492 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5493 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5494 network->ssid_len, network->ssid,
5495 network->bssid);
5496 return 0;
5497 }
5498
5499 if (unlikely(roaming)) {
5500 /* If we are roaming, then ensure check if this is a valid
5501 * network to try and roam to */
5502 if ((network->ssid_len != match->network->ssid_len) ||
5503 memcmp(network->ssid, match->network->ssid,
5504 network->ssid_len)) {
5505 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5506 network->ssid_len, network->ssid,
5507 network->bssid);
5508 return 0;
5509 }
5510 } else {
5511 /* If an ESSID has been configured then compare the broadcast
5512 * ESSID to ours */
5513 if ((priv->config & CFG_STATIC_ESSID) &&
5514 ((network->ssid_len != priv->essid_len) ||
5515 memcmp(network->ssid, priv->essid,
5516 min(network->ssid_len, priv->essid_len)))) {
5517 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5518 network->ssid_len, network->ssid,
5519 network->bssid, priv->essid_len,
5520 priv->essid);
5521 return 0;
5522 }
5523 }
5524
5525 /* If the old network rate is better than this one, don't bother
5526 * testing everything else. */
5527
5528 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5529 IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5530 match->network->ssid_len, match->network->ssid);
5531 return 0;
5532 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5533 IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5534 match->network->ssid_len, match->network->ssid);
5535 return 0;
5536 }
5537
5538 /* Now go through and see if the requested network is valid... */
5539 if (priv->ieee->scan_age != 0 &&
5540 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5541 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5542 network->ssid_len, network->ssid,
5543 network->bssid,
5544 jiffies_to_msecs(jiffies -
5545 network->last_scanned));
5546 return 0;
5547 }
5548
5549 if ((priv->config & CFG_STATIC_CHANNEL) &&
5550 (network->channel != priv->channel)) {
5551 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5552 network->ssid_len, network->ssid,
5553 network->bssid,
5554 network->channel, priv->channel);
5555 return 0;
5556 }
5557
5558 /* Verify privacy compatibility */
5559 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5560 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5561 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5562 network->ssid_len, network->ssid,
5563 network->bssid,
5564 priv->
5565 capability & CAP_PRIVACY_ON ? "on" : "off",
5566 network->
5567 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5568 "off");
5569 return 0;
5570 }
5571
5572 if (ether_addr_equal(network->bssid, priv->bssid)) {
5573 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
5574 network->ssid_len, network->ssid,
5575 network->bssid, priv->bssid);
5576 return 0;
5577 }
5578
5579 /* Filter out any incompatible freq / mode combinations */
5580 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5581 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5582 network->ssid_len, network->ssid,
5583 network->bssid);
5584 return 0;
5585 }
5586
5587 /* Ensure that the rates supported by the driver are compatible with
5588 * this AP, including verification of basic rates (mandatory) */
5589 if (!ipw_compatible_rates(priv, network, &rates)) {
5590 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5591 network->ssid_len, network->ssid,
5592 network->bssid);
5593 return 0;
5594 }
5595
5596 if (rates.num_rates == 0) {
5597 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5598 network->ssid_len, network->ssid,
5599 network->bssid);
5600 return 0;
5601 }
5602
5603 /* TODO: Perform any further minimal comparititive tests. We do not
5604 * want to put too much policy logic here; intelligent scan selection
5605 * should occur within a generic IEEE 802.11 user space tool. */
5606
5607 /* Set up 'new' AP to this network */
5608 ipw_copy_rates(&match->rates, &rates);
5609 match->network = network;
5610 IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
5611 network->ssid_len, network->ssid, network->bssid);
5612
5613 return 1;
5614}
5615
5616static void ipw_merge_adhoc_network(struct work_struct *work)
5617{
5618 struct ipw_priv *priv =
5619 container_of(work, struct ipw_priv, merge_networks);
5620 struct libipw_network *network = NULL;
5621 struct ipw_network_match match = {
5622 .network = priv->assoc_network
5623 };
5624
5625 if ((priv->status & STATUS_ASSOCIATED) &&
5626 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5627 /* First pass through ROAM process -- look for a better
5628 * network */
5629 unsigned long flags;
5630
5631 spin_lock_irqsave(&priv->ieee->lock, flags);
5632 list_for_each_entry(network, &priv->ieee->network_list, list) {
5633 if (network != priv->assoc_network)
5634 ipw_find_adhoc_network(priv, &match, network,
5635 1);
5636 }
5637 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5638
5639 if (match.network == priv->assoc_network) {
5640 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5641 "merge to.\n");
5642 return;
5643 }
5644
5645 mutex_lock(&priv->mutex);
5646 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5647 IPW_DEBUG_MERGE("remove network %*pE\n",
5648 priv->essid_len, priv->essid);
5649 ipw_remove_current_network(priv);
5650 }
5651
5652 ipw_disassociate(priv);
5653 priv->assoc_network = match.network;
5654 mutex_unlock(&priv->mutex);
5655 return;
5656 }
5657}
5658
5659static int ipw_best_network(struct ipw_priv *priv,
5660 struct ipw_network_match *match,
5661 struct libipw_network *network, int roaming)
5662{
5663 struct ipw_supported_rates rates;
5664
5665 /* Verify that this network's capability is compatible with the
5666 * current mode (AdHoc or Infrastructure) */
5667 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5668 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5669 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5670 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5671 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5672 network->ssid_len, network->ssid,
5673 network->bssid);
5674 return 0;
5675 }
5676
5677 if (unlikely(roaming)) {
5678 /* If we are roaming, then ensure check if this is a valid
5679 * network to try and roam to */
5680 if ((network->ssid_len != match->network->ssid_len) ||
5681 memcmp(network->ssid, match->network->ssid,
5682 network->ssid_len)) {
5683 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5684 network->ssid_len, network->ssid,
5685 network->bssid);
5686 return 0;
5687 }
5688 } else {
5689 /* If an ESSID has been configured then compare the broadcast
5690 * ESSID to ours */
5691 if ((priv->config & CFG_STATIC_ESSID) &&
5692 ((network->ssid_len != priv->essid_len) ||
5693 memcmp(network->ssid, priv->essid,
5694 min(network->ssid_len, priv->essid_len)))) {
5695 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5696 network->ssid_len, network->ssid,
5697 network->bssid, priv->essid_len,
5698 priv->essid);
5699 return 0;
5700 }
5701 }
5702
5703 /* If the old network rate is better than this one, don't bother
5704 * testing everything else. */
5705 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5706 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
5707 network->ssid_len, network->ssid,
5708 network->bssid, match->network->ssid_len,
5709 match->network->ssid, match->network->bssid);
5710 return 0;
5711 }
5712
5713 /* If this network has already had an association attempt within the
5714 * last 3 seconds, do not try and associate again... */
5715 if (network->last_associate &&
5716 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5717 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
5718 network->ssid_len, network->ssid,
5719 network->bssid,
5720 jiffies_to_msecs(jiffies -
5721 network->last_associate));
5722 return 0;
5723 }
5724
5725 /* Now go through and see if the requested network is valid... */
5726 if (priv->ieee->scan_age != 0 &&
5727 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5728 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5729 network->ssid_len, network->ssid,
5730 network->bssid,
5731 jiffies_to_msecs(jiffies -
5732 network->last_scanned));
5733 return 0;
5734 }
5735
5736 if ((priv->config & CFG_STATIC_CHANNEL) &&
5737 (network->channel != priv->channel)) {
5738 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5739 network->ssid_len, network->ssid,
5740 network->bssid,
5741 network->channel, priv->channel);
5742 return 0;
5743 }
5744
5745 /* Verify privacy compatibility */
5746 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5747 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5748 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5749 network->ssid_len, network->ssid,
5750 network->bssid,
5751 priv->capability & CAP_PRIVACY_ON ? "on" :
5752 "off",
5753 network->capability &
5754 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5755 return 0;
5756 }
5757
5758 if ((priv->config & CFG_STATIC_BSSID) &&
5759 !ether_addr_equal(network->bssid, priv->bssid)) {
5760 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
5761 network->ssid_len, network->ssid,
5762 network->bssid, priv->bssid);
5763 return 0;
5764 }
5765
5766 /* Filter out any incompatible freq / mode combinations */
5767 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5768 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5769 network->ssid_len, network->ssid,
5770 network->bssid);
5771 return 0;
5772 }
5773
5774 /* Filter out invalid channel in current GEO */
5775 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5776 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
5777 network->ssid_len, network->ssid,
5778 network->bssid);
5779 return 0;
5780 }
5781
5782 /* Ensure that the rates supported by the driver are compatible with
5783 * this AP, including verification of basic rates (mandatory) */
5784 if (!ipw_compatible_rates(priv, network, &rates)) {
5785 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5786 network->ssid_len, network->ssid,
5787 network->bssid);
5788 return 0;
5789 }
5790
5791 if (rates.num_rates == 0) {
5792 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5793 network->ssid_len, network->ssid,
5794 network->bssid);
5795 return 0;
5796 }
5797
5798 /* TODO: Perform any further minimal comparititive tests. We do not
5799 * want to put too much policy logic here; intelligent scan selection
5800 * should occur within a generic IEEE 802.11 user space tool. */
5801
5802 /* Set up 'new' AP to this network */
5803 ipw_copy_rates(&match->rates, &rates);
5804 match->network = network;
5805
5806 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
5807 network->ssid_len, network->ssid, network->bssid);
5808
5809 return 1;
5810}
5811
5812static void ipw_adhoc_create(struct ipw_priv *priv,
5813 struct libipw_network *network)
5814{
5815 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5816 int i;
5817
5818 /*
5819 * For the purposes of scanning, we can set our wireless mode
5820 * to trigger scans across combinations of bands, but when it
5821 * comes to creating a new ad-hoc network, we have tell the FW
5822 * exactly which band to use.
5823 *
5824 * We also have the possibility of an invalid channel for the
5825 * chossen band. Attempting to create a new ad-hoc network
5826 * with an invalid channel for wireless mode will trigger a
5827 * FW fatal error.
5828 *
5829 */
5830 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5831 case LIBIPW_52GHZ_BAND:
5832 network->mode = IEEE_A;
5833 i = libipw_channel_to_index(priv->ieee, priv->channel);
5834 BUG_ON(i == -1);
5835 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5836 IPW_WARNING("Overriding invalid channel\n");
5837 priv->channel = geo->a[0].channel;
5838 }
5839 break;
5840
5841 case LIBIPW_24GHZ_BAND:
5842 if (priv->ieee->mode & IEEE_G)
5843 network->mode = IEEE_G;
5844 else
5845 network->mode = IEEE_B;
5846 i = libipw_channel_to_index(priv->ieee, priv->channel);
5847 BUG_ON(i == -1);
5848 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5849 IPW_WARNING("Overriding invalid channel\n");
5850 priv->channel = geo->bg[0].channel;
5851 }
5852 break;
5853
5854 default:
5855 IPW_WARNING("Overriding invalid channel\n");
5856 if (priv->ieee->mode & IEEE_A) {
5857 network->mode = IEEE_A;
5858 priv->channel = geo->a[0].channel;
5859 } else if (priv->ieee->mode & IEEE_G) {
5860 network->mode = IEEE_G;
5861 priv->channel = geo->bg[0].channel;
5862 } else {
5863 network->mode = IEEE_B;
5864 priv->channel = geo->bg[0].channel;
5865 }
5866 break;
5867 }
5868
5869 network->channel = priv->channel;
5870 priv->config |= CFG_ADHOC_PERSIST;
5871 ipw_create_bssid(priv, network->bssid);
5872 network->ssid_len = priv->essid_len;
5873 memcpy(network->ssid, priv->essid, priv->essid_len);
5874 memset(&network->stats, 0, sizeof(network->stats));
5875 network->capability = WLAN_CAPABILITY_IBSS;
5876 if (!(priv->config & CFG_PREAMBLE_LONG))
5877 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5878 if (priv->capability & CAP_PRIVACY_ON)
5879 network->capability |= WLAN_CAPABILITY_PRIVACY;
5880 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5881 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5882 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5883 memcpy(network->rates_ex,
5884 &priv->rates.supported_rates[network->rates_len],
5885 network->rates_ex_len);
5886 network->last_scanned = 0;
5887 network->flags = 0;
5888 network->last_associate = 0;
5889 network->time_stamp[0] = 0;
5890 network->time_stamp[1] = 0;
5891 network->beacon_interval = 100; /* Default */
5892 network->listen_interval = 10; /* Default */
5893 network->atim_window = 0; /* Default */
5894 network->wpa_ie_len = 0;
5895 network->rsn_ie_len = 0;
5896}
5897
5898static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5899{
5900 struct ipw_tgi_tx_key key;
5901
5902 if (!(priv->ieee->sec.flags & (1 << index)))
5903 return;
5904
5905 key.key_id = index;
5906 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5907 key.security_type = type;
5908 key.station_index = 0; /* always 0 for BSS */
5909 key.flags = 0;
5910 /* 0 for new key; previous value of counter (after fatal error) */
5911 key.tx_counter[0] = cpu_to_le32(0);
5912 key.tx_counter[1] = cpu_to_le32(0);
5913
5914 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5915}
5916
5917static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5918{
5919 struct ipw_wep_key key;
5920 int i;
5921
5922 key.cmd_id = DINO_CMD_WEP_KEY;
5923 key.seq_num = 0;
5924
5925 /* Note: AES keys cannot be set for multiple times.
5926 * Only set it at the first time. */
5927 for (i = 0; i < 4; i++) {
5928 key.key_index = i | type;
5929 if (!(priv->ieee->sec.flags & (1 << i))) {
5930 key.key_size = 0;
5931 continue;
5932 }
5933
5934 key.key_size = priv->ieee->sec.key_sizes[i];
5935 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5936
5937 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5938 }
5939}
5940
5941static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5942{
5943 if (priv->ieee->host_encrypt)
5944 return;
5945
5946 switch (level) {
5947 case SEC_LEVEL_3:
5948 priv->sys_config.disable_unicast_decryption = 0;
5949 priv->ieee->host_decrypt = 0;
5950 break;
5951 case SEC_LEVEL_2:
5952 priv->sys_config.disable_unicast_decryption = 1;
5953 priv->ieee->host_decrypt = 1;
5954 break;
5955 case SEC_LEVEL_1:
5956 priv->sys_config.disable_unicast_decryption = 0;
5957 priv->ieee->host_decrypt = 0;
5958 break;
5959 case SEC_LEVEL_0:
5960 priv->sys_config.disable_unicast_decryption = 1;
5961 break;
5962 default:
5963 break;
5964 }
5965}
5966
5967static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5968{
5969 if (priv->ieee->host_encrypt)
5970 return;
5971
5972 switch (level) {
5973 case SEC_LEVEL_3:
5974 priv->sys_config.disable_multicast_decryption = 0;
5975 break;
5976 case SEC_LEVEL_2:
5977 priv->sys_config.disable_multicast_decryption = 1;
5978 break;
5979 case SEC_LEVEL_1:
5980 priv->sys_config.disable_multicast_decryption = 0;
5981 break;
5982 case SEC_LEVEL_0:
5983 priv->sys_config.disable_multicast_decryption = 1;
5984 break;
5985 default:
5986 break;
5987 }
5988}
5989
5990static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5991{
5992 switch (priv->ieee->sec.level) {
5993 case SEC_LEVEL_3:
5994 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5995 ipw_send_tgi_tx_key(priv,
5996 DCT_FLAG_EXT_SECURITY_CCM,
5997 priv->ieee->sec.active_key);
5998
5999 if (!priv->ieee->host_mc_decrypt)
6000 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6001 break;
6002 case SEC_LEVEL_2:
6003 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6004 ipw_send_tgi_tx_key(priv,
6005 DCT_FLAG_EXT_SECURITY_TKIP,
6006 priv->ieee->sec.active_key);
6007 break;
6008 case SEC_LEVEL_1:
6009 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6010 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6011 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6012 break;
6013 case SEC_LEVEL_0:
6014 default:
6015 break;
6016 }
6017}
6018
6019static void ipw_adhoc_check(void *data)
6020{
6021 struct ipw_priv *priv = data;
6022
6023 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6024 !(priv->config & CFG_ADHOC_PERSIST)) {
6025 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6026 IPW_DL_STATE | IPW_DL_ASSOC,
6027 "Missed beacon: %d - disassociate\n",
6028 priv->missed_adhoc_beacons);
6029 ipw_remove_current_network(priv);
6030 ipw_disassociate(priv);
6031 return;
6032 }
6033
6034 schedule_delayed_work(&priv->adhoc_check,
6035 le16_to_cpu(priv->assoc_request.beacon_interval));
6036}
6037
6038static void ipw_bg_adhoc_check(struct work_struct *work)
6039{
6040 struct ipw_priv *priv =
6041 container_of(work, struct ipw_priv, adhoc_check.work);
6042 mutex_lock(&priv->mutex);
6043 ipw_adhoc_check(priv);
6044 mutex_unlock(&priv->mutex);
6045}
6046
6047static void ipw_debug_config(struct ipw_priv *priv)
6048{
6049 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6050 "[CFG 0x%08X]\n", priv->config);
6051 if (priv->config & CFG_STATIC_CHANNEL)
6052 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6053 else
6054 IPW_DEBUG_INFO("Channel unlocked.\n");
6055 if (priv->config & CFG_STATIC_ESSID)
6056 IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6057 priv->essid_len, priv->essid);
6058 else
6059 IPW_DEBUG_INFO("ESSID unlocked.\n");
6060 if (priv->config & CFG_STATIC_BSSID)
6061 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6062 else
6063 IPW_DEBUG_INFO("BSSID unlocked.\n");
6064 if (priv->capability & CAP_PRIVACY_ON)
6065 IPW_DEBUG_INFO("PRIVACY on\n");
6066 else
6067 IPW_DEBUG_INFO("PRIVACY off\n");
6068 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6069}
6070
6071static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6072{
6073 /* TODO: Verify that this works... */
6074 struct ipw_fixed_rate fr;
6075 u32 reg;
6076 u16 mask = 0;
6077 u16 new_tx_rates = priv->rates_mask;
6078
6079 /* Identify 'current FW band' and match it with the fixed
6080 * Tx rates */
6081
6082 switch (priv->ieee->freq_band) {
6083 case LIBIPW_52GHZ_BAND: /* A only */
6084 /* IEEE_A */
6085 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6086 /* Invalid fixed rate mask */
6087 IPW_DEBUG_WX
6088 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6089 new_tx_rates = 0;
6090 break;
6091 }
6092
6093 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6094 break;
6095
6096 default: /* 2.4Ghz or Mixed */
6097 /* IEEE_B */
6098 if (mode == IEEE_B) {
6099 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6100 /* Invalid fixed rate mask */
6101 IPW_DEBUG_WX
6102 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6103 new_tx_rates = 0;
6104 }
6105 break;
6106 }
6107
6108 /* IEEE_G */
6109 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6110 LIBIPW_OFDM_RATES_MASK)) {
6111 /* Invalid fixed rate mask */
6112 IPW_DEBUG_WX
6113 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6114 new_tx_rates = 0;
6115 break;
6116 }
6117
6118 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6119 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6120 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6121 }
6122
6123 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6124 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6125 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6126 }
6127
6128 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6129 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6130 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6131 }
6132
6133 new_tx_rates |= mask;
6134 break;
6135 }
6136
6137 fr.tx_rates = cpu_to_le16(new_tx_rates);
6138
6139 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6140 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6141}
6142
6143static void ipw_abort_scan(struct ipw_priv *priv)
6144{
6145 int err;
6146
6147 if (priv->status & STATUS_SCAN_ABORTING) {
6148 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6149 return;
6150 }
6151 priv->status |= STATUS_SCAN_ABORTING;
6152
6153 err = ipw_send_scan_abort(priv);
6154 if (err)
6155 IPW_DEBUG_HC("Request to abort scan failed.\n");
6156}
6157
6158static void ipw_add_scan_channels(struct ipw_priv *priv,
6159 struct ipw_scan_request_ext *scan,
6160 int scan_type)
6161{
6162 int channel_index = 0;
6163 const struct libipw_geo *geo;
6164 int i;
6165
6166 geo = libipw_get_geo(priv->ieee);
6167
6168 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6169 int start = channel_index;
6170 for (i = 0; i < geo->a_channels; i++) {
6171 if ((priv->status & STATUS_ASSOCIATED) &&
6172 geo->a[i].channel == priv->channel)
6173 continue;
6174 channel_index++;
6175 scan->channels_list[channel_index] = geo->a[i].channel;
6176 ipw_set_scan_type(scan, channel_index,
6177 geo->a[i].
6178 flags & LIBIPW_CH_PASSIVE_ONLY ?
6179 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6180 scan_type);
6181 }
6182
6183 if (start != channel_index) {
6184 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6185 (channel_index - start);
6186 channel_index++;
6187 }
6188 }
6189
6190 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6191 int start = channel_index;
6192 if (priv->config & CFG_SPEED_SCAN) {
6193 int index;
6194 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6195 /* nop out the list */
6196 [0] = 0
6197 };
6198
6199 u8 channel;
6200 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6201 channel =
6202 priv->speed_scan[priv->speed_scan_pos];
6203 if (channel == 0) {
6204 priv->speed_scan_pos = 0;
6205 channel = priv->speed_scan[0];
6206 }
6207 if ((priv->status & STATUS_ASSOCIATED) &&
6208 channel == priv->channel) {
6209 priv->speed_scan_pos++;
6210 continue;
6211 }
6212
6213 /* If this channel has already been
6214 * added in scan, break from loop
6215 * and this will be the first channel
6216 * in the next scan.
6217 */
6218 if (channels[channel - 1] != 0)
6219 break;
6220
6221 channels[channel - 1] = 1;
6222 priv->speed_scan_pos++;
6223 channel_index++;
6224 scan->channels_list[channel_index] = channel;
6225 index =
6226 libipw_channel_to_index(priv->ieee, channel);
6227 ipw_set_scan_type(scan, channel_index,
6228 geo->bg[index].
6229 flags &
6230 LIBIPW_CH_PASSIVE_ONLY ?
6231 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6232 : scan_type);
6233 }
6234 } else {
6235 for (i = 0; i < geo->bg_channels; i++) {
6236 if ((priv->status & STATUS_ASSOCIATED) &&
6237 geo->bg[i].channel == priv->channel)
6238 continue;
6239 channel_index++;
6240 scan->channels_list[channel_index] =
6241 geo->bg[i].channel;
6242 ipw_set_scan_type(scan, channel_index,
6243 geo->bg[i].
6244 flags &
6245 LIBIPW_CH_PASSIVE_ONLY ?
6246 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6247 : scan_type);
6248 }
6249 }
6250
6251 if (start != channel_index) {
6252 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6253 (channel_index - start);
6254 }
6255 }
6256}
6257
6258static int ipw_passive_dwell_time(struct ipw_priv *priv)
6259{
6260 /* staying on passive channels longer than the DTIM interval during a
6261 * scan, while associated, causes the firmware to cancel the scan
6262 * without notification. Hence, don't stay on passive channels longer
6263 * than the beacon interval.
6264 */
6265 if (priv->status & STATUS_ASSOCIATED
6266 && priv->assoc_network->beacon_interval > 10)
6267 return priv->assoc_network->beacon_interval - 10;
6268 else
6269 return 120;
6270}
6271
6272static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6273{
6274 struct ipw_scan_request_ext scan;
6275 int err = 0, scan_type;
6276
6277 if (!(priv->status & STATUS_INIT) ||
6278 (priv->status & STATUS_EXIT_PENDING))
6279 return 0;
6280
6281 mutex_lock(&priv->mutex);
6282
6283 if (direct && (priv->direct_scan_ssid_len == 0)) {
6284 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6285 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6286 goto done;
6287 }
6288
6289 if (priv->status & STATUS_SCANNING) {
6290 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6291 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6292 STATUS_SCAN_PENDING;
6293 goto done;
6294 }
6295
6296 if (!(priv->status & STATUS_SCAN_FORCED) &&
6297 priv->status & STATUS_SCAN_ABORTING) {
6298 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6299 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6300 STATUS_SCAN_PENDING;
6301 goto done;
6302 }
6303
6304 if (priv->status & STATUS_RF_KILL_MASK) {
6305 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6306 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6307 STATUS_SCAN_PENDING;
6308 goto done;
6309 }
6310
6311 memset(&scan, 0, sizeof(scan));
6312 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6313
6314 if (type == IW_SCAN_TYPE_PASSIVE) {
6315 IPW_DEBUG_WX("use passive scanning\n");
6316 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6317 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6318 cpu_to_le16(ipw_passive_dwell_time(priv));
6319 ipw_add_scan_channels(priv, &scan, scan_type);
6320 goto send_request;
6321 }
6322
6323 /* Use active scan by default. */
6324 if (priv->config & CFG_SPEED_SCAN)
6325 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6326 cpu_to_le16(30);
6327 else
6328 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6329 cpu_to_le16(20);
6330
6331 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6332 cpu_to_le16(20);
6333
6334 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6335 cpu_to_le16(ipw_passive_dwell_time(priv));
6336 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6337
6338#ifdef CONFIG_IPW2200_MONITOR
6339 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6340 u8 channel;
6341 u8 band = 0;
6342
6343 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6344 case LIBIPW_52GHZ_BAND:
6345 band = (u8) (IPW_A_MODE << 6) | 1;
6346 channel = priv->channel;
6347 break;
6348
6349 case LIBIPW_24GHZ_BAND:
6350 band = (u8) (IPW_B_MODE << 6) | 1;
6351 channel = priv->channel;
6352 break;
6353
6354 default:
6355 band = (u8) (IPW_B_MODE << 6) | 1;
6356 channel = 9;
6357 break;
6358 }
6359
6360 scan.channels_list[0] = band;
6361 scan.channels_list[1] = channel;
6362 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6363
6364 /* NOTE: The card will sit on this channel for this time
6365 * period. Scan aborts are timing sensitive and frequently
6366 * result in firmware restarts. As such, it is best to
6367 * set a small dwell_time here and just keep re-issuing
6368 * scans. Otherwise fast channel hopping will not actually
6369 * hop channels.
6370 *
6371 * TODO: Move SPEED SCAN support to all modes and bands */
6372 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6373 cpu_to_le16(2000);
6374 } else {
6375#endif /* CONFIG_IPW2200_MONITOR */
6376 /* Honor direct scans first, otherwise if we are roaming make
6377 * this a direct scan for the current network. Finally,
6378 * ensure that every other scan is a fast channel hop scan */
6379 if (direct) {
6380 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6381 priv->direct_scan_ssid_len);
6382 if (err) {
6383 IPW_DEBUG_HC("Attempt to send SSID command "
6384 "failed\n");
6385 goto done;
6386 }
6387
6388 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6389 } else if ((priv->status & STATUS_ROAMING)
6390 || (!(priv->status & STATUS_ASSOCIATED)
6391 && (priv->config & CFG_STATIC_ESSID)
6392 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6393 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6394 if (err) {
6395 IPW_DEBUG_HC("Attempt to send SSID command "
6396 "failed.\n");
6397 goto done;
6398 }
6399
6400 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6401 } else
6402 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6403
6404 ipw_add_scan_channels(priv, &scan, scan_type);
6405#ifdef CONFIG_IPW2200_MONITOR
6406 }
6407#endif
6408
6409send_request:
6410 err = ipw_send_scan_request_ext(priv, &scan);
6411 if (err) {
6412 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6413 goto done;
6414 }
6415
6416 priv->status |= STATUS_SCANNING;
6417 if (direct) {
6418 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6419 priv->direct_scan_ssid_len = 0;
6420 } else
6421 priv->status &= ~STATUS_SCAN_PENDING;
6422
6423 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6424done:
6425 mutex_unlock(&priv->mutex);
6426 return err;
6427}
6428
6429static void ipw_request_passive_scan(struct work_struct *work)
6430{
6431 struct ipw_priv *priv =
6432 container_of(work, struct ipw_priv, request_passive_scan.work);
6433 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6434}
6435
6436static void ipw_request_scan(struct work_struct *work)
6437{
6438 struct ipw_priv *priv =
6439 container_of(work, struct ipw_priv, request_scan.work);
6440 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6441}
6442
6443static void ipw_request_direct_scan(struct work_struct *work)
6444{
6445 struct ipw_priv *priv =
6446 container_of(work, struct ipw_priv, request_direct_scan.work);
6447 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6448}
6449
6450static void ipw_bg_abort_scan(struct work_struct *work)
6451{
6452 struct ipw_priv *priv =
6453 container_of(work, struct ipw_priv, abort_scan);
6454 mutex_lock(&priv->mutex);
6455 ipw_abort_scan(priv);
6456 mutex_unlock(&priv->mutex);
6457}
6458
6459static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6460{
6461 /* This is called when wpa_supplicant loads and closes the driver
6462 * interface. */
6463 priv->ieee->wpa_enabled = value;
6464 return 0;
6465}
6466
6467static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6468{
6469 struct libipw_device *ieee = priv->ieee;
6470 struct libipw_security sec = {
6471 .flags = SEC_AUTH_MODE,
6472 };
6473 int ret = 0;
6474
6475 if (value & IW_AUTH_ALG_SHARED_KEY) {
6476 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6477 ieee->open_wep = 0;
6478 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6479 sec.auth_mode = WLAN_AUTH_OPEN;
6480 ieee->open_wep = 1;
6481 } else if (value & IW_AUTH_ALG_LEAP) {
6482 sec.auth_mode = WLAN_AUTH_LEAP;
6483 ieee->open_wep = 1;
6484 } else
6485 return -EINVAL;
6486
6487 if (ieee->set_security)
6488 ieee->set_security(ieee->dev, &sec);
6489 else
6490 ret = -EOPNOTSUPP;
6491
6492 return ret;
6493}
6494
6495static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6496 int wpa_ie_len)
6497{
6498 /* make sure WPA is enabled */
6499 ipw_wpa_enable(priv, 1);
6500}
6501
6502static int ipw_set_rsn_capa(struct ipw_priv *priv,
6503 char *capabilities, int length)
6504{
6505 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6506
6507 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6508 capabilities);
6509}
6510
6511/*
6512 * WE-18 support
6513 */
6514
6515/* SIOCSIWGENIE */
6516static int ipw_wx_set_genie(struct net_device *dev,
6517 struct iw_request_info *info,
6518 union iwreq_data *wrqu, char *extra)
6519{
6520 struct ipw_priv *priv = libipw_priv(dev);
6521 struct libipw_device *ieee = priv->ieee;
6522 u8 *buf;
6523 int err = 0;
6524
6525 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6526 (wrqu->data.length && extra == NULL))
6527 return -EINVAL;
6528
6529 if (wrqu->data.length) {
6530 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6531 if (buf == NULL) {
6532 err = -ENOMEM;
6533 goto out;
6534 }
6535
6536 kfree(ieee->wpa_ie);
6537 ieee->wpa_ie = buf;
6538 ieee->wpa_ie_len = wrqu->data.length;
6539 } else {
6540 kfree(ieee->wpa_ie);
6541 ieee->wpa_ie = NULL;
6542 ieee->wpa_ie_len = 0;
6543 }
6544
6545 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6546 out:
6547 return err;
6548}
6549
6550/* SIOCGIWGENIE */
6551static int ipw_wx_get_genie(struct net_device *dev,
6552 struct iw_request_info *info,
6553 union iwreq_data *wrqu, char *extra)
6554{
6555 struct ipw_priv *priv = libipw_priv(dev);
6556 struct libipw_device *ieee = priv->ieee;
6557 int err = 0;
6558
6559 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6560 wrqu->data.length = 0;
6561 goto out;
6562 }
6563
6564 if (wrqu->data.length < ieee->wpa_ie_len) {
6565 err = -E2BIG;
6566 goto out;
6567 }
6568
6569 wrqu->data.length = ieee->wpa_ie_len;
6570 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6571
6572 out:
6573 return err;
6574}
6575
6576static int wext_cipher2level(int cipher)
6577{
6578 switch (cipher) {
6579 case IW_AUTH_CIPHER_NONE:
6580 return SEC_LEVEL_0;
6581 case IW_AUTH_CIPHER_WEP40:
6582 case IW_AUTH_CIPHER_WEP104:
6583 return SEC_LEVEL_1;
6584 case IW_AUTH_CIPHER_TKIP:
6585 return SEC_LEVEL_2;
6586 case IW_AUTH_CIPHER_CCMP:
6587 return SEC_LEVEL_3;
6588 default:
6589 return -1;
6590 }
6591}
6592
6593/* SIOCSIWAUTH */
6594static int ipw_wx_set_auth(struct net_device *dev,
6595 struct iw_request_info *info,
6596 union iwreq_data *wrqu, char *extra)
6597{
6598 struct ipw_priv *priv = libipw_priv(dev);
6599 struct libipw_device *ieee = priv->ieee;
6600 struct iw_param *param = &wrqu->param;
6601 struct lib80211_crypt_data *crypt;
6602 unsigned long flags;
6603 int ret = 0;
6604
6605 switch (param->flags & IW_AUTH_INDEX) {
6606 case IW_AUTH_WPA_VERSION:
6607 break;
6608 case IW_AUTH_CIPHER_PAIRWISE:
6609 ipw_set_hw_decrypt_unicast(priv,
6610 wext_cipher2level(param->value));
6611 break;
6612 case IW_AUTH_CIPHER_GROUP:
6613 ipw_set_hw_decrypt_multicast(priv,
6614 wext_cipher2level(param->value));
6615 break;
6616 case IW_AUTH_KEY_MGMT:
6617 /*
6618 * ipw2200 does not use these parameters
6619 */
6620 break;
6621
6622 case IW_AUTH_TKIP_COUNTERMEASURES:
6623 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6624 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6625 break;
6626
6627 flags = crypt->ops->get_flags(crypt->priv);
6628
6629 if (param->value)
6630 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6631 else
6632 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6633
6634 crypt->ops->set_flags(flags, crypt->priv);
6635
6636 break;
6637
6638 case IW_AUTH_DROP_UNENCRYPTED:{
6639 /* HACK:
6640 *
6641 * wpa_supplicant calls set_wpa_enabled when the driver
6642 * is loaded and unloaded, regardless of if WPA is being
6643 * used. No other calls are made which can be used to
6644 * determine if encryption will be used or not prior to
6645 * association being expected. If encryption is not being
6646 * used, drop_unencrypted is set to false, else true -- we
6647 * can use this to determine if the CAP_PRIVACY_ON bit should
6648 * be set.
6649 */
6650 struct libipw_security sec = {
6651 .flags = SEC_ENABLED,
6652 .enabled = param->value,
6653 };
6654 priv->ieee->drop_unencrypted = param->value;
6655 /* We only change SEC_LEVEL for open mode. Others
6656 * are set by ipw_wpa_set_encryption.
6657 */
6658 if (!param->value) {
6659 sec.flags |= SEC_LEVEL;
6660 sec.level = SEC_LEVEL_0;
6661 } else {
6662 sec.flags |= SEC_LEVEL;
6663 sec.level = SEC_LEVEL_1;
6664 }
6665 if (priv->ieee->set_security)
6666 priv->ieee->set_security(priv->ieee->dev, &sec);
6667 break;
6668 }
6669
6670 case IW_AUTH_80211_AUTH_ALG:
6671 ret = ipw_wpa_set_auth_algs(priv, param->value);
6672 break;
6673
6674 case IW_AUTH_WPA_ENABLED:
6675 ret = ipw_wpa_enable(priv, param->value);
6676 ipw_disassociate(priv);
6677 break;
6678
6679 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6680 ieee->ieee802_1x = param->value;
6681 break;
6682
6683 case IW_AUTH_PRIVACY_INVOKED:
6684 ieee->privacy_invoked = param->value;
6685 break;
6686
6687 default:
6688 return -EOPNOTSUPP;
6689 }
6690 return ret;
6691}
6692
6693/* SIOCGIWAUTH */
6694static int ipw_wx_get_auth(struct net_device *dev,
6695 struct iw_request_info *info,
6696 union iwreq_data *wrqu, char *extra)
6697{
6698 struct ipw_priv *priv = libipw_priv(dev);
6699 struct libipw_device *ieee = priv->ieee;
6700 struct lib80211_crypt_data *crypt;
6701 struct iw_param *param = &wrqu->param;
6702
6703 switch (param->flags & IW_AUTH_INDEX) {
6704 case IW_AUTH_WPA_VERSION:
6705 case IW_AUTH_CIPHER_PAIRWISE:
6706 case IW_AUTH_CIPHER_GROUP:
6707 case IW_AUTH_KEY_MGMT:
6708 /*
6709 * wpa_supplicant will control these internally
6710 */
6711 return -EOPNOTSUPP;
6712
6713 case IW_AUTH_TKIP_COUNTERMEASURES:
6714 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6715 if (!crypt || !crypt->ops->get_flags)
6716 break;
6717
6718 param->value = (crypt->ops->get_flags(crypt->priv) &
6719 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6720
6721 break;
6722
6723 case IW_AUTH_DROP_UNENCRYPTED:
6724 param->value = ieee->drop_unencrypted;
6725 break;
6726
6727 case IW_AUTH_80211_AUTH_ALG:
6728 param->value = ieee->sec.auth_mode;
6729 break;
6730
6731 case IW_AUTH_WPA_ENABLED:
6732 param->value = ieee->wpa_enabled;
6733 break;
6734
6735 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6736 param->value = ieee->ieee802_1x;
6737 break;
6738
6739 case IW_AUTH_ROAMING_CONTROL:
6740 case IW_AUTH_PRIVACY_INVOKED:
6741 param->value = ieee->privacy_invoked;
6742 break;
6743
6744 default:
6745 return -EOPNOTSUPP;
6746 }
6747 return 0;
6748}
6749
6750/* SIOCSIWENCODEEXT */
6751static int ipw_wx_set_encodeext(struct net_device *dev,
6752 struct iw_request_info *info,
6753 union iwreq_data *wrqu, char *extra)
6754{
6755 struct ipw_priv *priv = libipw_priv(dev);
6756 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6757
6758 if (hwcrypto) {
6759 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6760 /* IPW HW can't build TKIP MIC,
6761 host decryption still needed */
6762 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6763 priv->ieee->host_mc_decrypt = 1;
6764 else {
6765 priv->ieee->host_encrypt = 0;
6766 priv->ieee->host_encrypt_msdu = 1;
6767 priv->ieee->host_decrypt = 1;
6768 }
6769 } else {
6770 priv->ieee->host_encrypt = 0;
6771 priv->ieee->host_encrypt_msdu = 0;
6772 priv->ieee->host_decrypt = 0;
6773 priv->ieee->host_mc_decrypt = 0;
6774 }
6775 }
6776
6777 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6778}
6779
6780/* SIOCGIWENCODEEXT */
6781static int ipw_wx_get_encodeext(struct net_device *dev,
6782 struct iw_request_info *info,
6783 union iwreq_data *wrqu, char *extra)
6784{
6785 struct ipw_priv *priv = libipw_priv(dev);
6786 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6787}
6788
6789/* SIOCSIWMLME */
6790static int ipw_wx_set_mlme(struct net_device *dev,
6791 struct iw_request_info *info,
6792 union iwreq_data *wrqu, char *extra)
6793{
6794 struct ipw_priv *priv = libipw_priv(dev);
6795 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6796 __le16 reason;
6797
6798 reason = cpu_to_le16(mlme->reason_code);
6799
6800 switch (mlme->cmd) {
6801 case IW_MLME_DEAUTH:
6802 /* silently ignore */
6803 break;
6804
6805 case IW_MLME_DISASSOC:
6806 ipw_disassociate(priv);
6807 break;
6808
6809 default:
6810 return -EOPNOTSUPP;
6811 }
6812 return 0;
6813}
6814
6815#ifdef CONFIG_IPW2200_QOS
6816
6817/* QoS */
6818/*
6819* get the modulation type of the current network or
6820* the card current mode
6821*/
6822static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6823{
6824 u8 mode = 0;
6825
6826 if (priv->status & STATUS_ASSOCIATED) {
6827 unsigned long flags;
6828
6829 spin_lock_irqsave(&priv->ieee->lock, flags);
6830 mode = priv->assoc_network->mode;
6831 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6832 } else {
6833 mode = priv->ieee->mode;
6834 }
6835 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6836 return mode;
6837}
6838
6839/*
6840* Handle management frame beacon and probe response
6841*/
6842static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6843 int active_network,
6844 struct libipw_network *network)
6845{
6846 u32 size = sizeof(struct libipw_qos_parameters);
6847
6848 if (network->capability & WLAN_CAPABILITY_IBSS)
6849 network->qos_data.active = network->qos_data.supported;
6850
6851 if (network->flags & NETWORK_HAS_QOS_MASK) {
6852 if (active_network &&
6853 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6854 network->qos_data.active = network->qos_data.supported;
6855
6856 if ((network->qos_data.active == 1) && (active_network == 1) &&
6857 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6858 (network->qos_data.old_param_count !=
6859 network->qos_data.param_count)) {
6860 network->qos_data.old_param_count =
6861 network->qos_data.param_count;
6862 schedule_work(&priv->qos_activate);
6863 IPW_DEBUG_QOS("QoS parameters change call "
6864 "qos_activate\n");
6865 }
6866 } else {
6867 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6868 memcpy(&network->qos_data.parameters,
6869 &def_parameters_CCK, size);
6870 else
6871 memcpy(&network->qos_data.parameters,
6872 &def_parameters_OFDM, size);
6873
6874 if ((network->qos_data.active == 1) && (active_network == 1)) {
6875 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6876 schedule_work(&priv->qos_activate);
6877 }
6878
6879 network->qos_data.active = 0;
6880 network->qos_data.supported = 0;
6881 }
6882 if ((priv->status & STATUS_ASSOCIATED) &&
6883 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6884 if (!ether_addr_equal(network->bssid, priv->bssid))
6885 if (network->capability & WLAN_CAPABILITY_IBSS)
6886 if ((network->ssid_len ==
6887 priv->assoc_network->ssid_len) &&
6888 !memcmp(network->ssid,
6889 priv->assoc_network->ssid,
6890 network->ssid_len)) {
6891 schedule_work(&priv->merge_networks);
6892 }
6893 }
6894
6895 return 0;
6896}
6897
6898/*
6899* This function set up the firmware to support QoS. It sends
6900* IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6901*/
6902static int ipw_qos_activate(struct ipw_priv *priv,
6903 struct libipw_qos_data *qos_network_data)
6904{
6905 int err;
6906 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6907 struct libipw_qos_parameters *active_one = NULL;
6908 u32 size = sizeof(struct libipw_qos_parameters);
6909 u32 burst_duration;
6910 int i;
6911 u8 type;
6912
6913 type = ipw_qos_current_mode(priv);
6914
6915 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6916 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6917 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6918 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6919
6920 if (qos_network_data == NULL) {
6921 if (type == IEEE_B) {
6922 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6923 active_one = &def_parameters_CCK;
6924 } else
6925 active_one = &def_parameters_OFDM;
6926
6927 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6928 burst_duration = ipw_qos_get_burst_duration(priv);
6929 for (i = 0; i < QOS_QUEUE_NUM; i++)
6930 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6931 cpu_to_le16(burst_duration);
6932 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6933 if (type == IEEE_B) {
6934 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6935 type);
6936 if (priv->qos_data.qos_enable == 0)
6937 active_one = &def_parameters_CCK;
6938 else
6939 active_one = priv->qos_data.def_qos_parm_CCK;
6940 } else {
6941 if (priv->qos_data.qos_enable == 0)
6942 active_one = &def_parameters_OFDM;
6943 else
6944 active_one = priv->qos_data.def_qos_parm_OFDM;
6945 }
6946 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6947 } else {
6948 unsigned long flags;
6949 int active;
6950
6951 spin_lock_irqsave(&priv->ieee->lock, flags);
6952 active_one = &(qos_network_data->parameters);
6953 qos_network_data->old_param_count =
6954 qos_network_data->param_count;
6955 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6956 active = qos_network_data->supported;
6957 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6958
6959 if (active == 0) {
6960 burst_duration = ipw_qos_get_burst_duration(priv);
6961 for (i = 0; i < QOS_QUEUE_NUM; i++)
6962 qos_parameters[QOS_PARAM_SET_ACTIVE].
6963 tx_op_limit[i] = cpu_to_le16(burst_duration);
6964 }
6965 }
6966
6967 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6968 err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
6969 if (err)
6970 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6971
6972 return err;
6973}
6974
6975/*
6976* send IPW_CMD_WME_INFO to the firmware
6977*/
6978static int ipw_qos_set_info_element(struct ipw_priv *priv)
6979{
6980 int ret = 0;
6981 struct libipw_qos_information_element qos_info;
6982
6983 if (priv == NULL)
6984 return -1;
6985
6986 qos_info.elementID = QOS_ELEMENT_ID;
6987 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
6988
6989 qos_info.version = QOS_VERSION_1;
6990 qos_info.ac_info = 0;
6991
6992 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6993 qos_info.qui_type = QOS_OUI_TYPE;
6994 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6995
6996 ret = ipw_send_qos_info_command(priv, &qos_info);
6997 if (ret != 0) {
6998 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6999 }
7000 return ret;
7001}
7002
7003/*
7004* Set the QoS parameter with the association request structure
7005*/
7006static int ipw_qos_association(struct ipw_priv *priv,
7007 struct libipw_network *network)
7008{
7009 int err = 0;
7010 struct libipw_qos_data *qos_data = NULL;
7011 struct libipw_qos_data ibss_data = {
7012 .supported = 1,
7013 .active = 1,
7014 };
7015
7016 switch (priv->ieee->iw_mode) {
7017 case IW_MODE_ADHOC:
7018 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7019
7020 qos_data = &ibss_data;
7021 break;
7022
7023 case IW_MODE_INFRA:
7024 qos_data = &network->qos_data;
7025 break;
7026
7027 default:
7028 BUG();
7029 break;
7030 }
7031
7032 err = ipw_qos_activate(priv, qos_data);
7033 if (err) {
7034 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7035 return err;
7036 }
7037
7038 if (priv->qos_data.qos_enable && qos_data->supported) {
7039 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7040 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7041 return ipw_qos_set_info_element(priv);
7042 }
7043
7044 return 0;
7045}
7046
7047/*
7048* handling the beaconing responses. if we get different QoS setting
7049* off the network from the associated setting, adjust the QoS
7050* setting
7051*/
7052static int ipw_qos_association_resp(struct ipw_priv *priv,
7053 struct libipw_network *network)
7054{
7055 int ret = 0;
7056 unsigned long flags;
7057 u32 size = sizeof(struct libipw_qos_parameters);
7058 int set_qos_param = 0;
7059
7060 if ((priv == NULL) || (network == NULL) ||
7061 (priv->assoc_network == NULL))
7062 return ret;
7063
7064 if (!(priv->status & STATUS_ASSOCIATED))
7065 return ret;
7066
7067 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7068 return ret;
7069
7070 spin_lock_irqsave(&priv->ieee->lock, flags);
7071 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7072 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7073 sizeof(struct libipw_qos_data));
7074 priv->assoc_network->qos_data.active = 1;
7075 if ((network->qos_data.old_param_count !=
7076 network->qos_data.param_count)) {
7077 set_qos_param = 1;
7078 network->qos_data.old_param_count =
7079 network->qos_data.param_count;
7080 }
7081
7082 } else {
7083 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7084 memcpy(&priv->assoc_network->qos_data.parameters,
7085 &def_parameters_CCK, size);
7086 else
7087 memcpy(&priv->assoc_network->qos_data.parameters,
7088 &def_parameters_OFDM, size);
7089 priv->assoc_network->qos_data.active = 0;
7090 priv->assoc_network->qos_data.supported = 0;
7091 set_qos_param = 1;
7092 }
7093
7094 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7095
7096 if (set_qos_param == 1)
7097 schedule_work(&priv->qos_activate);
7098
7099 return ret;
7100}
7101
7102static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7103{
7104 u32 ret = 0;
7105
7106 if (!priv)
7107 return 0;
7108
7109 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7110 ret = priv->qos_data.burst_duration_CCK;
7111 else
7112 ret = priv->qos_data.burst_duration_OFDM;
7113
7114 return ret;
7115}
7116
7117/*
7118* Initialize the setting of QoS global
7119*/
7120static void ipw_qos_init(struct ipw_priv *priv, int enable,
7121 int burst_enable, u32 burst_duration_CCK,
7122 u32 burst_duration_OFDM)
7123{
7124 priv->qos_data.qos_enable = enable;
7125
7126 if (priv->qos_data.qos_enable) {
7127 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7128 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7129 IPW_DEBUG_QOS("QoS is enabled\n");
7130 } else {
7131 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7132 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7133 IPW_DEBUG_QOS("QoS is not enabled\n");
7134 }
7135
7136 priv->qos_data.burst_enable = burst_enable;
7137
7138 if (burst_enable) {
7139 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7140 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7141 } else {
7142 priv->qos_data.burst_duration_CCK = 0;
7143 priv->qos_data.burst_duration_OFDM = 0;
7144 }
7145}
7146
7147/*
7148* map the packet priority to the right TX Queue
7149*/
7150static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7151{
7152 if (priority > 7 || !priv->qos_data.qos_enable)
7153 priority = 0;
7154
7155 return from_priority_to_tx_queue[priority] - 1;
7156}
7157
7158static int ipw_is_qos_active(struct net_device *dev,
7159 struct sk_buff *skb)
7160{
7161 struct ipw_priv *priv = libipw_priv(dev);
7162 struct libipw_qos_data *qos_data = NULL;
7163 int active, supported;
7164 u8 *daddr = skb->data + ETH_ALEN;
7165 int unicast = !is_multicast_ether_addr(daddr);
7166
7167 if (!(priv->status & STATUS_ASSOCIATED))
7168 return 0;
7169
7170 qos_data = &priv->assoc_network->qos_data;
7171
7172 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7173 if (unicast == 0)
7174 qos_data->active = 0;
7175 else
7176 qos_data->active = qos_data->supported;
7177 }
7178 active = qos_data->active;
7179 supported = qos_data->supported;
7180 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7181 "unicast %d\n",
7182 priv->qos_data.qos_enable, active, supported, unicast);
7183 if (active && priv->qos_data.qos_enable)
7184 return 1;
7185
7186 return 0;
7187
7188}
7189/*
7190* add QoS parameter to the TX command
7191*/
7192static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7193 u16 priority,
7194 struct tfd_data *tfd)
7195{
7196 int tx_queue_id = 0;
7197
7198
7199 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7200 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7201
7202 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7203 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7204 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7205 }
7206 return 0;
7207}
7208
7209/*
7210* background support to run QoS activate functionality
7211*/
7212static void ipw_bg_qos_activate(struct work_struct *work)
7213{
7214 struct ipw_priv *priv =
7215 container_of(work, struct ipw_priv, qos_activate);
7216
7217 mutex_lock(&priv->mutex);
7218
7219 if (priv->status & STATUS_ASSOCIATED)
7220 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7221
7222 mutex_unlock(&priv->mutex);
7223}
7224
7225static int ipw_handle_probe_response(struct net_device *dev,
7226 struct libipw_probe_response *resp,
7227 struct libipw_network *network)
7228{
7229 struct ipw_priv *priv = libipw_priv(dev);
7230 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7231 (network == priv->assoc_network));
7232
7233 ipw_qos_handle_probe_response(priv, active_network, network);
7234
7235 return 0;
7236}
7237
7238static int ipw_handle_beacon(struct net_device *dev,
7239 struct libipw_beacon *resp,
7240 struct libipw_network *network)
7241{
7242 struct ipw_priv *priv = libipw_priv(dev);
7243 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7244 (network == priv->assoc_network));
7245
7246 ipw_qos_handle_probe_response(priv, active_network, network);
7247
7248 return 0;
7249}
7250
7251static int ipw_handle_assoc_response(struct net_device *dev,
7252 struct libipw_assoc_response *resp,
7253 struct libipw_network *network)
7254{
7255 struct ipw_priv *priv = libipw_priv(dev);
7256 ipw_qos_association_resp(priv, network);
7257 return 0;
7258}
7259
7260static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7261 *qos_param)
7262{
7263 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7264 sizeof(*qos_param) * 3, qos_param);
7265}
7266
7267static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7268 *qos_param)
7269{
7270 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7271 qos_param);
7272}
7273
7274#endif /* CONFIG_IPW2200_QOS */
7275
7276static int ipw_associate_network(struct ipw_priv *priv,
7277 struct libipw_network *network,
7278 struct ipw_supported_rates *rates, int roaming)
7279{
7280 int err;
7281
7282 if (priv->config & CFG_FIXED_RATE)
7283 ipw_set_fixed_rate(priv, network->mode);
7284
7285 if (!(priv->config & CFG_STATIC_ESSID)) {
7286 priv->essid_len = min(network->ssid_len,
7287 (u8) IW_ESSID_MAX_SIZE);
7288 memcpy(priv->essid, network->ssid, priv->essid_len);
7289 }
7290
7291 network->last_associate = jiffies;
7292
7293 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7294 priv->assoc_request.channel = network->channel;
7295 priv->assoc_request.auth_key = 0;
7296
7297 if ((priv->capability & CAP_PRIVACY_ON) &&
7298 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7299 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7300 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7301
7302 if (priv->ieee->sec.level == SEC_LEVEL_1)
7303 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7304
7305 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7306 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7307 priv->assoc_request.auth_type = AUTH_LEAP;
7308 else
7309 priv->assoc_request.auth_type = AUTH_OPEN;
7310
7311 if (priv->ieee->wpa_ie_len) {
7312 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7313 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7314 priv->ieee->wpa_ie_len);
7315 }
7316
7317 /*
7318 * It is valid for our ieee device to support multiple modes, but
7319 * when it comes to associating to a given network we have to choose
7320 * just one mode.
7321 */
7322 if (network->mode & priv->ieee->mode & IEEE_A)
7323 priv->assoc_request.ieee_mode = IPW_A_MODE;
7324 else if (network->mode & priv->ieee->mode & IEEE_G)
7325 priv->assoc_request.ieee_mode = IPW_G_MODE;
7326 else if (network->mode & priv->ieee->mode & IEEE_B)
7327 priv->assoc_request.ieee_mode = IPW_B_MODE;
7328
7329 priv->assoc_request.capability = cpu_to_le16(network->capability);
7330 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7331 && !(priv->config & CFG_PREAMBLE_LONG)) {
7332 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7333 } else {
7334 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7335
7336 /* Clear the short preamble if we won't be supporting it */
7337 priv->assoc_request.capability &=
7338 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7339 }
7340
7341 /* Clear capability bits that aren't used in Ad Hoc */
7342 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7343 priv->assoc_request.capability &=
7344 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7345
7346 IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7347 roaming ? "Rea" : "A",
7348 priv->essid_len, priv->essid,
7349 network->channel,
7350 ipw_modes[priv->assoc_request.ieee_mode],
7351 rates->num_rates,
7352 (priv->assoc_request.preamble_length ==
7353 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7354 network->capability &
7355 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7356 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7357 priv->capability & CAP_PRIVACY_ON ?
7358 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7359 "(open)") : "",
7360 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7361 priv->capability & CAP_PRIVACY_ON ?
7362 '1' + priv->ieee->sec.active_key : '.',
7363 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7364
7365 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7366 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7367 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7368 priv->assoc_request.assoc_type = HC_IBSS_START;
7369 priv->assoc_request.assoc_tsf_msw = 0;
7370 priv->assoc_request.assoc_tsf_lsw = 0;
7371 } else {
7372 if (unlikely(roaming))
7373 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7374 else
7375 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7376 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7377 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7378 }
7379
7380 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7381
7382 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7383 eth_broadcast_addr(priv->assoc_request.dest);
7384 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7385 } else {
7386 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7387 priv->assoc_request.atim_window = 0;
7388 }
7389
7390 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7391
7392 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7393 if (err) {
7394 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7395 return err;
7396 }
7397
7398 rates->ieee_mode = priv->assoc_request.ieee_mode;
7399 rates->purpose = IPW_RATE_CONNECT;
7400 ipw_send_supported_rates(priv, rates);
7401
7402 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7403 priv->sys_config.dot11g_auto_detection = 1;
7404 else
7405 priv->sys_config.dot11g_auto_detection = 0;
7406
7407 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7408 priv->sys_config.answer_broadcast_ssid_probe = 1;
7409 else
7410 priv->sys_config.answer_broadcast_ssid_probe = 0;
7411
7412 err = ipw_send_system_config(priv);
7413 if (err) {
7414 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7415 return err;
7416 }
7417
7418 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7419 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7420 if (err) {
7421 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7422 return err;
7423 }
7424
7425 /*
7426 * If preemption is enabled, it is possible for the association
7427 * to complete before we return from ipw_send_associate. Therefore
7428 * we have to be sure and update our priviate data first.
7429 */
7430 priv->channel = network->channel;
7431 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7432 priv->status |= STATUS_ASSOCIATING;
7433 priv->status &= ~STATUS_SECURITY_UPDATED;
7434
7435 priv->assoc_network = network;
7436
7437#ifdef CONFIG_IPW2200_QOS
7438 ipw_qos_association(priv, network);
7439#endif
7440
7441 err = ipw_send_associate(priv, &priv->assoc_request);
7442 if (err) {
7443 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7444 return err;
7445 }
7446
7447 IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
7448 priv->essid_len, priv->essid, priv->bssid);
7449
7450 return 0;
7451}
7452
7453static void ipw_roam(void *data)
7454{
7455 struct ipw_priv *priv = data;
7456 struct libipw_network *network = NULL;
7457 struct ipw_network_match match = {
7458 .network = priv->assoc_network
7459 };
7460
7461 /* The roaming process is as follows:
7462 *
7463 * 1. Missed beacon threshold triggers the roaming process by
7464 * setting the status ROAM bit and requesting a scan.
7465 * 2. When the scan completes, it schedules the ROAM work
7466 * 3. The ROAM work looks at all of the known networks for one that
7467 * is a better network than the currently associated. If none
7468 * found, the ROAM process is over (ROAM bit cleared)
7469 * 4. If a better network is found, a disassociation request is
7470 * sent.
7471 * 5. When the disassociation completes, the roam work is again
7472 * scheduled. The second time through, the driver is no longer
7473 * associated, and the newly selected network is sent an
7474 * association request.
7475 * 6. At this point ,the roaming process is complete and the ROAM
7476 * status bit is cleared.
7477 */
7478
7479 /* If we are no longer associated, and the roaming bit is no longer
7480 * set, then we are not actively roaming, so just return */
7481 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7482 return;
7483
7484 if (priv->status & STATUS_ASSOCIATED) {
7485 /* First pass through ROAM process -- look for a better
7486 * network */
7487 unsigned long flags;
7488 u8 rssi = priv->assoc_network->stats.rssi;
7489 priv->assoc_network->stats.rssi = -128;
7490 spin_lock_irqsave(&priv->ieee->lock, flags);
7491 list_for_each_entry(network, &priv->ieee->network_list, list) {
7492 if (network != priv->assoc_network)
7493 ipw_best_network(priv, &match, network, 1);
7494 }
7495 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7496 priv->assoc_network->stats.rssi = rssi;
7497
7498 if (match.network == priv->assoc_network) {
7499 IPW_DEBUG_ASSOC("No better APs in this network to "
7500 "roam to.\n");
7501 priv->status &= ~STATUS_ROAMING;
7502 ipw_debug_config(priv);
7503 return;
7504 }
7505
7506 ipw_send_disassociate(priv, 1);
7507 priv->assoc_network = match.network;
7508
7509 return;
7510 }
7511
7512 /* Second pass through ROAM process -- request association */
7513 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7514 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7515 priv->status &= ~STATUS_ROAMING;
7516}
7517
7518static void ipw_bg_roam(struct work_struct *work)
7519{
7520 struct ipw_priv *priv =
7521 container_of(work, struct ipw_priv, roam);
7522 mutex_lock(&priv->mutex);
7523 ipw_roam(priv);
7524 mutex_unlock(&priv->mutex);
7525}
7526
7527static int ipw_associate(void *data)
7528{
7529 struct ipw_priv *priv = data;
7530
7531 struct libipw_network *network = NULL;
7532 struct ipw_network_match match = {
7533 .network = NULL
7534 };
7535 struct ipw_supported_rates *rates;
7536 struct list_head *element;
7537 unsigned long flags;
7538
7539 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7540 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7541 return 0;
7542 }
7543
7544 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7545 IPW_DEBUG_ASSOC("Not attempting association (already in "
7546 "progress)\n");
7547 return 0;
7548 }
7549
7550 if (priv->status & STATUS_DISASSOCIATING) {
7551 IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n");
7552 schedule_work(&priv->associate);
7553 return 0;
7554 }
7555
7556 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7557 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7558 "initialized)\n");
7559 return 0;
7560 }
7561
7562 if (!(priv->config & CFG_ASSOCIATE) &&
7563 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7564 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7565 return 0;
7566 }
7567
7568 /* Protect our use of the network_list */
7569 spin_lock_irqsave(&priv->ieee->lock, flags);
7570 list_for_each_entry(network, &priv->ieee->network_list, list)
7571 ipw_best_network(priv, &match, network, 0);
7572
7573 network = match.network;
7574 rates = &match.rates;
7575
7576 if (network == NULL &&
7577 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7578 priv->config & CFG_ADHOC_CREATE &&
7579 priv->config & CFG_STATIC_ESSID &&
7580 priv->config & CFG_STATIC_CHANNEL) {
7581 /* Use oldest network if the free list is empty */
7582 if (list_empty(&priv->ieee->network_free_list)) {
7583 struct libipw_network *oldest = NULL;
7584 struct libipw_network *target;
7585
7586 list_for_each_entry(target, &priv->ieee->network_list, list) {
7587 if ((oldest == NULL) ||
7588 (target->last_scanned < oldest->last_scanned))
7589 oldest = target;
7590 }
7591
7592 /* If there are no more slots, expire the oldest */
7593 list_del(&oldest->list);
7594 target = oldest;
7595 IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
7596 target->ssid_len, target->ssid,
7597 target->bssid);
7598 list_add_tail(&target->list,
7599 &priv->ieee->network_free_list);
7600 }
7601
7602 element = priv->ieee->network_free_list.next;
7603 network = list_entry(element, struct libipw_network, list);
7604 ipw_adhoc_create(priv, network);
7605 rates = &priv->rates;
7606 list_del(element);
7607 list_add_tail(&network->list, &priv->ieee->network_list);
7608 }
7609 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7610
7611 /* If we reached the end of the list, then we don't have any valid
7612 * matching APs */
7613 if (!network) {
7614 ipw_debug_config(priv);
7615
7616 if (!(priv->status & STATUS_SCANNING)) {
7617 if (!(priv->config & CFG_SPEED_SCAN))
7618 schedule_delayed_work(&priv->request_scan,
7619 SCAN_INTERVAL);
7620 else
7621 schedule_delayed_work(&priv->request_scan, 0);
7622 }
7623
7624 return 0;
7625 }
7626
7627 ipw_associate_network(priv, network, rates, 0);
7628
7629 return 1;
7630}
7631
7632static void ipw_bg_associate(struct work_struct *work)
7633{
7634 struct ipw_priv *priv =
7635 container_of(work, struct ipw_priv, associate);
7636 mutex_lock(&priv->mutex);
7637 ipw_associate(priv);
7638 mutex_unlock(&priv->mutex);
7639}
7640
7641static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7642 struct sk_buff *skb)
7643{
7644 struct ieee80211_hdr *hdr;
7645 u16 fc;
7646
7647 hdr = (struct ieee80211_hdr *)skb->data;
7648 fc = le16_to_cpu(hdr->frame_control);
7649 if (!(fc & IEEE80211_FCTL_PROTECTED))
7650 return;
7651
7652 fc &= ~IEEE80211_FCTL_PROTECTED;
7653 hdr->frame_control = cpu_to_le16(fc);
7654 switch (priv->ieee->sec.level) {
7655 case SEC_LEVEL_3:
7656 /* Remove CCMP HDR */
7657 memmove(skb->data + LIBIPW_3ADDR_LEN,
7658 skb->data + LIBIPW_3ADDR_LEN + 8,
7659 skb->len - LIBIPW_3ADDR_LEN - 8);
7660 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7661 break;
7662 case SEC_LEVEL_2:
7663 break;
7664 case SEC_LEVEL_1:
7665 /* Remove IV */
7666 memmove(skb->data + LIBIPW_3ADDR_LEN,
7667 skb->data + LIBIPW_3ADDR_LEN + 4,
7668 skb->len - LIBIPW_3ADDR_LEN - 4);
7669 skb_trim(skb, skb->len - 8); /* IV + ICV */
7670 break;
7671 case SEC_LEVEL_0:
7672 break;
7673 default:
7674 printk(KERN_ERR "Unknown security level %d\n",
7675 priv->ieee->sec.level);
7676 break;
7677 }
7678}
7679
7680static void ipw_handle_data_packet(struct ipw_priv *priv,
7681 struct ipw_rx_mem_buffer *rxb,
7682 struct libipw_rx_stats *stats)
7683{
7684 struct net_device *dev = priv->net_dev;
7685 struct libipw_hdr_4addr *hdr;
7686 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7687
7688 /* We received data from the HW, so stop the watchdog */
7689 netif_trans_update(dev);
7690
7691 /* We only process data packets if the
7692 * interface is open */
7693 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7694 skb_tailroom(rxb->skb))) {
7695 dev->stats.rx_errors++;
7696 priv->wstats.discard.misc++;
7697 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7698 return;
7699 } else if (unlikely(!netif_running(priv->net_dev))) {
7700 dev->stats.rx_dropped++;
7701 priv->wstats.discard.misc++;
7702 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7703 return;
7704 }
7705
7706 /* Advance skb->data to the start of the actual payload */
7707 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7708
7709 /* Set the size of the skb to the size of the frame */
7710 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7711
7712 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7713
7714 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7715 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7716 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7717 (is_multicast_ether_addr(hdr->addr1) ?
7718 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7719 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7720
7721 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7722 dev->stats.rx_errors++;
7723 else { /* libipw_rx succeeded, so it now owns the SKB */
7724 rxb->skb = NULL;
7725 __ipw_led_activity_on(priv);
7726 }
7727}
7728
7729#ifdef CONFIG_IPW2200_RADIOTAP
7730static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7731 struct ipw_rx_mem_buffer *rxb,
7732 struct libipw_rx_stats *stats)
7733{
7734 struct net_device *dev = priv->net_dev;
7735 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7736 struct ipw_rx_frame *frame = &pkt->u.frame;
7737
7738 /* initial pull of some data */
7739 u16 received_channel = frame->received_channel;
7740 u8 antennaAndPhy = frame->antennaAndPhy;
7741 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7742 u16 pktrate = frame->rate;
7743
7744 /* Magic struct that slots into the radiotap header -- no reason
7745 * to build this manually element by element, we can write it much
7746 * more efficiently than we can parse it. ORDER MATTERS HERE */
7747 struct ipw_rt_hdr *ipw_rt;
7748
7749 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7750
7751 /* We received data from the HW, so stop the watchdog */
7752 netif_trans_update(dev);
7753
7754 /* We only process data packets if the
7755 * interface is open */
7756 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7757 skb_tailroom(rxb->skb))) {
7758 dev->stats.rx_errors++;
7759 priv->wstats.discard.misc++;
7760 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7761 return;
7762 } else if (unlikely(!netif_running(priv->net_dev))) {
7763 dev->stats.rx_dropped++;
7764 priv->wstats.discard.misc++;
7765 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7766 return;
7767 }
7768
7769 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7770 * that now */
7771 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7772 /* FIXME: Should alloc bigger skb instead */
7773 dev->stats.rx_dropped++;
7774 priv->wstats.discard.misc++;
7775 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7776 return;
7777 }
7778
7779 /* copy the frame itself */
7780 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7781 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7782
7783 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7784
7785 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7786 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7787 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7788
7789 /* Big bitfield of all the fields we provide in radiotap */
7790 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7791 (1 << IEEE80211_RADIOTAP_TSFT) |
7792 (1 << IEEE80211_RADIOTAP_FLAGS) |
7793 (1 << IEEE80211_RADIOTAP_RATE) |
7794 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7795 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7796 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7797 (1 << IEEE80211_RADIOTAP_ANTENNA));
7798
7799 /* Zero the flags, we'll add to them as we go */
7800 ipw_rt->rt_flags = 0;
7801 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7802 frame->parent_tsf[2] << 16 |
7803 frame->parent_tsf[1] << 8 |
7804 frame->parent_tsf[0]);
7805
7806 /* Convert signal to DBM */
7807 ipw_rt->rt_dbmsignal = antsignal;
7808 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7809
7810 /* Convert the channel data and set the flags */
7811 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7812 if (received_channel > 14) { /* 802.11a */
7813 ipw_rt->rt_chbitmask =
7814 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7815 } else if (antennaAndPhy & 32) { /* 802.11b */
7816 ipw_rt->rt_chbitmask =
7817 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7818 } else { /* 802.11g */
7819 ipw_rt->rt_chbitmask =
7820 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7821 }
7822
7823 /* set the rate in multiples of 500k/s */
7824 switch (pktrate) {
7825 case IPW_TX_RATE_1MB:
7826 ipw_rt->rt_rate = 2;
7827 break;
7828 case IPW_TX_RATE_2MB:
7829 ipw_rt->rt_rate = 4;
7830 break;
7831 case IPW_TX_RATE_5MB:
7832 ipw_rt->rt_rate = 10;
7833 break;
7834 case IPW_TX_RATE_6MB:
7835 ipw_rt->rt_rate = 12;
7836 break;
7837 case IPW_TX_RATE_9MB:
7838 ipw_rt->rt_rate = 18;
7839 break;
7840 case IPW_TX_RATE_11MB:
7841 ipw_rt->rt_rate = 22;
7842 break;
7843 case IPW_TX_RATE_12MB:
7844 ipw_rt->rt_rate = 24;
7845 break;
7846 case IPW_TX_RATE_18MB:
7847 ipw_rt->rt_rate = 36;
7848 break;
7849 case IPW_TX_RATE_24MB:
7850 ipw_rt->rt_rate = 48;
7851 break;
7852 case IPW_TX_RATE_36MB:
7853 ipw_rt->rt_rate = 72;
7854 break;
7855 case IPW_TX_RATE_48MB:
7856 ipw_rt->rt_rate = 96;
7857 break;
7858 case IPW_TX_RATE_54MB:
7859 ipw_rt->rt_rate = 108;
7860 break;
7861 default:
7862 ipw_rt->rt_rate = 0;
7863 break;
7864 }
7865
7866 /* antenna number */
7867 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7868
7869 /* set the preamble flag if we have it */
7870 if ((antennaAndPhy & 64))
7871 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7872
7873 /* Set the size of the skb to the size of the frame */
7874 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7875
7876 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7877
7878 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7879 dev->stats.rx_errors++;
7880 else { /* libipw_rx succeeded, so it now owns the SKB */
7881 rxb->skb = NULL;
7882 /* no LED during capture */
7883 }
7884}
7885#endif
7886
7887#ifdef CONFIG_IPW2200_PROMISCUOUS
7888#define libipw_is_probe_response(fc) \
7889 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7890 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7891
7892#define libipw_is_management(fc) \
7893 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7894
7895#define libipw_is_control(fc) \
7896 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7897
7898#define libipw_is_data(fc) \
7899 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7900
7901#define libipw_is_assoc_request(fc) \
7902 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7903
7904#define libipw_is_reassoc_request(fc) \
7905 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7906
7907static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7908 struct ipw_rx_mem_buffer *rxb,
7909 struct libipw_rx_stats *stats)
7910{
7911 struct net_device *dev = priv->prom_net_dev;
7912 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7913 struct ipw_rx_frame *frame = &pkt->u.frame;
7914 struct ipw_rt_hdr *ipw_rt;
7915
7916 /* First cache any information we need before we overwrite
7917 * the information provided in the skb from the hardware */
7918 struct ieee80211_hdr *hdr;
7919 u16 channel = frame->received_channel;
7920 u8 phy_flags = frame->antennaAndPhy;
7921 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7922 s8 noise = (s8) le16_to_cpu(frame->noise);
7923 u8 rate = frame->rate;
7924 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7925 struct sk_buff *skb;
7926 int hdr_only = 0;
7927 u16 filter = priv->prom_priv->filter;
7928
7929 /* If the filter is set to not include Rx frames then return */
7930 if (filter & IPW_PROM_NO_RX)
7931 return;
7932
7933 /* We received data from the HW, so stop the watchdog */
7934 netif_trans_update(dev);
7935
7936 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7937 dev->stats.rx_errors++;
7938 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7939 return;
7940 }
7941
7942 /* We only process data packets if the interface is open */
7943 if (unlikely(!netif_running(dev))) {
7944 dev->stats.rx_dropped++;
7945 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7946 return;
7947 }
7948
7949 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7950 * that now */
7951 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7952 /* FIXME: Should alloc bigger skb instead */
7953 dev->stats.rx_dropped++;
7954 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7955 return;
7956 }
7957
7958 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7959 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7960 if (filter & IPW_PROM_NO_MGMT)
7961 return;
7962 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7963 hdr_only = 1;
7964 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7965 if (filter & IPW_PROM_NO_CTL)
7966 return;
7967 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7968 hdr_only = 1;
7969 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7970 if (filter & IPW_PROM_NO_DATA)
7971 return;
7972 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7973 hdr_only = 1;
7974 }
7975
7976 /* Copy the SKB since this is for the promiscuous side */
7977 skb = skb_copy(rxb->skb, GFP_ATOMIC);
7978 if (skb == NULL) {
7979 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7980 return;
7981 }
7982
7983 /* copy the frame data to write after where the radiotap header goes */
7984 ipw_rt = (void *)skb->data;
7985
7986 if (hdr_only)
7987 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
7988
7989 memcpy(ipw_rt->payload, hdr, len);
7990
7991 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7992 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7993 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
7994
7995 /* Set the size of the skb to the size of the frame */
7996 skb_put(skb, sizeof(*ipw_rt) + len);
7997
7998 /* Big bitfield of all the fields we provide in radiotap */
7999 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8000 (1 << IEEE80211_RADIOTAP_TSFT) |
8001 (1 << IEEE80211_RADIOTAP_FLAGS) |
8002 (1 << IEEE80211_RADIOTAP_RATE) |
8003 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8004 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8005 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8006 (1 << IEEE80211_RADIOTAP_ANTENNA));
8007
8008 /* Zero the flags, we'll add to them as we go */
8009 ipw_rt->rt_flags = 0;
8010 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8011 frame->parent_tsf[2] << 16 |
8012 frame->parent_tsf[1] << 8 |
8013 frame->parent_tsf[0]);
8014
8015 /* Convert to DBM */
8016 ipw_rt->rt_dbmsignal = signal;
8017 ipw_rt->rt_dbmnoise = noise;
8018
8019 /* Convert the channel data and set the flags */
8020 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8021 if (channel > 14) { /* 802.11a */
8022 ipw_rt->rt_chbitmask =
8023 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8024 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8025 ipw_rt->rt_chbitmask =
8026 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8027 } else { /* 802.11g */
8028 ipw_rt->rt_chbitmask =
8029 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8030 }
8031
8032 /* set the rate in multiples of 500k/s */
8033 switch (rate) {
8034 case IPW_TX_RATE_1MB:
8035 ipw_rt->rt_rate = 2;
8036 break;
8037 case IPW_TX_RATE_2MB:
8038 ipw_rt->rt_rate = 4;
8039 break;
8040 case IPW_TX_RATE_5MB:
8041 ipw_rt->rt_rate = 10;
8042 break;
8043 case IPW_TX_RATE_6MB:
8044 ipw_rt->rt_rate = 12;
8045 break;
8046 case IPW_TX_RATE_9MB:
8047 ipw_rt->rt_rate = 18;
8048 break;
8049 case IPW_TX_RATE_11MB:
8050 ipw_rt->rt_rate = 22;
8051 break;
8052 case IPW_TX_RATE_12MB:
8053 ipw_rt->rt_rate = 24;
8054 break;
8055 case IPW_TX_RATE_18MB:
8056 ipw_rt->rt_rate = 36;
8057 break;
8058 case IPW_TX_RATE_24MB:
8059 ipw_rt->rt_rate = 48;
8060 break;
8061 case IPW_TX_RATE_36MB:
8062 ipw_rt->rt_rate = 72;
8063 break;
8064 case IPW_TX_RATE_48MB:
8065 ipw_rt->rt_rate = 96;
8066 break;
8067 case IPW_TX_RATE_54MB:
8068 ipw_rt->rt_rate = 108;
8069 break;
8070 default:
8071 ipw_rt->rt_rate = 0;
8072 break;
8073 }
8074
8075 /* antenna number */
8076 ipw_rt->rt_antenna = (phy_flags & 3);
8077
8078 /* set the preamble flag if we have it */
8079 if (phy_flags & (1 << 6))
8080 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8081
8082 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8083
8084 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8085 dev->stats.rx_errors++;
8086 dev_kfree_skb_any(skb);
8087 }
8088}
8089#endif
8090
8091static int is_network_packet(struct ipw_priv *priv,
8092 struct libipw_hdr_4addr *header)
8093{
8094 /* Filter incoming packets to determine if they are targeted toward
8095 * this network, discarding packets coming from ourselves */
8096 switch (priv->ieee->iw_mode) {
8097 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8098 /* packets from our adapter are dropped (echo) */
8099 if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8100 return 0;
8101
8102 /* {broad,multi}cast packets to our BSSID go through */
8103 if (is_multicast_ether_addr(header->addr1))
8104 return ether_addr_equal(header->addr3, priv->bssid);
8105
8106 /* packets to our adapter go through */
8107 return ether_addr_equal(header->addr1,
8108 priv->net_dev->dev_addr);
8109
8110 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8111 /* packets from our adapter are dropped (echo) */
8112 if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8113 return 0;
8114
8115 /* {broad,multi}cast packets to our BSS go through */
8116 if (is_multicast_ether_addr(header->addr1))
8117 return ether_addr_equal(header->addr2, priv->bssid);
8118
8119 /* packets to our adapter go through */
8120 return ether_addr_equal(header->addr1,
8121 priv->net_dev->dev_addr);
8122 }
8123
8124 return 1;
8125}
8126
8127#define IPW_PACKET_RETRY_TIME HZ
8128
8129static int is_duplicate_packet(struct ipw_priv *priv,
8130 struct libipw_hdr_4addr *header)
8131{
8132 u16 sc = le16_to_cpu(header->seq_ctl);
8133 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8134 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8135 u16 *last_seq, *last_frag;
8136 unsigned long *last_time;
8137
8138 switch (priv->ieee->iw_mode) {
8139 case IW_MODE_ADHOC:
8140 {
8141 struct list_head *p;
8142 struct ipw_ibss_seq *entry = NULL;
8143 u8 *mac = header->addr2;
8144 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8145
8146 list_for_each(p, &priv->ibss_mac_hash[index]) {
8147 entry =
8148 list_entry(p, struct ipw_ibss_seq, list);
8149 if (ether_addr_equal(entry->mac, mac))
8150 break;
8151 }
8152 if (p == &priv->ibss_mac_hash[index]) {
8153 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8154 if (!entry) {
8155 IPW_ERROR
8156 ("Cannot malloc new mac entry\n");
8157 return 0;
8158 }
8159 memcpy(entry->mac, mac, ETH_ALEN);
8160 entry->seq_num = seq;
8161 entry->frag_num = frag;
8162 entry->packet_time = jiffies;
8163 list_add(&entry->list,
8164 &priv->ibss_mac_hash[index]);
8165 return 0;
8166 }
8167 last_seq = &entry->seq_num;
8168 last_frag = &entry->frag_num;
8169 last_time = &entry->packet_time;
8170 break;
8171 }
8172 case IW_MODE_INFRA:
8173 last_seq = &priv->last_seq_num;
8174 last_frag = &priv->last_frag_num;
8175 last_time = &priv->last_packet_time;
8176 break;
8177 default:
8178 return 0;
8179 }
8180 if ((*last_seq == seq) &&
8181 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8182 if (*last_frag == frag)
8183 goto drop;
8184 if (*last_frag + 1 != frag)
8185 /* out-of-order fragment */
8186 goto drop;
8187 } else
8188 *last_seq = seq;
8189
8190 *last_frag = frag;
8191 *last_time = jiffies;
8192 return 0;
8193
8194 drop:
8195 /* Comment this line now since we observed the card receives
8196 * duplicate packets but the FCTL_RETRY bit is not set in the
8197 * IBSS mode with fragmentation enabled.
8198 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8199 return 1;
8200}
8201
8202static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8203 struct ipw_rx_mem_buffer *rxb,
8204 struct libipw_rx_stats *stats)
8205{
8206 struct sk_buff *skb = rxb->skb;
8207 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8208 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8209 (skb->data + IPW_RX_FRAME_SIZE);
8210
8211 libipw_rx_mgt(priv->ieee, header, stats);
8212
8213 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8214 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8215 IEEE80211_STYPE_PROBE_RESP) ||
8216 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8217 IEEE80211_STYPE_BEACON))) {
8218 if (ether_addr_equal(header->addr3, priv->bssid))
8219 ipw_add_station(priv, header->addr2);
8220 }
8221
8222 if (priv->config & CFG_NET_STATS) {
8223 IPW_DEBUG_HC("sending stat packet\n");
8224
8225 /* Set the size of the skb to the size of the full
8226 * ipw header and 802.11 frame */
8227 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8228 IPW_RX_FRAME_SIZE);
8229
8230 /* Advance past the ipw packet header to the 802.11 frame */
8231 skb_pull(skb, IPW_RX_FRAME_SIZE);
8232
8233 /* Push the libipw_rx_stats before the 802.11 frame */
8234 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8235
8236 skb->dev = priv->ieee->dev;
8237
8238 /* Point raw at the libipw_stats */
8239 skb_reset_mac_header(skb);
8240
8241 skb->pkt_type = PACKET_OTHERHOST;
8242 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8243 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8244 netif_rx(skb);
8245 rxb->skb = NULL;
8246 }
8247}
8248
8249/*
8250 * Main entry function for receiving a packet with 80211 headers. This
8251 * should be called when ever the FW has notified us that there is a new
8252 * skb in the receive queue.
8253 */
8254static void ipw_rx(struct ipw_priv *priv)
8255{
8256 struct ipw_rx_mem_buffer *rxb;
8257 struct ipw_rx_packet *pkt;
8258 struct libipw_hdr_4addr *header;
8259 u32 r, w, i;
8260 u8 network_packet;
8261 u8 fill_rx = 0;
8262
8263 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8264 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8265 i = priv->rxq->read;
8266
8267 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8268 fill_rx = 1;
8269
8270 while (i != r) {
8271 rxb = priv->rxq->queue[i];
8272 if (unlikely(rxb == NULL)) {
8273 printk(KERN_CRIT "Queue not allocated!\n");
8274 break;
8275 }
8276 priv->rxq->queue[i] = NULL;
8277
8278 dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
8279 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
8280
8281 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8282 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8283 pkt->header.message_type,
8284 pkt->header.rx_seq_num, pkt->header.control_bits);
8285
8286 switch (pkt->header.message_type) {
8287 case RX_FRAME_TYPE: /* 802.11 frame */ {
8288 struct libipw_rx_stats stats = {
8289 .rssi = pkt->u.frame.rssi_dbm -
8290 IPW_RSSI_TO_DBM,
8291 .signal =
8292 pkt->u.frame.rssi_dbm -
8293 IPW_RSSI_TO_DBM + 0x100,
8294 .noise =
8295 le16_to_cpu(pkt->u.frame.noise),
8296 .rate = pkt->u.frame.rate,
8297 .mac_time = jiffies,
8298 .received_channel =
8299 pkt->u.frame.received_channel,
8300 .freq =
8301 (pkt->u.frame.
8302 control & (1 << 0)) ?
8303 LIBIPW_24GHZ_BAND :
8304 LIBIPW_52GHZ_BAND,
8305 .len = le16_to_cpu(pkt->u.frame.length),
8306 };
8307
8308 if (stats.rssi != 0)
8309 stats.mask |= LIBIPW_STATMASK_RSSI;
8310 if (stats.signal != 0)
8311 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8312 if (stats.noise != 0)
8313 stats.mask |= LIBIPW_STATMASK_NOISE;
8314 if (stats.rate != 0)
8315 stats.mask |= LIBIPW_STATMASK_RATE;
8316
8317 priv->rx_packets++;
8318
8319#ifdef CONFIG_IPW2200_PROMISCUOUS
8320 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8321 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8322#endif
8323
8324#ifdef CONFIG_IPW2200_MONITOR
8325 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8326#ifdef CONFIG_IPW2200_RADIOTAP
8327
8328 ipw_handle_data_packet_monitor(priv,
8329 rxb,
8330 &stats);
8331#else
8332 ipw_handle_data_packet(priv, rxb,
8333 &stats);
8334#endif
8335 break;
8336 }
8337#endif
8338
8339 header =
8340 (struct libipw_hdr_4addr *)(rxb->skb->
8341 data +
8342 IPW_RX_FRAME_SIZE);
8343 /* TODO: Check Ad-Hoc dest/source and make sure
8344 * that we are actually parsing these packets
8345 * correctly -- we should probably use the
8346 * frame control of the packet and disregard
8347 * the current iw_mode */
8348
8349 network_packet =
8350 is_network_packet(priv, header);
8351 if (network_packet && priv->assoc_network) {
8352 priv->assoc_network->stats.rssi =
8353 stats.rssi;
8354 priv->exp_avg_rssi =
8355 exponential_average(priv->exp_avg_rssi,
8356 stats.rssi, DEPTH_RSSI);
8357 }
8358
8359 IPW_DEBUG_RX("Frame: len=%u\n",
8360 le16_to_cpu(pkt->u.frame.length));
8361
8362 if (le16_to_cpu(pkt->u.frame.length) <
8363 libipw_get_hdrlen(le16_to_cpu(
8364 header->frame_ctl))) {
8365 IPW_DEBUG_DROP
8366 ("Received packet is too small. "
8367 "Dropping.\n");
8368 priv->net_dev->stats.rx_errors++;
8369 priv->wstats.discard.misc++;
8370 break;
8371 }
8372
8373 switch (WLAN_FC_GET_TYPE
8374 (le16_to_cpu(header->frame_ctl))) {
8375
8376 case IEEE80211_FTYPE_MGMT:
8377 ipw_handle_mgmt_packet(priv, rxb,
8378 &stats);
8379 break;
8380
8381 case IEEE80211_FTYPE_CTL:
8382 break;
8383
8384 case IEEE80211_FTYPE_DATA:
8385 if (unlikely(!network_packet ||
8386 is_duplicate_packet(priv,
8387 header)))
8388 {
8389 IPW_DEBUG_DROP("Dropping: "
8390 "%pM, "
8391 "%pM, "
8392 "%pM\n",
8393 header->addr1,
8394 header->addr2,
8395 header->addr3);
8396 break;
8397 }
8398
8399 ipw_handle_data_packet(priv, rxb,
8400 &stats);
8401
8402 break;
8403 }
8404 break;
8405 }
8406
8407 case RX_HOST_NOTIFICATION_TYPE:{
8408 IPW_DEBUG_RX
8409 ("Notification: subtype=%02X flags=%02X size=%d\n",
8410 pkt->u.notification.subtype,
8411 pkt->u.notification.flags,
8412 le16_to_cpu(pkt->u.notification.size));
8413 ipw_rx_notification(priv, &pkt->u.notification);
8414 break;
8415 }
8416
8417 default:
8418 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8419 pkt->header.message_type);
8420 break;
8421 }
8422
8423 /* For now we just don't re-use anything. We can tweak this
8424 * later to try and re-use notification packets and SKBs that
8425 * fail to Rx correctly */
8426 if (rxb->skb != NULL) {
8427 dev_kfree_skb_any(rxb->skb);
8428 rxb->skb = NULL;
8429 }
8430
8431 dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
8432 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
8433 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8434
8435 i = (i + 1) % RX_QUEUE_SIZE;
8436
8437 /* If there are a lot of unsued frames, restock the Rx queue
8438 * so the ucode won't assert */
8439 if (fill_rx) {
8440 priv->rxq->read = i;
8441 ipw_rx_queue_replenish(priv);
8442 }
8443 }
8444
8445 /* Backtrack one entry */
8446 priv->rxq->read = i;
8447 ipw_rx_queue_restock(priv);
8448}
8449
8450#define DEFAULT_RTS_THRESHOLD 2304U
8451#define MIN_RTS_THRESHOLD 1U
8452#define MAX_RTS_THRESHOLD 2304U
8453#define DEFAULT_BEACON_INTERVAL 100U
8454#define DEFAULT_SHORT_RETRY_LIMIT 7U
8455#define DEFAULT_LONG_RETRY_LIMIT 4U
8456
8457/**
8458 * ipw_sw_reset
8459 * @option: options to control different reset behaviour
8460 * 0 = reset everything except the 'disable' module_param
8461 * 1 = reset everything and print out driver info (for probe only)
8462 * 2 = reset everything
8463 */
8464static int ipw_sw_reset(struct ipw_priv *priv, int option)
8465{
8466 int band, modulation;
8467 int old_mode = priv->ieee->iw_mode;
8468
8469 /* Initialize module parameter values here */
8470 priv->config = 0;
8471
8472 /* We default to disabling the LED code as right now it causes
8473 * too many systems to lock up... */
8474 if (!led_support)
8475 priv->config |= CFG_NO_LED;
8476
8477 if (associate)
8478 priv->config |= CFG_ASSOCIATE;
8479 else
8480 IPW_DEBUG_INFO("Auto associate disabled.\n");
8481
8482 if (auto_create)
8483 priv->config |= CFG_ADHOC_CREATE;
8484 else
8485 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8486
8487 priv->config &= ~CFG_STATIC_ESSID;
8488 priv->essid_len = 0;
8489 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8490
8491 if (disable && option) {
8492 priv->status |= STATUS_RF_KILL_SW;
8493 IPW_DEBUG_INFO("Radio disabled.\n");
8494 }
8495
8496 if (default_channel != 0) {
8497 priv->config |= CFG_STATIC_CHANNEL;
8498 priv->channel = default_channel;
8499 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8500 /* TODO: Validate that provided channel is in range */
8501 }
8502#ifdef CONFIG_IPW2200_QOS
8503 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8504 burst_duration_CCK, burst_duration_OFDM);
8505#endif /* CONFIG_IPW2200_QOS */
8506
8507 switch (network_mode) {
8508 case 1:
8509 priv->ieee->iw_mode = IW_MODE_ADHOC;
8510 priv->net_dev->type = ARPHRD_ETHER;
8511
8512 break;
8513#ifdef CONFIG_IPW2200_MONITOR
8514 case 2:
8515 priv->ieee->iw_mode = IW_MODE_MONITOR;
8516#ifdef CONFIG_IPW2200_RADIOTAP
8517 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8518#else
8519 priv->net_dev->type = ARPHRD_IEEE80211;
8520#endif
8521 break;
8522#endif
8523 default:
8524 case 0:
8525 priv->net_dev->type = ARPHRD_ETHER;
8526 priv->ieee->iw_mode = IW_MODE_INFRA;
8527 break;
8528 }
8529
8530 if (hwcrypto) {
8531 priv->ieee->host_encrypt = 0;
8532 priv->ieee->host_encrypt_msdu = 0;
8533 priv->ieee->host_decrypt = 0;
8534 priv->ieee->host_mc_decrypt = 0;
8535 }
8536 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8537
8538 /* IPW2200/2915 is abled to do hardware fragmentation. */
8539 priv->ieee->host_open_frag = 0;
8540
8541 if ((priv->pci_dev->device == 0x4223) ||
8542 (priv->pci_dev->device == 0x4224)) {
8543 if (option == 1)
8544 printk(KERN_INFO DRV_NAME
8545 ": Detected Intel PRO/Wireless 2915ABG Network "
8546 "Connection\n");
8547 priv->ieee->abg_true = 1;
8548 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8549 modulation = LIBIPW_OFDM_MODULATION |
8550 LIBIPW_CCK_MODULATION;
8551 priv->adapter = IPW_2915ABG;
8552 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8553 } else {
8554 if (option == 1)
8555 printk(KERN_INFO DRV_NAME
8556 ": Detected Intel PRO/Wireless 2200BG Network "
8557 "Connection\n");
8558
8559 priv->ieee->abg_true = 0;
8560 band = LIBIPW_24GHZ_BAND;
8561 modulation = LIBIPW_OFDM_MODULATION |
8562 LIBIPW_CCK_MODULATION;
8563 priv->adapter = IPW_2200BG;
8564 priv->ieee->mode = IEEE_G | IEEE_B;
8565 }
8566
8567 priv->ieee->freq_band = band;
8568 priv->ieee->modulation = modulation;
8569
8570 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8571
8572 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8573 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8574
8575 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8576 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8577 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8578
8579 /* If power management is turned on, default to AC mode */
8580 priv->power_mode = IPW_POWER_AC;
8581 priv->tx_power = IPW_TX_POWER_DEFAULT;
8582
8583 return old_mode == priv->ieee->iw_mode;
8584}
8585
8586/*
8587 * This file defines the Wireless Extension handlers. It does not
8588 * define any methods of hardware manipulation and relies on the
8589 * functions defined in ipw_main to provide the HW interaction.
8590 *
8591 * The exception to this is the use of the ipw_get_ordinal()
8592 * function used to poll the hardware vs. making unnecessary calls.
8593 *
8594 */
8595
8596static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8597{
8598 if (channel == 0) {
8599 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8600 priv->config &= ~CFG_STATIC_CHANNEL;
8601 IPW_DEBUG_ASSOC("Attempting to associate with new "
8602 "parameters.\n");
8603 ipw_associate(priv);
8604 return 0;
8605 }
8606
8607 priv->config |= CFG_STATIC_CHANNEL;
8608
8609 if (priv->channel == channel) {
8610 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8611 channel);
8612 return 0;
8613 }
8614
8615 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8616 priv->channel = channel;
8617
8618#ifdef CONFIG_IPW2200_MONITOR
8619 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8620 int i;
8621 if (priv->status & STATUS_SCANNING) {
8622 IPW_DEBUG_SCAN("Scan abort triggered due to "
8623 "channel change.\n");
8624 ipw_abort_scan(priv);
8625 }
8626
8627 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8628 udelay(10);
8629
8630 if (priv->status & STATUS_SCANNING)
8631 IPW_DEBUG_SCAN("Still scanning...\n");
8632 else
8633 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8634 1000 - i);
8635
8636 return 0;
8637 }
8638#endif /* CONFIG_IPW2200_MONITOR */
8639
8640 /* Network configuration changed -- force [re]association */
8641 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8642 if (!ipw_disassociate(priv))
8643 ipw_associate(priv);
8644
8645 return 0;
8646}
8647
8648static int ipw_wx_set_freq(struct net_device *dev,
8649 struct iw_request_info *info,
8650 union iwreq_data *wrqu, char *extra)
8651{
8652 struct ipw_priv *priv = libipw_priv(dev);
8653 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8654 struct iw_freq *fwrq = &wrqu->freq;
8655 int ret = 0, i;
8656 u8 channel, flags;
8657 int band;
8658
8659 if (fwrq->m == 0) {
8660 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8661 mutex_lock(&priv->mutex);
8662 ret = ipw_set_channel(priv, 0);
8663 mutex_unlock(&priv->mutex);
8664 return ret;
8665 }
8666 /* if setting by freq convert to channel */
8667 if (fwrq->e == 1) {
8668 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8669 if (channel == 0)
8670 return -EINVAL;
8671 } else
8672 channel = fwrq->m;
8673
8674 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8675 return -EINVAL;
8676
8677 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8678 i = libipw_channel_to_index(priv->ieee, channel);
8679 if (i == -1)
8680 return -EINVAL;
8681
8682 flags = (band == LIBIPW_24GHZ_BAND) ?
8683 geo->bg[i].flags : geo->a[i].flags;
8684 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8685 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8686 return -EINVAL;
8687 }
8688 }
8689
8690 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8691 mutex_lock(&priv->mutex);
8692 ret = ipw_set_channel(priv, channel);
8693 mutex_unlock(&priv->mutex);
8694 return ret;
8695}
8696
8697static int ipw_wx_get_freq(struct net_device *dev,
8698 struct iw_request_info *info,
8699 union iwreq_data *wrqu, char *extra)
8700{
8701 struct ipw_priv *priv = libipw_priv(dev);
8702
8703 wrqu->freq.e = 0;
8704
8705 /* If we are associated, trying to associate, or have a statically
8706 * configured CHANNEL then return that; otherwise return ANY */
8707 mutex_lock(&priv->mutex);
8708 if (priv->config & CFG_STATIC_CHANNEL ||
8709 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8710 int i;
8711
8712 i = libipw_channel_to_index(priv->ieee, priv->channel);
8713 BUG_ON(i == -1);
8714 wrqu->freq.e = 1;
8715
8716 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8717 case LIBIPW_52GHZ_BAND:
8718 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8719 break;
8720
8721 case LIBIPW_24GHZ_BAND:
8722 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8723 break;
8724
8725 default:
8726 BUG();
8727 }
8728 } else
8729 wrqu->freq.m = 0;
8730
8731 mutex_unlock(&priv->mutex);
8732 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8733 return 0;
8734}
8735
8736static int ipw_wx_set_mode(struct net_device *dev,
8737 struct iw_request_info *info,
8738 union iwreq_data *wrqu, char *extra)
8739{
8740 struct ipw_priv *priv = libipw_priv(dev);
8741 int err = 0;
8742
8743 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8744
8745 switch (wrqu->mode) {
8746#ifdef CONFIG_IPW2200_MONITOR
8747 case IW_MODE_MONITOR:
8748#endif
8749 case IW_MODE_ADHOC:
8750 case IW_MODE_INFRA:
8751 break;
8752 case IW_MODE_AUTO:
8753 wrqu->mode = IW_MODE_INFRA;
8754 break;
8755 default:
8756 return -EINVAL;
8757 }
8758 if (wrqu->mode == priv->ieee->iw_mode)
8759 return 0;
8760
8761 mutex_lock(&priv->mutex);
8762
8763 ipw_sw_reset(priv, 0);
8764
8765#ifdef CONFIG_IPW2200_MONITOR
8766 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8767 priv->net_dev->type = ARPHRD_ETHER;
8768
8769 if (wrqu->mode == IW_MODE_MONITOR)
8770#ifdef CONFIG_IPW2200_RADIOTAP
8771 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8772#else
8773 priv->net_dev->type = ARPHRD_IEEE80211;
8774#endif
8775#endif /* CONFIG_IPW2200_MONITOR */
8776
8777 /* Free the existing firmware and reset the fw_loaded
8778 * flag so ipw_load() will bring in the new firmware */
8779 free_firmware();
8780
8781 priv->ieee->iw_mode = wrqu->mode;
8782
8783 schedule_work(&priv->adapter_restart);
8784 mutex_unlock(&priv->mutex);
8785 return err;
8786}
8787
8788static int ipw_wx_get_mode(struct net_device *dev,
8789 struct iw_request_info *info,
8790 union iwreq_data *wrqu, char *extra)
8791{
8792 struct ipw_priv *priv = libipw_priv(dev);
8793 mutex_lock(&priv->mutex);
8794 wrqu->mode = priv->ieee->iw_mode;
8795 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8796 mutex_unlock(&priv->mutex);
8797 return 0;
8798}
8799
8800/* Values are in microsecond */
8801static const s32 timeout_duration[] = {
8802 350000,
8803 250000,
8804 75000,
8805 37000,
8806 25000,
8807};
8808
8809static const s32 period_duration[] = {
8810 400000,
8811 700000,
8812 1000000,
8813 1000000,
8814 1000000
8815};
8816
8817static int ipw_wx_get_range(struct net_device *dev,
8818 struct iw_request_info *info,
8819 union iwreq_data *wrqu, char *extra)
8820{
8821 struct ipw_priv *priv = libipw_priv(dev);
8822 struct iw_range *range = (struct iw_range *)extra;
8823 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8824 int i = 0, j;
8825
8826 wrqu->data.length = sizeof(*range);
8827 memset(range, 0, sizeof(*range));
8828
8829 /* 54Mbs == ~27 Mb/s real (802.11g) */
8830 range->throughput = 27 * 1000 * 1000;
8831
8832 range->max_qual.qual = 100;
8833 /* TODO: Find real max RSSI and stick here */
8834 range->max_qual.level = 0;
8835 range->max_qual.noise = 0;
8836 range->max_qual.updated = 7; /* Updated all three */
8837
8838 range->avg_qual.qual = 70;
8839 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8840 range->avg_qual.level = 0; /* FIXME to real average level */
8841 range->avg_qual.noise = 0;
8842 range->avg_qual.updated = 7; /* Updated all three */
8843 mutex_lock(&priv->mutex);
8844 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8845
8846 for (i = 0; i < range->num_bitrates; i++)
8847 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8848 500000;
8849
8850 range->max_rts = DEFAULT_RTS_THRESHOLD;
8851 range->min_frag = MIN_FRAG_THRESHOLD;
8852 range->max_frag = MAX_FRAG_THRESHOLD;
8853
8854 range->encoding_size[0] = 5;
8855 range->encoding_size[1] = 13;
8856 range->num_encoding_sizes = 2;
8857 range->max_encoding_tokens = WEP_KEYS;
8858
8859 /* Set the Wireless Extension versions */
8860 range->we_version_compiled = WIRELESS_EXT;
8861 range->we_version_source = 18;
8862
8863 i = 0;
8864 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8865 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8866 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8867 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8868 continue;
8869
8870 range->freq[i].i = geo->bg[j].channel;
8871 range->freq[i].m = geo->bg[j].freq * 100000;
8872 range->freq[i].e = 1;
8873 i++;
8874 }
8875 }
8876
8877 if (priv->ieee->mode & IEEE_A) {
8878 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8879 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8880 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8881 continue;
8882
8883 range->freq[i].i = geo->a[j].channel;
8884 range->freq[i].m = geo->a[j].freq * 100000;
8885 range->freq[i].e = 1;
8886 i++;
8887 }
8888 }
8889
8890 range->num_channels = i;
8891 range->num_frequency = i;
8892
8893 mutex_unlock(&priv->mutex);
8894
8895 /* Event capability (kernel + driver) */
8896 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8897 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8898 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8899 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8900 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8901
8902 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8903 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8904
8905 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8906
8907 IPW_DEBUG_WX("GET Range\n");
8908 return 0;
8909}
8910
8911static int ipw_wx_set_wap(struct net_device *dev,
8912 struct iw_request_info *info,
8913 union iwreq_data *wrqu, char *extra)
8914{
8915 struct ipw_priv *priv = libipw_priv(dev);
8916
8917 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8918 return -EINVAL;
8919 mutex_lock(&priv->mutex);
8920 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8921 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8922 /* we disable mandatory BSSID association */
8923 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8924 priv->config &= ~CFG_STATIC_BSSID;
8925 IPW_DEBUG_ASSOC("Attempting to associate with new "
8926 "parameters.\n");
8927 ipw_associate(priv);
8928 mutex_unlock(&priv->mutex);
8929 return 0;
8930 }
8931
8932 priv->config |= CFG_STATIC_BSSID;
8933 if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8934 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8935 mutex_unlock(&priv->mutex);
8936 return 0;
8937 }
8938
8939 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8940 wrqu->ap_addr.sa_data);
8941
8942 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8943
8944 /* Network configuration changed -- force [re]association */
8945 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8946 if (!ipw_disassociate(priv))
8947 ipw_associate(priv);
8948
8949 mutex_unlock(&priv->mutex);
8950 return 0;
8951}
8952
8953static int ipw_wx_get_wap(struct net_device *dev,
8954 struct iw_request_info *info,
8955 union iwreq_data *wrqu, char *extra)
8956{
8957 struct ipw_priv *priv = libipw_priv(dev);
8958
8959 /* If we are associated, trying to associate, or have a statically
8960 * configured BSSID then return that; otherwise return ANY */
8961 mutex_lock(&priv->mutex);
8962 if (priv->config & CFG_STATIC_BSSID ||
8963 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8964 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8965 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8966 } else
8967 eth_zero_addr(wrqu->ap_addr.sa_data);
8968
8969 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8970 wrqu->ap_addr.sa_data);
8971 mutex_unlock(&priv->mutex);
8972 return 0;
8973}
8974
8975static int ipw_wx_set_essid(struct net_device *dev,
8976 struct iw_request_info *info,
8977 union iwreq_data *wrqu, char *extra)
8978{
8979 struct ipw_priv *priv = libipw_priv(dev);
8980 int length;
8981
8982 mutex_lock(&priv->mutex);
8983
8984 if (!wrqu->essid.flags)
8985 {
8986 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8987 ipw_disassociate(priv);
8988 priv->config &= ~CFG_STATIC_ESSID;
8989 ipw_associate(priv);
8990 mutex_unlock(&priv->mutex);
8991 return 0;
8992 }
8993
8994 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8995
8996 priv->config |= CFG_STATIC_ESSID;
8997
8998 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8999 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9000 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9001 mutex_unlock(&priv->mutex);
9002 return 0;
9003 }
9004
9005 IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
9006
9007 priv->essid_len = length;
9008 memcpy(priv->essid, extra, priv->essid_len);
9009
9010 /* Network configuration changed -- force [re]association */
9011 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9012 if (!ipw_disassociate(priv))
9013 ipw_associate(priv);
9014
9015 mutex_unlock(&priv->mutex);
9016 return 0;
9017}
9018
9019static int ipw_wx_get_essid(struct net_device *dev,
9020 struct iw_request_info *info,
9021 union iwreq_data *wrqu, char *extra)
9022{
9023 struct ipw_priv *priv = libipw_priv(dev);
9024
9025 /* If we are associated, trying to associate, or have a statically
9026 * configured ESSID then return that; otherwise return ANY */
9027 mutex_lock(&priv->mutex);
9028 if (priv->config & CFG_STATIC_ESSID ||
9029 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9030 IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9031 priv->essid_len, priv->essid);
9032 memcpy(extra, priv->essid, priv->essid_len);
9033 wrqu->essid.length = priv->essid_len;
9034 wrqu->essid.flags = 1; /* active */
9035 } else {
9036 IPW_DEBUG_WX("Getting essid: ANY\n");
9037 wrqu->essid.length = 0;
9038 wrqu->essid.flags = 0; /* active */
9039 }
9040 mutex_unlock(&priv->mutex);
9041 return 0;
9042}
9043
9044static int ipw_wx_set_nick(struct net_device *dev,
9045 struct iw_request_info *info,
9046 union iwreq_data *wrqu, char *extra)
9047{
9048 struct ipw_priv *priv = libipw_priv(dev);
9049
9050 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9051 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9052 return -E2BIG;
9053 mutex_lock(&priv->mutex);
9054 wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9055 memset(priv->nick, 0, sizeof(priv->nick));
9056 memcpy(priv->nick, extra, wrqu->data.length);
9057 IPW_DEBUG_TRACE("<<\n");
9058 mutex_unlock(&priv->mutex);
9059 return 0;
9060
9061}
9062
9063static int ipw_wx_get_nick(struct net_device *dev,
9064 struct iw_request_info *info,
9065 union iwreq_data *wrqu, char *extra)
9066{
9067 struct ipw_priv *priv = libipw_priv(dev);
9068 IPW_DEBUG_WX("Getting nick\n");
9069 mutex_lock(&priv->mutex);
9070 wrqu->data.length = strlen(priv->nick);
9071 memcpy(extra, priv->nick, wrqu->data.length);
9072 wrqu->data.flags = 1; /* active */
9073 mutex_unlock(&priv->mutex);
9074 return 0;
9075}
9076
9077static int ipw_wx_set_sens(struct net_device *dev,
9078 struct iw_request_info *info,
9079 union iwreq_data *wrqu, char *extra)
9080{
9081 struct ipw_priv *priv = libipw_priv(dev);
9082 int err = 0;
9083
9084 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9085 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9086 mutex_lock(&priv->mutex);
9087
9088 if (wrqu->sens.fixed == 0)
9089 {
9090 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9091 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9092 goto out;
9093 }
9094 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9095 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9096 err = -EINVAL;
9097 goto out;
9098 }
9099
9100 priv->roaming_threshold = wrqu->sens.value;
9101 priv->disassociate_threshold = 3*wrqu->sens.value;
9102 out:
9103 mutex_unlock(&priv->mutex);
9104 return err;
9105}
9106
9107static int ipw_wx_get_sens(struct net_device *dev,
9108 struct iw_request_info *info,
9109 union iwreq_data *wrqu, char *extra)
9110{
9111 struct ipw_priv *priv = libipw_priv(dev);
9112 mutex_lock(&priv->mutex);
9113 wrqu->sens.fixed = 1;
9114 wrqu->sens.value = priv->roaming_threshold;
9115 mutex_unlock(&priv->mutex);
9116
9117 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9118 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9119
9120 return 0;
9121}
9122
9123static int ipw_wx_set_rate(struct net_device *dev,
9124 struct iw_request_info *info,
9125 union iwreq_data *wrqu, char *extra)
9126{
9127 /* TODO: We should use semaphores or locks for access to priv */
9128 struct ipw_priv *priv = libipw_priv(dev);
9129 u32 target_rate = wrqu->bitrate.value;
9130 u32 fixed, mask;
9131
9132 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9133 /* value = X, fixed = 1 means only rate X */
9134 /* value = X, fixed = 0 means all rates lower equal X */
9135
9136 if (target_rate == -1) {
9137 fixed = 0;
9138 mask = LIBIPW_DEFAULT_RATES_MASK;
9139 /* Now we should reassociate */
9140 goto apply;
9141 }
9142
9143 mask = 0;
9144 fixed = wrqu->bitrate.fixed;
9145
9146 if (target_rate == 1000000 || !fixed)
9147 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9148 if (target_rate == 1000000)
9149 goto apply;
9150
9151 if (target_rate == 2000000 || !fixed)
9152 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9153 if (target_rate == 2000000)
9154 goto apply;
9155
9156 if (target_rate == 5500000 || !fixed)
9157 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9158 if (target_rate == 5500000)
9159 goto apply;
9160
9161 if (target_rate == 6000000 || !fixed)
9162 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9163 if (target_rate == 6000000)
9164 goto apply;
9165
9166 if (target_rate == 9000000 || !fixed)
9167 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9168 if (target_rate == 9000000)
9169 goto apply;
9170
9171 if (target_rate == 11000000 || !fixed)
9172 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9173 if (target_rate == 11000000)
9174 goto apply;
9175
9176 if (target_rate == 12000000 || !fixed)
9177 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9178 if (target_rate == 12000000)
9179 goto apply;
9180
9181 if (target_rate == 18000000 || !fixed)
9182 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9183 if (target_rate == 18000000)
9184 goto apply;
9185
9186 if (target_rate == 24000000 || !fixed)
9187 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9188 if (target_rate == 24000000)
9189 goto apply;
9190
9191 if (target_rate == 36000000 || !fixed)
9192 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9193 if (target_rate == 36000000)
9194 goto apply;
9195
9196 if (target_rate == 48000000 || !fixed)
9197 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9198 if (target_rate == 48000000)
9199 goto apply;
9200
9201 if (target_rate == 54000000 || !fixed)
9202 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9203 if (target_rate == 54000000)
9204 goto apply;
9205
9206 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9207 return -EINVAL;
9208
9209 apply:
9210 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9211 mask, fixed ? "fixed" : "sub-rates");
9212 mutex_lock(&priv->mutex);
9213 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9214 priv->config &= ~CFG_FIXED_RATE;
9215 ipw_set_fixed_rate(priv, priv->ieee->mode);
9216 } else
9217 priv->config |= CFG_FIXED_RATE;
9218
9219 if (priv->rates_mask == mask) {
9220 IPW_DEBUG_WX("Mask set to current mask.\n");
9221 mutex_unlock(&priv->mutex);
9222 return 0;
9223 }
9224
9225 priv->rates_mask = mask;
9226
9227 /* Network configuration changed -- force [re]association */
9228 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9229 if (!ipw_disassociate(priv))
9230 ipw_associate(priv);
9231
9232 mutex_unlock(&priv->mutex);
9233 return 0;
9234}
9235
9236static int ipw_wx_get_rate(struct net_device *dev,
9237 struct iw_request_info *info,
9238 union iwreq_data *wrqu, char *extra)
9239{
9240 struct ipw_priv *priv = libipw_priv(dev);
9241 mutex_lock(&priv->mutex);
9242 wrqu->bitrate.value = priv->last_rate;
9243 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9244 mutex_unlock(&priv->mutex);
9245 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9246 return 0;
9247}
9248
9249static int ipw_wx_set_rts(struct net_device *dev,
9250 struct iw_request_info *info,
9251 union iwreq_data *wrqu, char *extra)
9252{
9253 struct ipw_priv *priv = libipw_priv(dev);
9254 mutex_lock(&priv->mutex);
9255 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9256 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9257 else {
9258 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9259 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9260 mutex_unlock(&priv->mutex);
9261 return -EINVAL;
9262 }
9263 priv->rts_threshold = wrqu->rts.value;
9264 }
9265
9266 ipw_send_rts_threshold(priv, priv->rts_threshold);
9267 mutex_unlock(&priv->mutex);
9268 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9269 return 0;
9270}
9271
9272static int ipw_wx_get_rts(struct net_device *dev,
9273 struct iw_request_info *info,
9274 union iwreq_data *wrqu, char *extra)
9275{
9276 struct ipw_priv *priv = libipw_priv(dev);
9277 mutex_lock(&priv->mutex);
9278 wrqu->rts.value = priv->rts_threshold;
9279 wrqu->rts.fixed = 0; /* no auto select */
9280 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9281 mutex_unlock(&priv->mutex);
9282 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9283 return 0;
9284}
9285
9286static int ipw_wx_set_txpow(struct net_device *dev,
9287 struct iw_request_info *info,
9288 union iwreq_data *wrqu, char *extra)
9289{
9290 struct ipw_priv *priv = libipw_priv(dev);
9291 int err = 0;
9292
9293 mutex_lock(&priv->mutex);
9294 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9295 err = -EINPROGRESS;
9296 goto out;
9297 }
9298
9299 if (!wrqu->power.fixed)
9300 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9301
9302 if (wrqu->power.flags != IW_TXPOW_DBM) {
9303 err = -EINVAL;
9304 goto out;
9305 }
9306
9307 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9308 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9309 err = -EINVAL;
9310 goto out;
9311 }
9312
9313 priv->tx_power = wrqu->power.value;
9314 err = ipw_set_tx_power(priv);
9315 out:
9316 mutex_unlock(&priv->mutex);
9317 return err;
9318}
9319
9320static int ipw_wx_get_txpow(struct net_device *dev,
9321 struct iw_request_info *info,
9322 union iwreq_data *wrqu, char *extra)
9323{
9324 struct ipw_priv *priv = libipw_priv(dev);
9325 mutex_lock(&priv->mutex);
9326 wrqu->power.value = priv->tx_power;
9327 wrqu->power.fixed = 1;
9328 wrqu->power.flags = IW_TXPOW_DBM;
9329 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9330 mutex_unlock(&priv->mutex);
9331
9332 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9333 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9334
9335 return 0;
9336}
9337
9338static int ipw_wx_set_frag(struct net_device *dev,
9339 struct iw_request_info *info,
9340 union iwreq_data *wrqu, char *extra)
9341{
9342 struct ipw_priv *priv = libipw_priv(dev);
9343 mutex_lock(&priv->mutex);
9344 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9345 priv->ieee->fts = DEFAULT_FTS;
9346 else {
9347 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9348 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9349 mutex_unlock(&priv->mutex);
9350 return -EINVAL;
9351 }
9352
9353 priv->ieee->fts = wrqu->frag.value & ~0x1;
9354 }
9355
9356 ipw_send_frag_threshold(priv, wrqu->frag.value);
9357 mutex_unlock(&priv->mutex);
9358 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9359 return 0;
9360}
9361
9362static int ipw_wx_get_frag(struct net_device *dev,
9363 struct iw_request_info *info,
9364 union iwreq_data *wrqu, char *extra)
9365{
9366 struct ipw_priv *priv = libipw_priv(dev);
9367 mutex_lock(&priv->mutex);
9368 wrqu->frag.value = priv->ieee->fts;
9369 wrqu->frag.fixed = 0; /* no auto select */
9370 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9371 mutex_unlock(&priv->mutex);
9372 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9373
9374 return 0;
9375}
9376
9377static int ipw_wx_set_retry(struct net_device *dev,
9378 struct iw_request_info *info,
9379 union iwreq_data *wrqu, char *extra)
9380{
9381 struct ipw_priv *priv = libipw_priv(dev);
9382
9383 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9384 return -EINVAL;
9385
9386 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9387 return 0;
9388
9389 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9390 return -EINVAL;
9391
9392 mutex_lock(&priv->mutex);
9393 if (wrqu->retry.flags & IW_RETRY_SHORT)
9394 priv->short_retry_limit = (u8) wrqu->retry.value;
9395 else if (wrqu->retry.flags & IW_RETRY_LONG)
9396 priv->long_retry_limit = (u8) wrqu->retry.value;
9397 else {
9398 priv->short_retry_limit = (u8) wrqu->retry.value;
9399 priv->long_retry_limit = (u8) wrqu->retry.value;
9400 }
9401
9402 ipw_send_retry_limit(priv, priv->short_retry_limit,
9403 priv->long_retry_limit);
9404 mutex_unlock(&priv->mutex);
9405 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9406 priv->short_retry_limit, priv->long_retry_limit);
9407 return 0;
9408}
9409
9410static int ipw_wx_get_retry(struct net_device *dev,
9411 struct iw_request_info *info,
9412 union iwreq_data *wrqu, char *extra)
9413{
9414 struct ipw_priv *priv = libipw_priv(dev);
9415
9416 mutex_lock(&priv->mutex);
9417 wrqu->retry.disabled = 0;
9418
9419 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9420 mutex_unlock(&priv->mutex);
9421 return -EINVAL;
9422 }
9423
9424 if (wrqu->retry.flags & IW_RETRY_LONG) {
9425 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9426 wrqu->retry.value = priv->long_retry_limit;
9427 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9428 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9429 wrqu->retry.value = priv->short_retry_limit;
9430 } else {
9431 wrqu->retry.flags = IW_RETRY_LIMIT;
9432 wrqu->retry.value = priv->short_retry_limit;
9433 }
9434 mutex_unlock(&priv->mutex);
9435
9436 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9437
9438 return 0;
9439}
9440
9441static int ipw_wx_set_scan(struct net_device *dev,
9442 struct iw_request_info *info,
9443 union iwreq_data *wrqu, char *extra)
9444{
9445 struct ipw_priv *priv = libipw_priv(dev);
9446 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9447 struct delayed_work *work = NULL;
9448
9449 mutex_lock(&priv->mutex);
9450
9451 priv->user_requested_scan = 1;
9452
9453 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9454 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9455 int len = min((int)req->essid_len,
9456 (int)sizeof(priv->direct_scan_ssid));
9457 memcpy(priv->direct_scan_ssid, req->essid, len);
9458 priv->direct_scan_ssid_len = len;
9459 work = &priv->request_direct_scan;
9460 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9461 work = &priv->request_passive_scan;
9462 }
9463 } else {
9464 /* Normal active broadcast scan */
9465 work = &priv->request_scan;
9466 }
9467
9468 mutex_unlock(&priv->mutex);
9469
9470 IPW_DEBUG_WX("Start scan\n");
9471
9472 schedule_delayed_work(work, 0);
9473
9474 return 0;
9475}
9476
9477static int ipw_wx_get_scan(struct net_device *dev,
9478 struct iw_request_info *info,
9479 union iwreq_data *wrqu, char *extra)
9480{
9481 struct ipw_priv *priv = libipw_priv(dev);
9482 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9483}
9484
9485static int ipw_wx_set_encode(struct net_device *dev,
9486 struct iw_request_info *info,
9487 union iwreq_data *wrqu, char *key)
9488{
9489 struct ipw_priv *priv = libipw_priv(dev);
9490 int ret;
9491 u32 cap = priv->capability;
9492
9493 mutex_lock(&priv->mutex);
9494 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9495
9496 /* In IBSS mode, we need to notify the firmware to update
9497 * the beacon info after we changed the capability. */
9498 if (cap != priv->capability &&
9499 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9500 priv->status & STATUS_ASSOCIATED)
9501 ipw_disassociate(priv);
9502
9503 mutex_unlock(&priv->mutex);
9504 return ret;
9505}
9506
9507static int ipw_wx_get_encode(struct net_device *dev,
9508 struct iw_request_info *info,
9509 union iwreq_data *wrqu, char *key)
9510{
9511 struct ipw_priv *priv = libipw_priv(dev);
9512 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9513}
9514
9515static int ipw_wx_set_power(struct net_device *dev,
9516 struct iw_request_info *info,
9517 union iwreq_data *wrqu, char *extra)
9518{
9519 struct ipw_priv *priv = libipw_priv(dev);
9520 int err;
9521 mutex_lock(&priv->mutex);
9522 if (wrqu->power.disabled) {
9523 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9524 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9525 if (err) {
9526 IPW_DEBUG_WX("failed setting power mode.\n");
9527 mutex_unlock(&priv->mutex);
9528 return err;
9529 }
9530 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9531 mutex_unlock(&priv->mutex);
9532 return 0;
9533 }
9534
9535 switch (wrqu->power.flags & IW_POWER_MODE) {
9536 case IW_POWER_ON: /* If not specified */
9537 case IW_POWER_MODE: /* If set all mask */
9538 case IW_POWER_ALL_R: /* If explicitly state all */
9539 break;
9540 default: /* Otherwise we don't support it */
9541 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9542 wrqu->power.flags);
9543 mutex_unlock(&priv->mutex);
9544 return -EOPNOTSUPP;
9545 }
9546
9547 /* If the user hasn't specified a power management mode yet, default
9548 * to BATTERY */
9549 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9550 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9551 else
9552 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9553
9554 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9555 if (err) {
9556 IPW_DEBUG_WX("failed setting power mode.\n");
9557 mutex_unlock(&priv->mutex);
9558 return err;
9559 }
9560
9561 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9562 mutex_unlock(&priv->mutex);
9563 return 0;
9564}
9565
9566static int ipw_wx_get_power(struct net_device *dev,
9567 struct iw_request_info *info,
9568 union iwreq_data *wrqu, char *extra)
9569{
9570 struct ipw_priv *priv = libipw_priv(dev);
9571 mutex_lock(&priv->mutex);
9572 if (!(priv->power_mode & IPW_POWER_ENABLED))
9573 wrqu->power.disabled = 1;
9574 else
9575 wrqu->power.disabled = 0;
9576
9577 mutex_unlock(&priv->mutex);
9578 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9579
9580 return 0;
9581}
9582
9583static int ipw_wx_set_powermode(struct net_device *dev,
9584 struct iw_request_info *info,
9585 union iwreq_data *wrqu, char *extra)
9586{
9587 struct ipw_priv *priv = libipw_priv(dev);
9588 int mode = *(int *)extra;
9589 int err;
9590
9591 mutex_lock(&priv->mutex);
9592 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9593 mode = IPW_POWER_AC;
9594
9595 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9596 err = ipw_send_power_mode(priv, mode);
9597 if (err) {
9598 IPW_DEBUG_WX("failed setting power mode.\n");
9599 mutex_unlock(&priv->mutex);
9600 return err;
9601 }
9602 priv->power_mode = IPW_POWER_ENABLED | mode;
9603 }
9604 mutex_unlock(&priv->mutex);
9605 return 0;
9606}
9607
9608#define MAX_WX_STRING 80
9609static int ipw_wx_get_powermode(struct net_device *dev,
9610 struct iw_request_info *info,
9611 union iwreq_data *wrqu, char *extra)
9612{
9613 struct ipw_priv *priv = libipw_priv(dev);
9614 int level = IPW_POWER_LEVEL(priv->power_mode);
9615 char *p = extra;
9616
9617 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9618
9619 switch (level) {
9620 case IPW_POWER_AC:
9621 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9622 break;
9623 case IPW_POWER_BATTERY:
9624 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9625 break;
9626 default:
9627 p += snprintf(p, MAX_WX_STRING - (p - extra),
9628 "(Timeout %dms, Period %dms)",
9629 timeout_duration[level - 1] / 1000,
9630 period_duration[level - 1] / 1000);
9631 }
9632
9633 if (!(priv->power_mode & IPW_POWER_ENABLED))
9634 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9635
9636 wrqu->data.length = p - extra + 1;
9637
9638 return 0;
9639}
9640
9641static int ipw_wx_set_wireless_mode(struct net_device *dev,
9642 struct iw_request_info *info,
9643 union iwreq_data *wrqu, char *extra)
9644{
9645 struct ipw_priv *priv = libipw_priv(dev);
9646 int mode = *(int *)extra;
9647 u8 band = 0, modulation = 0;
9648
9649 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9650 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9651 return -EINVAL;
9652 }
9653 mutex_lock(&priv->mutex);
9654 if (priv->adapter == IPW_2915ABG) {
9655 priv->ieee->abg_true = 1;
9656 if (mode & IEEE_A) {
9657 band |= LIBIPW_52GHZ_BAND;
9658 modulation |= LIBIPW_OFDM_MODULATION;
9659 } else
9660 priv->ieee->abg_true = 0;
9661 } else {
9662 if (mode & IEEE_A) {
9663 IPW_WARNING("Attempt to set 2200BG into "
9664 "802.11a mode\n");
9665 mutex_unlock(&priv->mutex);
9666 return -EINVAL;
9667 }
9668
9669 priv->ieee->abg_true = 0;
9670 }
9671
9672 if (mode & IEEE_B) {
9673 band |= LIBIPW_24GHZ_BAND;
9674 modulation |= LIBIPW_CCK_MODULATION;
9675 } else
9676 priv->ieee->abg_true = 0;
9677
9678 if (mode & IEEE_G) {
9679 band |= LIBIPW_24GHZ_BAND;
9680 modulation |= LIBIPW_OFDM_MODULATION;
9681 } else
9682 priv->ieee->abg_true = 0;
9683
9684 priv->ieee->mode = mode;
9685 priv->ieee->freq_band = band;
9686 priv->ieee->modulation = modulation;
9687 init_supported_rates(priv, &priv->rates);
9688
9689 /* Network configuration changed -- force [re]association */
9690 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9691 if (!ipw_disassociate(priv)) {
9692 ipw_send_supported_rates(priv, &priv->rates);
9693 ipw_associate(priv);
9694 }
9695
9696 /* Update the band LEDs */
9697 ipw_led_band_on(priv);
9698
9699 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9700 mode & IEEE_A ? 'a' : '.',
9701 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9702 mutex_unlock(&priv->mutex);
9703 return 0;
9704}
9705
9706static int ipw_wx_get_wireless_mode(struct net_device *dev,
9707 struct iw_request_info *info,
9708 union iwreq_data *wrqu, char *extra)
9709{
9710 struct ipw_priv *priv = libipw_priv(dev);
9711 mutex_lock(&priv->mutex);
9712 switch (priv->ieee->mode) {
9713 case IEEE_A:
9714 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9715 break;
9716 case IEEE_B:
9717 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9718 break;
9719 case IEEE_A | IEEE_B:
9720 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9721 break;
9722 case IEEE_G:
9723 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9724 break;
9725 case IEEE_A | IEEE_G:
9726 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9727 break;
9728 case IEEE_B | IEEE_G:
9729 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9730 break;
9731 case IEEE_A | IEEE_B | IEEE_G:
9732 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9733 break;
9734 default:
9735 strncpy(extra, "unknown", MAX_WX_STRING);
9736 break;
9737 }
9738 extra[MAX_WX_STRING - 1] = '\0';
9739
9740 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9741
9742 wrqu->data.length = strlen(extra) + 1;
9743 mutex_unlock(&priv->mutex);
9744
9745 return 0;
9746}
9747
9748static int ipw_wx_set_preamble(struct net_device *dev,
9749 struct iw_request_info *info,
9750 union iwreq_data *wrqu, char *extra)
9751{
9752 struct ipw_priv *priv = libipw_priv(dev);
9753 int mode = *(int *)extra;
9754 mutex_lock(&priv->mutex);
9755 /* Switching from SHORT -> LONG requires a disassociation */
9756 if (mode == 1) {
9757 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9758 priv->config |= CFG_PREAMBLE_LONG;
9759
9760 /* Network configuration changed -- force [re]association */
9761 IPW_DEBUG_ASSOC
9762 ("[re]association triggered due to preamble change.\n");
9763 if (!ipw_disassociate(priv))
9764 ipw_associate(priv);
9765 }
9766 goto done;
9767 }
9768
9769 if (mode == 0) {
9770 priv->config &= ~CFG_PREAMBLE_LONG;
9771 goto done;
9772 }
9773 mutex_unlock(&priv->mutex);
9774 return -EINVAL;
9775
9776 done:
9777 mutex_unlock(&priv->mutex);
9778 return 0;
9779}
9780
9781static int ipw_wx_get_preamble(struct net_device *dev,
9782 struct iw_request_info *info,
9783 union iwreq_data *wrqu, char *extra)
9784{
9785 struct ipw_priv *priv = libipw_priv(dev);
9786 mutex_lock(&priv->mutex);
9787 if (priv->config & CFG_PREAMBLE_LONG)
9788 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9789 else
9790 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9791 mutex_unlock(&priv->mutex);
9792 return 0;
9793}
9794
9795#ifdef CONFIG_IPW2200_MONITOR
9796static int ipw_wx_set_monitor(struct net_device *dev,
9797 struct iw_request_info *info,
9798 union iwreq_data *wrqu, char *extra)
9799{
9800 struct ipw_priv *priv = libipw_priv(dev);
9801 int *parms = (int *)extra;
9802 int enable = (parms[0] > 0);
9803 mutex_lock(&priv->mutex);
9804 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9805 if (enable) {
9806 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9807#ifdef CONFIG_IPW2200_RADIOTAP
9808 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9809#else
9810 priv->net_dev->type = ARPHRD_IEEE80211;
9811#endif
9812 schedule_work(&priv->adapter_restart);
9813 }
9814
9815 ipw_set_channel(priv, parms[1]);
9816 } else {
9817 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9818 mutex_unlock(&priv->mutex);
9819 return 0;
9820 }
9821 priv->net_dev->type = ARPHRD_ETHER;
9822 schedule_work(&priv->adapter_restart);
9823 }
9824 mutex_unlock(&priv->mutex);
9825 return 0;
9826}
9827
9828#endif /* CONFIG_IPW2200_MONITOR */
9829
9830static int ipw_wx_reset(struct net_device *dev,
9831 struct iw_request_info *info,
9832 union iwreq_data *wrqu, char *extra)
9833{
9834 struct ipw_priv *priv = libipw_priv(dev);
9835 IPW_DEBUG_WX("RESET\n");
9836 schedule_work(&priv->adapter_restart);
9837 return 0;
9838}
9839
9840static int ipw_wx_sw_reset(struct net_device *dev,
9841 struct iw_request_info *info,
9842 union iwreq_data *wrqu, char *extra)
9843{
9844 struct ipw_priv *priv = libipw_priv(dev);
9845 union iwreq_data wrqu_sec = {
9846 .encoding = {
9847 .flags = IW_ENCODE_DISABLED,
9848 },
9849 };
9850 int ret;
9851
9852 IPW_DEBUG_WX("SW_RESET\n");
9853
9854 mutex_lock(&priv->mutex);
9855
9856 ret = ipw_sw_reset(priv, 2);
9857 if (!ret) {
9858 free_firmware();
9859 ipw_adapter_restart(priv);
9860 }
9861
9862 /* The SW reset bit might have been toggled on by the 'disable'
9863 * module parameter, so take appropriate action */
9864 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9865
9866 mutex_unlock(&priv->mutex);
9867 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9868 mutex_lock(&priv->mutex);
9869
9870 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9871 /* Configuration likely changed -- force [re]association */
9872 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9873 "reset.\n");
9874 if (!ipw_disassociate(priv))
9875 ipw_associate(priv);
9876 }
9877
9878 mutex_unlock(&priv->mutex);
9879
9880 return 0;
9881}
9882
9883/* Rebase the WE IOCTLs to zero for the handler array */
9884static iw_handler ipw_wx_handlers[] = {
9885 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9886 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9887 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9888 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9889 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9890 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9891 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9892 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9893 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9894 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9895 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9896 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
9897 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
9898 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
9899 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
9900 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
9901 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
9902 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
9903 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
9904 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
9905 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
9906 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
9907 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
9908 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
9909 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
9910 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
9911 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
9912 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
9913 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
9914 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
9915 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
9916 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
9917 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
9918 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
9919 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
9920 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
9921 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
9922 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
9923 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
9924 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
9925 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
9926};
9927
9928enum {
9929 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9930 IPW_PRIV_GET_POWER,
9931 IPW_PRIV_SET_MODE,
9932 IPW_PRIV_GET_MODE,
9933 IPW_PRIV_SET_PREAMBLE,
9934 IPW_PRIV_GET_PREAMBLE,
9935 IPW_PRIV_RESET,
9936 IPW_PRIV_SW_RESET,
9937#ifdef CONFIG_IPW2200_MONITOR
9938 IPW_PRIV_SET_MONITOR,
9939#endif
9940};
9941
9942static struct iw_priv_args ipw_priv_args[] = {
9943 {
9944 .cmd = IPW_PRIV_SET_POWER,
9945 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9946 .name = "set_power"},
9947 {
9948 .cmd = IPW_PRIV_GET_POWER,
9949 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9950 .name = "get_power"},
9951 {
9952 .cmd = IPW_PRIV_SET_MODE,
9953 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9954 .name = "set_mode"},
9955 {
9956 .cmd = IPW_PRIV_GET_MODE,
9957 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9958 .name = "get_mode"},
9959 {
9960 .cmd = IPW_PRIV_SET_PREAMBLE,
9961 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9962 .name = "set_preamble"},
9963 {
9964 .cmd = IPW_PRIV_GET_PREAMBLE,
9965 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9966 .name = "get_preamble"},
9967 {
9968 IPW_PRIV_RESET,
9969 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9970 {
9971 IPW_PRIV_SW_RESET,
9972 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9973#ifdef CONFIG_IPW2200_MONITOR
9974 {
9975 IPW_PRIV_SET_MONITOR,
9976 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9977#endif /* CONFIG_IPW2200_MONITOR */
9978};
9979
9980static iw_handler ipw_priv_handler[] = {
9981 ipw_wx_set_powermode,
9982 ipw_wx_get_powermode,
9983 ipw_wx_set_wireless_mode,
9984 ipw_wx_get_wireless_mode,
9985 ipw_wx_set_preamble,
9986 ipw_wx_get_preamble,
9987 ipw_wx_reset,
9988 ipw_wx_sw_reset,
9989#ifdef CONFIG_IPW2200_MONITOR
9990 ipw_wx_set_monitor,
9991#endif
9992};
9993
9994static const struct iw_handler_def ipw_wx_handler_def = {
9995 .standard = ipw_wx_handlers,
9996 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
9997 .num_private = ARRAY_SIZE(ipw_priv_handler),
9998 .num_private_args = ARRAY_SIZE(ipw_priv_args),
9999 .private = ipw_priv_handler,
10000 .private_args = ipw_priv_args,
10001 .get_wireless_stats = ipw_get_wireless_stats,
10002};
10003
10004/*
10005 * Get wireless statistics.
10006 * Called by /proc/net/wireless
10007 * Also called by SIOCGIWSTATS
10008 */
10009static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10010{
10011 struct ipw_priv *priv = libipw_priv(dev);
10012 struct iw_statistics *wstats;
10013
10014 wstats = &priv->wstats;
10015
10016 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10017 * netdev->get_wireless_stats seems to be called before fw is
10018 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10019 * and associated; if not associcated, the values are all meaningless
10020 * anyway, so set them all to NULL and INVALID */
10021 if (!(priv->status & STATUS_ASSOCIATED)) {
10022 wstats->miss.beacon = 0;
10023 wstats->discard.retries = 0;
10024 wstats->qual.qual = 0;
10025 wstats->qual.level = 0;
10026 wstats->qual.noise = 0;
10027 wstats->qual.updated = 7;
10028 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10029 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10030 return wstats;
10031 }
10032
10033 wstats->qual.qual = priv->quality;
10034 wstats->qual.level = priv->exp_avg_rssi;
10035 wstats->qual.noise = priv->exp_avg_noise;
10036 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10037 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10038
10039 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10040 wstats->discard.retries = priv->last_tx_failures;
10041 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10042
10043/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10044 goto fail_get_ordinal;
10045 wstats->discard.retries += tx_retry; */
10046
10047 return wstats;
10048}
10049
10050/* net device stuff */
10051
10052static void init_sys_config(struct ipw_sys_config *sys_config)
10053{
10054 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10055 sys_config->bt_coexistence = 0;
10056 sys_config->answer_broadcast_ssid_probe = 0;
10057 sys_config->accept_all_data_frames = 0;
10058 sys_config->accept_non_directed_frames = 1;
10059 sys_config->exclude_unicast_unencrypted = 0;
10060 sys_config->disable_unicast_decryption = 1;
10061 sys_config->exclude_multicast_unencrypted = 0;
10062 sys_config->disable_multicast_decryption = 1;
10063 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10064 antenna = CFG_SYS_ANTENNA_BOTH;
10065 sys_config->antenna_diversity = antenna;
10066 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10067 sys_config->dot11g_auto_detection = 0;
10068 sys_config->enable_cts_to_self = 0;
10069 sys_config->bt_coexist_collision_thr = 0;
10070 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10071 sys_config->silence_threshold = 0x1e;
10072}
10073
10074static int ipw_net_open(struct net_device *dev)
10075{
10076 IPW_DEBUG_INFO("dev->open\n");
10077 netif_start_queue(dev);
10078 return 0;
10079}
10080
10081static int ipw_net_stop(struct net_device *dev)
10082{
10083 IPW_DEBUG_INFO("dev->close\n");
10084 netif_stop_queue(dev);
10085 return 0;
10086}
10087
10088/*
10089todo:
10090
10091modify to send one tfd per fragment instead of using chunking. otherwise
10092we need to heavily modify the libipw_skb_to_txb.
10093*/
10094
10095static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10096 int pri)
10097{
10098 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10099 txb->fragments[0]->data;
10100 int i = 0;
10101 struct tfd_frame *tfd;
10102#ifdef CONFIG_IPW2200_QOS
10103 int tx_id = ipw_get_tx_queue_number(priv, pri);
10104 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10105#else
10106 struct clx2_tx_queue *txq = &priv->txq[0];
10107#endif
10108 struct clx2_queue *q = &txq->q;
10109 u8 id, hdr_len, unicast;
10110 int fc;
10111
10112 if (!(priv->status & STATUS_ASSOCIATED))
10113 goto drop;
10114
10115 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10116 switch (priv->ieee->iw_mode) {
10117 case IW_MODE_ADHOC:
10118 unicast = !is_multicast_ether_addr(hdr->addr1);
10119 id = ipw_find_station(priv, hdr->addr1);
10120 if (id == IPW_INVALID_STATION) {
10121 id = ipw_add_station(priv, hdr->addr1);
10122 if (id == IPW_INVALID_STATION) {
10123 IPW_WARNING("Attempt to send data to "
10124 "invalid cell: %pM\n",
10125 hdr->addr1);
10126 goto drop;
10127 }
10128 }
10129 break;
10130
10131 case IW_MODE_INFRA:
10132 default:
10133 unicast = !is_multicast_ether_addr(hdr->addr3);
10134 id = 0;
10135 break;
10136 }
10137
10138 tfd = &txq->bd[q->first_empty];
10139 txq->txb[q->first_empty] = txb;
10140 memset(tfd, 0, sizeof(*tfd));
10141 tfd->u.data.station_number = id;
10142
10143 tfd->control_flags.message_type = TX_FRAME_TYPE;
10144 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10145
10146 tfd->u.data.cmd_id = DINO_CMD_TX;
10147 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10148
10149 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10150 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10151 else
10152 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10153
10154 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10155 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10156
10157 fc = le16_to_cpu(hdr->frame_ctl);
10158 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10159
10160 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10161
10162 if (likely(unicast))
10163 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10164
10165 if (txb->encrypted && !priv->ieee->host_encrypt) {
10166 switch (priv->ieee->sec.level) {
10167 case SEC_LEVEL_3:
10168 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10169 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10170 /* XXX: ACK flag must be set for CCMP even if it
10171 * is a multicast/broadcast packet, because CCMP
10172 * group communication encrypted by GTK is
10173 * actually done by the AP. */
10174 if (!unicast)
10175 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10176
10177 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10178 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10179 tfd->u.data.key_index = 0;
10180 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10181 break;
10182 case SEC_LEVEL_2:
10183 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10184 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10185 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10186 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10187 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10188 break;
10189 case SEC_LEVEL_1:
10190 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10191 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10192 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10193 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10194 40)
10195 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10196 else
10197 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10198 break;
10199 case SEC_LEVEL_0:
10200 break;
10201 default:
10202 printk(KERN_ERR "Unknown security level %d\n",
10203 priv->ieee->sec.level);
10204 break;
10205 }
10206 } else
10207 /* No hardware encryption */
10208 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10209
10210#ifdef CONFIG_IPW2200_QOS
10211 if (fc & IEEE80211_STYPE_QOS_DATA)
10212 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10213#endif /* CONFIG_IPW2200_QOS */
10214
10215 /* payload */
10216 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10217 txb->nr_frags));
10218 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10219 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10220 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10221 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10222 i, le32_to_cpu(tfd->u.data.num_chunks),
10223 txb->fragments[i]->len - hdr_len);
10224 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10225 i, tfd->u.data.num_chunks,
10226 txb->fragments[i]->len - hdr_len);
10227 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10228 txb->fragments[i]->len - hdr_len);
10229
10230 tfd->u.data.chunk_ptr[i] =
10231 cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
10232 txb->fragments[i]->data + hdr_len,
10233 txb->fragments[i]->len - hdr_len,
10234 DMA_TO_DEVICE));
10235 tfd->u.data.chunk_len[i] =
10236 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10237 }
10238
10239 if (i != txb->nr_frags) {
10240 struct sk_buff *skb;
10241 u16 remaining_bytes = 0;
10242 int j;
10243
10244 for (j = i; j < txb->nr_frags; j++)
10245 remaining_bytes += txb->fragments[j]->len - hdr_len;
10246
10247 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10248 remaining_bytes);
10249 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10250 if (skb != NULL) {
10251 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10252 for (j = i; j < txb->nr_frags; j++) {
10253 int size = txb->fragments[j]->len - hdr_len;
10254
10255 printk(KERN_INFO "Adding frag %d %d...\n",
10256 j, size);
10257 skb_put_data(skb,
10258 txb->fragments[j]->data + hdr_len,
10259 size);
10260 }
10261 dev_kfree_skb_any(txb->fragments[i]);
10262 txb->fragments[i] = skb;
10263 tfd->u.data.chunk_ptr[i] =
10264 cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
10265 skb->data,
10266 remaining_bytes,
10267 DMA_TO_DEVICE));
10268
10269 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10270 }
10271 }
10272
10273 /* kick DMA */
10274 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10275 ipw_write32(priv, q->reg_w, q->first_empty);
10276
10277 if (ipw_tx_queue_space(q) < q->high_mark)
10278 netif_stop_queue(priv->net_dev);
10279
10280 return NETDEV_TX_OK;
10281
10282 drop:
10283 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10284 libipw_txb_free(txb);
10285 return NETDEV_TX_OK;
10286}
10287
10288static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10289{
10290 struct ipw_priv *priv = libipw_priv(dev);
10291#ifdef CONFIG_IPW2200_QOS
10292 int tx_id = ipw_get_tx_queue_number(priv, pri);
10293 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10294#else
10295 struct clx2_tx_queue *txq = &priv->txq[0];
10296#endif /* CONFIG_IPW2200_QOS */
10297
10298 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10299 return 1;
10300
10301 return 0;
10302}
10303
10304#ifdef CONFIG_IPW2200_PROMISCUOUS
10305static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10306 struct libipw_txb *txb)
10307{
10308 struct libipw_rx_stats dummystats;
10309 struct ieee80211_hdr *hdr;
10310 u8 n;
10311 u16 filter = priv->prom_priv->filter;
10312 int hdr_only = 0;
10313
10314 if (filter & IPW_PROM_NO_TX)
10315 return;
10316
10317 memset(&dummystats, 0, sizeof(dummystats));
10318
10319 /* Filtering of fragment chains is done against the first fragment */
10320 hdr = (void *)txb->fragments[0]->data;
10321 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10322 if (filter & IPW_PROM_NO_MGMT)
10323 return;
10324 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10325 hdr_only = 1;
10326 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10327 if (filter & IPW_PROM_NO_CTL)
10328 return;
10329 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10330 hdr_only = 1;
10331 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10332 if (filter & IPW_PROM_NO_DATA)
10333 return;
10334 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10335 hdr_only = 1;
10336 }
10337
10338 for(n=0; n<txb->nr_frags; ++n) {
10339 struct sk_buff *src = txb->fragments[n];
10340 struct sk_buff *dst;
10341 struct ieee80211_radiotap_header *rt_hdr;
10342 int len;
10343
10344 if (hdr_only) {
10345 hdr = (void *)src->data;
10346 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10347 } else
10348 len = src->len;
10349
10350 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10351 if (!dst)
10352 continue;
10353
10354 rt_hdr = skb_put(dst, sizeof(*rt_hdr));
10355
10356 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10357 rt_hdr->it_pad = 0;
10358 rt_hdr->it_present = 0; /* after all, it's just an idea */
10359 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10360
10361 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10362 ieee80211chan2mhz(priv->channel));
10363 if (priv->channel > 14) /* 802.11a */
10364 *(__le16*)skb_put(dst, sizeof(u16)) =
10365 cpu_to_le16(IEEE80211_CHAN_OFDM |
10366 IEEE80211_CHAN_5GHZ);
10367 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10368 *(__le16*)skb_put(dst, sizeof(u16)) =
10369 cpu_to_le16(IEEE80211_CHAN_CCK |
10370 IEEE80211_CHAN_2GHZ);
10371 else /* 802.11g */
10372 *(__le16*)skb_put(dst, sizeof(u16)) =
10373 cpu_to_le16(IEEE80211_CHAN_OFDM |
10374 IEEE80211_CHAN_2GHZ);
10375
10376 rt_hdr->it_len = cpu_to_le16(dst->len);
10377
10378 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10379
10380 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10381 dev_kfree_skb_any(dst);
10382 }
10383}
10384#endif
10385
10386static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10387 struct net_device *dev, int pri)
10388{
10389 struct ipw_priv *priv = libipw_priv(dev);
10390 unsigned long flags;
10391 netdev_tx_t ret;
10392
10393 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10394 spin_lock_irqsave(&priv->lock, flags);
10395
10396#ifdef CONFIG_IPW2200_PROMISCUOUS
10397 if (rtap_iface && netif_running(priv->prom_net_dev))
10398 ipw_handle_promiscuous_tx(priv, txb);
10399#endif
10400
10401 ret = ipw_tx_skb(priv, txb, pri);
10402 if (ret == NETDEV_TX_OK)
10403 __ipw_led_activity_on(priv);
10404 spin_unlock_irqrestore(&priv->lock, flags);
10405
10406 return ret;
10407}
10408
10409static void ipw_net_set_multicast_list(struct net_device *dev)
10410{
10411
10412}
10413
10414static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10415{
10416 struct ipw_priv *priv = libipw_priv(dev);
10417 struct sockaddr *addr = p;
10418
10419 if (!is_valid_ether_addr(addr->sa_data))
10420 return -EADDRNOTAVAIL;
10421 mutex_lock(&priv->mutex);
10422 priv->config |= CFG_CUSTOM_MAC;
10423 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10424 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10425 priv->net_dev->name, priv->mac_addr);
10426 schedule_work(&priv->adapter_restart);
10427 mutex_unlock(&priv->mutex);
10428 return 0;
10429}
10430
10431static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10432 struct ethtool_drvinfo *info)
10433{
10434 struct ipw_priv *p = libipw_priv(dev);
10435 char vers[64];
10436 char date[32];
10437 u32 len;
10438
10439 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10440 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10441
10442 len = sizeof(vers);
10443 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10444 len = sizeof(date);
10445 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10446
10447 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10448 vers, date);
10449 strlcpy(info->bus_info, pci_name(p->pci_dev),
10450 sizeof(info->bus_info));
10451}
10452
10453static u32 ipw_ethtool_get_link(struct net_device *dev)
10454{
10455 struct ipw_priv *priv = libipw_priv(dev);
10456 return (priv->status & STATUS_ASSOCIATED) != 0;
10457}
10458
10459static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10460{
10461 return IPW_EEPROM_IMAGE_SIZE;
10462}
10463
10464static int ipw_ethtool_get_eeprom(struct net_device *dev,
10465 struct ethtool_eeprom *eeprom, u8 * bytes)
10466{
10467 struct ipw_priv *p = libipw_priv(dev);
10468
10469 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10470 return -EINVAL;
10471 mutex_lock(&p->mutex);
10472 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10473 mutex_unlock(&p->mutex);
10474 return 0;
10475}
10476
10477static int ipw_ethtool_set_eeprom(struct net_device *dev,
10478 struct ethtool_eeprom *eeprom, u8 * bytes)
10479{
10480 struct ipw_priv *p = libipw_priv(dev);
10481 int i;
10482
10483 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10484 return -EINVAL;
10485 mutex_lock(&p->mutex);
10486 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10487 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10488 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10489 mutex_unlock(&p->mutex);
10490 return 0;
10491}
10492
10493static const struct ethtool_ops ipw_ethtool_ops = {
10494 .get_link = ipw_ethtool_get_link,
10495 .get_drvinfo = ipw_ethtool_get_drvinfo,
10496 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10497 .get_eeprom = ipw_ethtool_get_eeprom,
10498 .set_eeprom = ipw_ethtool_set_eeprom,
10499};
10500
10501static irqreturn_t ipw_isr(int irq, void *data)
10502{
10503 struct ipw_priv *priv = data;
10504 u32 inta, inta_mask;
10505
10506 if (!priv)
10507 return IRQ_NONE;
10508
10509 spin_lock(&priv->irq_lock);
10510
10511 if (!(priv->status & STATUS_INT_ENABLED)) {
10512 /* IRQ is disabled */
10513 goto none;
10514 }
10515
10516 inta = ipw_read32(priv, IPW_INTA_RW);
10517 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10518
10519 if (inta == 0xFFFFFFFF) {
10520 /* Hardware disappeared */
10521 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10522 goto none;
10523 }
10524
10525 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10526 /* Shared interrupt */
10527 goto none;
10528 }
10529
10530 /* tell the device to stop sending interrupts */
10531 __ipw_disable_interrupts(priv);
10532
10533 /* ack current interrupts */
10534 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10535 ipw_write32(priv, IPW_INTA_RW, inta);
10536
10537 /* Cache INTA value for our tasklet */
10538 priv->isr_inta = inta;
10539
10540 tasklet_schedule(&priv->irq_tasklet);
10541
10542 spin_unlock(&priv->irq_lock);
10543
10544 return IRQ_HANDLED;
10545 none:
10546 spin_unlock(&priv->irq_lock);
10547 return IRQ_NONE;
10548}
10549
10550static void ipw_rf_kill(void *adapter)
10551{
10552 struct ipw_priv *priv = adapter;
10553 unsigned long flags;
10554
10555 spin_lock_irqsave(&priv->lock, flags);
10556
10557 if (rf_kill_active(priv)) {
10558 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10559 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10560 goto exit_unlock;
10561 }
10562
10563 /* RF Kill is now disabled, so bring the device back up */
10564
10565 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10566 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10567 "device\n");
10568
10569 /* we can not do an adapter restart while inside an irq lock */
10570 schedule_work(&priv->adapter_restart);
10571 } else
10572 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10573 "enabled\n");
10574
10575 exit_unlock:
10576 spin_unlock_irqrestore(&priv->lock, flags);
10577}
10578
10579static void ipw_bg_rf_kill(struct work_struct *work)
10580{
10581 struct ipw_priv *priv =
10582 container_of(work, struct ipw_priv, rf_kill.work);
10583 mutex_lock(&priv->mutex);
10584 ipw_rf_kill(priv);
10585 mutex_unlock(&priv->mutex);
10586}
10587
10588static void ipw_link_up(struct ipw_priv *priv)
10589{
10590 priv->last_seq_num = -1;
10591 priv->last_frag_num = -1;
10592 priv->last_packet_time = 0;
10593
10594 netif_carrier_on(priv->net_dev);
10595
10596 cancel_delayed_work(&priv->request_scan);
10597 cancel_delayed_work(&priv->request_direct_scan);
10598 cancel_delayed_work(&priv->request_passive_scan);
10599 cancel_delayed_work(&priv->scan_event);
10600 ipw_reset_stats(priv);
10601 /* Ensure the rate is updated immediately */
10602 priv->last_rate = ipw_get_current_rate(priv);
10603 ipw_gather_stats(priv);
10604 ipw_led_link_up(priv);
10605 notify_wx_assoc_event(priv);
10606
10607 if (priv->config & CFG_BACKGROUND_SCAN)
10608 schedule_delayed_work(&priv->request_scan, HZ);
10609}
10610
10611static void ipw_bg_link_up(struct work_struct *work)
10612{
10613 struct ipw_priv *priv =
10614 container_of(work, struct ipw_priv, link_up);
10615 mutex_lock(&priv->mutex);
10616 ipw_link_up(priv);
10617 mutex_unlock(&priv->mutex);
10618}
10619
10620static void ipw_link_down(struct ipw_priv *priv)
10621{
10622 ipw_led_link_down(priv);
10623 netif_carrier_off(priv->net_dev);
10624 notify_wx_assoc_event(priv);
10625
10626 /* Cancel any queued work ... */
10627 cancel_delayed_work(&priv->request_scan);
10628 cancel_delayed_work(&priv->request_direct_scan);
10629 cancel_delayed_work(&priv->request_passive_scan);
10630 cancel_delayed_work(&priv->adhoc_check);
10631 cancel_delayed_work(&priv->gather_stats);
10632
10633 ipw_reset_stats(priv);
10634
10635 if (!(priv->status & STATUS_EXIT_PENDING)) {
10636 /* Queue up another scan... */
10637 schedule_delayed_work(&priv->request_scan, 0);
10638 } else
10639 cancel_delayed_work(&priv->scan_event);
10640}
10641
10642static void ipw_bg_link_down(struct work_struct *work)
10643{
10644 struct ipw_priv *priv =
10645 container_of(work, struct ipw_priv, link_down);
10646 mutex_lock(&priv->mutex);
10647 ipw_link_down(priv);
10648 mutex_unlock(&priv->mutex);
10649}
10650
10651static int ipw_setup_deferred_work(struct ipw_priv *priv)
10652{
10653 int ret = 0;
10654
10655 init_waitqueue_head(&priv->wait_command_queue);
10656 init_waitqueue_head(&priv->wait_state);
10657
10658 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10659 INIT_WORK(&priv->associate, ipw_bg_associate);
10660 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10661 INIT_WORK(&priv->system_config, ipw_system_config);
10662 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10663 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10664 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10665 INIT_WORK(&priv->up, ipw_bg_up);
10666 INIT_WORK(&priv->down, ipw_bg_down);
10667 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10668 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10669 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10670 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10671 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10672 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10673 INIT_WORK(&priv->roam, ipw_bg_roam);
10674 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10675 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10676 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10677 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10678 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10679 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10680 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10681
10682#ifdef CONFIG_IPW2200_QOS
10683 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10684#endif /* CONFIG_IPW2200_QOS */
10685
10686 tasklet_init(&priv->irq_tasklet,
10687 ipw_irq_tasklet, (unsigned long)priv);
10688
10689 return ret;
10690}
10691
10692static void shim__set_security(struct net_device *dev,
10693 struct libipw_security *sec)
10694{
10695 struct ipw_priv *priv = libipw_priv(dev);
10696 int i;
10697 for (i = 0; i < 4; i++) {
10698 if (sec->flags & (1 << i)) {
10699 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10700 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10701 if (sec->key_sizes[i] == 0)
10702 priv->ieee->sec.flags &= ~(1 << i);
10703 else {
10704 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10705 sec->key_sizes[i]);
10706 priv->ieee->sec.flags |= (1 << i);
10707 }
10708 priv->status |= STATUS_SECURITY_UPDATED;
10709 } else if (sec->level != SEC_LEVEL_1)
10710 priv->ieee->sec.flags &= ~(1 << i);
10711 }
10712
10713 if (sec->flags & SEC_ACTIVE_KEY) {
10714 priv->ieee->sec.active_key = sec->active_key;
10715 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10716 priv->status |= STATUS_SECURITY_UPDATED;
10717 } else
10718 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10719
10720 if ((sec->flags & SEC_AUTH_MODE) &&
10721 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10722 priv->ieee->sec.auth_mode = sec->auth_mode;
10723 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10724 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10725 priv->capability |= CAP_SHARED_KEY;
10726 else
10727 priv->capability &= ~CAP_SHARED_KEY;
10728 priv->status |= STATUS_SECURITY_UPDATED;
10729 }
10730
10731 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10732 priv->ieee->sec.flags |= SEC_ENABLED;
10733 priv->ieee->sec.enabled = sec->enabled;
10734 priv->status |= STATUS_SECURITY_UPDATED;
10735 if (sec->enabled)
10736 priv->capability |= CAP_PRIVACY_ON;
10737 else
10738 priv->capability &= ~CAP_PRIVACY_ON;
10739 }
10740
10741 if (sec->flags & SEC_ENCRYPT)
10742 priv->ieee->sec.encrypt = sec->encrypt;
10743
10744 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10745 priv->ieee->sec.level = sec->level;
10746 priv->ieee->sec.flags |= SEC_LEVEL;
10747 priv->status |= STATUS_SECURITY_UPDATED;
10748 }
10749
10750 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10751 ipw_set_hwcrypto_keys(priv);
10752
10753 /* To match current functionality of ipw2100 (which works well w/
10754 * various supplicants, we don't force a disassociate if the
10755 * privacy capability changes ... */
10756#if 0
10757 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10758 (((priv->assoc_request.capability &
10759 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10760 (!(priv->assoc_request.capability &
10761 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10762 IPW_DEBUG_ASSOC("Disassociating due to capability "
10763 "change.\n");
10764 ipw_disassociate(priv);
10765 }
10766#endif
10767}
10768
10769static int init_supported_rates(struct ipw_priv *priv,
10770 struct ipw_supported_rates *rates)
10771{
10772 /* TODO: Mask out rates based on priv->rates_mask */
10773
10774 memset(rates, 0, sizeof(*rates));
10775 /* configure supported rates */
10776 switch (priv->ieee->freq_band) {
10777 case LIBIPW_52GHZ_BAND:
10778 rates->ieee_mode = IPW_A_MODE;
10779 rates->purpose = IPW_RATE_CAPABILITIES;
10780 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10781 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10782 break;
10783
10784 default: /* Mixed or 2.4Ghz */
10785 rates->ieee_mode = IPW_G_MODE;
10786 rates->purpose = IPW_RATE_CAPABILITIES;
10787 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10788 LIBIPW_CCK_DEFAULT_RATES_MASK);
10789 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10790 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10791 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10792 }
10793 break;
10794 }
10795
10796 return 0;
10797}
10798
10799static int ipw_config(struct ipw_priv *priv)
10800{
10801 /* This is only called from ipw_up, which resets/reloads the firmware
10802 so, we don't need to first disable the card before we configure
10803 it */
10804 if (ipw_set_tx_power(priv))
10805 goto error;
10806
10807 /* initialize adapter address */
10808 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10809 goto error;
10810
10811 /* set basic system config settings */
10812 init_sys_config(&priv->sys_config);
10813
10814 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10815 * Does not support BT priority yet (don't abort or defer our Tx) */
10816 if (bt_coexist) {
10817 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10818
10819 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10820 priv->sys_config.bt_coexistence
10821 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10822 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10823 priv->sys_config.bt_coexistence
10824 |= CFG_BT_COEXISTENCE_OOB;
10825 }
10826
10827#ifdef CONFIG_IPW2200_PROMISCUOUS
10828 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10829 priv->sys_config.accept_all_data_frames = 1;
10830 priv->sys_config.accept_non_directed_frames = 1;
10831 priv->sys_config.accept_all_mgmt_bcpr = 1;
10832 priv->sys_config.accept_all_mgmt_frames = 1;
10833 }
10834#endif
10835
10836 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10837 priv->sys_config.answer_broadcast_ssid_probe = 1;
10838 else
10839 priv->sys_config.answer_broadcast_ssid_probe = 0;
10840
10841 if (ipw_send_system_config(priv))
10842 goto error;
10843
10844 init_supported_rates(priv, &priv->rates);
10845 if (ipw_send_supported_rates(priv, &priv->rates))
10846 goto error;
10847
10848 /* Set request-to-send threshold */
10849 if (priv->rts_threshold) {
10850 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10851 goto error;
10852 }
10853#ifdef CONFIG_IPW2200_QOS
10854 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10855 ipw_qos_activate(priv, NULL);
10856#endif /* CONFIG_IPW2200_QOS */
10857
10858 if (ipw_set_random_seed(priv))
10859 goto error;
10860
10861 /* final state transition to the RUN state */
10862 if (ipw_send_host_complete(priv))
10863 goto error;
10864
10865 priv->status |= STATUS_INIT;
10866
10867 ipw_led_init(priv);
10868 ipw_led_radio_on(priv);
10869 priv->notif_missed_beacons = 0;
10870
10871 /* Set hardware WEP key if it is configured. */
10872 if ((priv->capability & CAP_PRIVACY_ON) &&
10873 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10874 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10875 ipw_set_hwcrypto_keys(priv);
10876
10877 return 0;
10878
10879 error:
10880 return -EIO;
10881}
10882
10883/*
10884 * NOTE:
10885 *
10886 * These tables have been tested in conjunction with the
10887 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10888 *
10889 * Altering this values, using it on other hardware, or in geographies
10890 * not intended for resale of the above mentioned Intel adapters has
10891 * not been tested.
10892 *
10893 * Remember to update the table in README.ipw2200 when changing this
10894 * table.
10895 *
10896 */
10897static const struct libipw_geo ipw_geos[] = {
10898 { /* Restricted */
10899 "---",
10900 .bg_channels = 11,
10901 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10902 {2427, 4}, {2432, 5}, {2437, 6},
10903 {2442, 7}, {2447, 8}, {2452, 9},
10904 {2457, 10}, {2462, 11}},
10905 },
10906
10907 { /* Custom US/Canada */
10908 "ZZF",
10909 .bg_channels = 11,
10910 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10911 {2427, 4}, {2432, 5}, {2437, 6},
10912 {2442, 7}, {2447, 8}, {2452, 9},
10913 {2457, 10}, {2462, 11}},
10914 .a_channels = 8,
10915 .a = {{5180, 36},
10916 {5200, 40},
10917 {5220, 44},
10918 {5240, 48},
10919 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10920 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10921 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10922 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
10923 },
10924
10925 { /* Rest of World */
10926 "ZZD",
10927 .bg_channels = 13,
10928 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10929 {2427, 4}, {2432, 5}, {2437, 6},
10930 {2442, 7}, {2447, 8}, {2452, 9},
10931 {2457, 10}, {2462, 11}, {2467, 12},
10932 {2472, 13}},
10933 },
10934
10935 { /* Custom USA & Europe & High */
10936 "ZZA",
10937 .bg_channels = 11,
10938 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10939 {2427, 4}, {2432, 5}, {2437, 6},
10940 {2442, 7}, {2447, 8}, {2452, 9},
10941 {2457, 10}, {2462, 11}},
10942 .a_channels = 13,
10943 .a = {{5180, 36},
10944 {5200, 40},
10945 {5220, 44},
10946 {5240, 48},
10947 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10948 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10949 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10950 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10951 {5745, 149},
10952 {5765, 153},
10953 {5785, 157},
10954 {5805, 161},
10955 {5825, 165}},
10956 },
10957
10958 { /* Custom NA & Europe */
10959 "ZZB",
10960 .bg_channels = 11,
10961 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10962 {2427, 4}, {2432, 5}, {2437, 6},
10963 {2442, 7}, {2447, 8}, {2452, 9},
10964 {2457, 10}, {2462, 11}},
10965 .a_channels = 13,
10966 .a = {{5180, 36},
10967 {5200, 40},
10968 {5220, 44},
10969 {5240, 48},
10970 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10971 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10972 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10973 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10974 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
10975 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
10976 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
10977 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
10978 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
10979 },
10980
10981 { /* Custom Japan */
10982 "ZZC",
10983 .bg_channels = 11,
10984 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10985 {2427, 4}, {2432, 5}, {2437, 6},
10986 {2442, 7}, {2447, 8}, {2452, 9},
10987 {2457, 10}, {2462, 11}},
10988 .a_channels = 4,
10989 .a = {{5170, 34}, {5190, 38},
10990 {5210, 42}, {5230, 46}},
10991 },
10992
10993 { /* Custom */
10994 "ZZM",
10995 .bg_channels = 11,
10996 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10997 {2427, 4}, {2432, 5}, {2437, 6},
10998 {2442, 7}, {2447, 8}, {2452, 9},
10999 {2457, 10}, {2462, 11}},
11000 },
11001
11002 { /* Europe */
11003 "ZZE",
11004 .bg_channels = 13,
11005 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11006 {2427, 4}, {2432, 5}, {2437, 6},
11007 {2442, 7}, {2447, 8}, {2452, 9},
11008 {2457, 10}, {2462, 11}, {2467, 12},
11009 {2472, 13}},
11010 .a_channels = 19,
11011 .a = {{5180, 36},
11012 {5200, 40},
11013 {5220, 44},
11014 {5240, 48},
11015 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11016 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11017 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11018 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11019 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11020 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11021 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11022 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11023 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11024 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11025 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11026 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11027 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11028 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11029 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11030 },
11031
11032 { /* Custom Japan */
11033 "ZZJ",
11034 .bg_channels = 14,
11035 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11036 {2427, 4}, {2432, 5}, {2437, 6},
11037 {2442, 7}, {2447, 8}, {2452, 9},
11038 {2457, 10}, {2462, 11}, {2467, 12},
11039 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11040 .a_channels = 4,
11041 .a = {{5170, 34}, {5190, 38},
11042 {5210, 42}, {5230, 46}},
11043 },
11044
11045 { /* Rest of World */
11046 "ZZR",
11047 .bg_channels = 14,
11048 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11049 {2427, 4}, {2432, 5}, {2437, 6},
11050 {2442, 7}, {2447, 8}, {2452, 9},
11051 {2457, 10}, {2462, 11}, {2467, 12},
11052 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11053 LIBIPW_CH_PASSIVE_ONLY}},
11054 },
11055
11056 { /* High Band */
11057 "ZZH",
11058 .bg_channels = 13,
11059 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11060 {2427, 4}, {2432, 5}, {2437, 6},
11061 {2442, 7}, {2447, 8}, {2452, 9},
11062 {2457, 10}, {2462, 11},
11063 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11064 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11065 .a_channels = 4,
11066 .a = {{5745, 149}, {5765, 153},
11067 {5785, 157}, {5805, 161}},
11068 },
11069
11070 { /* Custom Europe */
11071 "ZZG",
11072 .bg_channels = 13,
11073 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11074 {2427, 4}, {2432, 5}, {2437, 6},
11075 {2442, 7}, {2447, 8}, {2452, 9},
11076 {2457, 10}, {2462, 11},
11077 {2467, 12}, {2472, 13}},
11078 .a_channels = 4,
11079 .a = {{5180, 36}, {5200, 40},
11080 {5220, 44}, {5240, 48}},
11081 },
11082
11083 { /* Europe */
11084 "ZZK",
11085 .bg_channels = 13,
11086 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11087 {2427, 4}, {2432, 5}, {2437, 6},
11088 {2442, 7}, {2447, 8}, {2452, 9},
11089 {2457, 10}, {2462, 11},
11090 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11091 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11092 .a_channels = 24,
11093 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11094 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11095 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11096 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11097 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11098 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11099 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11100 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11101 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11102 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11103 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11104 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11105 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11106 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11107 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11108 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11109 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11110 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11111 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11112 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11113 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11114 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11115 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11116 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11117 },
11118
11119 { /* Europe */
11120 "ZZL",
11121 .bg_channels = 11,
11122 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11123 {2427, 4}, {2432, 5}, {2437, 6},
11124 {2442, 7}, {2447, 8}, {2452, 9},
11125 {2457, 10}, {2462, 11}},
11126 .a_channels = 13,
11127 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11128 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11129 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11130 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11131 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11132 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11133 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11134 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11135 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11136 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11137 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11138 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11139 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11140 }
11141};
11142
11143static void ipw_set_geo(struct ipw_priv *priv)
11144{
11145 int j;
11146
11147 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11148 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11149 ipw_geos[j].name, 3))
11150 break;
11151 }
11152
11153 if (j == ARRAY_SIZE(ipw_geos)) {
11154 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11155 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11156 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11157 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11158 j = 0;
11159 }
11160
11161 libipw_set_geo(priv->ieee, &ipw_geos[j]);
11162}
11163
11164#define MAX_HW_RESTARTS 5
11165static int ipw_up(struct ipw_priv *priv)
11166{
11167 int rc, i;
11168
11169 /* Age scan list entries found before suspend */
11170 if (priv->suspend_time) {
11171 libipw_networks_age(priv->ieee, priv->suspend_time);
11172 priv->suspend_time = 0;
11173 }
11174
11175 if (priv->status & STATUS_EXIT_PENDING)
11176 return -EIO;
11177
11178 if (cmdlog && !priv->cmdlog) {
11179 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11180 GFP_KERNEL);
11181 if (priv->cmdlog == NULL) {
11182 IPW_ERROR("Error allocating %d command log entries.\n",
11183 cmdlog);
11184 return -ENOMEM;
11185 } else {
11186 priv->cmdlog_len = cmdlog;
11187 }
11188 }
11189
11190 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11191 /* Load the microcode, firmware, and eeprom.
11192 * Also start the clocks. */
11193 rc = ipw_load(priv);
11194 if (rc) {
11195 IPW_ERROR("Unable to load firmware: %d\n", rc);
11196 return rc;
11197 }
11198
11199 ipw_init_ordinals(priv);
11200 if (!(priv->config & CFG_CUSTOM_MAC))
11201 eeprom_parse_mac(priv, priv->mac_addr);
11202 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11203
11204 ipw_set_geo(priv);
11205
11206 if (priv->status & STATUS_RF_KILL_SW) {
11207 IPW_WARNING("Radio disabled by module parameter.\n");
11208 return 0;
11209 } else if (rf_kill_active(priv)) {
11210 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11211 "Kill switch must be turned off for "
11212 "wireless networking to work.\n");
11213 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11214 return 0;
11215 }
11216
11217 rc = ipw_config(priv);
11218 if (!rc) {
11219 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11220
11221 /* If configure to try and auto-associate, kick
11222 * off a scan. */
11223 schedule_delayed_work(&priv->request_scan, 0);
11224
11225 return 0;
11226 }
11227
11228 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11229 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11230 i, MAX_HW_RESTARTS);
11231
11232 /* We had an error bringing up the hardware, so take it
11233 * all the way back down so we can try again */
11234 ipw_down(priv);
11235 }
11236
11237 /* tried to restart and config the device for as long as our
11238 * patience could withstand */
11239 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11240
11241 return -EIO;
11242}
11243
11244static void ipw_bg_up(struct work_struct *work)
11245{
11246 struct ipw_priv *priv =
11247 container_of(work, struct ipw_priv, up);
11248 mutex_lock(&priv->mutex);
11249 ipw_up(priv);
11250 mutex_unlock(&priv->mutex);
11251}
11252
11253static void ipw_deinit(struct ipw_priv *priv)
11254{
11255 int i;
11256
11257 if (priv->status & STATUS_SCANNING) {
11258 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11259 ipw_abort_scan(priv);
11260 }
11261
11262 if (priv->status & STATUS_ASSOCIATED) {
11263 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11264 ipw_disassociate(priv);
11265 }
11266
11267 ipw_led_shutdown(priv);
11268
11269 /* Wait up to 1s for status to change to not scanning and not
11270 * associated (disassociation can take a while for a ful 802.11
11271 * exchange */
11272 for (i = 1000; i && (priv->status &
11273 (STATUS_DISASSOCIATING |
11274 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11275 udelay(10);
11276
11277 if (priv->status & (STATUS_DISASSOCIATING |
11278 STATUS_ASSOCIATED | STATUS_SCANNING))
11279 IPW_DEBUG_INFO("Still associated or scanning...\n");
11280 else
11281 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11282
11283 /* Attempt to disable the card */
11284 ipw_send_card_disable(priv, 0);
11285
11286 priv->status &= ~STATUS_INIT;
11287}
11288
11289static void ipw_down(struct ipw_priv *priv)
11290{
11291 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11292
11293 priv->status |= STATUS_EXIT_PENDING;
11294
11295 if (ipw_is_init(priv))
11296 ipw_deinit(priv);
11297
11298 /* Wipe out the EXIT_PENDING status bit if we are not actually
11299 * exiting the module */
11300 if (!exit_pending)
11301 priv->status &= ~STATUS_EXIT_PENDING;
11302
11303 /* tell the device to stop sending interrupts */
11304 ipw_disable_interrupts(priv);
11305
11306 /* Clear all bits but the RF Kill */
11307 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11308 netif_carrier_off(priv->net_dev);
11309
11310 ipw_stop_nic(priv);
11311
11312 ipw_led_radio_off(priv);
11313}
11314
11315static void ipw_bg_down(struct work_struct *work)
11316{
11317 struct ipw_priv *priv =
11318 container_of(work, struct ipw_priv, down);
11319 mutex_lock(&priv->mutex);
11320 ipw_down(priv);
11321 mutex_unlock(&priv->mutex);
11322}
11323
11324static int ipw_wdev_init(struct net_device *dev)
11325{
11326 int i, rc = 0;
11327 struct ipw_priv *priv = libipw_priv(dev);
11328 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11329 struct wireless_dev *wdev = &priv->ieee->wdev;
11330
11331 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11332
11333 /* fill-out priv->ieee->bg_band */
11334 if (geo->bg_channels) {
11335 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11336
11337 bg_band->band = NL80211_BAND_2GHZ;
11338 bg_band->n_channels = geo->bg_channels;
11339 bg_band->channels = kcalloc(geo->bg_channels,
11340 sizeof(struct ieee80211_channel),
11341 GFP_KERNEL);
11342 if (!bg_band->channels) {
11343 rc = -ENOMEM;
11344 goto out;
11345 }
11346 /* translate geo->bg to bg_band.channels */
11347 for (i = 0; i < geo->bg_channels; i++) {
11348 bg_band->channels[i].band = NL80211_BAND_2GHZ;
11349 bg_band->channels[i].center_freq = geo->bg[i].freq;
11350 bg_band->channels[i].hw_value = geo->bg[i].channel;
11351 bg_band->channels[i].max_power = geo->bg[i].max_power;
11352 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11353 bg_band->channels[i].flags |=
11354 IEEE80211_CHAN_NO_IR;
11355 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11356 bg_band->channels[i].flags |=
11357 IEEE80211_CHAN_NO_IR;
11358 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11359 bg_band->channels[i].flags |=
11360 IEEE80211_CHAN_RADAR;
11361 /* No equivalent for LIBIPW_CH_80211H_RULES,
11362 LIBIPW_CH_UNIFORM_SPREADING, or
11363 LIBIPW_CH_B_ONLY... */
11364 }
11365 /* point at bitrate info */
11366 bg_band->bitrates = ipw2200_bg_rates;
11367 bg_band->n_bitrates = ipw2200_num_bg_rates;
11368
11369 wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
11370 }
11371
11372 /* fill-out priv->ieee->a_band */
11373 if (geo->a_channels) {
11374 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11375
11376 a_band->band = NL80211_BAND_5GHZ;
11377 a_band->n_channels = geo->a_channels;
11378 a_band->channels = kcalloc(geo->a_channels,
11379 sizeof(struct ieee80211_channel),
11380 GFP_KERNEL);
11381 if (!a_band->channels) {
11382 rc = -ENOMEM;
11383 goto out;
11384 }
11385 /* translate geo->a to a_band.channels */
11386 for (i = 0; i < geo->a_channels; i++) {
11387 a_band->channels[i].band = NL80211_BAND_5GHZ;
11388 a_band->channels[i].center_freq = geo->a[i].freq;
11389 a_band->channels[i].hw_value = geo->a[i].channel;
11390 a_band->channels[i].max_power = geo->a[i].max_power;
11391 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11392 a_band->channels[i].flags |=
11393 IEEE80211_CHAN_NO_IR;
11394 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11395 a_band->channels[i].flags |=
11396 IEEE80211_CHAN_NO_IR;
11397 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11398 a_band->channels[i].flags |=
11399 IEEE80211_CHAN_RADAR;
11400 /* No equivalent for LIBIPW_CH_80211H_RULES,
11401 LIBIPW_CH_UNIFORM_SPREADING, or
11402 LIBIPW_CH_B_ONLY... */
11403 }
11404 /* point at bitrate info */
11405 a_band->bitrates = ipw2200_a_rates;
11406 a_band->n_bitrates = ipw2200_num_a_rates;
11407
11408 wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
11409 }
11410
11411 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11412 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11413
11414 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11415
11416 /* With that information in place, we can now register the wiphy... */
11417 rc = wiphy_register(wdev->wiphy);
11418 if (rc)
11419 goto out;
11420
11421 return 0;
11422out:
11423 kfree(priv->ieee->a_band.channels);
11424 kfree(priv->ieee->bg_band.channels);
11425 return rc;
11426}
11427
11428/* PCI driver stuff */
11429static const struct pci_device_id card_ids[] = {
11430 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11431 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11432 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11433 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11434 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11435 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11436 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11437 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11438 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11439 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11440 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11441 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11442 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11443 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11444 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11445 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11446 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11447 {PCI_VDEVICE(INTEL, 0x104f), 0},
11448 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11449 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11450 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11451 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11452
11453 /* required last entry */
11454 {0,}
11455};
11456
11457MODULE_DEVICE_TABLE(pci, card_ids);
11458
11459static struct attribute *ipw_sysfs_entries[] = {
11460 &dev_attr_rf_kill.attr,
11461 &dev_attr_direct_dword.attr,
11462 &dev_attr_indirect_byte.attr,
11463 &dev_attr_indirect_dword.attr,
11464 &dev_attr_mem_gpio_reg.attr,
11465 &dev_attr_command_event_reg.attr,
11466 &dev_attr_nic_type.attr,
11467 &dev_attr_status.attr,
11468 &dev_attr_cfg.attr,
11469 &dev_attr_error.attr,
11470 &dev_attr_event_log.attr,
11471 &dev_attr_cmd_log.attr,
11472 &dev_attr_eeprom_delay.attr,
11473 &dev_attr_ucode_version.attr,
11474 &dev_attr_rtc.attr,
11475 &dev_attr_scan_age.attr,
11476 &dev_attr_led.attr,
11477 &dev_attr_speed_scan.attr,
11478 &dev_attr_net_stats.attr,
11479 &dev_attr_channels.attr,
11480#ifdef CONFIG_IPW2200_PROMISCUOUS
11481 &dev_attr_rtap_iface.attr,
11482 &dev_attr_rtap_filter.attr,
11483#endif
11484 NULL
11485};
11486
11487static const struct attribute_group ipw_attribute_group = {
11488 .name = NULL, /* put in device directory */
11489 .attrs = ipw_sysfs_entries,
11490};
11491
11492#ifdef CONFIG_IPW2200_PROMISCUOUS
11493static int ipw_prom_open(struct net_device *dev)
11494{
11495 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11496 struct ipw_priv *priv = prom_priv->priv;
11497
11498 IPW_DEBUG_INFO("prom dev->open\n");
11499 netif_carrier_off(dev);
11500
11501 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11502 priv->sys_config.accept_all_data_frames = 1;
11503 priv->sys_config.accept_non_directed_frames = 1;
11504 priv->sys_config.accept_all_mgmt_bcpr = 1;
11505 priv->sys_config.accept_all_mgmt_frames = 1;
11506
11507 ipw_send_system_config(priv);
11508 }
11509
11510 return 0;
11511}
11512
11513static int ipw_prom_stop(struct net_device *dev)
11514{
11515 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11516 struct ipw_priv *priv = prom_priv->priv;
11517
11518 IPW_DEBUG_INFO("prom dev->stop\n");
11519
11520 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11521 priv->sys_config.accept_all_data_frames = 0;
11522 priv->sys_config.accept_non_directed_frames = 0;
11523 priv->sys_config.accept_all_mgmt_bcpr = 0;
11524 priv->sys_config.accept_all_mgmt_frames = 0;
11525
11526 ipw_send_system_config(priv);
11527 }
11528
11529 return 0;
11530}
11531
11532static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11533 struct net_device *dev)
11534{
11535 IPW_DEBUG_INFO("prom dev->xmit\n");
11536 dev_kfree_skb(skb);
11537 return NETDEV_TX_OK;
11538}
11539
11540static const struct net_device_ops ipw_prom_netdev_ops = {
11541 .ndo_open = ipw_prom_open,
11542 .ndo_stop = ipw_prom_stop,
11543 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11544 .ndo_set_mac_address = eth_mac_addr,
11545 .ndo_validate_addr = eth_validate_addr,
11546};
11547
11548static int ipw_prom_alloc(struct ipw_priv *priv)
11549{
11550 int rc = 0;
11551
11552 if (priv->prom_net_dev)
11553 return -EPERM;
11554
11555 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11556 if (priv->prom_net_dev == NULL)
11557 return -ENOMEM;
11558
11559 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11560 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11561 priv->prom_priv->priv = priv;
11562
11563 strcpy(priv->prom_net_dev->name, "rtap%d");
11564 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11565
11566 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11567 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11568
11569 priv->prom_net_dev->min_mtu = 68;
11570 priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
11571
11572 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11573 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11574
11575 rc = register_netdev(priv->prom_net_dev);
11576 if (rc) {
11577 free_libipw(priv->prom_net_dev, 1);
11578 priv->prom_net_dev = NULL;
11579 return rc;
11580 }
11581
11582 return 0;
11583}
11584
11585static void ipw_prom_free(struct ipw_priv *priv)
11586{
11587 if (!priv->prom_net_dev)
11588 return;
11589
11590 unregister_netdev(priv->prom_net_dev);
11591 free_libipw(priv->prom_net_dev, 1);
11592
11593 priv->prom_net_dev = NULL;
11594}
11595
11596#endif
11597
11598static const struct net_device_ops ipw_netdev_ops = {
11599 .ndo_open = ipw_net_open,
11600 .ndo_stop = ipw_net_stop,
11601 .ndo_set_rx_mode = ipw_net_set_multicast_list,
11602 .ndo_set_mac_address = ipw_net_set_mac_address,
11603 .ndo_start_xmit = libipw_xmit,
11604 .ndo_validate_addr = eth_validate_addr,
11605};
11606
11607static int ipw_pci_probe(struct pci_dev *pdev,
11608 const struct pci_device_id *ent)
11609{
11610 int err = 0;
11611 struct net_device *net_dev;
11612 void __iomem *base;
11613 u32 length, val;
11614 struct ipw_priv *priv;
11615 int i;
11616
11617 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11618 if (net_dev == NULL) {
11619 err = -ENOMEM;
11620 goto out;
11621 }
11622
11623 priv = libipw_priv(net_dev);
11624 priv->ieee = netdev_priv(net_dev);
11625
11626 priv->net_dev = net_dev;
11627 priv->pci_dev = pdev;
11628 ipw_debug_level = debug;
11629 spin_lock_init(&priv->irq_lock);
11630 spin_lock_init(&priv->lock);
11631 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11632 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11633
11634 mutex_init(&priv->mutex);
11635 if (pci_enable_device(pdev)) {
11636 err = -ENODEV;
11637 goto out_free_libipw;
11638 }
11639
11640 pci_set_master(pdev);
11641
11642 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
11643 if (!err)
11644 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
11645 if (err) {
11646 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11647 goto out_pci_disable_device;
11648 }
11649
11650 pci_set_drvdata(pdev, priv);
11651
11652 err = pci_request_regions(pdev, DRV_NAME);
11653 if (err)
11654 goto out_pci_disable_device;
11655
11656 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11657 * PCI Tx retries from interfering with C3 CPU state */
11658 pci_read_config_dword(pdev, 0x40, &val);
11659 if ((val & 0x0000ff00) != 0)
11660 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11661
11662 length = pci_resource_len(pdev, 0);
11663 priv->hw_len = length;
11664
11665 base = pci_ioremap_bar(pdev, 0);
11666 if (!base) {
11667 err = -ENODEV;
11668 goto out_pci_release_regions;
11669 }
11670
11671 priv->hw_base = base;
11672 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11673 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11674
11675 err = ipw_setup_deferred_work(priv);
11676 if (err) {
11677 IPW_ERROR("Unable to setup deferred work\n");
11678 goto out_iounmap;
11679 }
11680
11681 ipw_sw_reset(priv, 1);
11682
11683 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11684 if (err) {
11685 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11686 goto out_iounmap;
11687 }
11688
11689 SET_NETDEV_DEV(net_dev, &pdev->dev);
11690
11691 mutex_lock(&priv->mutex);
11692
11693 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11694 priv->ieee->set_security = shim__set_security;
11695 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11696
11697#ifdef CONFIG_IPW2200_QOS
11698 priv->ieee->is_qos_active = ipw_is_qos_active;
11699 priv->ieee->handle_probe_response = ipw_handle_beacon;
11700 priv->ieee->handle_beacon = ipw_handle_probe_response;
11701 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11702#endif /* CONFIG_IPW2200_QOS */
11703
11704 priv->ieee->perfect_rssi = -20;
11705 priv->ieee->worst_rssi = -85;
11706
11707 net_dev->netdev_ops = &ipw_netdev_ops;
11708 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11709 net_dev->wireless_data = &priv->wireless_data;
11710 net_dev->wireless_handlers = &ipw_wx_handler_def;
11711 net_dev->ethtool_ops = &ipw_ethtool_ops;
11712
11713 net_dev->min_mtu = 68;
11714 net_dev->max_mtu = LIBIPW_DATA_LEN;
11715
11716 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11717 if (err) {
11718 IPW_ERROR("failed to create sysfs device attributes\n");
11719 mutex_unlock(&priv->mutex);
11720 goto out_release_irq;
11721 }
11722
11723 if (ipw_up(priv)) {
11724 mutex_unlock(&priv->mutex);
11725 err = -EIO;
11726 goto out_remove_sysfs;
11727 }
11728
11729 mutex_unlock(&priv->mutex);
11730
11731 err = ipw_wdev_init(net_dev);
11732 if (err) {
11733 IPW_ERROR("failed to register wireless device\n");
11734 goto out_remove_sysfs;
11735 }
11736
11737 err = register_netdev(net_dev);
11738 if (err) {
11739 IPW_ERROR("failed to register network device\n");
11740 goto out_unregister_wiphy;
11741 }
11742
11743#ifdef CONFIG_IPW2200_PROMISCUOUS
11744 if (rtap_iface) {
11745 err = ipw_prom_alloc(priv);
11746 if (err) {
11747 IPW_ERROR("Failed to register promiscuous network "
11748 "device (error %d).\n", err);
11749 unregister_netdev(priv->net_dev);
11750 goto out_unregister_wiphy;
11751 }
11752 }
11753#endif
11754
11755 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11756 "channels, %d 802.11a channels)\n",
11757 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11758 priv->ieee->geo.a_channels);
11759
11760 return 0;
11761
11762 out_unregister_wiphy:
11763 wiphy_unregister(priv->ieee->wdev.wiphy);
11764 kfree(priv->ieee->a_band.channels);
11765 kfree(priv->ieee->bg_band.channels);
11766 out_remove_sysfs:
11767 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11768 out_release_irq:
11769 free_irq(pdev->irq, priv);
11770 out_iounmap:
11771 iounmap(priv->hw_base);
11772 out_pci_release_regions:
11773 pci_release_regions(pdev);
11774 out_pci_disable_device:
11775 pci_disable_device(pdev);
11776 out_free_libipw:
11777 free_libipw(priv->net_dev, 0);
11778 out:
11779 return err;
11780}
11781
11782static void ipw_pci_remove(struct pci_dev *pdev)
11783{
11784 struct ipw_priv *priv = pci_get_drvdata(pdev);
11785 struct list_head *p, *q;
11786 int i;
11787
11788 if (!priv)
11789 return;
11790
11791 mutex_lock(&priv->mutex);
11792
11793 priv->status |= STATUS_EXIT_PENDING;
11794 ipw_down(priv);
11795 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11796
11797 mutex_unlock(&priv->mutex);
11798
11799 unregister_netdev(priv->net_dev);
11800
11801 if (priv->rxq) {
11802 ipw_rx_queue_free(priv, priv->rxq);
11803 priv->rxq = NULL;
11804 }
11805 ipw_tx_queue_free(priv);
11806
11807 if (priv->cmdlog) {
11808 kfree(priv->cmdlog);
11809 priv->cmdlog = NULL;
11810 }
11811
11812 /* make sure all works are inactive */
11813 cancel_delayed_work_sync(&priv->adhoc_check);
11814 cancel_work_sync(&priv->associate);
11815 cancel_work_sync(&priv->disassociate);
11816 cancel_work_sync(&priv->system_config);
11817 cancel_work_sync(&priv->rx_replenish);
11818 cancel_work_sync(&priv->adapter_restart);
11819 cancel_delayed_work_sync(&priv->rf_kill);
11820 cancel_work_sync(&priv->up);
11821 cancel_work_sync(&priv->down);
11822 cancel_delayed_work_sync(&priv->request_scan);
11823 cancel_delayed_work_sync(&priv->request_direct_scan);
11824 cancel_delayed_work_sync(&priv->request_passive_scan);
11825 cancel_delayed_work_sync(&priv->scan_event);
11826 cancel_delayed_work_sync(&priv->gather_stats);
11827 cancel_work_sync(&priv->abort_scan);
11828 cancel_work_sync(&priv->roam);
11829 cancel_delayed_work_sync(&priv->scan_check);
11830 cancel_work_sync(&priv->link_up);
11831 cancel_work_sync(&priv->link_down);
11832 cancel_delayed_work_sync(&priv->led_link_on);
11833 cancel_delayed_work_sync(&priv->led_link_off);
11834 cancel_delayed_work_sync(&priv->led_act_off);
11835 cancel_work_sync(&priv->merge_networks);
11836
11837 /* Free MAC hash list for ADHOC */
11838 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11839 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11840 list_del(p);
11841 kfree(list_entry(p, struct ipw_ibss_seq, list));
11842 }
11843 }
11844
11845 kfree(priv->error);
11846 priv->error = NULL;
11847
11848#ifdef CONFIG_IPW2200_PROMISCUOUS
11849 ipw_prom_free(priv);
11850#endif
11851
11852 free_irq(pdev->irq, priv);
11853 iounmap(priv->hw_base);
11854 pci_release_regions(pdev);
11855 pci_disable_device(pdev);
11856 /* wiphy_unregister needs to be here, before free_libipw */
11857 wiphy_unregister(priv->ieee->wdev.wiphy);
11858 kfree(priv->ieee->a_band.channels);
11859 kfree(priv->ieee->bg_band.channels);
11860 free_libipw(priv->net_dev, 0);
11861 free_firmware();
11862}
11863
11864#ifdef CONFIG_PM
11865static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11866{
11867 struct ipw_priv *priv = pci_get_drvdata(pdev);
11868 struct net_device *dev = priv->net_dev;
11869
11870 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11871
11872 /* Take down the device; powers it off, etc. */
11873 ipw_down(priv);
11874
11875 /* Remove the PRESENT state of the device */
11876 netif_device_detach(dev);
11877
11878 pci_save_state(pdev);
11879 pci_disable_device(pdev);
11880 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11881
11882 priv->suspend_at = ktime_get_boottime_seconds();
11883
11884 return 0;
11885}
11886
11887static int ipw_pci_resume(struct pci_dev *pdev)
11888{
11889 struct ipw_priv *priv = pci_get_drvdata(pdev);
11890 struct net_device *dev = priv->net_dev;
11891 int err;
11892 u32 val;
11893
11894 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11895
11896 pci_set_power_state(pdev, PCI_D0);
11897 err = pci_enable_device(pdev);
11898 if (err) {
11899 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11900 dev->name);
11901 return err;
11902 }
11903 pci_restore_state(pdev);
11904
11905 /*
11906 * Suspend/Resume resets the PCI configuration space, so we have to
11907 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11908 * from interfering with C3 CPU state. pci_restore_state won't help
11909 * here since it only restores the first 64 bytes pci config header.
11910 */
11911 pci_read_config_dword(pdev, 0x40, &val);
11912 if ((val & 0x0000ff00) != 0)
11913 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11914
11915 /* Set the device back into the PRESENT state; this will also wake
11916 * the queue of needed */
11917 netif_device_attach(dev);
11918
11919 priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
11920
11921 /* Bring the device back up */
11922 schedule_work(&priv->up);
11923
11924 return 0;
11925}
11926#endif
11927
11928static void ipw_pci_shutdown(struct pci_dev *pdev)
11929{
11930 struct ipw_priv *priv = pci_get_drvdata(pdev);
11931
11932 /* Take down the device; powers it off, etc. */
11933 ipw_down(priv);
11934
11935 pci_disable_device(pdev);
11936}
11937
11938/* driver initialization stuff */
11939static struct pci_driver ipw_driver = {
11940 .name = DRV_NAME,
11941 .id_table = card_ids,
11942 .probe = ipw_pci_probe,
11943 .remove = ipw_pci_remove,
11944#ifdef CONFIG_PM
11945 .suspend = ipw_pci_suspend,
11946 .resume = ipw_pci_resume,
11947#endif
11948 .shutdown = ipw_pci_shutdown,
11949};
11950
11951static int __init ipw_init(void)
11952{
11953 int ret;
11954
11955 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11956 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11957
11958 ret = pci_register_driver(&ipw_driver);
11959 if (ret) {
11960 IPW_ERROR("Unable to initialize PCI module\n");
11961 return ret;
11962 }
11963
11964 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11965 if (ret) {
11966 IPW_ERROR("Unable to create driver sysfs file\n");
11967 pci_unregister_driver(&ipw_driver);
11968 return ret;
11969 }
11970
11971 return ret;
11972}
11973
11974static void __exit ipw_exit(void)
11975{
11976 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11977 pci_unregister_driver(&ipw_driver);
11978}
11979
11980module_param(disable, int, 0444);
11981MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11982
11983module_param(associate, int, 0444);
11984MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11985
11986module_param(auto_create, int, 0444);
11987MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11988
11989module_param_named(led, led_support, int, 0444);
11990MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
11991
11992module_param(debug, int, 0444);
11993MODULE_PARM_DESC(debug, "debug output mask");
11994
11995module_param_named(channel, default_channel, int, 0444);
11996MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11997
11998#ifdef CONFIG_IPW2200_PROMISCUOUS
11999module_param(rtap_iface, int, 0444);
12000MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12001#endif
12002
12003#ifdef CONFIG_IPW2200_QOS
12004module_param(qos_enable, int, 0444);
12005MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
12006
12007module_param(qos_burst_enable, int, 0444);
12008MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12009
12010module_param(qos_no_ack_mask, int, 0444);
12011MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12012
12013module_param(burst_duration_CCK, int, 0444);
12014MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12015
12016module_param(burst_duration_OFDM, int, 0444);
12017MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12018#endif /* CONFIG_IPW2200_QOS */
12019
12020#ifdef CONFIG_IPW2200_MONITOR
12021module_param_named(mode, network_mode, int, 0444);
12022MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12023#else
12024module_param_named(mode, network_mode, int, 0444);
12025MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12026#endif
12027
12028module_param(bt_coexist, int, 0444);
12029MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12030
12031module_param(hwcrypto, int, 0444);
12032MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12033
12034module_param(cmdlog, int, 0444);
12035MODULE_PARM_DESC(cmdlog,
12036 "allocate a ring buffer for logging firmware commands");
12037
12038module_param(roaming, int, 0444);
12039MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12040
12041module_param(antenna, int, 0444);
12042MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
12043
12044module_exit(ipw_exit);
12045module_init(ipw_init);