blob: f97e2573e623a442d0bbac8a69f87f90d75ee496 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001/**
2 * rwnx_utils.c
3 *
4 * IPC utility function definitions
5 *
6 * Copyright (C) RivieraWaves 2012-2021
7 */
8#include "rwnx_utils.h"
9#include "rwnx_defs.h"
10#include "rwnx_rx.h"
11#include "rwnx_tx.h"
12#include "rwnx_msg_rx.h"
13#include "rwnx_debugfs.h"
14#include "rwnx_prof.h"
15#include "ipc_host.h"
16
17#ifdef CONFIG_RWNX_FULLMAC
18#define FW_STR "fmac"
19#endif
20
21/**
22 * rwnx_ipc_buf_pool_alloc() - Allocate and push to fw a pool of IPC buffer.
23 *
24 * @rwnx_hw: Main driver structure
25 * @pool: Pool to allocate
26 * @nb: Size of the pool to allocate
27 * @buf_size: Size of one pool element
28 * @pool_name: Name of the pool
29 * @push: Function to push one pool buffer to fw
30 *
31 * This function will allocate an array to store the list of IPC buffers,
32 * a dma pool and @nb element in the dma pool.
33 * Each buffer is initialized with '0' and then pushed to fw using the @push function.
34 *
35 * Return: 0 on success and <0 upon error. If error is returned any allocated
36 * memory is NOT freed and rwnx_ipc_buf_pool_dealloc() must be called.
37 */
38static int rwnx_ipc_buf_pool_alloc(struct rwnx_hw *rwnx_hw,
39 struct rwnx_ipc_buf_pool *pool,
40 int nb, size_t buf_size, char *pool_name,
41 int (*push)(struct ipc_host_env_tag *,
42 struct rwnx_ipc_buf *))
43{
44 struct rwnx_ipc_buf *buf;
45 int i;
46
47 pool->nb = 0;
48
49 /* allocate buf array */
50 pool->buffers = kmalloc(nb * sizeof(struct rwnx_ipc_buf), GFP_KERNEL);
51 if (!pool->buffers) {
52 dev_err(rwnx_hw->dev, "Allocation of buffer array for %s failed\n",
53 pool_name);
54 return -ENOMEM;
55 }
56
57 /* allocate dma pool */
58 pool->pool = dma_pool_create(pool_name, rwnx_hw->dev, buf_size,
59 cache_line_size(), 0);
60 if (!pool->pool) {
61 dev_err(rwnx_hw->dev, "Allocation of dma pool %s failed\n",
62 pool_name);
63 return -ENOMEM;
64 }
65
66 for (i = 0, buf = pool->buffers; i < nb; buf++, i++) {
67 /* allocate a buffer */
68 buf->size = buf_size;
69 buf->addr = dma_pool_alloc(pool->pool, GFP_KERNEL, &buf->dma_addr);
70 if (!buf->addr) {
71 dev_err(rwnx_hw->dev, "Allocation of block %d/%d in %s failed\n",
72 (i + 1), nb, pool_name);
73 return -ENOMEM;
74 }
75 pool->nb++;
76
77 /* reset the buffer */
78 memset(buf->addr, 0, buf_size);
79
80 /* push it to FW */
81 push(rwnx_hw->ipc_env, buf);
82 }
83
84 return 0;
85}
86
87/**
88 * rwnx_ipc_buf_pool_dealloc() - Free all memory allocated for a pool
89 *
90 * @pool: Pool to free
91 *
92 * Must be call once after rwnx_ipc_buf_pool_alloc(), even if it returned
93 * an error
94 */
95static void rwnx_ipc_buf_pool_dealloc(struct rwnx_ipc_buf_pool *pool)
96{
97 struct rwnx_ipc_buf *buf;
98 int i;
99
100 for (i = 0, buf = pool->buffers; i < pool->nb ; buf++, i++) {
101 dma_pool_free(pool->pool, buf->addr, buf->dma_addr);
102 }
103 pool->nb = 0;
104
105 if (pool->pool)
106 dma_pool_destroy(pool->pool);
107 pool->pool = NULL;
108
109 if (pool->buffers)
110 kfree(pool->buffers);
111 pool->buffers = NULL;
112}
113
114/**
115 * rwnx_ipc_buf_alloc - Alloc a single ipc buffer and MAP it for DMA access
116 *
117 * @rwnx_hw: Main driver structure
118 * @buf: IPC buffer to allocate
119 * @buf_size: Size of the buffer to allocate
120 * @dir: DMA direction
121 * @init: Pointer to initial data to write in buffer before DMA sync. Used
122 * only if direction is DMA_TO_DEVICE and it must be at least @buf_size long
123 *
124 * It allocates a buffer, initializes it if @init is set, and map it for DMA
125 * Use @rwnx_ipc_buf_dealloc when this buffer is no longer needed.
126 *
127 * @return: 0 on success and <0 upon error. If error is returned any allocated
128 * memory has been freed.
129 */
130int rwnx_ipc_buf_alloc(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf,
131 size_t buf_size, enum dma_data_direction dir, const void *init)
132{
133 buf->addr = kmalloc(buf_size, GFP_KERNEL);
134 if (!buf->addr)
135 return -ENOMEM;
136
137 buf->size = buf_size;
138
139 if ((dir == DMA_TO_DEVICE) && init) {
140 memcpy(buf->addr, init, buf_size);
141 }
142
143 buf->dma_addr = dma_map_single(rwnx_hw->dev, buf->addr, buf_size, dir);
144 if (dma_mapping_error(rwnx_hw->dev, buf->dma_addr)) {
145 kfree(buf->addr);
146 buf->addr = NULL;
147 return -EIO;
148 }
149
150 return 0;
151}
152
/**
 * rwnx_ipc_buf_dealloc() - Free memory allocated for a single ipc buffer
 *
 * @rwnx_hw: Main driver structure
 * @buf: IPC buffer to free
 *
 * IPC buffer must have been allocated by @rwnx_ipc_buf_alloc() or initialized
 * by @rwnx_ipc_buf_init() and pointing to a buffer allocated by kmalloc.
 * Safe to call on an already-released buffer (buf->addr == NULL is a no-op).
 *
 * NOTE(review): the unmap direction is hard-coded to DMA_TO_DEVICE, while
 * rwnx_ipc_buf_alloc() maps with a caller-supplied direction. For buffers
 * mapped DMA_FROM_DEVICE/DMA_BIDIRECTIONAL this mismatches the DMA API
 * contract — confirm all callers only pass a2e (TO_DEVICE) buffers here.
 */
void rwnx_ipc_buf_dealloc(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf)
{
    if (!buf->addr)
        return;
    dma_unmap_single(rwnx_hw->dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
    kfree(buf->addr);
    buf->addr = NULL;
}
170
171/**
172 * rwnx_ipc_buf_a2e_init - Initialize an Application to Embedded IPC buffer
173 * with a pre-allocated buffer
174 *
175 * @rwnx_hw: Main driver structure
176 * @buf: IPC buffer to initialize
177 * @data: Data buffer to use for the IPC buffer.
178 * @buf_size: Size of the buffer the @data buffer
179 *
180 * Initialize the IPC buffer with the provided buffer and map it for DMA transfer.
181 * The mapping direction is always DMA_TO_DEVICE as this an "a2e" buffer.
182 * Use @rwnx_ipc_buf_dealloc() when this buffer is no longer needed.
183 *
184 * @return: 0 on success and <0 upon error. If error is returned the @data buffer
185 * is freed.
186 */
187int rwnx_ipc_buf_a2e_init(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf,
188 void *data, size_t buf_size)
189{
190 buf->addr = data;
191 buf->size = buf_size;
192 buf->dma_addr = dma_map_single(rwnx_hw->dev, buf->addr, buf_size,
193 DMA_TO_DEVICE);
194 if (dma_mapping_error(rwnx_hw->dev, buf->dma_addr)) {
195 buf->addr = NULL;
196 return -EIO;
197 }
198
199 return 0;
200}
201
202/**
203 * rwnx_ipc_buf_release() - Release DMA mapping for an IPC buffer
204 *
205 * @rwnx_hw: Main driver structure
206 * @buf: IPC buffer to release
207 * @dir: DMA direction.
208 *
209 * This also "release" the IPC buffer structure (i.e. its addr field is reset)
210 * so that it cannot be re-used except to map another buffer.
211 */
212void rwnx_ipc_buf_release(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf,
213 enum dma_data_direction dir)
214{
215 if (!buf->addr)
216 return;
217 dma_unmap_single(rwnx_hw->dev, buf->dma_addr, buf->size, dir);
218 buf->addr = NULL;
219}
220
221/**
222 * rwnx_ipc_buf_e2a_sync() - Synchronize all (or part) of an IPC buffer before
223 * reading content written by the embedded
224 *
225 * @rwnx_hw: Main driver structure
226 * @buf: IPC buffer to sync
227 * @len: Length to read, 0 means the whole buffer
228 */
229void rwnx_ipc_buf_e2a_sync(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf,
230 size_t len)
231{
232 if (!len)
233 len = buf->size;
234
235 dma_sync_single_for_cpu(rwnx_hw->dev, buf->dma_addr, len, DMA_FROM_DEVICE);
236}
237
238/**
239 * rwnx_ipc_buf_e2a_sync_back() - Synchronize back all (or part) of an IPC buffer
240 * to allow embedded updating its content.
241 *
242 * @rwnx_hw: Main driver structure
243 * @buf: IPC buffer to sync
244 * @len: Length to sync back, 0 means the whole buffer
245 *
246 * Must be called after each call to rwnx_ipc_buf_e2a_sync() even if host didn't
247 * modified the content of the buffer.
248 */
249void rwnx_ipc_buf_e2a_sync_back(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf,
250 size_t len)
251{
252 if (!len)
253 len = buf->size;
254
255 dma_sync_single_for_device(rwnx_hw->dev, buf->dma_addr, len, DMA_FROM_DEVICE);
256}
257
258/**
259 * rwnx_ipc_rxskb_alloc() - Allocate a skb for RX path
260 *
261 * @rwnx_hw: Main driver data
262 * @buf: rwnx_ipc_buf structure to store skb address
263 * @skb_size: Size of the buffer to allocate
264 *
265 * Allocate a skb for RX path, meaning that the data buffer is written by the firmware
266 * and needs then to be DMA mapped.
267 *
268 * Note that even though the result is stored in a struct rwnx_ipc_buf, in this case the
269 * rwnx_ipc_buf.addr points to skb structure whereas the rwnx_ipc_buf.dma_addr is the
270 * DMA address of the skb data buffer (i.e. skb->data)
271 */
272static int rwnx_ipc_rxskb_alloc(struct rwnx_hw *rwnx_hw,
273 struct rwnx_ipc_buf *buf, size_t skb_size)
274{
275 struct sk_buff *skb = dev_alloc_skb(skb_size);
276
277 if (unlikely(!skb)) {
278 dev_err(rwnx_hw->dev, "Allocation of RX skb failed\n");
279 buf->addr = NULL;
280 return -ENOMEM;
281 }
282
283 buf->dma_addr = dma_map_single(rwnx_hw->dev, skb->data, skb_size,
284 DMA_FROM_DEVICE);
285 if (unlikely(dma_mapping_error(rwnx_hw->dev, buf->dma_addr))) {
286 dev_err(rwnx_hw->dev, "DMA mapping of RX skb failed\n");
287 dev_kfree_skb(skb);
288 buf->addr = NULL;
289 return -EIO;
290 }
291
292 buf->addr = skb;
293 buf->size = skb_size;
294
295 return 0;
296}
297
/**
 * rwnx_ipc_rxskb_reset_pattern() - Reset pattern in a RX skb or unsupported
 * RX vector buffer
 *
 * @rwnx_hw: Main driver data
 * @buf: RX skb to reset
 * @pattern_offset: Pattern location, in bytes from the start of the buffer
 *
 * Reset the pattern in a RX/unsupported RX vector skb buffer to inform embedded
 * that it has been processed by the host.
 * Pattern is a 32bit value.
 */
static void rwnx_ipc_rxskb_reset_pattern(struct rwnx_hw *rwnx_hw,
                                         struct rwnx_ipc_buf *buf,
                                         size_t pattern_offset)
{
    /* For RX buffers, buf->addr holds the skb pointer itself
     * (see rwnx_ipc_rxskb_alloc) */
    struct sk_buff *skb = buf->addr;
    u32 *pattern = (u32 *)(skb->data + pattern_offset);

    *pattern = 0;
    *(u32 *)(skb->data) = 0; // aic
    /* NOTE(review): only the 4 bytes at @pattern_offset are synced back to
     * the device; the vendor-added clear at offset 0 just above is NOT
     * covered by this sync unless pattern_offset == 0 — confirm the platform
     * is DMA-coherent or whether a second sync is required. */
    dma_sync_single_for_device(rwnx_hw->dev, buf->dma_addr + pattern_offset,
                               sizeof(u32), DMA_FROM_DEVICE);
}
322
323/**
324 * rwnx_ipc_rxskb_dealloc() - Free a skb allocated for the RX path
325 *
326 * @rwnx_hw: Main driver data
327 * @buf: Rx skb to free
328 *
329 * Free a RX skb allocated by @rwnx_ipc_rxskb_alloc
330 */
331static void rwnx_ipc_rxskb_dealloc(struct rwnx_hw *rwnx_hw,
332 struct rwnx_ipc_buf *buf)
333{
334 if (!buf->addr)
335 return;
336
337 dma_unmap_single(rwnx_hw->dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
338 dev_kfree_skb((struct sk_buff *)buf->addr);
339 buf->addr = NULL;
340}
341
342
343/**
344 * rwnx_ipc_unsup_rx_vec_elem_allocs() - Allocate and push an unsupported
345 * RX vector buffer for the FW
346 *
347 * @rwnx_hw: Main driver data
348 * @elem: Pointer to the skb elem that will contain the address of the buffer
349 */
350int rwnx_ipc_unsuprxvec_alloc(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf)
351{
352 int err;
353
354 err = rwnx_ipc_rxskb_alloc(rwnx_hw, buf, rwnx_hw->ipc_env->unsuprxvec_sz);
355 if (err)
356 return err;
357
358 rwnx_ipc_rxskb_reset_pattern(rwnx_hw, buf,
359 offsetof(struct rx_vector_desc, pattern));
360 ipc_host_unsuprxvec_push(rwnx_hw->ipc_env, buf);
361 return 0;
362}
363
364/**
365 * rwnx_ipc_unsuprxvec_repush() - Reset and repush an already allocated buffer
366 * for unsupported RX vector
367 *
368 * @rwnx_hw: Main driver data
369 * @buf: Buf to repush
370 */
371void rwnx_ipc_unsuprxvec_repush(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf)
372{
373 rwnx_ipc_rxskb_reset_pattern(rwnx_hw, buf,
374 offsetof(struct rx_vector_desc, pattern));
375 ipc_host_unsuprxvec_push(rwnx_hw->ipc_env, buf);
376}
377
378/**
379* rwnx_ipc_unsuprxvecs_alloc() - Allocate and push all unsupported RX
380* vector buffers for the FW
381*
382* @rwnx_hw: Main driver data
383*/
384static int rwnx_ipc_unsuprxvecs_alloc(struct rwnx_hw *rwnx_hw)
385{
386 struct rwnx_ipc_buf *buf;
387 int i;
388
389 memset(rwnx_hw->unsuprxvecs, 0, sizeof(rwnx_hw->unsuprxvecs));
390
391 for (i = 0, buf = rwnx_hw->unsuprxvecs; i < ARRAY_SIZE(rwnx_hw->unsuprxvecs); i++, buf++)
392 {
393 if (rwnx_ipc_unsuprxvec_alloc(rwnx_hw, buf)) {
394 dev_err(rwnx_hw->dev, "Failed to allocate unsuprxvec buf %d\n", i + 1);
395 return -ENOMEM;
396 }
397 }
398
399 return 0;
400}
401
402/**
403 * rwnx_ipc_unsuprxvecs_dealloc() - Free all unsupported RX vector buffers
404 * allocated for the FW
405 *
406 * @rwnx_hw: Main driver data
407 */
408static void rwnx_ipc_unsuprxvecs_dealloc(struct rwnx_hw *rwnx_hw)
409{
410 struct rwnx_ipc_buf *buf;
411 int i;
412
413 for (i = 0, buf = rwnx_hw->unsuprxvecs; i < ARRAY_SIZE(rwnx_hw->unsuprxvecs); i++, buf++)
414 {
415 rwnx_ipc_rxskb_dealloc(rwnx_hw, buf);
416 }
417}
418
/**
 * rwnx_ipc_rxbuf_alloc() - Allocate and push a rx buffer for the FW
 *
 * @rwnx_hw: Main driver data
 *
 * Scans rwnx_hw->rxbufs (starting from the last saved index) for a free slot,
 * allocates/maps an RX skb into it, tags it with a hostid derived from the
 * slot index, and pushes it to the firmware. The whole sequence runs under
 * rxbuf_lock (BH-disabled) since it is shared with the RX completion path.
 *
 * @return: 0 on success, -ENOMEM when no slot is free or skb allocation fails.
 */
int rwnx_ipc_rxbuf_alloc(struct rwnx_hw *rwnx_hw)
{
    int err;
    struct rwnx_ipc_buf *buf;
    int nb = 0, idx;

    spin_lock_bh(&rwnx_hw->rxbuf_lock);

    /* Linear probe for a free slot; nb bounds the scan to one full pass */
    idx = rwnx_hw->rxbuf_idx;
    while (rwnx_hw->rxbufs[idx].addr && (nb < RWNX_RXBUFF_MAX)) {
        /* debug trace of occupied slots (vendor leftover) */
        printk("w %d %p\n", idx, rwnx_hw->rxbufs[idx].addr);
        idx = ( idx + 1 ) % RWNX_RXBUFF_MAX;
        nb++;
    }
    if (nb == RWNX_RXBUFF_MAX) {
        /* Every slot is occupied: nothing to give to the firmware */
        dev_err(rwnx_hw->dev, "No more free space for rxbuff");
        printk("No more free space for rxbuff %d %d %d\n", rwnx_hw->rxbuf_idx, atomic_read(&rwnx_hw->rxbuf_cnt), rwnx_hw->ipc_env->rxbuf_idx);
        spin_unlock_bh(&rwnx_hw->rxbuf_lock);
        return -ENOMEM;
    }

    buf = &rwnx_hw->rxbufs[idx];

    //printk("alloc %d\n", idx);
    err = rwnx_ipc_rxskb_alloc(rwnx_hw, buf, rwnx_hw->ipc_env->rxbuf_sz);
    if (err){
        printk("ipc_rxskb_alloc fail %d %d %d %d\n", rwnx_hw->rxbuf_idx, atomic_read(&rwnx_hw->rxbuf_cnt), rwnx_hw->ipc_env->rxbuf_idx, err);
        spin_unlock_bh(&rwnx_hw->rxbuf_lock);
        return err;
    }
    /* Save idx so that on next push the free slot will be found quicker */
    rwnx_hw->rxbuf_idx = ( idx + 1 ) % RWNX_RXBUFF_MAX;
    atomic_inc(&rwnx_hw->rxbuf_cnt);

    /* Clear the validity pattern, tag the buffer with its hostid and hand
     * it to the firmware */
    rwnx_ipc_rxskb_reset_pattern(rwnx_hw, buf, offsetof(struct hw_rxhdr, pattern));
    RWNX_RXBUFF_HOSTID_SET(buf, RWNX_RXBUFF_IDX_TO_HOSTID(idx));
    ipc_host_rxbuf_push(rwnx_hw->ipc_env, buf);
    spin_unlock_bh(&rwnx_hw->rxbuf_lock);

    return 0;
}
467
/**
 * rwnx_ipc_rxbuf_dealloc() - Free a RX buffer for the FW
 *
 * @rwnx_hw: Main driver data
 * @buf: IPC buffer associated to the RX buffer to free
 *
 * Thin wrapper over rwnx_ipc_rxskb_dealloc(): RX buffers are plain RX skbs.
 */
void rwnx_ipc_rxbuf_dealloc(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf)
{
    rwnx_ipc_rxskb_dealloc(rwnx_hw, buf);
}
478
479/**
480 * rwnx_ipc_rxbuf_repush() - Reset and repush an already allocated RX buffer
481 *
482 * @rwnx_hw: Main driver data
483 * @buf: Buf to repush
484 *
485 * In case a skb is not forwarded to upper layer it can be re-used.
486 */
487void rwnx_ipc_rxbuf_repush(struct rwnx_hw *rwnx_hw, struct rwnx_ipc_buf *buf)
488{
489 rwnx_ipc_rxskb_reset_pattern(rwnx_hw, buf, offsetof(struct hw_rxhdr, pattern));
490 ipc_host_rxbuf_push(rwnx_hw->ipc_env, buf);
491}
492
493/**
494 * rwnx_ipc_rxbufs_alloc() - Allocate and push all RX buffer for the FW
495 *
496 * @rwnx_hw: Main driver data
497 */
498static int rwnx_ipc_rxbufs_alloc(struct rwnx_hw *rwnx_hw)
499{
500 int i, nb = rwnx_hw->ipc_env->rxbuf_nb;
501
502 memset(rwnx_hw->rxbufs, 0, sizeof(rwnx_hw->rxbufs));
503
504 for (i = 0; i < nb; i++) {
505 if (rwnx_ipc_rxbuf_alloc(rwnx_hw)) {
506 dev_err(rwnx_hw->dev, "Failed to allocate rx buf %d/%d\n",
507 i + 1, nb);
508 return -ENOMEM;
509 }
510 }
511
512 return 0;
513}
514
515/**
516 * rwnx_ipc_rxbufs_dealloc() - Free all RX buffer allocated for the FW
517 *
518 * @rwnx_hw: Main driver data
519 */
520static void rwnx_ipc_rxbufs_dealloc(struct rwnx_hw *rwnx_hw)
521{
522 struct rwnx_ipc_buf *buf;
523 int i;
524
525 for (i = 0, buf = rwnx_hw->rxbufs; i < ARRAY_SIZE(rwnx_hw->rxbufs); i++, buf++) {
526 rwnx_ipc_rxskb_dealloc(rwnx_hw, buf);
527 }
528}
529
530/**
531 * rwnx_ipc_rxdesc_repush() - Repush a RX descriptor to FW
532 *
533 * @rwnx_hw: Main driver data
534 * @buf: RX descriptor to repush
535 *
536 * Once RX buffer has been received, the RX descriptor used by FW to upload this
537 * buffer can be re-used for another RX buffer.
538 */
539void rwnx_ipc_rxdesc_repush(struct rwnx_hw *rwnx_hw,
540 struct rwnx_ipc_buf *buf)
541{
542 struct rxdesc_tag *rxdesc = buf->addr;
543 rxdesc->status = 0;
544 dma_sync_single_for_device(rwnx_hw->dev, buf->dma_addr,
545 sizeof(struct rxdesc_tag), DMA_BIDIRECTIONAL);
546 ipc_host_rxdesc_push(rwnx_hw->ipc_env, buf);
547}
548
549/**
550 * rwnx_ipc_rxbuf_from_hostid() - Return IPC buffer of a RX buffer from a hostid
551 *
552 * @rwnx_hw: Main driver data
553 * @hostid: Hostid of the RX buffer
554 * @return: Pointer to the RX buffer with the provided hostid and NULL if the
555 * hostid is invalid or no buffer is associated.
556 */
557struct rwnx_ipc_buf *rwnx_ipc_rxbuf_from_hostid(struct rwnx_hw *rwnx_hw, u32 hostid)
558{
559 int rxbuf_idx = RWNX_RXBUFF_HOSTID_TO_IDX(hostid);
560
561 if (RWNX_RXBUFF_VALID_IDX(rxbuf_idx)) {
562 struct rwnx_ipc_buf *buf = &rwnx_hw->rxbufs[rxbuf_idx];
563 if (buf->addr && (RWNX_RXBUFF_HOSTID_GET(buf) == hostid))
564 return buf;
565
566 dev_err(rwnx_hw->dev, "Invalid Rx buff: hostid=%d addr=%p hostid_in_buff=%d\n",
567 hostid, buf->addr, (buf->addr) ? RWNX_RXBUFF_HOSTID_GET(buf): -1);
568
569 if (buf->addr)
570 rwnx_ipc_rxbuf_dealloc(rwnx_hw, buf);
571 }
572
573 dev_err(rwnx_hw->dev, "RX Buff invalid hostid [%d]\n", hostid);
574 return NULL;
575}
576
577/**
578 * rwnx_elems_deallocs() - Deallocate IPC storage elements.
579 * @rwnx_hw: Main driver data
580 *
581 * This function deallocates all the elements required for communications with
582 * LMAC, such as Rx Data elements, MSGs elements, ...
583 * This function should be called in correspondence with the allocation function.
584 */
585static void rwnx_elems_deallocs(struct rwnx_hw *rwnx_hw)
586{
587 printk("rwnx_elems_deallocs 1\n");
588 rwnx_ipc_rxbufs_dealloc(rwnx_hw);
589 rwnx_ipc_unsuprxvecs_dealloc(rwnx_hw);
590#ifdef CONFIG_RWNX_FULLMAC
591 rwnx_ipc_buf_pool_dealloc(&rwnx_hw->rxdesc_pool);
592#endif
593 rwnx_ipc_buf_pool_dealloc(&rwnx_hw->msgbuf_pool);
594 rwnx_ipc_buf_pool_dealloc(&rwnx_hw->dbgbuf_pool);
595 rwnx_ipc_buf_pool_dealloc(&rwnx_hw->radar_pool);
596 rwnx_ipc_buf_pool_dealloc(&rwnx_hw->txcfm_pool);
597 rwnx_ipc_buf_dealloc(rwnx_hw, &rwnx_hw->tx_pattern);
598 rwnx_ipc_buf_dealloc(rwnx_hw, &rwnx_hw->dbgdump.buf);
599
600 printk("rwnx_elems_deallocs end\n");
601}
602
/**
 * rwnx_elems_allocs() - Allocate IPC storage elements.
 * @rwnx_hw: Main driver data
 *
 * This function allocates all the elements required for communications with
 * LMAC, such as Rx Data elements, MSGs elements, ...
 * This function should be called in correspondence with the deallocation function.
 *
 * On any failure it jumps to err_alloc, which frees everything allocated so
 * far via rwnx_elems_deallocs() and returns -ENOMEM.
 */
static int rwnx_elems_allocs(struct rwnx_hw *rwnx_hw)
{
    RWNX_DBG(RWNX_FN_ENTRY_STR);

    /* All IPC buffers are mapped below 4GB for the device */
    if (dma_set_coherent_mask(rwnx_hw->dev, DMA_BIT_MASK(32)))
        goto err_alloc;

    /* Pools of E2A message, debug-message and radar-pulse buffers */
    if (rwnx_ipc_buf_pool_alloc(rwnx_hw, &rwnx_hw->msgbuf_pool,
                                IPC_MSGE2A_BUF_CNT,
                                sizeof(struct ipc_e2a_msg),
                                "rwnx_ipc_msgbuf_pool",
                                ipc_host_msgbuf_push))
        goto err_alloc;

    if (rwnx_ipc_buf_pool_alloc(rwnx_hw, &rwnx_hw->dbgbuf_pool,
                                IPC_DBGBUF_CNT,
                                sizeof(struct ipc_dbg_msg),
                                "rwnx_ipc_dbgbuf_pool",
                                ipc_host_dbgbuf_push))
        goto err_alloc;

    if (rwnx_ipc_buf_pool_alloc(rwnx_hw, &rwnx_hw->radar_pool,
                                IPC_RADARBUF_CNT,
                                sizeof(struct radar_pulse_array_desc),
                                "rwnx_ipc_radar_pool",
                                ipc_host_radar_push))
        goto err_alloc;

    if (rwnx_ipc_unsuprxvecs_alloc(rwnx_hw))
        goto err_alloc;


    /* Single a2e buffer holding the TX pattern word */
    if (rwnx_ipc_buf_a2e_alloc(rwnx_hw, &rwnx_hw->tx_pattern, sizeof(u32),
                               &rwnx_tx_pattern))
        goto err_alloc;

    ipc_host_pattern_push(rwnx_hw->ipc_env, &rwnx_hw->tx_pattern);

    /* Debug-dump buffer allocation disabled in this vendor drop */
    #if 0
    printk("%s: 8\n", __func__);

    if (rwnx_ipc_buf_e2a_alloc(rwnx_hw, &rwnx_hw->dbgdump.buf,
                               sizeof(struct dbg_debug_dump_tag)))
        goto err_alloc;


    printk("%s: 9\n", __func__);
    ipc_host_dbginfo_push(rwnx_hw->ipc_env, &rwnx_hw->dbgdump.buf);

    /*
     * Note that the RX buffers are no longer allocated here as their size depends on the
     * FW configuration, which is not available at that time.
     * They will be allocated when checking the parameter compatibility between the driver
     * and the underlying components (i.e. during the rwnx_handle_dynparams() execution)
     */
    printk("%s: 10\n", __func__);

    #endif
#ifdef CONFIG_RWNX_FULLMAC
    if (rwnx_ipc_buf_pool_alloc(rwnx_hw, &rwnx_hw->rxdesc_pool,
                                rwnx_hw->ipc_env->rxdesc_nb,
                                sizeof(struct rxdesc_tag),
                                "rwnx_ipc_rxdesc_pool",
                                ipc_host_rxdesc_push))
        goto err_alloc;

#endif /* CONFIG_RWNX_FULLMAC */

    return 0;

err_alloc:
    dev_err(rwnx_hw->dev, "Error while allocating IPC buffers\n");
    rwnx_elems_deallocs(rwnx_hw);
    return -ENOMEM;
}
686
687/**
688 * rwnx_ipc_msg_push() - Push a msg to IPC queue
689 *
690 * @rwnx_hw: Main driver data
691 * @msg_buf: Pointer to message
692 * @len: Size, in bytes, of message
693 */
694void rwnx_ipc_msg_push(struct rwnx_hw *rwnx_hw, void *msg_buf, uint16_t len)
695{
696 ipc_host_msg_push(rwnx_hw->ipc_env, msg_buf, len);
697}
698
/**
 * rwnx_ipc_txdesc_push() - Push a txdesc to FW
 *
 * @rwnx_hw: Main driver data
 * @sw_txhdr: Pointer to the SW TX header associated to the descriptor to push
 * @skb: TX Buffer associated. Pointer saved in ipc env to retrieve it upon confirmation.
 * @hw_queue: Hw queue to push txdesc to
 *
 * NOTE(review): this whole function is compiled out (#if 0) in this vendor
 * drop — the TX push path presumably lives elsewhere. Kept as-is.
 */
#if 0
void rwnx_ipc_txdesc_push(struct rwnx_hw *rwnx_hw, struct rwnx_sw_txhdr *sw_txhdr,
                          struct sk_buff *skb, int hw_queue)
{
    struct txdesc_host *txdesc_host = &sw_txhdr->desc;
    struct rwnx_ipc_buf *ipc_desc = &sw_txhdr->ipc_desc;

    txdesc_host->ctrl.hwq = hw_queue;
    txdesc_host->api.host.hostid = ipc_host_tx_host_ptr_to_id(rwnx_hw->ipc_env, skb);
    txdesc_host->ready = 0xFFFFFFFF;
    if (!txdesc_host->api.host.hostid) {
        dev_err(rwnx_hw->dev, "No more tx_hostid available \n");
        return;
    }

    if (rwnx_ipc_buf_a2e_init(rwnx_hw, ipc_desc, txdesc_host, sizeof(*txdesc_host)))
        return ;

    ipc_host_txdesc_push(rwnx_hw->ipc_env, ipc_desc);
}
#endif
728
/**
 * rwnx_ipc_get_skb_from_cfm() - Retrieve the TX buffer associated to a confirmation buffer
 *
 * @rwnx_hw: Main driver data
 * @buf: IPC buffer for the confirmation buffer
 * @return: Pointer to TX buffer associated to this confirmation and NULL if confirmation
 * has not yet been updated by firmware
 *
 * To ensure that a confirmation has been processed by firmware check if the hostid field
 * has been updated. If this is the case retrieve TX buffer from it and reset it, otherwise
 * simply return NULL.
 *
 * The sync / sync_back pair brackets every access to the shared confirmation:
 * ownership is taken from the device before reading and always handed back,
 * whatever the outcome.
 */
struct sk_buff *rwnx_ipc_get_skb_from_cfm(struct rwnx_hw *rwnx_hw,
                                          struct rwnx_ipc_buf *buf)
{
    struct sk_buff *skb = NULL;
    struct tx_cfm_tag *cfm = buf->addr;

    /* get ownership of confirmation */
    rwnx_ipc_buf_e2a_sync(rwnx_hw, buf, 0);

    /* Check host id in the confirmation. */
    /* If 0 it means that this confirmation has not yet been updated by firmware */
    if (cfm->hostid) {
        skb = ipc_host_tx_host_id_to_ptr(rwnx_hw->ipc_env, cfm->hostid);
        if (unlikely(!skb)) {
            dev_err(rwnx_hw->dev, "Cannot retrieve skb from cfm=%p/0x%llx, hostid %d in confirmation\n",
                    buf->addr, buf->dma_addr, cfm->hostid);
        } else {
            /* Unmap TX descriptor */
            struct rwnx_ipc_buf *ipc_desc = &((struct rwnx_txhdr *)skb->data)->sw_hdr->ipc_desc;
            rwnx_ipc_buf_a2e_release(rwnx_hw, ipc_desc);
        }

        /* Reset hostid so the same confirmation is not processed twice */
        cfm->hostid = 0;
    }

    /* always re-give ownership to firmware. */
    rwnx_ipc_buf_e2a_sync_back(rwnx_hw, buf, 0);

    return skb;
}
771
772/**
773 * rwnx_ipc_sta_buffer_init - Initialize counter of buffered data for a given sta
774 *
775 * @rwnx_hw: Main driver data
776 * @sta_idx: Index of the station to initialize
777 */
778void rwnx_ipc_sta_buffer_init(struct rwnx_hw *rwnx_hw, int sta_idx)
779{
780 int i;
781 volatile u32_l *buffered;
782
783 if (sta_idx >= NX_REMOTE_STA_MAX)
784 return;
785
786 buffered = rwnx_hw->ipc_env->shared->buffered[sta_idx];
787
788 for (i = 0; i < TID_MAX; i++) {
789 *buffered++ = 0;
790 }
791}
792
793/**
794 * rwnx_ipc_sta_buffer - Update counter of buffered data for a given sta
795 *
796 * @rwnx_hw: Main driver data
797 * @sta: Managed station
798 * @tid: TID on which data has been added or removed
799 * @size: Size of data to add (or remove if < 0) to STA buffer.
800 */
801void rwnx_ipc_sta_buffer(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta, int tid, int size)
802{
803 u32_l *buffered;
804
805 if (!sta)
806 return;
807
808 if ((sta->sta_idx >= NX_REMOTE_STA_MAX) || (tid >= TID_MAX))
809 return;
810
811 buffered = &rwnx_hw->ipc_env->shared->buffered[sta->sta_idx][tid];
812
813 if (size < 0) {
814 size = -size;
815 if (*buffered < size)
816 *buffered = 0;
817 else
818 *buffered -= size;
819 } else {
820 // no test on overflow
821 *buffered += size;
822 }
823}
824
/**
 * rwnx_msgind() - IRQ handler callback for %IPC_IRQ_E2A_MSG
 *
 * @pthis: Pointer to main driver data
 * @arg: Pointer to IPC buffer from msgbuf_pool
 *
 * @return: 0 when a valid message was consumed and the buffer re-pushed,
 * (u8)-1 when the buffer did not carry a valid message.
 */
static u8 rwnx_msgind(void *pthis, void *arg)
{
    struct rwnx_hw *rwnx_hw = pthis;
    struct rwnx_ipc_buf *buf = arg;
    struct ipc_e2a_msg *msg = buf->addr;
    u8 ret = 0;

    REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_MSGIND);

    /* Look for pattern which means that this hostbuf has been used for a MSG */
    if (msg->pattern != IPC_MSGE2A_VALID_PATTERN) {
        ret = -1;
        goto msg_no_push;
    }
    /* Relay further actions to the msg parser */
    rwnx_rx_handle_msg(rwnx_hw, msg);

    /* Reset the msg buffer and re-use it */
    msg->pattern = 0;
    /* wmb() orders the pattern clear before the push that hands the buffer
     * back to the device */
    wmb();

    /* Push back the buffer to the LMAC */
    ipc_host_msgbuf_push(rwnx_hw->ipc_env, buf);

msg_no_push:
    REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_MSGIND);
    return ret;
}
859
860/**
861 * rwnx_msgackind() - IRQ handler callback for %IPC_IRQ_E2A_MSG_ACK
862 *
863 * @pthis: Pointer to main driver data
864 * @hostid: Pointer to command acknowledged
865 */
866static u8 rwnx_msgackind(void *pthis, void *hostid)
867{
868 struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)pthis;
869 rwnx_hw->cmd_mgr->llind(rwnx_hw->cmd_mgr, (struct rwnx_cmd *)hostid);
870 return -1;
871}
872
/**
 * rwnx_radarind() - IRQ handler callback for %IPC_IRQ_E2A_RADAR
 *
 * @pthis: Pointer to main driver data
 * @arg: Pointer to IPC buffer from radar_pool
 *
 * @return: 0 when pulses were consumed and the buffer re-pushed, (u8)-1 when
 * the buffer was empty or CONFIG_RWNX_RADAR is not compiled in.
 */
static u8 rwnx_radarind(void *pthis, void *arg)
{
#ifdef CONFIG_RWNX_RADAR
    struct rwnx_hw *rwnx_hw = pthis;
    struct rwnx_ipc_buf *buf = arg;
    struct radar_pulse_array_desc *pulses = buf->addr;
    u8 ret = 0;
    int i;

    /* Look for pulse count meaning that this hostbuf contains RADAR pulses */
    if (pulses->cnt == 0) {
        ret = -1;
        goto radar_no_push;
    }

    if (rwnx_radar_detection_is_enable(&rwnx_hw->radar, pulses->idx)) {
        /* Save the received pulses only if radar detection is enabled */
        for (i = 0; i < pulses->cnt; i++) {
            struct rwnx_radar_pulses *p = &rwnx_hw->radar.pulses[pulses->idx];

            /* Circular buffer: oldest pulse is overwritten when full */
            p->buffer[p->index] = pulses->pulse[i];
            p->index = (p->index + 1) % RWNX_RADAR_PULSE_MAX;
            if (p->count < RWNX_RADAR_PULSE_MAX)
                p->count++;
        }

        /* Defer pulse processing in separate work */
        if (! work_pending(&rwnx_hw->radar.detection_work))
            schedule_work(&rwnx_hw->radar.detection_work);
    }

    /* Reset the radar buffer and re-use it */
    pulses->cnt = 0;
    /* wmb() orders the count clear before handing the buffer back */
    wmb();

    /* Push back the buffer to the LMAC */
    ipc_host_radar_push(rwnx_hw->ipc_env, buf);

radar_no_push:
    return ret;
#else
    return -1;
#endif
}
923
/**
 * rwnx_prim_tbtt_ind() - IRQ handler callback for %IPC_IRQ_E2A_TBTT_PRIM
 *
 * @pthis: Pointer to main driver data
 *
 * NOTE(review): compiled out (#if 0) in this vendor drop; the inner body is
 * itself disabled too. Kept for reference.
 */
#if 0
static void rwnx_prim_tbtt_ind(void *pthis)
{
#if 0
    struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)pthis;
    rwnx_tx_bcns(rwnx_hw);
#endif
}
#endif
/**
 * rwnx_sec_tbtt_ind() - IRQ handler callback for %IPC_IRQ_E2A_TBTT_SEC
 *
 * @pthis: Pointer to main driver data
 *
 * NOTE(review): compiled out (#if 0) and empty in this vendor drop.
 */
#if 0
static void rwnx_sec_tbtt_ind(void *pthis)
{
}
#endif
/**
 * rwnx_dbgind() - IRQ handler callback for %IPC_IRQ_E2A_DBG
 *
 * @pthis: Pointer to main driver data
 * @arg: Pointer to IPC buffer from dbgbuf_pool
 *
 * @return: 0 when a valid debug string was printed and the buffer re-pushed,
 * (u8)-1 when the buffer did not carry a valid message.
 */
static u8 rwnx_dbgind(void *pthis, void *arg)
{
    struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)pthis;
    struct rwnx_ipc_buf *buf = arg;
    struct ipc_dbg_msg *dbg_msg = buf->addr;
    u8 ret = 0;

    REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_DBGIND);

    /* Look for pattern which means that this hostbuf has been used for a MSG */
    if (dbg_msg->pattern != IPC_DBG_VALID_PATTERN) {
        ret = -1;
        goto dbg_no_push;
    }

    /* Display the string */
    printk("%s %s", (char *)FW_STR, (char *)dbg_msg->string);

    /* Reset the msg buffer and re-use it */
    dbg_msg->pattern = 0;
    /* wmb() orders the pattern clear before handing the buffer back */
    wmb();

    /* Push back the buffer to the LMAC */
    ipc_host_dbgbuf_push(rwnx_hw->ipc_env, buf);

dbg_no_push:
    REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_DBGIND);

    return ret;
}
984
985/**
986 * rwnx_ipc_rxbuf_init() - Allocate and initialize RX buffers.
987 *
988 * @rwnx_hw: Main driver data
989 * @rxbuf_sz: Size of the buffer to be allocated
990 *
991 * This function updates the RX buffer size according to the parameter and allocates the
992 * RX buffers
993 */
994int rwnx_ipc_rxbuf_init(struct rwnx_hw *rwnx_hw, uint32_t rxbuf_sz)
995{
996 rwnx_hw->ipc_env->rxbuf_sz = rxbuf_sz;
997 return rwnx_ipc_rxbufs_alloc(rwnx_hw);
998}
999
1000/**
1001 * rwnx_ipc_init() - Initialize IPC interface.
1002 *
1003 * @rwnx_hw: Main driver data
1004 * @shared_ram: Pointer to shared memory that contains IPC shared struct
1005 *
1006 * This function initializes IPC interface by registering callbacks, setting
1007 * shared memory area and calling IPC Init function.
1008 * It should be called only once during driver's lifetime.
1009 */
1010int rwnx_ipc_init(struct rwnx_hw *rwnx_hw, u8 *shared_ram)
1011{
1012 struct ipc_host_cb_tag cb;
1013 int res;
1014
1015 RWNX_DBG(RWNX_FN_ENTRY_STR);
1016
1017 /* initialize the API interface */
1018 cb.recv_data_ind = rwnx_rxdataind;
1019 cb.recv_radar_ind = rwnx_radarind;
1020 cb.recv_msg_ind = rwnx_msgind;
1021 cb.recv_msgack_ind = rwnx_msgackind;
1022 cb.recv_dbg_ind = rwnx_dbgind;
1023 cb.send_data_cfm = rwnx_txdatacfm;
1024 cb.recv_unsup_rx_vec_ind = rwnx_unsup_rx_vec_ind;
1025
1026 /* set the IPC environment */
1027 rwnx_hw->ipc_env = (struct ipc_host_env_tag *)
1028 kzalloc(sizeof(struct ipc_host_env_tag), GFP_KERNEL);
1029
1030 if (!rwnx_hw->ipc_env)
1031 return -ENOMEM;
1032
1033 /* call the initialization of the IPC */
1034 ipc_host_init(rwnx_hw->ipc_env, &cb,
1035 (struct ipc_shared_env_tag *)shared_ram, rwnx_hw);
1036
1037 rwnx_cmd_mgr_init(rwnx_hw->cmd_mgr);
1038
1039 res = rwnx_elems_allocs(rwnx_hw);
1040 if (res) {
1041 kfree(rwnx_hw->ipc_env);
1042 rwnx_hw->ipc_env = NULL;
1043 }
1044
1045 return res;
1046}
1047
1048/**
1049 * rwnx_ipc_deinit() - Release IPC interface
1050 *
1051 * @rwnx_hw: Main driver data
1052 */
1053void rwnx_ipc_deinit(struct rwnx_hw *rwnx_hw)
1054{
1055 RWNX_DBG(RWNX_FN_ENTRY_STR);
1056
1057 rwnx_ipc_tx_drain(rwnx_hw);
1058 rwnx_cmd_mgr_deinit(rwnx_hw->cmd_mgr);
1059 rwnx_elems_deallocs(rwnx_hw);
1060 if (rwnx_hw->ipc_env) {
1061 kfree(rwnx_hw->ipc_env);
1062 rwnx_hw->ipc_env = NULL;
1063 }
1064}
1065
1066/**
1067 * rwnx_ipc_start() - Start IPC interface
1068 *
1069 * @rwnx_hw: Main driver data
1070 */
void rwnx_ipc_start(struct rwnx_hw *rwnx_hw)
{
    /* Unmask all firmware-to-host (E2A) interrupt sources */
    ipc_host_enable_irq(rwnx_hw->ipc_env, IPC_IRQ_E2A_ALL);
}
1075
1076/**
1077 * rwnx_ipc_stop() - Stop IPC interface
1078 *
1079 * @rwnx_hw: Main driver data
1080 */
void rwnx_ipc_stop(struct rwnx_hw *rwnx_hw)
{
    /* Mask all firmware-to-host (E2A) interrupt sources */
    ipc_host_disable_irq(rwnx_hw->ipc_env, IPC_IRQ_E2A_ALL);
}
1085
1086/**
1087 * rwnx_ipc_tx_drain() - Flush IPC TX buffers
1088 *
1089 * @rwnx_hw: Main driver data
1090 *
 * This assumes the LMAC is idle TX-wise, and that no TX race can occur
 * until the LMAC is back up TX-wise.
 * This also lets both IPC sides remain in sync before resetting the LMAC,
 * e.g. with rwnx_send_reset.
1095 */
1096void rwnx_ipc_tx_drain(struct rwnx_hw *rwnx_hw)
1097{
1098 struct sk_buff *skb;
1099
1100 RWNX_DBG(RWNX_FN_ENTRY_STR);
1101
1102 if (!rwnx_hw->ipc_env) {
1103 printk(KERN_CRIT "%s: bypassing (restart must have failed)\n", __func__);
1104 return;
1105 }
1106
1107 while ((skb = ipc_host_tx_flush(rwnx_hw->ipc_env))) {
1108 struct rwnx_sw_txhdr *sw_txhdr = ((struct rwnx_txhdr *)skb->data)->sw_hdr;
1109
1110#ifdef CONFIG_RWNX_AMSDUS_TX
1111 if (sw_txhdr->desc.api.host.packet_cnt > 1) {
1112 struct rwnx_amsdu_txhdr *amsdu_txhdr;
1113 list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) {
1114 rwnx_ipc_buf_a2e_release(rwnx_hw, &amsdu_txhdr->ipc_data);
1115 dev_kfree_skb_any(amsdu_txhdr->skb);
1116 }
1117 }
1118#endif
1119 rwnx_ipc_buf_a2e_release(rwnx_hw, &sw_txhdr->ipc_data);
1120 kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
1121 skb_pull(skb, RWNX_TX_HEADROOM);
1122 dev_kfree_skb_any(skb);
1123 }
1124}
1125
1126/**
1127 * rwnx_ipc_tx_pending() - Check if TX frames are pending at FW level
1128 *
1129 * @rwnx_hw: Main driver data
1130 */
bool rwnx_ipc_tx_pending(struct rwnx_hw *rwnx_hw)
{
    /* Thin wrapper: delegates the check to the IPC host layer */
    return ipc_host_tx_frames_pending(rwnx_hw->ipc_env);
}
1135
1136/**
1137 * rwnx_error_ind() - %DBG_ERROR_IND message callback
1138 *
1139 * @rwnx_hw: Main driver data
1140 *
1141 * This function triggers the UMH script call that will indicate to the user
1142 * space the error that occurred and stored the debug dump. Once the UMH script
1143 * is executed, the rwnx_umh_done() function has to be called.
1144 */
1145void rwnx_error_ind(struct rwnx_hw *rwnx_hw)
1146{
1147 struct rwnx_ipc_buf *buf = &rwnx_hw->dbgdump.buf;
1148 struct dbg_debug_dump_tag *dump = buf->addr;
1149
1150 rwnx_ipc_buf_e2a_sync(rwnx_hw, buf, 0);
1151 dev_err(rwnx_hw->dev, "(type %d): dump received\n",
1152 dump->dbg_info.error_type);
1153#ifdef CONFIG_RWNX_DEBUGFS
1154 rwnx_hw->debugfs.trace_prst = true;
1155#endif
1156 //rwnx_trigger_um_helper(&rwnx_hw->debugfs);
1157}
1158
1159/**
1160 * rwnx_umh_done() - Indicate User Mode helper finished
1161 *
1162 * @rwnx_hw: Main driver data
1163 *
1164 */
1165
1166extern void aicwf_pcie_host_init(struct ipc_host_env_tag *env, void *cb, struct ipc_shared_env_tag *shared_env_ptr, void *pthis);
void rwnx_umh_done(struct rwnx_hw *rwnx_hw)
{
    /* Ignore the notification if the device is not started */
    if (!test_bit(RWNX_DEV_STARTED, &rwnx_hw->flags))
        return;

    /* this assumes error_ind won't trigger before ipc_host_dbginfo_push
       is called and so does not irq protect (TODO) against error_ind */
#ifdef CONFIG_RWNX_DEBUGFS
    rwnx_hw->debugfs.trace_prst = false;
#endif
    /* Hand the debug dump buffer back to the firmware for reuse */
    ipc_host_dbginfo_push(rwnx_hw->ipc_env, &rwnx_hw->dbgdump.buf);
}
1179
1180int rwnx_init_aic(struct rwnx_hw *rwnx_hw)
1181{
1182 int res = 0;
1183 struct ipc_shared_env_tag *shared_env = NULL;
1184
1185 RWNX_DBG(RWNX_FN_ENTRY_STR);
1186#ifdef AICWF_SDIO_SUPPORT
1187 aicwf_sdio_host_init(&(rwnx_hw->sdio_env), NULL, NULL, rwnx_hw);
1188#endif
1189
1190#ifdef AICWF_USB_SUPPORT
1191 aicwf_usb_host_init(&(rwnx_hw->usb_env), NULL, NULL, rwnx_hw);
1192#endif
1193
1194#ifdef AICWF_PCIE_SUPPORT
1195 rwnx_hw->ipc_env = (struct ipc_host_env_tag *) kzalloc(sizeof(struct ipc_host_env_tag), GFP_KERNEL);
1196
1197 if (!rwnx_hw->ipc_env){
1198 return -ENOMEM;
1199 }
1200
1201 if(rwnx_hw->pcidev->chip_id == PRODUCT_ID_AIC8800D80) {
1202 if (rwnx_hw->pcidev->bar_count == 1) {
1203 aicwf_pcie_host_init(rwnx_hw->ipc_env, NULL, (struct ipc_shared_env_tag *)(rwnx_hw->pcidev->emb_shrm), rwnx_hw);
1204 shared_env = (struct ipc_shared_env_tag *)(rwnx_hw->pcidev->emb_shrm);
1205 } else {
1206 aicwf_pcie_host_init(rwnx_hw->ipc_env, NULL, (struct ipc_shared_env_tag *)(rwnx_hw->pcidev->pci_bar0_vaddr + 0x1DC000), rwnx_hw);
1207 shared_env = (struct ipc_shared_env_tag *)(rwnx_hw->pcidev->pci_bar0_vaddr + 0x1DC000);
1208 }
1209 } else {
1210 aicwf_pcie_host_init(rwnx_hw->ipc_env, NULL, (struct ipc_shared_env_tag *)(rwnx_hw->pcidev->emb_shrm), rwnx_hw);
1211 shared_env = (struct ipc_shared_env_tag *)(rwnx_hw->pcidev->emb_shrm);
1212 }
1213
1214 res = rwnx_elems_allocs(rwnx_hw);
1215 if (res) {
1216 kfree(rwnx_hw->ipc_env);
1217 rwnx_hw->ipc_env = NULL;
1218 }
1219 printk("sizeof struct ipc_shared_env_tag is %ld byte, offset=%ld, %ld, %ld , txdesc %lx\n", sizeof(struct ipc_shared_env_tag),
1220 (u8 *)&shared_env->host_rxdesc - (u8 *)shared_env,
1221 (u8 *)&shared_env->host_rxbuf - (u8 *)shared_env,
1222 (u8 *)&shared_env->buffered - (u8 *)shared_env,
1223 (u8 *)&rwnx_hw->ipc_env->shared->txdesc - (u8 *)shared_env);
1224 printk("txdesc size %ld\n", sizeof(rwnx_hw->ipc_env->shared->txdesc));
1225
1226#endif
1227 rwnx_cmd_mgr_init(rwnx_hw->cmd_mgr);
1228
1229 return res;
1230}
1231
1232void rwnx_aic_deinit(struct rwnx_hw *rwnx_hw)
1233{
1234 RWNX_DBG(RWNX_FN_ENTRY_STR);
1235
1236 //rwnx_ipc_tx_drain(rwnx_hw);
1237 rwnx_elems_deallocs(rwnx_hw);
1238 if (rwnx_hw->ipc_env) {
1239 kfree(rwnx_hw->ipc_env);
1240 rwnx_hw->ipc_env = NULL;
1241 }
1242}
1243
1244