/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
| 23 | |
| 24 | #include <linux/mempool.h> |
| 25 | #include <linux/slab.h> |
| 26 | #include <linux/pci.h> |
| 27 | #include <linux/interrupt.h> |
| 28 | |
| 29 | #include <scsi/scsi.h> |
| 30 | #include <scsi/scsi_device.h> |
| 31 | #include <scsi/scsi_transport_fc.h> |
| 32 | #include <scsi/fc/fc_fs.h> |
| 33 | |
| 34 | #include <linux/nvme-fc-driver.h> |
| 35 | |
| 36 | #include "lpfc_hw4.h" |
| 37 | #include "lpfc_hw.h" |
| 38 | #include "lpfc_sli.h" |
| 39 | #include "lpfc_sli4.h" |
| 40 | #include "lpfc_nl.h" |
| 41 | #include "lpfc_disc.h" |
| 42 | #include "lpfc.h" |
| 43 | #include "lpfc_scsi.h" |
| 44 | #include "lpfc_nvme.h" |
| 45 | #include "lpfc_nvmet.h" |
| 46 | #include "lpfc_crtn.h" |
| 47 | #include "lpfc_logmsg.h" |
| 48 | |
/* Capacities of the driver's pre-allocated reserve pools. */
#define LPFC_MBUF_POOL_SIZE	64	/* max elements in MBUF safety pool */
#define LPFC_MEM_POOL_SIZE	64	/* max elem in non-DMA safety pool */
#define LPFC_DEVICE_DATA_POOL_SIZE 64	/* max elements in device data pool */
| 52 | |
| 53 | int |
| 54 | lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { |
| 55 | size_t bytes; |
| 56 | int max_xri = phba->sli4_hba.max_cfg_param.max_xri; |
| 57 | |
| 58 | if (max_xri <= 0) |
| 59 | return -ENOMEM; |
| 60 | bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * |
| 61 | sizeof(unsigned long); |
| 62 | phba->cfg_rrq_xri_bitmap_sz = bytes; |
| 63 | phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, |
| 64 | bytes); |
| 65 | if (!phba->active_rrq_pool) |
| 66 | return -ENOMEM; |
| 67 | else |
| 68 | return 0; |
| 69 | } |
| 70 | |
/**
 * lpfc_mem_alloc - create and allocate all PCI and memory pools
 * @phba: HBA to allocate pools for
 * @align: alignment, in bytes, for the DMA buffer pools
 *
 * Description: Creates the lpfc_mbuf_pool DMA pool and fills a small
 * "safety" reserve of pre-allocated mbufs, then creates kmalloc-backed
 * mempools for mailbox commands (LPFC_MBOXQ_t) and node list entries
 * (struct lpfc_nodelist).  On SLI4 HBAs it additionally creates the RRQ
 * mempool and the header/data receive buffer DMA pools (lpfc_hrb_pool /
 * lpfc_drb_pool); on earlier SLI revisions it creates the single
 * lpfc_hbq_pool instead.  When cfg_EnableXLane is set it also creates the
 * device data mempool.
 *
 * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
 * allocation fails, everything allocated so far is unwound through the
 * goto chain at the bottom before returning.
 *
 * Returns:
 *   0 on success
 *   -ENOMEM on failure (if any memory allocations fail)
 **/
int
lpfc_mem_alloc(struct lpfc_hba *phba, int align)
{
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	int i;


	phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
							LPFC_BPL_SIZE,
							align, 0);
	if (!phba->lpfc_mbuf_pool)
		goto fail;

	/* Backing array for the mbuf safety reserve. */
	pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
				       sizeof(struct lpfc_dmabuf),
				       GFP_KERNEL);
	if (!pool->elements)
		goto fail_free_lpfc_mbuf_pool;

	pool->max_count = 0;
	pool->current_count = 0;
	/* Pre-allocate the safety-pool mbufs; the counters track how many
	 * succeeded so the failure path can free exactly those.
	 */
	for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
		pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
				       GFP_KERNEL, &pool->elements[i].phys);
		if (!pool->elements[i].virt)
			goto fail_free_mbuf_pool;
		pool->max_count++;
		pool->current_count++;
	}

	phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
							 sizeof(LPFC_MBOXQ_t));
	if (!phba->mbox_mem_pool)
		goto fail_free_mbuf_pool;

	phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_nodelist));
	if (!phba->nlp_mem_pool)
		goto fail_free_mbox_pool;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI4: RRQ mempool plus separate header/data receive
		 * buffer DMA pools; the legacy HBQ pool is unused.
		 */
		phba->rrq_pool =
			mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
						sizeof(struct lpfc_node_rrq));
		if (!phba->rrq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
					      &phba->pcidev->dev,
					      LPFC_HDR_BUF_SIZE, align, 0);
		if (!phba->lpfc_hrb_pool)
			goto fail_free_rrq_mem_pool;

		phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
					      &phba->pcidev->dev,
					      LPFC_DATA_BUF_SIZE, align, 0);
		if (!phba->lpfc_drb_pool)
			goto fail_free_hrb_pool;
		phba->lpfc_hbq_pool = NULL;
	} else {
		/* SLI3 and earlier: one combined HBQ buffer pool. */
		phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
			&phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
		if (!phba->lpfc_hbq_pool)
			goto fail_free_nlp_mem_pool;
		phba->lpfc_hrb_pool = NULL;
		phba->lpfc_drb_pool = NULL;
	}

	if (phba->cfg_EnableXLane) {
		phba->device_data_mem_pool = mempool_create_kmalloc_pool(
						LPFC_DEVICE_DATA_POOL_SIZE,
						sizeof(struct lpfc_device_data));
		if (!phba->device_data_mem_pool)
			goto fail_free_drb_pool;
	} else {
		phba->device_data_mem_pool = NULL;
	}

	return 0;
	/* Unwind chain: each label frees what the step above it allocated
	 * and then falls through to release the earlier allocations.
	 */
 fail_free_drb_pool:
	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;
 fail_free_hrb_pool:
	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;
 fail_free_rrq_mem_pool:
	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;
 fail_free_nlp_mem_pool:
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
 fail_free_mbox_pool:
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;
 fail_free_mbuf_pool:
	/* i is the count of successfully allocated safety-pool mbufs. */
	while (i--)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
						 pool->elements[i].phys);
	kfree(pool->elements);
 fail_free_lpfc_mbuf_pool:
	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;
 fail:
	return -ENOMEM;
}
| 190 | |
| 191 | int |
| 192 | lpfc_nvmet_mem_alloc(struct lpfc_hba *phba) |
| 193 | { |
| 194 | phba->lpfc_nvmet_drb_pool = |
| 195 | dma_pool_create("lpfc_nvmet_drb_pool", |
| 196 | &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE, |
| 197 | SGL_ALIGN_SZ, 0); |
| 198 | if (!phba->lpfc_nvmet_drb_pool) { |
| 199 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 200 | "6024 Can't enable NVME Target - no memory\n"); |
| 201 | return -ENOMEM; |
| 202 | } |
| 203 | return 0; |
| 204 | } |
| 205 | |
/**
 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
 * @phba: HBA to free memory for
 *
 * Description: Frees the DMA pools, mempools and safety-pool reserve
 * created by lpfc_mem_alloc (and the NVMET/txrdy pools created elsewhere
 * in the driver).  This routine is the counterpart of lpfc_mem_alloc.
 * dma_pool_destroy()/mempool_destroy() accept NULL, so pools that were
 * never created on this SLI revision are destroyed unconditionally.
 *
 * Returns: None
 **/
void
lpfc_mem_free(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
	struct lpfc_device_data *device_data;

	/* Free HBQ pools */
	lpfc_sli_hbqbuf_free_all(phba);
	dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
	phba->lpfc_nvmet_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_drb_pool);
	phba->lpfc_drb_pool = NULL;

	dma_pool_destroy(phba->lpfc_hrb_pool);
	phba->lpfc_hrb_pool = NULL;

	dma_pool_destroy(phba->txrdy_payload_pool);
	phba->txrdy_payload_pool = NULL;

	dma_pool_destroy(phba->lpfc_hbq_pool);
	phba->lpfc_hbq_pool = NULL;

	mempool_destroy(phba->rrq_pool);
	phba->rrq_pool = NULL;

	/* Free NLP memory pool */
	mempool_destroy(phba->nlp_mem_pool);
	phba->nlp_mem_pool = NULL;
	/* active_rrq_pool only exists on SLI4 HBAs */
	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
		mempool_destroy(phba->active_rrq_pool);
		phba->active_rrq_pool = NULL;
	}

	/* Free mbox memory pool */
	mempool_destroy(phba->mbox_mem_pool);
	phba->mbox_mem_pool = NULL;

	/* Free MBUF memory pool: return the safety-reserve mbufs to the DMA
	 * pool before destroying it, then free the reserve's backing array.
	 */
	for (i = 0; i < pool->current_count; i++)
		dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
			      pool->elements[i].phys);
	kfree(pool->elements);

	dma_pool_destroy(phba->lpfc_mbuf_pool);
	phba->lpfc_mbuf_pool = NULL;

	/* Free Device Data memory pool */
	if (phba->device_data_mem_pool) {
		/* Ensure all objects have been returned to the pool */
		while (!list_empty(&phba->luns)) {
			device_data = list_first_entry(&phba->luns,
						       struct lpfc_device_data,
						       listentry);
			list_del(&device_data->listentry);
			mempool_free(device_data, phba->device_data_mem_pool);
		}
		mempool_destroy(phba->device_data_mem_pool);
	}
	phba->device_data_mem_pool = NULL;
	return;
}
| 278 | |
/**
 * lpfc_mem_free_all - Frees all PCI and driver memory
 * @phba: HBA to free memory for
 *
 * Description: Drains the pending and completed mailbox queues (freeing any
 * DMA buffer attached via ctx_buf), releases the active mailbox command,
 * then calls lpfc_mem_free() to destroy all pools.  Finally destroys the
 * SG DMA buffer pools and the iocb lookup array.
 *
 * Notes: Takes phba->hbalock while clearing the active-mailbox flag.
 *
 * Returns: None
 **/
void
lpfc_mem_free_all(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mbox, *next_mbox;
	struct lpfc_dmabuf   *mp;

	/* Free memory used in mailbox queue back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free memory used in mailbox cmpl list back to mailbox memory pool */
	list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		list_del(&mbox->list);
		mempool_free(mbox, phba->mbox_mem_pool);
	}
	/* Free the active mailbox command back to the mailbox memory pool */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	if (psli->mbox_active) {
		mbox = psli->mbox_active;
		mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
		if (mp) {
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(mbox, phba->mbox_mem_pool);
		psli->mbox_active = NULL;
	}

	/* Free and destroy all the allocated memory pools */
	lpfc_mem_free(phba);

	/* Free DMA buffer memory pool */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
	phba->lpfc_sg_dma_buf_pool = NULL;

	dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
	phba->lpfc_cmd_rsp_buf_pool = NULL;

	/* Free the iocb lookup array */
	kfree(psli->iocbq_lookup);
	psli->iocbq_lookup = NULL;

	return;
}
| 348 | |
| 349 | /** |
| 350 | * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool |
| 351 | * @phba: HBA which owns the pool to allocate from |
| 352 | * @mem_flags: indicates if this is a priority (MEM_PRI) allocation |
| 353 | * @handle: used to return the DMA-mapped address of the mbuf |
| 354 | * |
| 355 | * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool. |
| 356 | * Allocates from generic dma_pool_alloc function first and if that fails and |
| 357 | * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the |
| 358 | * HBA's pool. |
| 359 | * |
| 360 | * Notes: Not interrupt-safe. Must be called with no locks held. Takes |
| 361 | * phba->hbalock. |
| 362 | * |
| 363 | * Returns: |
| 364 | * pointer to the allocated mbuf on success |
| 365 | * NULL on failure |
| 366 | **/ |
| 367 | void * |
| 368 | lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) |
| 369 | { |
| 370 | struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; |
| 371 | unsigned long iflags; |
| 372 | void *ret; |
| 373 | |
| 374 | ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); |
| 375 | |
| 376 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 377 | if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { |
| 378 | pool->current_count--; |
| 379 | ret = pool->elements[pool->current_count].virt; |
| 380 | *handle = pool->elements[pool->current_count].phys; |
| 381 | } |
| 382 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
| 383 | return ret; |
| 384 | } |
| 385 | |
| 386 | /** |
| 387 | * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked) |
| 388 | * @phba: HBA which owns the pool to return to |
| 389 | * @virt: mbuf to free |
| 390 | * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed |
| 391 | * |
| 392 | * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if |
| 393 | * it is below its max_count, frees the mbuf otherwise. |
| 394 | * |
| 395 | * Notes: Must be called with phba->hbalock held to synchronize access to |
| 396 | * lpfc_mbuf_safety_pool. |
| 397 | * |
| 398 | * Returns: None |
| 399 | **/ |
| 400 | void |
| 401 | __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) |
| 402 | { |
| 403 | struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; |
| 404 | |
| 405 | if (pool->current_count < pool->max_count) { |
| 406 | pool->elements[pool->current_count].virt = virt; |
| 407 | pool->elements[pool->current_count].phys = dma; |
| 408 | pool->current_count++; |
| 409 | } else { |
| 410 | dma_pool_free(phba->lpfc_mbuf_pool, virt, dma); |
| 411 | } |
| 412 | return; |
| 413 | } |
| 414 | |
| 415 | /** |
| 416 | * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked) |
| 417 | * @phba: HBA which owns the pool to return to |
| 418 | * @virt: mbuf to free |
| 419 | * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed |
| 420 | * |
| 421 | * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if |
| 422 | * it is below its max_count, frees the mbuf otherwise. |
| 423 | * |
| 424 | * Notes: Takes phba->hbalock. Can be called with or without other locks held. |
| 425 | * |
| 426 | * Returns: None |
| 427 | **/ |
| 428 | void |
| 429 | lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) |
| 430 | { |
| 431 | unsigned long iflags; |
| 432 | |
| 433 | spin_lock_irqsave(&phba->hbalock, iflags); |
| 434 | __lpfc_mbuf_free(phba, virt, dma); |
| 435 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
| 436 | return; |
| 437 | } |
| 438 | |
| 439 | /** |
| 440 | * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the |
| 441 | * lpfc_sg_dma_buf_pool PCI pool |
| 442 | * @phba: HBA which owns the pool to allocate from |
| 443 | * @mem_flags: indicates if this is a priority (MEM_PRI) allocation |
| 444 | * @handle: used to return the DMA-mapped address of the nvmet_buf |
| 445 | * |
| 446 | * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool |
| 447 | * PCI pool. Allocates from generic dma_pool_alloc function. |
| 448 | * |
| 449 | * Returns: |
| 450 | * pointer to the allocated nvmet_buf on success |
| 451 | * NULL on failure |
| 452 | **/ |
| 453 | void * |
| 454 | lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) |
| 455 | { |
| 456 | void *ret; |
| 457 | |
| 458 | ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); |
| 459 | return ret; |
| 460 | } |
| 461 | |
| 462 | /** |
| 463 | * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool |
| 464 | * PCI pool |
| 465 | * @phba: HBA which owns the pool to return to |
| 466 | * @virt: nvmet_buf to free |
| 467 | * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed |
| 468 | * |
| 469 | * Returns: None |
| 470 | **/ |
| 471 | void |
| 472 | lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma) |
| 473 | { |
| 474 | dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); |
| 475 | } |
| 476 | |
| 477 | /** |
| 478 | * lpfc_els_hbq_alloc - Allocate an HBQ buffer |
| 479 | * @phba: HBA to allocate HBQ buffer for |
| 480 | * |
| 481 | * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI |
| 482 | * pool along a non-DMA-mapped container for it. |
| 483 | * |
| 484 | * Notes: Not interrupt-safe. Must be called with no locks held. |
| 485 | * |
| 486 | * Returns: |
| 487 | * pointer to HBQ on success |
| 488 | * NULL on failure |
| 489 | **/ |
| 490 | struct hbq_dmabuf * |
| 491 | lpfc_els_hbq_alloc(struct lpfc_hba *phba) |
| 492 | { |
| 493 | struct hbq_dmabuf *hbqbp; |
| 494 | |
| 495 | hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); |
| 496 | if (!hbqbp) |
| 497 | return NULL; |
| 498 | |
| 499 | hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, |
| 500 | &hbqbp->dbuf.phys); |
| 501 | if (!hbqbp->dbuf.virt) { |
| 502 | kfree(hbqbp); |
| 503 | return NULL; |
| 504 | } |
| 505 | hbqbp->total_size = LPFC_BPL_SIZE; |
| 506 | return hbqbp; |
| 507 | } |
| 508 | |
| 509 | /** |
| 510 | * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc |
| 511 | * @phba: HBA buffer was allocated for |
| 512 | * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc |
| 513 | * |
| 514 | * Description: Frees both the container and the DMA-mapped buffer returned by |
| 515 | * lpfc_els_hbq_alloc. |
| 516 | * |
| 517 | * Notes: Can be called with or without locks held. |
| 518 | * |
| 519 | * Returns: None |
| 520 | **/ |
| 521 | void |
| 522 | lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) |
| 523 | { |
| 524 | dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); |
| 525 | kfree(hbqbp); |
| 526 | return; |
| 527 | } |
| 528 | |
| 529 | /** |
| 530 | * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer |
| 531 | * @phba: HBA to allocate a receive buffer for |
| 532 | * |
| 533 | * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI |
| 534 | * pool along a non-DMA-mapped container for it. |
| 535 | * |
| 536 | * Notes: Not interrupt-safe. Must be called with no locks held. |
| 537 | * |
| 538 | * Returns: |
| 539 | * pointer to HBQ on success |
| 540 | * NULL on failure |
| 541 | **/ |
| 542 | struct hbq_dmabuf * |
| 543 | lpfc_sli4_rb_alloc(struct lpfc_hba *phba) |
| 544 | { |
| 545 | struct hbq_dmabuf *dma_buf; |
| 546 | |
| 547 | dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); |
| 548 | if (!dma_buf) |
| 549 | return NULL; |
| 550 | |
| 551 | dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, |
| 552 | &dma_buf->hbuf.phys); |
| 553 | if (!dma_buf->hbuf.virt) { |
| 554 | kfree(dma_buf); |
| 555 | return NULL; |
| 556 | } |
| 557 | dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, |
| 558 | &dma_buf->dbuf.phys); |
| 559 | if (!dma_buf->dbuf.virt) { |
| 560 | dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, |
| 561 | dma_buf->hbuf.phys); |
| 562 | kfree(dma_buf); |
| 563 | return NULL; |
| 564 | } |
| 565 | dma_buf->total_size = LPFC_DATA_BUF_SIZE; |
| 566 | return dma_buf; |
| 567 | } |
| 568 | |
| 569 | /** |
| 570 | * lpfc_sli4_rb_free - Frees a receive buffer |
| 571 | * @phba: HBA buffer was allocated for |
| 572 | * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc |
| 573 | * |
| 574 | * Description: Frees both the container and the DMA-mapped buffers returned by |
| 575 | * lpfc_sli4_rb_alloc. |
| 576 | * |
| 577 | * Notes: Can be called with or without locks held. |
| 578 | * |
| 579 | * Returns: None |
| 580 | **/ |
| 581 | void |
| 582 | lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) |
| 583 | { |
| 584 | dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); |
| 585 | dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); |
| 586 | kfree(dmab); |
| 587 | } |
| 588 | |
| 589 | /** |
| 590 | * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer |
| 591 | * @phba: HBA to allocate a receive buffer for |
| 592 | * |
| 593 | * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI |
| 594 | * pool along a non-DMA-mapped container for it. |
| 595 | * |
| 596 | * Returns: |
| 597 | * pointer to HBQ on success |
| 598 | * NULL on failure |
| 599 | **/ |
| 600 | struct rqb_dmabuf * |
| 601 | lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba) |
| 602 | { |
| 603 | struct rqb_dmabuf *dma_buf; |
| 604 | |
| 605 | dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL); |
| 606 | if (!dma_buf) |
| 607 | return NULL; |
| 608 | |
| 609 | dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, |
| 610 | &dma_buf->hbuf.phys); |
| 611 | if (!dma_buf->hbuf.virt) { |
| 612 | kfree(dma_buf); |
| 613 | return NULL; |
| 614 | } |
| 615 | dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool, |
| 616 | GFP_KERNEL, &dma_buf->dbuf.phys); |
| 617 | if (!dma_buf->dbuf.virt) { |
| 618 | dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, |
| 619 | dma_buf->hbuf.phys); |
| 620 | kfree(dma_buf); |
| 621 | return NULL; |
| 622 | } |
| 623 | dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE; |
| 624 | return dma_buf; |
| 625 | } |
| 626 | |
| 627 | /** |
| 628 | * lpfc_sli4_nvmet_free - Frees a receive buffer |
| 629 | * @phba: HBA buffer was allocated for |
| 630 | * @dmab: DMA Buffer container returned by lpfc_sli4_rbq_alloc |
| 631 | * |
| 632 | * Description: Frees both the container and the DMA-mapped buffers returned by |
| 633 | * lpfc_sli4_nvmet_alloc. |
| 634 | * |
| 635 | * Notes: Can be called with or without locks held. |
| 636 | * |
| 637 | * Returns: None |
| 638 | **/ |
| 639 | void |
| 640 | lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab) |
| 641 | { |
| 642 | dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); |
| 643 | dma_pool_free(phba->lpfc_nvmet_drb_pool, |
| 644 | dmab->dbuf.virt, dmab->dbuf.phys); |
| 645 | kfree(dmab); |
| 646 | } |
| 647 | |
/**
 * lpfc_in_buf_free - Free a DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way given if
 * the HBA is running in SLI3 mode with HBQs enabled.  In HBQ mode the
 * buffer is unlinked and released under phba->hbalock, and only if the
 * HBQs are still in use; otherwise the buffer is a plain mbuf plus
 * container.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct hbq_dmabuf *hbq_entry;
	unsigned long flags;

	if (!mp)
		return;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		/* In HBQ mode @mp is the dbuf embedded in an hbq_dmabuf. */
		hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
		/* Check whether HBQ is still in use */
		spin_lock_irqsave(&phba->hbalock, flags);
		if (!phba->hbq_in_use) {
			/* HBQs already torn down; nothing to release. */
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return;
		}
		list_del(&hbq_entry->dbuf.list);
		if (hbq_entry->tag == -1) {
			/* Untagged entry: release through the ELS HBQ's
			 * free_buffer handler instead of lpfc_sli_free_hbq().
			 */
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_entry);
		} else {
			lpfc_sli_free_hbq(phba, hbq_entry);
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
	} else {
		/* Non-HBQ path: a plain mbuf and its container. */
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return;
}
| 691 | |
/**
 * lpfc_rq_buf_free - Free a RQ DMA buffer
 * @phba: HBA buffer is associated with
 * @mp: Buffer to free (the hbuf member of an rqb_dmabuf)
 *
 * Description: Reposts the buffer pair (header and data) to its associated
 * receive queue so it can be reused.  If the repost fails, the buffer is
 * released through the RQ's rqb_free_buffer handler instead (logged as
 * message 6409).
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks
 * held.
 *
 * Returns: None
 **/
void
lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct rqb_dmabuf *rqb_entry;
	unsigned long flags;
	int rc;

	if (!mp)
		return;

	/* @mp is the header buffer embedded in the rqb_dmabuf container. */
	rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
	rqbp = rqb_entry->hrq->rqbp;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_del(&rqb_entry->hbuf.list);
	/* Build the header and data RQ entries from the buffers' DMA
	 * addresses and attempt to repost them to the HRQ/DRQ pair.
	 */
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0) {
		/* Repost failed; give the buffer back to its owner pool. */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6409 Cannot post to HRQ %d: %x %x %x "
				"DRQ %x %x\n",
				rqb_entry->hrq->queue_id,
				rqb_entry->hrq->host_index,
				rqb_entry->hrq->hba_index,
				rqb_entry->hrq->entry_count,
				rqb_entry->drq->host_index,
				rqb_entry->drq->hba_index);
		(rqbp->rqb_free_buffer)(phba, rqb_entry);
	} else {
		/* Reposted successfully; track it on the RQ's buffer list. */
		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
		rqbp->buffer_count++;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);
}