Lines matching "memory", "-", and "mapped" in drivers/scsi/lpfc/lpfc_mem.c (Linux lpfc SCSI driver)

4  * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
6 * Copyright (C) 2004-2014 Emulex. All rights reserved. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
46 #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
48 #define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
49 #define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */
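
These constants feed the min_nr argument of mempool_create_kmalloc_pool() later in the file, i.e. the number of elements each mempool keeps pre-allocated in reserve. A minimal illustrative sketch of that call (example_create_pool() and the element size are not lpfc symbols):

#include <linux/mempool.h>

/* Illustrative sketch: a kmalloc-backed mempool with a 64-element reserve,
 * the same shape of call the driver makes with LPFC_MEM_POOL_SIZE.
 */
static mempool_t *example_create_pool(size_t elem_size)
{
	/* min_nr = 64: mempool_alloc() can fall back to one of these
	 * pre-allocated elements when kmalloc() fails under pressure.
	 */
	return mempool_create_kmalloc_pool(64, elem_size);
}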
53 * @phba: HBA to free memory for
57 * free routine to fully release all associated memory.
66 if (phba->sli_rev == LPFC_SLI_REV4 && in lpfc_mem_free_sli_mbox()
67 bf_get(lpfc_mqe_command, &mbox->u.mqe) == MBX_SLI4_CONFIG) { in lpfc_mem_free_sli_mbox()
77 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; in lpfc_mem_alloc_active_rrq_pool_s4()
80 return -ENOMEM; in lpfc_mem_alloc_active_rrq_pool_s4()
81 bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * in lpfc_mem_alloc_active_rrq_pool_s4()
83 phba->cfg_rrq_xri_bitmap_sz = bytes; in lpfc_mem_alloc_active_rrq_pool_s4()
84 phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, in lpfc_mem_alloc_active_rrq_pool_s4()
86 if (!phba->active_rrq_pool) in lpfc_mem_alloc_active_rrq_pool_s4()
87 return -ENOMEM; in lpfc_mem_alloc_active_rrq_pool_s4()
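
The sizing arithmetic above rounds max_xri bits up to a whole number of unsigned longs. A minimal sketch of the equivalent computation with the stock bitmap helpers (example_alloc_xri_bitmap() is illustrative, not an lpfc function):

#include <linux/bitmap.h>	/* bitmap_zalloc() */
#include <linux/bitops.h>	/* BITS_PER_LONG, BITS_TO_LONGS() */
#include <linux/gfp.h>

/* Illustrative sketch: size and allocate a bitmap covering max_xri bits. */
static unsigned long *example_alloc_xri_bitmap(int max_xri, size_t *bytes)
{
	/* Same value as ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
	 * sizeof(unsigned long): round up to whole longs.
	 */
	*bytes = BITS_TO_LONGS(max_xri) * sizeof(unsigned long);

	/* bitmap_zalloc() performs the same rounding internally. */
	return bitmap_zalloc(max_xri, GFP_KERNEL);
}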
93 * lpfc_mem_alloc - create and allocate all PCI and memory pools
98 * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
101 * Notes: Not interrupt-safe. Must be called with no locks held. If any
102 * allocation fails, frees all successfully allocated memory before returning.
106 * -ENOMEM on failure (if any memory allocations fail)
111 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in lpfc_mem_alloc()
115 phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev, in lpfc_mem_alloc()
118 if (!phba->lpfc_mbuf_pool) in lpfc_mem_alloc()
121 pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE, in lpfc_mem_alloc()
124 if (!pool->elements) in lpfc_mem_alloc()
127 pool->max_count = 0; in lpfc_mem_alloc()
128 pool->current_count = 0; in lpfc_mem_alloc()
130 pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool, in lpfc_mem_alloc()
131 GFP_KERNEL, &pool->elements[i].phys); in lpfc_mem_alloc()
132 if (!pool->elements[i].virt) in lpfc_mem_alloc()
134 pool->max_count++; in lpfc_mem_alloc()
135 pool->current_count++; in lpfc_mem_alloc()
138 phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MBX_POOL_SIZE, in lpfc_mem_alloc()
140 if (!phba->mbox_mem_pool) in lpfc_mem_alloc()
143 phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, in lpfc_mem_alloc()
145 if (!phba->nlp_mem_pool) in lpfc_mem_alloc()
148 if (phba->sli_rev == LPFC_SLI_REV4) { in lpfc_mem_alloc()
149 phba->rrq_pool = in lpfc_mem_alloc()
152 if (!phba->rrq_pool) in lpfc_mem_alloc()
154 phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool", in lpfc_mem_alloc()
155 &phba->pcidev->dev, in lpfc_mem_alloc()
157 if (!phba->lpfc_hrb_pool) in lpfc_mem_alloc()
160 phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool", in lpfc_mem_alloc()
161 &phba->pcidev->dev, in lpfc_mem_alloc()
163 if (!phba->lpfc_drb_pool) in lpfc_mem_alloc()
165 phba->lpfc_hbq_pool = NULL; in lpfc_mem_alloc()
167 phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool", in lpfc_mem_alloc()
168 &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); in lpfc_mem_alloc()
169 if (!phba->lpfc_hbq_pool) in lpfc_mem_alloc()
171 phba->lpfc_hrb_pool = NULL; in lpfc_mem_alloc()
172 phba->lpfc_drb_pool = NULL; in lpfc_mem_alloc()
175 if (phba->cfg_EnableXLane) { in lpfc_mem_alloc()
176 phba->device_data_mem_pool = mempool_create_kmalloc_pool( in lpfc_mem_alloc()
179 if (!phba->device_data_mem_pool) in lpfc_mem_alloc()
182 phba->device_data_mem_pool = NULL; in lpfc_mem_alloc()
187 dma_pool_destroy(phba->lpfc_drb_pool); in lpfc_mem_alloc()
188 phba->lpfc_drb_pool = NULL; in lpfc_mem_alloc()
190 dma_pool_destroy(phba->lpfc_hrb_pool); in lpfc_mem_alloc()
191 phba->lpfc_hrb_pool = NULL; in lpfc_mem_alloc()
193 mempool_destroy(phba->rrq_pool); in lpfc_mem_alloc()
194 phba->rrq_pool = NULL; in lpfc_mem_alloc()
196 mempool_destroy(phba->nlp_mem_pool); in lpfc_mem_alloc()
197 phba->nlp_mem_pool = NULL; in lpfc_mem_alloc()
199 mempool_destroy(phba->mbox_mem_pool); in lpfc_mem_alloc()
200 phba->mbox_mem_pool = NULL; in lpfc_mem_alloc()
202 while (i--) in lpfc_mem_alloc()
203 dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, in lpfc_mem_alloc()
204 pool->elements[i].phys); in lpfc_mem_alloc()
205 kfree(pool->elements); in lpfc_mem_alloc()
207 dma_pool_destroy(phba->lpfc_mbuf_pool); in lpfc_mem_alloc()
208 phba->lpfc_mbuf_pool = NULL; in lpfc_mem_alloc()
210 return -ENOMEM; in lpfc_mem_alloc()
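
The function above creates each pool in turn and, on any failure, unwinds the pools already created before returning -ENOMEM. A condensed, generic sketch of that create-then-unwind pattern (the example_* names and element counts are illustrative, not lpfc symbols):

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/pci.h>

struct example_pools {
	struct dma_pool *buf_pool;	/* DMA-mapped buffers */
	mempool_t *cmd_pool;		/* non-DMA command containers */
};

/* Illustrative sketch of the create-pools / unwind-on-failure pattern. */
static int example_pools_create(struct example_pools *p, struct pci_dev *pdev,
				size_t buf_size, size_t align, size_t cmd_size)
{
	p->buf_pool = dma_pool_create("example_buf_pool", &pdev->dev,
				      buf_size, align, 0);
	if (!p->buf_pool)
		return -ENOMEM;

	p->cmd_pool = mempool_create_kmalloc_pool(256, cmd_size);
	if (!p->cmd_pool)
		goto fail_free_buf_pool;

	return 0;

fail_free_buf_pool:
	dma_pool_destroy(p->buf_pool);
	p->buf_pool = NULL;
	return -ENOMEM;
}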
216 phba->lpfc_nvmet_drb_pool = in lpfc_nvmet_mem_alloc()
218 &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE, in lpfc_nvmet_mem_alloc()
220 if (!phba->lpfc_nvmet_drb_pool) { in lpfc_nvmet_mem_alloc()
222 "6024 Can't enable NVME Target - no memory\n"); in lpfc_nvmet_mem_alloc()
223 return -ENOMEM; in lpfc_nvmet_mem_alloc()
229 * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
230 * @phba: HBA to free memory for
232 * Description: Free the memory allocated by the lpfc_mem_alloc routine. This
241 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in lpfc_mem_free()
246 dma_pool_destroy(phba->lpfc_nvmet_drb_pool); in lpfc_mem_free()
247 phba->lpfc_nvmet_drb_pool = NULL; in lpfc_mem_free()
249 dma_pool_destroy(phba->lpfc_drb_pool); in lpfc_mem_free()
250 phba->lpfc_drb_pool = NULL; in lpfc_mem_free()
252 dma_pool_destroy(phba->lpfc_hrb_pool); in lpfc_mem_free()
253 phba->lpfc_hrb_pool = NULL; in lpfc_mem_free()
255 dma_pool_destroy(phba->lpfc_hbq_pool); in lpfc_mem_free()
256 phba->lpfc_hbq_pool = NULL; in lpfc_mem_free()
258 mempool_destroy(phba->rrq_pool); in lpfc_mem_free()
259 phba->rrq_pool = NULL; in lpfc_mem_free()
261 /* Free NLP memory pool */ in lpfc_mem_free()
262 mempool_destroy(phba->nlp_mem_pool); in lpfc_mem_free()
263 phba->nlp_mem_pool = NULL; in lpfc_mem_free()
264 if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { in lpfc_mem_free()
265 mempool_destroy(phba->active_rrq_pool); in lpfc_mem_free()
266 phba->active_rrq_pool = NULL; in lpfc_mem_free()
269 /* Free mbox memory pool */ in lpfc_mem_free()
270 mempool_destroy(phba->mbox_mem_pool); in lpfc_mem_free()
271 phba->mbox_mem_pool = NULL; in lpfc_mem_free()
273 /* Free MBUF memory pool */ in lpfc_mem_free()
274 for (i = 0; i < pool->current_count; i++) in lpfc_mem_free()
275 dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, in lpfc_mem_free()
276 pool->elements[i].phys); in lpfc_mem_free()
277 kfree(pool->elements); in lpfc_mem_free()
279 dma_pool_destroy(phba->lpfc_mbuf_pool); in lpfc_mem_free()
280 phba->lpfc_mbuf_pool = NULL; in lpfc_mem_free()
282 /* Free Device Data memory pool */ in lpfc_mem_free()
283 if (phba->device_data_mem_pool) { in lpfc_mem_free()
285 while (!list_empty(&phba->luns)) { in lpfc_mem_free()
286 device_data = list_first_entry(&phba->luns, in lpfc_mem_free()
289 list_del(&device_data->listentry); in lpfc_mem_free()
290 mempool_free(device_data, phba->device_data_mem_pool); in lpfc_mem_free()
292 mempool_destroy(phba->device_data_mem_pool); in lpfc_mem_free()
294 phba->device_data_mem_pool = NULL; in lpfc_mem_free()
299 * lpfc_mem_free_all - Frees all PCI and driver memory
300 * @phba: HBA to free memory for
302 * Description: Free memory from PCI and driver memory pools and also those
304 * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
312 struct lpfc_sli *psli = &phba->sli; in lpfc_mem_free_all()
315 /* Free memory used in mailbox queue back to mailbox memory pool */ in lpfc_mem_free_all()
316 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { in lpfc_mem_free_all()
317 list_del(&mbox->list); in lpfc_mem_free_all()
320 /* Free memory used in mailbox cmpl list back to mailbox memory pool */ in lpfc_mem_free_all()
321 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { in lpfc_mem_free_all()
322 list_del(&mbox->list); in lpfc_mem_free_all()
325 /* Free the active mailbox command back to the mailbox memory pool */ in lpfc_mem_free_all()
326 spin_lock_irq(&phba->hbalock); in lpfc_mem_free_all()
327 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; in lpfc_mem_free_all()
328 spin_unlock_irq(&phba->hbalock); in lpfc_mem_free_all()
329 if (psli->mbox_active) { in lpfc_mem_free_all()
330 mbox = psli->mbox_active; in lpfc_mem_free_all()
332 psli->mbox_active = NULL; in lpfc_mem_free_all()
335 /* Free and destroy all the allocated memory pools */ in lpfc_mem_free_all()
338 /* Free DMA buffer memory pool */ in lpfc_mem_free_all()
339 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); in lpfc_mem_free_all()
340 phba->lpfc_sg_dma_buf_pool = NULL; in lpfc_mem_free_all()
342 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); in lpfc_mem_free_all()
343 phba->lpfc_cmd_rsp_buf_pool = NULL; in lpfc_mem_free_all()
346 if (phba->cgn_i) { in lpfc_mem_free_all()
347 dma_free_coherent(&phba->pcidev->dev, in lpfc_mem_free_all()
349 phba->cgn_i->virt, phba->cgn_i->phys); in lpfc_mem_free_all()
350 kfree(phba->cgn_i); in lpfc_mem_free_all()
351 phba->cgn_i = NULL; in lpfc_mem_free_all()
355 if (phba->rx_monitor) { in lpfc_mem_free_all()
356 lpfc_rx_monitor_destroy_ring(phba->rx_monitor); in lpfc_mem_free_all()
357 kfree(phba->rx_monitor); in lpfc_mem_free_all()
358 phba->rx_monitor = NULL; in lpfc_mem_free_all()
362 kfree(psli->iocbq_lookup); in lpfc_mem_free_all()
363 psli->iocbq_lookup = NULL; in lpfc_mem_free_all()
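
The mailbox queues above are walked with list_for_each_entry_safe() so that each entry can be unlinked and handed back to its memory pool while iterating. A generic sketch of that drain pattern (struct example_cmd and example_drain_queue() are illustrative names):

#include <linux/list.h>
#include <linux/mempool.h>

struct example_cmd {
	struct list_head list;
	/* command payload ... */
};

/* Illustrative sketch: return every queued element to its mempool. */
static void example_drain_queue(struct list_head *queue, mempool_t *pool)
{
	struct example_cmd *cmd, *next;

	/* The _safe variant is required because the current entry is
	 * deleted inside the loop body.
	 */
	list_for_each_entry_safe(cmd, next, queue, list) {
		list_del(&cmd->list);
		mempool_free(cmd, pool);
	}
}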
369 * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
372 * @handle: used to return the DMA-mapped address of the mbuf
374 * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
379 * Notes: Not interrupt-safe. Must be called with no locks held. Takes
380 * phba->hbalock.
389 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in lpfc_mbuf_alloc()
393 ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); in lpfc_mbuf_alloc()
395 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbuf_alloc()
396 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) { in lpfc_mbuf_alloc()
397 pool->current_count--; in lpfc_mbuf_alloc()
398 ret = pool->elements[pool->current_count].virt; in lpfc_mbuf_alloc()
399 *handle = pool->elements[pool->current_count].phys; in lpfc_mbuf_alloc()
401 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbuf_alloc()
406 * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
409 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
414 * Notes: Must be called with phba->hbalock held to synchronize access to
422 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in __lpfc_mbuf_free()
424 if (pool->current_count < pool->max_count) { in __lpfc_mbuf_free()
425 pool->elements[pool->current_count].virt = virt; in __lpfc_mbuf_free()
426 pool->elements[pool->current_count].phys = dma; in __lpfc_mbuf_free()
427 pool->current_count++; in __lpfc_mbuf_free()
429 dma_pool_free(phba->lpfc_mbuf_pool, virt, dma); in __lpfc_mbuf_free()
435 * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
438 * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
443 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
452 spin_lock_irqsave(&phba->hbalock, iflags); in lpfc_mbuf_free()
454 spin_unlock_irqrestore(&phba->hbalock, iflags); in lpfc_mbuf_free()
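
For illustration, a hypothetical caller pairing the two routines above. example_use_mbuf() is not an lpfc function, and the sketch assumes the driver's local headers for struct lpfc_hba, MEM_PRI, and the prototypes:

/* Hypothetical caller, not taken from the driver; assumes lpfc headers. */
static int example_use_mbuf(struct lpfc_hba *phba)
{
	dma_addr_t phys;
	void *virt;

	/* MEM_PRI lets the allocation fall back to the pre-allocated
	 * safety pool when dma_pool_alloc() cannot satisfy it.
	 */
	virt = lpfc_mbuf_alloc(phba, MEM_PRI, &phys);
	if (!virt)
		return -ENOMEM;

	/* ... build the payload at virt and hand phys to the HBA ... */

	lpfc_mbuf_free(phba, virt, phys);
	return 0;
}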
459 * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
463 * @handle: used to return the DMA-mapped address of the nvmet_buf
465 * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
477 ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle); in lpfc_nvmet_buf_alloc()
482 * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
486 * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
493 dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma); in lpfc_nvmet_buf_free()
497 * lpfc_els_hbq_alloc - Allocate an HBQ buffer
500 * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
501 * pool along with a non-DMA-mapped container for it.
503 * Notes: Not interrupt-safe. Must be called with no locks held.
518 hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, in lpfc_els_hbq_alloc()
519 &hbqbp->dbuf.phys); in lpfc_els_hbq_alloc()
520 if (!hbqbp->dbuf.virt) { in lpfc_els_hbq_alloc()
524 hbqbp->total_size = LPFC_BPL_SIZE; in lpfc_els_hbq_alloc()
529 * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
533 * Description: Frees both the container and the DMA-mapped buffer returned by
543 dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); in lpfc_els_hbq_free()
549 * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
552 * Description: Allocates DMA-mapped header and data receive buffers from the
553 * lpfc_hrb_pool and lpfc_drb_pool PCI pools, along with a non-DMA-mapped container.
555 * Notes: Not interrupt-safe. Must be called with no locks held.
570 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, in lpfc_sli4_rb_alloc()
571 &dma_buf->hbuf.phys); in lpfc_sli4_rb_alloc()
572 if (!dma_buf->hbuf.virt) { in lpfc_sli4_rb_alloc()
576 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, in lpfc_sli4_rb_alloc()
577 &dma_buf->dbuf.phys); in lpfc_sli4_rb_alloc()
578 if (!dma_buf->dbuf.virt) { in lpfc_sli4_rb_alloc()
579 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, in lpfc_sli4_rb_alloc()
580 dma_buf->hbuf.phys); in lpfc_sli4_rb_alloc()
584 dma_buf->total_size = LPFC_DATA_BUF_SIZE; in lpfc_sli4_rb_alloc()
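
The allocation above is a two-stage pattern: the header buffer is allocated first, the data buffer second, and the header buffer is rolled back if the second allocation fails. A self-contained sketch of the same pattern with generic names (struct example_rqb and example_rqb_alloc() are illustrative):

#include <linux/dmapool.h>
#include <linux/slab.h>

struct example_rqb {
	void *hvirt, *dvirt;
	dma_addr_t hphys, dphys;
};

/* Illustrative sketch: paired header/data DMA allocation with rollback. */
static struct example_rqb *example_rqb_alloc(struct dma_pool *hdr_pool,
					     struct dma_pool *data_pool)
{
	struct example_rqb *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->hvirt = dma_pool_alloc(hdr_pool, GFP_KERNEL, &buf->hphys);
	if (!buf->hvirt)
		goto free_buf;

	buf->dvirt = dma_pool_alloc(data_pool, GFP_KERNEL, &buf->dphys);
	if (!buf->dvirt)
		goto free_hdr;		/* roll back the header buffer */

	return buf;

free_hdr:
	dma_pool_free(hdr_pool, buf->hvirt, buf->hphys);
free_buf:
	kfree(buf);
	return NULL;
}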
589 * lpfc_sli4_rb_free - Frees a receive buffer
593 * Description: Frees both the container and the DMA-mapped buffers returned by
603 dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); in lpfc_sli4_rb_free()
604 dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); in lpfc_sli4_rb_free()
609 * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
612 * Description: Allocates DMA-mapped header and data receive buffers from the
613 * lpfc_hrb_pool and lpfc_nvmet_drb_pool PCI pools, along with a non-DMA-mapped container.
628 dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, in lpfc_sli4_nvmet_alloc()
629 &dma_buf->hbuf.phys); in lpfc_sli4_nvmet_alloc()
630 if (!dma_buf->hbuf.virt) { in lpfc_sli4_nvmet_alloc()
634 dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool, in lpfc_sli4_nvmet_alloc()
635 GFP_KERNEL, &dma_buf->dbuf.phys); in lpfc_sli4_nvmet_alloc()
636 if (!dma_buf->dbuf.virt) { in lpfc_sli4_nvmet_alloc()
637 dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, in lpfc_sli4_nvmet_alloc()
638 dma_buf->hbuf.phys); in lpfc_sli4_nvmet_alloc()
642 dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE; in lpfc_sli4_nvmet_alloc()
647 * lpfc_sli4_nvmet_free - Frees a receive buffer
651 * Description: Frees both the container and the DMA-mapped buffers returned by
661 dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); in lpfc_sli4_nvmet_free()
662 dma_pool_free(phba->lpfc_nvmet_drb_pool, in lpfc_sli4_nvmet_free()
663 dmab->dbuf.virt, dmab->dbuf.phys); in lpfc_sli4_nvmet_free()
668 * lpfc_in_buf_free - Free a DMA buffer
675 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
688 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { in lpfc_in_buf_free()
691 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_in_buf_free()
692 if (!phba->hbq_in_use) { in lpfc_in_buf_free()
693 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_in_buf_free()
696 list_del(&hbq_entry->dbuf.list); in lpfc_in_buf_free()
697 if (hbq_entry->tag == -1) { in lpfc_in_buf_free()
698 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) in lpfc_in_buf_free()
703 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_in_buf_free()
705 lpfc_mbuf_free(phba, mp->virt, mp->phys); in lpfc_in_buf_free()
712 * lpfc_rq_buf_free - Free a RQ DMA buffer
719 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
737 rqbp = rqb_entry->hrq->rqbp; in lpfc_rq_buf_free()
739 spin_lock_irqsave(&phba->hbalock, flags); in lpfc_rq_buf_free()
740 list_del(&rqb_entry->hbuf.list); in lpfc_rq_buf_free()
741 hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys); in lpfc_rq_buf_free()
742 hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys); in lpfc_rq_buf_free()
743 drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys); in lpfc_rq_buf_free()
744 drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); in lpfc_rq_buf_free()
745 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); in lpfc_rq_buf_free()
750 rqb_entry->hrq->queue_id, in lpfc_rq_buf_free()
751 rqb_entry->hrq->host_index, in lpfc_rq_buf_free()
752 rqb_entry->hrq->hba_index, in lpfc_rq_buf_free()
753 rqb_entry->hrq->entry_count, in lpfc_rq_buf_free()
754 rqb_entry->drq->host_index, in lpfc_rq_buf_free()
755 rqb_entry->drq->hba_index); in lpfc_rq_buf_free()
756 (rqbp->rqb_free_buffer)(phba, rqb_entry); in lpfc_rq_buf_free()
758 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); in lpfc_rq_buf_free()
759 rqbp->buffer_count++; in lpfc_rq_buf_free()
762 spin_unlock_irqrestore(&phba->hbalock, flags); in lpfc_rq_buf_free()
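
putPaddrLow()/putPaddrHigh() above split each 64-bit DMA address into the two 32-bit words a receive-queue entry carries. A minimal sketch of the same split using the stock helpers (struct example_rqe is illustrative, not the lpfc_rqe layout):

#include <linux/kernel.h>	/* lower_32_bits(), upper_32_bits() */
#include <linux/types.h>

struct example_rqe {
	u32 address_lo;
	u32 address_hi;
};

/* Illustrative sketch: fill a receive-queue entry from a DMA address. */
static void example_fill_rqe(struct example_rqe *rqe, dma_addr_t dma)
{
	rqe->address_lo = lower_32_bits(dma);	/* cf. putPaddrLow()  */
	rqe->address_hi = upper_32_bits(dma);	/* cf. putPaddrHigh() */
}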