Lines matching "multi", "-", "line" in the Linux SCSI snic driver,
drivers/scsi/snic/snic_io.c. Only the matched lines are shown below,
grouped by function; elided context is marked with "...".

// SPDX-License-Identifier: GPL-2.0-only
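/*
 * snic_wq_cmpl_frame_send() appears to be the per-buffer completion
 * callback for the send work queue: it traces the completed host
 * request (the rqi sits sizeof(struct snic_req_info) bytes before
 * os_buf) and clears buf->os_buf so the buffer is not freed twice.
 */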
/* in snic_wq_cmpl_frame_send() */
        struct snic *snic = svnic_dev_priv(wq->vdev);

        SNIC_BUG_ON(buf->os_buf == NULL);
        ...
                SNIC_HOST_INFO(snic->shost,
                               ...
                               buf->os_buf);
        SNIC_TRC(snic->shost->host_no, 0, 0,
                 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
                 ...);
        buf->os_buf = NULL;
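/*
 * snic_wq_cmpl_handler_cont() services one work queue under its
 * wq_lock; svnic_wq_service() (remaining arguments elided in the
 * match) presumably invokes snic_wq_cmpl_frame_send() above for each
 * completed entry.
 */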
/* in snic_wq_cmpl_handler_cont() */
        spin_lock_irqsave(&snic->wq_lock[q_num], flags);
        svnic_wq_service(&snic->wq[q_num],
                         ...);
        spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
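/*
 * snic_wq_cmpl_handler() records the last ACK time and walks every
 * completion queue, accumulating the number of entries processed.
 */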
/* in snic_wq_cmpl_handler() */
        snic->s_stats.misc.last_ack_time = jiffies;
        for (i = 0; i < snic->wq_count; i++) {
                work_done += svnic_cq_service(&snic->cq[i],
                                              ...);
        }
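/*
 * snic_free_wq_buf() unmaps the buffer's DMA region, delists the
 * owning request from spl_cmd_list under spl_cmd_lock (bailing out if
 * it is no longer on the list), and frees the response SGL, if any,
 * before releasing the request.
 */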
/* in snic_free_wq_buf() */
        struct snic_host_req *req = buf->os_buf;
        struct snic *snic = svnic_dev_priv(wq->vdev);
        ...
        dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
                         ...);
        ...
        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        if (list_empty(&rqi->list)) {
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
                ...
        }

        SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
        list_del_init(&rqi->list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

        if (rqi->sge_va) {
                ...
                kfree((void *)rqi->sge_va);
                rqi->sge_va = 0;
        }
        ...
        SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
/* in snic_select_wq(): criteria to select work queue in multi queue mode */
        /* No multi queue support for now */
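/*
 * snic_wqdesc_avail() computes free descriptors as the configured
 * ring size minus the firmware's active requests.  Multi queue
 * (q_num > 0) is unsupported and yields -1; one descriptor is kept in
 * reserve so that only SNIC_REQ_HBA_RESET may consume the last slot.
 */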
/* in snic_wqdesc_avail() */
        int nr_wqdesc = snic->config.wq_enet_desc_count;
        ...
        /* Multi Queue case, additional care is required. ... */
                SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
                ...
                return -1;
        ...
        nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);
        return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
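/*
 * snic_queue_wq_desc() DMA-maps the request, then re-checks
 * descriptor availability under wq_lock before posting; on a full
 * ring it unmaps, counts a wq_alloc_fail and returns -ENOMEM.  The
 * max_actv_reqs high-water mark is updated outside the lock.
 */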
/* in snic_queue_wq_desc() */
        struct snic_fw_stats *fwstats = &snic->s_stats.fw;
        ...
        pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&snic->pdev->dev, pa)) {
                SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

                return -ENOMEM;
        }

        req->req_pa = (ulong)pa;
        ...
        spin_lock_irqsave(&snic->wq_lock[q_num], flags);
        desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
        if (desc_avail <= 0) {
                dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
                req->req_pa = 0;
                spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
                atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
                SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

                return -ENOMEM;
        }

        snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
        /* note: when multi queue enabled, fw actv_reqs should be per queue. */
        act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
        spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

        if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
                atomic64_set(&fwstats->max_actv_reqs, act_reqs);
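/*
 * Hedged sketch, not driver code: the shape of the map/check/post
 * path above.  my_wq_lock, my_descs_avail() and my_post_desc() are
 * hypothetical stand-ins; the DMA and spinlock calls are the standard
 * kernel APIs.
 */
static DEFINE_SPINLOCK(my_wq_lock);

static int my_queue_desc(struct pci_dev *pdev, void *buf, size_t len)
{
        unsigned long flags;
        dma_addr_t pa;

        pa = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, pa))
                return -ENOMEM; /* nothing posted; caller keeps buf */

        spin_lock_irqsave(&my_wq_lock, flags);
        if (my_descs_avail() <= 0) {
                /* undo the mapping before reporting a full ring */
                dma_unmap_single(&pdev->dev, pa, len, DMA_TO_DEVICE);
                spin_unlock_irqrestore(&my_wq_lock, flags);
                return -ENOMEM;
        }
        my_post_desc(pa, len);  /* hand the buffer to the device */
        spin_unlock_irqrestore(&my_wq_lock, flags);

        return 0;
}

/*
 * snic_handle_untagged_req() parks an untagged request on spl_cmd_list
 * under spl_cmd_lock so it can be reclaimed later (see
 * snic_free_all_untagged_reqs() further down).
 */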
/* in snic_handle_untagged_req() */
        INIT_LIST_HEAD(&rqi->list);

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        list_add_tail(&rqi->list, &snic->spl_cmd_list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
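/*
 * snic_req_init() allocates the request info and the host request as
 * one mempool object: rqi->req points just past the rqi (rqi + 1),
 * and req_len grows by one snic_sg_desc per scatter-gather entry.
 * init_ctx is seeded up front, presumably so the rqi can be recovered
 * from the request header later.
 */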
/* in snic_req_init() */
        rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
        if (!rqi) {
                atomic64_inc(&snic->s_stats.io.alloc_fail);
                SNIC_HOST_ERR(snic->shost,
                              ...);
                ...
        }
        ...
        rqi->rq_pool_type = typ;
        rqi->start_time = jiffies;
        rqi->req = (struct snic_host_req *) (rqi + 1);
        rqi->req_len = sizeof(struct snic_host_req);
        rqi->snic = snic;

        rqi->req = (struct snic_host_req *)(rqi + 1);
        ...
        rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

        if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
                atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);
        ...
        atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);
        ...
        memset(rqi->req, 0, rqi->req_len);
        ...
        rqi->req->hdr.init_ctx = (ulong) rqi;

        SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);
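/*
 * snic_abort_req_init() returns the cached abort request if one is
 * already attached; otherwise it allocates a task-management request
 * from the SNIC_REQ_TM_CACHE pool and caches it on the rqi.
 */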
/* in snic_abort_req_init() */
        if (rqi->abort_req)
                return rqi->abort_req;
        ...
        req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
        if (!req) {
                SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
                ...
        }
        rqi->abort_req = req;
        ...
        req->hdr.init_ctx = (ulong) rqi;
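/*
 * snic_dr_req_init() is the device-reset counterpart: same TM-cache
 * pool, but it asserts that no reset request is attached yet.
 */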
/* in snic_dr_req_init() */
        req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
        if (!req) {
                SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
                ...
        }
        SNIC_BUG_ON(rqi->dr_req != NULL);
        rqi->dr_req = req;
        ...
        req->hdr.init_ctx = (ulong) rqi;
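/*
 * snic_req_free() asserts that the abort/reset requests are distinct
 * from the main request and that the response SGL is already gone,
 * unmaps whatever is still DMA-mapped (req_pa != 0), and returns each
 * piece to its originating mempool.
 */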
/* in snic_req_free() */
        SNIC_BUG_ON(rqi->req == rqi->abort_req);
        SNIC_BUG_ON(rqi->req == rqi->dr_req);
        SNIC_BUG_ON(rqi->sge_va != 0);

        SNIC_SCSI_DBG(snic->shost,
                      ...
                      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

        if (rqi->abort_req) {
                if (rqi->abort_req->req_pa)
                        dma_unmap_single(&snic->pdev->dev,
                                         rqi->abort_req->req_pa,
                                         ...);

                mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
        }

        if (rqi->dr_req) {
                if (rqi->dr_req->req_pa)
                        dma_unmap_single(&snic->pdev->dev,
                                         rqi->dr_req->req_pa,
                                         ...);

                mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
        }

        if (rqi->req->req_pa)
                dma_unmap_single(&snic->pdev->dev,
                                 rqi->req->req_pa,
                                 rqi->req_len,
                                 ...);

        mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
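/*
 * Only the dma_unmap_single() call in snic_pci_unmap_rsp_buf()
 * matched; it presumably unmaps the response buffer described by the
 * request's SG list.
 */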
/* in snic_pci_unmap_rsp_buf() */
        dma_unmap_single(&snic->pdev->dev, ...);
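/*
 * snic_free_all_untagged_reqs() drains spl_cmd_list with the
 * deletion-safe list_for_each_safe(), releasing each request's
 * response SGL and then, presumably, the request itself.
 */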
/* in snic_free_all_untagged_reqs() */
        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
                ...
                list_del_init(&rqi->list);
                if (rqi->sge_va) {
                        ...
                        kfree((void *)rqi->sge_va);
                        rqi->sge_va = 0;
                }
                ...
        }
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
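/*
 * snic_release_untagged_req() does nothing while snic->in_remove is
 * set (teardown presumably owns cleanup then), and otherwise delists
 * the request under spl_cmd_lock, again tolerating one that was
 * already removed.
 */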
/* in snic_release_untagged_req() */
        spin_lock_irqsave(&snic->snic_lock, flags);
        if (snic->in_remove) {
                spin_unlock_irqrestore(&snic->snic_lock, flags);
                ...
        }
        spin_unlock_irqrestore(&snic->snic_lock, flags);

        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
        if (list_empty(&rqi->list)) {
                spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
                ...
        }
        list_del_init(&rqi->list);
        spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
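/*
 * snic_dump_desc() builds a one-line description of a descriptor:
 * firmware responses (type >= SNIC_RSP_REPORT_TGTS_CMPL) recover the
 * rqi from fwreq->hdr.init_ctx, host requests from req->hdr.init_ctx,
 * and the switch stringifies each type, appending the first CDB byte
 * for I/O commands.
 */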
/* in snic_dump_desc() */
        char line[LINE_BUFSZ] = { '\0' };
        ...
        if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
                rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
        else
                rqi = (struct snic_req_info *) req->hdr.init_ctx;

        SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
        switch (req->hdr.type) {
        ...
                cmd_str = "report-tgt : ";
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
                         req->u.icmnd.cdb[0]);
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
                         rqi->req->u.icmnd.cdb[0]);
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
        ...
                snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
        ...
        /* trailing arguments of the final debug print: */
        fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
        req->hdr.init_ctx);
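/*
 * snic_calc_io_process_time() measures request latency in jiffies and
 * tracks the worst case in io.max_time.
 */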
/* in snic_calc_io_process_time() */
        duration = jiffies - rqi->start_time;

        if (duration > atomic64_read(&snic->s_stats.io.max_time))
                atomic64_set(&snic->s_stats.io.max_time, duration);
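/*
 * Hedged aside, not driver code: the read-then-set pair above can race
 * with itself on concurrent completions, which is normally acceptable
 * for a statistics high-water mark.  A strictly monotonic variant
 * could use a cmpxchg loop; stat_track_max() is a hypothetical helper
 * built on the standard atomic64 API.
 */
static inline void stat_track_max(atomic64_t *max, s64 val)
{
        s64 old = atomic64_read(max);

        while (val > old) {
                s64 prev = atomic64_cmpxchg(max, old, val);

                if (prev == old)
                        break;          /* installed the new maximum */
                old = prev;             /* raced; re-check against newer value */
        }
}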